def __WaitForInstance(instance, desired_state):
    """Blocks until instance is in desired_state."""
    print('Waiting for instance %s to change to %s' % (instance.id, desired_state))
    while True:
        try:
            instance.update()
            state = instance.state
            sys.stdout.write('.')
            sys.stdout.flush()
            if state == desired_state:
                break
        except boto_exception.EC2ResponseError as e:
            logging.info(e)
        # except boto_exception.ResponseError as e:  # This is an alias
        #     logging.info(e)
        time.sleep(5)
    return
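
A hedged usage sketch for the poller above, using the boto 2-style EC2 API that the boto_exception.EC2ResponseError handler implies; the region and AMI ID are placeholders, and the helper also assumes sys, time, logging, and boto_exception (an alias for boto.exception) are imported at module level.

import boto.ec2

conn = boto.ec2.connect_to_region('us-east-1')    # placeholder region
reservation = conn.run_instances('ami-12345678')  # hypothetical AMI ID
instance = reservation.instances[0]
__WaitForInstance(instance, 'running')            # polls every 5 seconds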
def _example_order_book(quote_ctx):
    """
    Fetch order book data and print: bid price, bid volume, number of bid
    brokers, ask price, ask volume, number of ask brokers.
    """
    stock_code_list = ["US.AAPL", "HK.00700"]
    # subscribe "ORDER_BOOK"
    ret_status, ret_data = quote_ctx.subscribe(stock_code_list, ft.SubType.ORDER_BOOK)
    if ret_status != ft.RET_OK:
        print(ret_data)
        exit()
    for stk_code in stock_code_list:
        ret_status, ret_data = quote_ctx.get_order_book(stk_code)
        if ret_status != ft.RET_OK:
            print(stk_code, ret_data)
            exit()
        print("%s ORDER_BOOK" % stk_code)
        print(ret_data)
        print("\n\n")
def list_ranges(self, share_name, directory_name, file_name,
                start_range=None, end_range=None, timeout=None):
    '''
    Retrieves the valid ranges for a file.

    :param str share_name:
        Name of existing share.
    :param str directory_name:
        The path to the directory.
    :param str file_name:
        Name of existing file.
    :param int start_range:
        Specifies the start offset of bytes over which to list ranges.
        The start_range and end_range params are inclusive.
        Ex: start_range=0, end_range=511 will list ranges within the
        first 512 bytes of the file.
    :param int end_range:
        Specifies the end offset of bytes over which to list ranges.
        The start_range and end_range params are inclusive.
        Ex: start_range=0, end_range=511 will list ranges within the
        first 512 bytes of the file.
    :param int timeout:
        The timeout parameter is expressed in seconds.
    :returns: a list of valid ranges
    :rtype: a list of :class:`.FileRange`
    '''
    _validate_not_none('share_name', share_name)
    _validate_not_none('file_name', file_name)
    request = HTTPRequest()
    request.method = 'GET'
    request.host = self._get_host()
    request.path = _get_path(share_name, directory_name, file_name)
    request.query = [
        ('comp', 'rangelist'),
        ('timeout', _int_to_str(timeout)),
    ]
    if start_range is not None:
        _validate_and_format_range_headers(
            request,
            start_range,
            end_range,
            start_range_required=False,
            end_range_required=False)
    response = self._perform_request(request)
    return _convert_xml_to_ranges(response)
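
A hedged usage sketch, assuming this method belongs to the legacy azure-storage FileService client; the account credentials, share and file names are placeholders.

from azure.storage.file import FileService

service = FileService(account_name='myaccount', account_key='<key>')
# Valid ranges of the whole file, then only those within the first 512 bytes.
for file_range in service.list_ranges('myshare', None, 'myfile.bin'):
    print(file_range.start, file_range.end)
ranges = service.list_ranges('myshare', None, 'myfile.bin',
                             start_range=0, end_range=511)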
def add_tasks(batch_service_client, job_id, loads,
              output_container_name, output_container_sas_token,
              task_file, account_name):
    """Adds a task for each input file in the collection to the specified job.

    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param str job_id: The ID of the job to which to add the tasks.
    :param list loads: A collection of (input_file, output_file, i, j)
        tuples. One task will be created for each tuple.
    :param output_container_name: The ID of an Azure Blob storage container to
        which the tasks will upload their results.
    :param output_container_sas_token: A SAS token granting write access to
        the specified Azure Blob storage container.
    :param str task_file: A file name of the script
    :param str account_name: A storage account
    """
    _log.info('Adding {} tasks to job [{}]...'.format(len(loads), job_id))
    tasks = list()
    for (input_file, output_file, i, j) in loads:
        command = ['python $AZ_BATCH_NODE_SHARED_DIR/{} '
                   '--filepath {} --output {} --storageaccount {} '
                   '--task_id {} --job_id {} '
                   '--storagecontainer {} --sastoken "{}"'.format(
                       os.path.basename(task_file),
                       input_file.file_path,
                       output_file,
                       account_name,
                       i, j,
                       output_container_name,
                       output_container_sas_token)]
        _log.debug('CMD : "{}"'.format(command[0]))
        tasks.append(batch.models.TaskAddParameter(
            id='topNtask{}-{}'.format(i, j),
            command_line=command[0],  # TaskAddParameter expects a string
            resource_files=[input_file]
        ))
    batch_service_client.task.add_collection(job_id, tasks)
    task_ids = [task.id for task in tasks]
    _log.info('{} tasks were added.'.format(len(task_ids)))
    return task_ids
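
A hedged sketch of how the `loads` argument might be assembled before calling the function; the resource-file list, partition count, and all names below are illustrative, not taken from the original repository.

loads = []
for i, resource_file in enumerate(input_resource_files):  # hypothetical ResourceFile list
    for j in range(n_partitions):                         # hypothetical partition count
        output_file = 'result-{}-{}.csv'.format(i, j)
        loads.append((resource_file, output_file, i, j))

task_ids = add_tasks(batch_client, 'myjob', loads,
                     'outputs', sas_token, 'task_script.py', 'mystorageacct')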
def tz_convert(self, tz):
    """
    Convert tz-aware Datetime Array/Index from one time zone to another.

    Parameters
    ----------
    tz : str, pytz.timezone, dateutil.tz.tzfile or None
        Time zone for time. Corresponding timestamps would be converted
        to this time zone of the Datetime Array/Index. A `tz` of None will
        convert to UTC and remove the timezone information.

    Returns
    -------
    Array or Index

    Raises
    ------
    TypeError
        If Datetime Array/Index is tz-naive.

    See Also
    --------
    DatetimeIndex.tz : A timezone that has a variable offset from UTC.
    DatetimeIndex.tz_localize : Localize tz-naive DatetimeIndex to a
        given time zone, or remove timezone from a tz-aware DatetimeIndex.

    Examples
    --------
    With the `tz` parameter, we can change the DatetimeIndex
    to other time zones:

    >>> dti = pd.date_range(start='2014-08-01 09:00',
    ...                     freq='H', periods=3, tz='Europe/Berlin')
    >>> dti
    DatetimeIndex(['2014-08-01 09:00:00+02:00',
                   '2014-08-01 10:00:00+02:00',
                   '2014-08-01 11:00:00+02:00'],
                  dtype='datetime64[ns, Europe/Berlin]', freq='H')
    >>> dti.tz_convert('US/Central')
    DatetimeIndex(['2014-08-01 02:00:00-05:00',
                   '2014-08-01 03:00:00-05:00',
                   '2014-08-01 04:00:00-05:00'],
                  dtype='datetime64[ns, US/Central]', freq='H')

    With ``tz=None``, we can remove the timezone (after converting
    to UTC if necessary):

    >>> dti = pd.date_range(start='2014-08-01 09:00', freq='H',
    ...                     periods=3, tz='Europe/Berlin')
    >>> dti
    DatetimeIndex(['2014-08-01 09:00:00+02:00',
                   '2014-08-01 10:00:00+02:00',
                   '2014-08-01 11:00:00+02:00'],
                  dtype='datetime64[ns, Europe/Berlin]', freq='H')
    >>> dti.tz_convert(None)
    DatetimeIndex(['2014-08-01 07:00:00',
                   '2014-08-01 08:00:00',
                   '2014-08-01 09:00:00'],
                  dtype='datetime64[ns]', freq='H')
    """
    tz = timezones.maybe_get_tz(tz)

    if self.tz is None:
        # tz naive, use tz_localize
        raise TypeError('Cannot convert tz-naive timestamps, use '
                        'tz_localize to localize')

    # No conversion since timestamps are all UTC to begin with
    dtype = tz_to_dtype(tz)
    return self._simple_new(self.asi8, dtype=dtype, freq=self.freq)
def plot_ell(fignum, pars, col, lower, plot):
    """
    function to calculate/plot points on an ellipse about Pdec, Pinc with
    angles beta and gamma

    Parameters
    ----------
    fignum : matplotlib figure number
    pars : list of [Pdec, Pinc, beta, Bdec, Binc, gamma, Gdec, Ginc]
        where P is the direction, Bdec/Binc is the beta direction, and
        Gdec/Ginc is the gamma direction
    col : color for ellipse
    lower : boolean, if True, lower hemisphere projection
    plot : boolean, if True, make the plot; if False, return the points
    """
    plt.figure(num=fignum)
    rad = old_div(np.pi, 180.)
    Pdec, Pinc, beta, Bdec, Binc, gamma, Gdec, Ginc = pars[:8]
    if beta > 90. or gamma > 90:
        beta = 180. - beta
        gamma = 180. - gamma
        Pdec = Pdec - 180.
        Pinc = -Pinc
    beta, gamma = beta * rad, gamma * rad  # convert to radians
    X_ell, Y_ell, X_up, Y_up, PTS = [], [], [], [], []
    nums = 201
    xnum = old_div(float(nums - 1.), 2.)
    # set up t matrix
    t = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
    X = pmag.dir2cart((Pdec, Pinc, 1.0))  # convert to cartesian coordinates
    if lower == 1 and X[2] < 0:
        for i in range(3):
            X[i] = -X[i]
    # set up rotation matrix t
    t[0][2] = X[0]
    t[1][2] = X[1]
    t[2][2] = X[2]
    X = pmag.dir2cart((Bdec, Binc, 1.0))
    if lower == 1 and X[2] < 0:
        for i in range(3):
            X[i] = -X[i]
    t[0][0] = X[0]
    t[1][0] = X[1]
    t[2][0] = X[2]
    X = pmag.dir2cart((Gdec, Ginc, 1.0))
    if lower == 1 and X[2] < 0:
        for i in range(3):
            X[i] = -X[i]
    t[0][1] = X[0]
    t[1][1] = X[1]
    t[2][1] = X[2]
    # set up v matrix
    v = [0, 0, 0]
    for i in range(nums):  # incremental point along ellipse
        psi = float(i) * np.pi / xnum
        v[0] = np.sin(beta) * np.cos(psi)
        v[1] = np.sin(gamma) * np.sin(psi)
        v[2] = np.sqrt(1. - v[0]**2 - v[1]**2)
        elli = [0, 0, 0]
        # calculate points on the ellipse
        for j in range(3):
            for k in range(3):
                # cartesian coordinate j of ellipse
                elli[j] = elli[j] + t[j][k] * v[k]
        pts = pmag.cart2dir(elli)
        PTS.append([pts[0], pts[1]])
        # put on an equal area projection
        R = old_div(np.sqrt(1. - abs(elli[2])),
                    (np.sqrt(elli[0]**2 + elli[1]**2)))
        if elli[2] <= 0:
            X_up.append(elli[1] * R)
            Y_up.append(elli[0] * R)
        else:
            X_ell.append(elli[1] * R)
            Y_ell.append(elli[0] * R)
    if plot == 1:
        col = col[0] + '.'
        if X_ell != []:
            plt.plot(X_ell, Y_ell, col, markersize=3)
        if X_up != []:
            plt.plot(X_up, Y_up, col, markersize=3)
    else:
        return PTS
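
A hedged usage sketch following PmagPy conventions; the direction/angle values below are illustrative, and `pmag`, `np`, and `plt` must already be imported as in the function body.

pars = [350., 60., 15., 80., 10., 10., 170., 20.]  # Pdec, Pinc, beta, Bdec, Binc, gamma, Gdec, Ginc
pts = plot_ell(1, pars, 'g', 1, 0)  # plot=0: just return the ellipse points
plot_ell(1, pars, 'g', 1, 1)        # plot=1: draw them onto figure 1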
def update(self, callback=None, errback=None, **kwargs):
    """
    Update record configuration. Pass a list of keywords and their values to
    update. For the list of keywords available for record configuration, see
    :attr:`ns1.rest.records.Records.INT_FIELDS`,
    :attr:`ns1.rest.records.Records.PASSTHRU_FIELDS`,
    :attr:`ns1.rest.records.Records.BOOL_FIELDS`
    """
    if not self.data:
        raise RecordException('record not loaded')

    def success(result, *args):
        self._parseModel(result)
        if callback:
            return callback(self)
        else:
            return self

    return self._rest.update(self.parentZone.zone, self.domain, self.type,
                             callback=success, errback=errback, **kwargs)
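
A hedged usage sketch against the ns1-python client this method appears to come from; the API key, zone name, and TTL value are placeholders.

from ns1 import NS1

api = NS1(apiKey='<key>')
zone = api.loadZone('example.com')
record = zone.loadRecord('www', 'A')
record = record.update(ttl=300)  # no callback: runs synchronously, returns the record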
def insert_before(self, text):
    """
    Create a new document, with this text inserted before the buffer.
    It keeps selection ranges and cursor position in sync.
    """
    selection_state = self.selection

    if selection_state:
        selection_state = SelectionState(
            original_cursor_position=selection_state.original_cursor_position + len(text),
            type=selection_state.type)

    return Document(
        text=text + self.text,
        cursor_position=self.cursor_position + len(text),
        selection=selection_state)
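
A short example, assuming this is the immutable Document class from prompt_toolkit (or an equivalent buffer abstraction); it shows that the original object is untouched and the cursor shifts by the inserted length.

from prompt_toolkit.document import Document

doc = Document(text='world', cursor_position=5)
new_doc = doc.insert_before('hello ')
print(new_doc.text)             # 'hello world'
print(new_doc.cursor_position)  # 11 == 5 + len('hello ')
print(doc.text)                 # 'world' -- unchanged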
async def message(self, msg, msg_type=None, use_version=None):
    """
    Loads/dumps a message, depending on whether the archive is reading
    or writing.

    :param msg: message instance to dump, or destination instance to load into
    :param msg_type: optional explicit message type (defaults to msg's class)
    :param use_version: optional explicit version override
    :return: the dumped or loaded message object
    """
    elem_type = msg_type if msg_type is not None else msg.__class__
    version = await self.version(elem_type, None, elem=msg) if use_version is None else use_version
    if self.is_tracked():
        return self.get_tracked()

    if hasattr(elem_type, 'boost_serialize'):
        msg = elem_type() if msg is None else msg
        self.pop_track(use_version is None)
        return await msg.boost_serialize(self, version=version)

    if self.writing:
        self.pop_track(use_version is None)
        return await self.dump_message(msg, msg_type=msg_type)
    else:
        obj = await self.load_message(msg_type, msg=msg)
        return self.track_obj(obj, use_version is None)
def stdev(requestContext, seriesList, points, windowTolerance=0.1):
    """
    Takes one metric or a wildcard seriesList followed by an integer N.
    Draw the Standard Deviation of all metrics passed for the past N
    datapoints. If the ratio of null points in the window is greater than
    windowTolerance, skip the calculation. The default for windowTolerance is
    0.1 (up to 10% of points in the window can be missing). Note that if this
    is set to 0.0, it will cause large gaps in the output anywhere a single
    point is missing.

    Example::

        &target=stdev(server*.instance*.threads.busy,30)
        &target=stdev(server*.instance*.cpu.system,30,0.0)

    """
    # For this we take the standard deviation in terms of the moving average
    # and the moving average of series squares.
    for seriesIndex, series in enumerate(seriesList):
        stdevSeries = TimeSeries("stdev(%s,%d)" % (series.name, int(points)),
                                 series.start, series.end, series.step, [])
        stdevSeries.pathExpression = "stdev(%s,%d)" % (series.name,
                                                       int(points))

        validPoints = 0
        currentSum = 0
        currentSumOfSquares = 0
        for index, newValue in enumerate(series):
            # Mark whether we've reached our window size - don't drop points
            # out otherwise
            if index < points:
                bootstrapping = True
                droppedValue = None
            else:
                bootstrapping = False
                droppedValue = series[index - points]

            # Track non-None points in window
            if not bootstrapping and droppedValue is not None:
                validPoints -= 1
            if newValue is not None:
                validPoints += 1

            # Remove the value that just dropped out of the window
            if not bootstrapping and droppedValue is not None:
                currentSum -= droppedValue
                currentSumOfSquares -= droppedValue**2

            # Add in the value that just entered the window
            if newValue is not None:
                currentSum += newValue
                currentSumOfSquares += newValue**2

            if (
                validPoints > 0 and
                float(validPoints) / points >= windowTolerance
            ):
                try:
                    deviation = math.sqrt(validPoints * currentSumOfSquares -
                                          currentSum**2) / validPoints
                except ValueError:
                    deviation = None
                stdevSeries.append(deviation)
            else:
                stdevSeries.append(None)

        seriesList[seriesIndex] = stdevSeries
    return seriesList
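
The deviation computed above uses the running-sums identity sigma = sqrt(n*sum(x^2) - (sum x)^2) / n, which lets the window slide in O(1) per point. A standalone sanity check of that identity (independent of graphite-web):

import math

window = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]
n = len(window)
s, s2 = sum(window), sum(x * x for x in window)
fast = math.sqrt(n * s2 - s * s) / n                          # running-sums form
mean = s / n
direct = math.sqrt(sum((x - mean) ** 2 for x in window) / n)  # definition
assert abs(fast - direct) < 1e-12                             # both equal 2.0 here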
def print_tree_recursive(tree_obj, node_index, attribute_names=None):
    """
    Recursively writes a string representation of a decision tree object.

    Parameters
    ----------
    tree_obj : sklearn.tree._tree.Tree object
        A base decision tree object
    node_index : int
        Index of the node being printed
    attribute_names : list
        List of attribute names

    Returns
    -------
    tree_str : str
        String representation of decision tree in the same format as the parf library.
    """
    tree_str = ""
    if node_index == 0:
        tree_str += "{0:d}\n".format(tree_obj.node_count)
    if tree_obj.feature[node_index] >= 0:
        if attribute_names is None:
            attr_val = "{0:d}".format(tree_obj.feature[node_index])
        else:
            attr_val = attribute_names[tree_obj.feature[node_index]]
        tree_str += "b {0:d} {1} {2:0.4f} {3:d} {4:1.5e}\n".format(node_index,
                                                                   attr_val,
                                                                   tree_obj.weighted_n_node_samples[node_index],
                                                                   tree_obj.n_node_samples[node_index],
                                                                   tree_obj.threshold[node_index])
    else:
        if tree_obj.max_n_classes > 1:
            leaf_value = "{0:d}".format(tree_obj.value[node_index].argmax())
        else:
            leaf_value = "{0}".format(tree_obj.value[node_index][0][0])
        tree_str += "l {0:d} {1} {2:0.4f} {3:d}\n".format(node_index,
                                                          leaf_value,
                                                          tree_obj.weighted_n_node_samples[node_index],
                                                          tree_obj.n_node_samples[node_index])
    if tree_obj.children_left[node_index] > 0:
        tree_str += print_tree_recursive(tree_obj, tree_obj.children_left[node_index], attribute_names)
    if tree_obj.children_right[node_index] > 0:
        tree_str += print_tree_recursive(tree_obj, tree_obj.children_right[node_index], attribute_names)
    return tree_str
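
A hedged usage sketch: fit a small scikit-learn tree and dump it by starting the recursion at the root node (index 0); the dataset choice is purely illustrative.

from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

iris = load_iris()
clf = DecisionTreeClassifier(max_depth=2, random_state=0)
clf.fit(iris.data, iris.target)
print(print_tree_recursive(clf.tree_, 0, attribute_names=list(iris.feature_names)))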
def reset_store(self):
    '''
    Clears out the current store and gets a cookie. Set the cross site
    request forgery token for each subsequent request.

    :return: A response having cleared the current store.
    :rtype: requests.Response
    '''
    response = self.__get('/Store/Reset')
    token = self.session.cookies['XSRF-TOKEN']
    self.session.headers.update({'X-XSRF-TOKEN': token})
    return response
def get_model(self):
    """
    Returns the model instance of the ProbModel.

    Returns
    -------
    model : an instance of BayesianModel.

    Examples
    --------
    >>> reader = ProbModelXMLReader()
    >>> reader.get_model()
    """
    if self.probnet.get('type') == "BayesianNetwork":
        model = BayesianModel()
        model.add_nodes_from(self.probnet['Variables'].keys())
        model.add_edges_from(self.probnet['edges'].keys())

        tabular_cpds = []
        cpds = self.probnet['Potentials']
        for cpd in cpds:
            var = list(cpd['Variables'].keys())[0]
            states = self.probnet['Variables'][var]['States']
            evidence = cpd['Variables'][var]
            evidence_card = [len(self.probnet['Variables'][evidence_var]['States'])
                             for evidence_var in evidence]
            arr = list(map(float, cpd['Values'].split()))
            values = np.array(arr)
            values = values.reshape((len(states), values.size // len(states)))
            tabular_cpds.append(TabularCPD(var, len(states), values, evidence, evidence_card))
        model.add_cpds(*tabular_cpds)

        variables = model.nodes()
        for var in variables:
            for prop_name, prop_value in self.probnet['Variables'][var].items():
                model.node[var][prop_name] = prop_value

        edges = model.edges()
        if nx.__version__.startswith('1'):
            for edge in edges:
                for prop_name, prop_value in self.probnet['edges'][edge].items():
                    model.edge[edge[0]][edge[1]][prop_name] = prop_value
        else:
            for edge in edges:
                for prop_name, prop_value in self.probnet['edges'][edge].items():
                    model.adj[edge[0]][edge[1]][prop_name] = prop_value
        return model
    else:
        raise ValueError("Please specify only Bayesian Network.")
def import_users(self):
    """ save users to local DB """
    self.message('saving users into local DB')
    saved_users = self.saved_admins
    # loop over all extracted unique email addresses
    for email in self.email_set:
        owner = self.users_dict[email].get('owner')
        # if owner is not specified, build username from email
        if owner.strip() == '':
            owner, domain = email.split('@')
            # replace any dots with a space
            owner = owner.replace('.', ' ')
        # if owner has a space, assume they specified first and last name
        if ' ' in owner:
            owner_parts = owner.split(' ')
            first_name = owner_parts[0]
            last_name = owner_parts[1]
        else:
            first_name = owner
            last_name = ''
        # username must be slugified otherwise won't get into the DB
        username = slugify(owner)
        # check if user exists first
        try:
            # try looking by email
            user = User.objects.get(email=email)
        except User.DoesNotExist:
            # otherwise init new
            user = User()
            user.username = username
            # generate new password only for new users
            user.password = self.generate_random_password()
            user.is_active = True
        # we'll create one user for each unique email address we've got
        user.first_name = first_name.capitalize()
        user.last_name = last_name.capitalize()
        user.email = email
        # extract date joined from old nodes
        # find the oldest node of this user
        oldest_node = OldNode.objects.filter(email=email).order_by('added')[0]
        user.date_joined = oldest_node.added
        # be sure username is unique
        counter = 1
        original_username = username
        while True:
            # do this check only if user is new
            if not user.pk and User.objects.filter(username=user.username).count() > 0:
                counter += 1
                user.username = '%s%d' % (original_username, counter)
            else:
                break
        try:
            # validate data and save
            user.full_clean()
            user.save(sync_emailaddress=False)
        except Exception:
            # if user already exists use that instance
            if User.objects.filter(email=email).count() == 1:
                user = User.objects.get(email=email)
            # otherwise report error
            else:
                tb = traceback.format_exc()
                self.message('Could not save user %s, got exception:\n\n%s' % (user.username, tb))
                continue
        # if we got a user to add
        if user:
            # store id
            self.users_dict[email]['id'] = user.id
            # append to saved users
            saved_users.append(user)
            self.verbose('Saved user %s (%s) with email <%s>' % (user.username, user.get_full_name(), user.email))
            # mark email address as confirmed if feature is enabled
            if EMAIL_CONFIRMATION and EmailAddress.objects.filter(email=user.email).count() == 0:
                try:
                    email_address = EmailAddress(user=user, email=user.email, verified=True, primary=True)
                    email_address.full_clean()
                    email_address.save()
                except Exception:
                    tb = traceback.format_exc()
                    self.message('Could not save email address for user %s, got exception:\n\n%s' % (user.username, tb))
    self.message('saved %d users into local DB' % len(saved_users))
    self.saved_users = saved_users
def plot(self, X=None, n=0, ax=None, envelopes=[1, 3], base_alpha=0.375,
         return_prediction=False, return_std=True, full_output=False,
         plot_kwargs={}, **kwargs):
    """Plots the Gaussian process using the current hyperparameters. Only for num_dim <= 2.

    Parameters
    ----------
    X : array-like (`M`,) or (`M`, `num_dim`), optional
        The values to evaluate the Gaussian process at. If None, then 100
        points between the minimum and maximum of the data's X are used for
        a univariate Gaussian process and a 50x50 grid is used for a
        bivariate Gaussian process. Default is None (use 100 points between
        min and max).
    n : int or list, optional
        The order of derivative to compute. For num_dim=1, this must be an
        int. For num_dim=2, this must be a list of ints of length 2.
        Default is 0 (don't take derivative).
    ax : axis instance, optional
        Axis to plot the result on. If no axis is passed, one is created.
        If the string 'gca' is passed, the current axis (from plt.gca())
        is used. If X_dim = 2, the axis must be 3d.
    envelopes : list of float, optional
        +/-n*sigma envelopes to plot. Default is [1, 3].
    base_alpha : float, optional
        Alpha value to use for +/-1*sigma envelope. All other envelopes `env`
        are drawn with `base_alpha`/`env`. Default is 0.375.
    return_prediction : bool, optional
        If True, the predicted values are also returned. Default is False.
    return_std : bool, optional
        If True, the standard deviation is computed and returned along with
        the mean when `return_prediction` is True. Default is True.
    full_output : bool, optional
        Set to True to return the full outputs in a dictionary with keys:

        ==== ==========================================================================
        mean mean of GP at requested points
        std  standard deviation of GP at requested points
        cov  covariance matrix for values of GP at requested points
        samp random samples of GP at requested points (only if `return_sample` is True)
        ==== ==========================================================================

    plot_kwargs : dict, optional
        The entries in this dictionary are passed as kwargs to the plotting
        command used to plot the mean. Use this to, for instance, change the
        color, line width and line style.
    **kwargs : extra arguments for predict, optional
        Extra arguments that are passed to :py:meth:`predict`.

    Returns
    -------
    ax : axis instance
        The axis instance used.
    mean : :py:class:`Array`, (`M`,)
        Predicted GP mean. Only returned if `return_prediction` is True and `full_output` is False.
    std : :py:class:`Array`, (`M`,)
        Predicted standard deviation, only returned if `return_prediction` and `return_std` are True and `full_output` is False.
    full_output : dict
        Dictionary with fields for mean, std, cov and possibly random samples. Only returned if `return_prediction` and `full_output` are True.
    """
    if self.num_dim > 2:
        raise ValueError("Plotting is not supported for num_dim > 2!")

    if self.num_dim == 1:
        if X is None:
            X = scipy.linspace(self.X.min(), self.X.max(), 100)
    elif self.num_dim == 2:
        if X is None:
            x1 = scipy.linspace(self.X[:, 0].min(), self.X[:, 0].max(), 50)
            x2 = scipy.linspace(self.X[:, 1].min(), self.X[:, 1].max(), 50)
            X1, X2 = scipy.meshgrid(x1, x2)
            X1 = X1.flatten()
            X2 = X2.flatten()
            X = scipy.hstack((scipy.atleast_2d(X1).T, scipy.atleast_2d(X2).T))
        else:
            X1 = scipy.asarray(X[:, 0]).flatten()
            X2 = scipy.asarray(X[:, 1]).flatten()

    if envelopes or (return_prediction and (return_std or full_output)):
        out = self.predict(X, n=n, full_output=True, **kwargs)
        mean = out['mean']
        std = out['std']
    else:
        mean = self.predict(X, n=n, return_std=False, **kwargs)
        std = None

    if self.num_dim == 1:
        univariate_envelope_plot(
            X,
            mean,
            std,
            ax=ax,
            base_alpha=base_alpha,
            envelopes=envelopes,
            **plot_kwargs
        )
    elif self.num_dim == 2:
        if ax is None:
            f = plt.figure()
            ax = f.add_subplot(111, projection='3d')
        elif ax == 'gca':
            ax = plt.gca()
        if 'linewidths' not in kwargs:
            kwargs['linewidths'] = 0
        s = ax.plot_trisurf(X1, X2, mean, **plot_kwargs)
        for i in envelopes:
            # drop any user-supplied alpha so the explicit alpha below wins
            kwargs.pop('alpha', base_alpha)
            ax.plot_trisurf(X1, X2, mean - i * std, alpha=base_alpha / i, **kwargs)
            ax.plot_trisurf(X1, X2, mean + i * std, alpha=base_alpha / i, **kwargs)

    if return_prediction:
        if full_output:
            return (ax, out)
        elif return_std:
            return (ax, out['mean'], out['std'])
        else:
            return (ax, out['mean'])
    else:
        return ax
def info(self, **kwargs):
    """
    Get the primary information about a TV episode by combination of a
    season and episode number.

    Args:
        language: (optional) ISO 639 code.
        append_to_response: (optional) Comma separated, any TV series
                            method.

    Returns:
        A dict representation of the JSON returned from the API.
    """
    path = self._get_series_id_season_number_episode_number_path('info')

    response = self._GET(path, kwargs)
    self._set_attrs_to_values(response)
    return response
def _next_month(self):
    """Update calendar to show the next month."""
    self._canvas.place_forget()

    year, month = self._date.year, self._date.month
    self._date = self._date + self.timedelta(
        days=calendar.monthrange(year, month)[1] + 1)
    self._date = self.datetime(self._date.year, self._date.month, 1)
    self._build_calendar()
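
The date arithmetic above relies on `self._date` always being normalized to the first of the month: adding `monthrange(...)[1] + 1` days then lands on day 2 of the next month, and snapping back to day 1 finishes the job, including across year boundaries. A standalone check:

import calendar
import datetime

d = datetime.date(2023, 12, 1)
d = d + datetime.timedelta(days=calendar.monthrange(d.year, d.month)[1] + 1)
d = datetime.date(d.year, d.month, 1)
assert d == datetime.date(2024, 1, 1)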
def pre_fork(self, process_manager):
    '''
    Pre-fork we need to create the listening socket (or, when
    USE_LOAD_BALANCER is set, the shared socket queue).
    '''
    salt.transport.mixins.auth.AESReqServerMixin.pre_fork(self, process_manager)
    if USE_LOAD_BALANCER:
        self.socket_queue = multiprocessing.Queue()
        process_manager.add_process(
            LoadBalancerServer, args=(self.opts, self.socket_queue)
        )
    elif not salt.utils.platform.is_windows():
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        _set_tcp_keepalive(self._socket, self.opts)
        self._socket.setblocking(0)
        self._socket.bind((self.opts['interface'], int(self.opts['ret_port'])))
def unmount(self, path):
    """
    Remove a mountpoint from the filesystem.
    """
    del self._mountpoints[self._join_chunks(self._normalize_path(path))]
def _is_valid_url(url):
    """ Helper function to validate that URLs are well formed, i.e. that they
    contain a valid protocol and a valid domain. It does not actually check
    if the URL exists.
    """
    try:
        parsed = urlparse(url)
        mandatory_parts = [parsed.scheme, parsed.netloc]
        return all(mandatory_parts)
    except ValueError:
        return False
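
A quick standalone demonstration; `urlparse` is assumed to come from the standard library (`urllib.parse` on Python 3).

from urllib.parse import urlparse

assert _is_valid_url('https://example.com/path')
assert not _is_valid_url('example.com')  # no scheme
assert not _is_valid_url('https://')     # no domain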
def save(self):
    """save PlayerRecord settings to disk"""
    data = json.dumps(self.simpleAttrs, indent=4, sort_keys=True).encode()
    with open(self.filename, "wb") as f:
        f.write(data)
def inverse(self):
    """Invert all instructions."""
    for index, instruction in enumerate(self.instructions):
        self.instructions[index] = instruction.inverse()
    return self
def active_io(self, iocb):
    """Called by a handler to notify the controller that a request is
    being processed."""
    if _debug: IOController._debug("active_io %r", iocb)

    # requests should be idle or pending before coming active
    if (iocb.ioState != IDLE) and (iocb.ioState != PENDING):
        raise RuntimeError("invalid state transition (currently %d)" % (iocb.ioState,))

    # change the state
    iocb.ioState = ACTIVE
def numericshape(self):
    """Shape of the array of temporary values required for the numerical
    solver actually being selected."""
    try:
        numericshape = [self.subseqs.seqs.model.numconsts.nmb_stages]
    except AttributeError:
        objecttools.augment_excmessage(
            'The `numericshape` of a sequence like `%s` depends on the '
            'configuration of the actual integration algorithm. '
            'While trying to query the required configuration data '
            '`nmb_stages` of the model associated with element `%s`'
            % (self.name, objecttools.devicename(self)))
    # noinspection PyUnboundLocalVariable
    numericshape.extend(self.shape)
    return tuple(numericshape)
|
[
"def",
"numericshape",
"(",
"self",
")",
":",
"try",
":",
"numericshape",
"=",
"[",
"self",
".",
"subseqs",
".",
"seqs",
".",
"model",
".",
"numconsts",
".",
"nmb_stages",
"]",
"except",
"AttributeError",
":",
"objecttools",
".",
"augment_excmessage",
"(",
"'The `numericshape` of a sequence like `%s` depends on the '",
"'configuration of the actual integration algorithm. '",
"'While trying to query the required configuration data '",
"'`nmb_stages` of the model associated with element `%s`'",
"%",
"(",
"self",
".",
"name",
",",
"objecttools",
".",
"devicename",
"(",
"self",
")",
")",
")",
"# noinspection PyUnboundLocalVariable",
"numericshape",
".",
"extend",
"(",
"self",
".",
"shape",
")",
"return",
"tuple",
"(",
"numericshape",
")"
] | 51.133333 | 16.866667 |
def p_function_expr_1(self, p):
"""
function_expr \
: FUNCTION LPAREN RPAREN LBRACE function_body RBRACE
| FUNCTION LPAREN formal_parameter_list RPAREN \
LBRACE function_body RBRACE
"""
if len(p) == 7:
p[0] = ast.FuncExpr(
identifier=None, parameters=None, elements=p[5])
else:
p[0] = ast.FuncExpr(
identifier=None, parameters=p[3], elements=p[6])
|
[
"def",
"p_function_expr_1",
"(",
"self",
",",
"p",
")",
":",
"if",
"len",
"(",
"p",
")",
"==",
"7",
":",
"p",
"[",
"0",
"]",
"=",
"ast",
".",
"FuncExpr",
"(",
"identifier",
"=",
"None",
",",
"parameters",
"=",
"None",
",",
"elements",
"=",
"p",
"[",
"5",
"]",
")",
"else",
":",
"p",
"[",
"0",
"]",
"=",
"ast",
".",
"FuncExpr",
"(",
"identifier",
"=",
"None",
",",
"parameters",
"=",
"p",
"[",
"3",
"]",
",",
"elements",
"=",
"p",
"[",
"6",
"]",
")"
] | 36.230769 | 13.923077 |
def ghuser_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
"""Link to a GitHub user.
    Returns a 2-part tuple containing a list of nodes to insert into the
document and a list of system messages. Both are allowed to be
empty.
:param name: The role name used in the document.
:param rawtext: The entire markup snippet, with role.
:param text: The text marked with the role.
:param lineno: The line number where rawtext appears in the input.
:param inliner: The inliner instance that called us.
:param options: Directive options for customization.
:param content: The directive content for customization.
"""
# app = inliner.document.settings.env.app
#app.info('user link %r' % text)
ref = 'https://www.github.com/' + text
node = nodes.reference(rawtext, text, refuri=ref, **options)
return [node], []
|
[
"def",
"ghuser_role",
"(",
"name",
",",
"rawtext",
",",
"text",
",",
"lineno",
",",
"inliner",
",",
"options",
"=",
"{",
"}",
",",
"content",
"=",
"[",
"]",
")",
":",
"# app = inliner.document.settings.env.app",
"#app.info('user link %r' % text)",
"ref",
"=",
"'https://www.github.com/'",
"+",
"text",
"node",
"=",
"nodes",
".",
"reference",
"(",
"rawtext",
",",
"text",
",",
"refuri",
"=",
"ref",
",",
"*",
"*",
"options",
")",
"return",
"[",
"node",
"]",
",",
"[",
"]"
] | 43.25 | 18.75 |
def coverage(c, html=True):
"""
Run coverage with coverage.py.
"""
# NOTE: this MUST use coverage itself, and not pytest-cov, because the
# latter is apparently unable to prevent pytest plugins from being loaded
# before pytest-cov itself is able to start up coverage.py! The result is
# that coverage _always_ skips over all module level code, i.e. constants,
# 'def' lines, etc. Running coverage as the "outer" layer avoids this
# problem, thus no need for pytest-cov.
# NOTE: this does NOT hold true for NON-PYTEST code, so
# pytest-relaxed-USING modules can happily use pytest-cov.
c.run("coverage run --source=pytest_relaxed -m pytest")
if html:
c.run("coverage html")
c.run("open htmlcov/index.html")
|
[
"def",
"coverage",
"(",
"c",
",",
"html",
"=",
"True",
")",
":",
"# NOTE: this MUST use coverage itself, and not pytest-cov, because the",
"# latter is apparently unable to prevent pytest plugins from being loaded",
"# before pytest-cov itself is able to start up coverage.py! The result is",
"# that coverage _always_ skips over all module level code, i.e. constants,",
"# 'def' lines, etc. Running coverage as the \"outer\" layer avoids this",
"# problem, thus no need for pytest-cov.",
"# NOTE: this does NOT hold true for NON-PYTEST code, so",
"# pytest-relaxed-USING modules can happily use pytest-cov.",
"c",
".",
"run",
"(",
"\"coverage run --source=pytest_relaxed -m pytest\"",
")",
"if",
"html",
":",
"c",
".",
"run",
"(",
"\"coverage html\"",
")",
"c",
".",
"run",
"(",
"\"open htmlcov/index.html\"",
")"
] | 47.4375 | 18.6875 |
def binary_hash(self, project, patch_file):
""" Gathers sha256 hashes from binary lists """
global il
exception_file = None
try:
project_exceptions = il.get('project_exceptions')
except KeyError:
logger.info('project_exceptions missing in %s for %s', ignore_list, project)
for project_files in project_exceptions:
if project in project_files:
exception_file = project_files.get(project)
with open(exception_file, 'r') as f:
bl = yaml.safe_load(f)
for key, value in bl.items():
if key == 'binaries':
if patch_file in value:
hashvalue = value[patch_file]
return hashvalue
else:
for key, value in il.items():
if key == 'binaries':
if patch_file in value:
hashvalue = value[patch_file]
return hashvalue
else:
hashvalue = ""
return hashvalue
else:
logger.info('%s not found in %s', project, ignore_list)
logger.info('No project specific exceptions will be applied')
hashvalue = ""
return hashvalue
|
[
"def",
"binary_hash",
"(",
"self",
",",
"project",
",",
"patch_file",
")",
":",
"global",
"il",
"exception_file",
"=",
"None",
"try",
":",
"project_exceptions",
"=",
"il",
".",
"get",
"(",
"'project_exceptions'",
")",
"except",
"KeyError",
":",
"logger",
".",
"info",
"(",
"'project_exceptions missing in %s for %s'",
",",
"ignore_list",
",",
"project",
")",
"for",
"project_files",
"in",
"project_exceptions",
":",
"if",
"project",
"in",
"project_files",
":",
"exception_file",
"=",
"project_files",
".",
"get",
"(",
"project",
")",
"with",
"open",
"(",
"exception_file",
",",
"'r'",
")",
"as",
"f",
":",
"bl",
"=",
"yaml",
".",
"safe_load",
"(",
"f",
")",
"for",
"key",
",",
"value",
"in",
"bl",
".",
"items",
"(",
")",
":",
"if",
"key",
"==",
"'binaries'",
":",
"if",
"patch_file",
"in",
"value",
":",
"hashvalue",
"=",
"value",
"[",
"patch_file",
"]",
"return",
"hashvalue",
"else",
":",
"for",
"key",
",",
"value",
"in",
"il",
".",
"items",
"(",
")",
":",
"if",
"key",
"==",
"'binaries'",
":",
"if",
"patch_file",
"in",
"value",
":",
"hashvalue",
"=",
"value",
"[",
"patch_file",
"]",
"return",
"hashvalue",
"else",
":",
"hashvalue",
"=",
"\"\"",
"return",
"hashvalue",
"else",
":",
"logger",
".",
"info",
"(",
"'%s not found in %s'",
",",
"project",
",",
"ignore_list",
")",
"logger",
".",
"info",
"(",
"'No project specific exceptions will be applied'",
")",
"hashvalue",
"=",
"\"\"",
"return",
"hashvalue"
] | 44.264706 | 16.176471 |
def callproc(self, procname, parameters=(), quiet=False, expect_return_value=False):
"""Calls a MySQL stored procedure procname and returns the return values. This uses DictCursor.
To get return values back out of a stored procedure, prefix the parameter with a @ character.
"""
self.procedures_run += 1
i = 0
errcode = 0
caughte = None
out_param_indices = []
for j in range(len(parameters)):
p = parameters[j]
if type(p) == type('') and p[0] == '@':
assert(p.find(' ') == -1)
out_param_indices.append(j)
if procname not in self.list_stored_procedures():
raise Exception("The stored procedure '%s' does not exist." % procname)
if not re.match("^\s*\w+\s*$", procname):
raise Exception("Expected a stored procedure name in callproc but received '%s'." % procname)
while i < self.numTries:
i += 1
try:
self._get_connection()
cursor = self.connection.cursor()
if type(parameters) != type(()):
parameters = (parameters,)
errcode = cursor.callproc(procname, parameters)
self.lastrowid = int(cursor.lastrowid)
cursor.close()
# Get the out parameters
out_param_results = []
if out_param_indices:
out_param_results = self.execute('SELECT %s' % ", ".join(['@_%s_%d AS %s' % (procname, pindex, parameters[pindex][1:]) for pindex in out_param_indices]))
return out_param_results
except MySQLdb.OperationalError, e:
self._close_connection()
errcode = e[0]
caughte = e
continue
except:
self._close_connection()
traceback.print_exc()
break
if not quiet:
sys.stderr.write("\nSQL execution error call stored procedure %s at %s:" % (
procname, datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
sys.stderr.write("\nErrorcode/Error: %d - '%s'.\n" % (errcode, str(caughte)))
sys.stderr.flush()
raise MySQLdb.OperationalError(caughte)
|
[
"def",
"callproc",
"(",
"self",
",",
"procname",
",",
"parameters",
"=",
"(",
")",
",",
"quiet",
"=",
"False",
",",
"expect_return_value",
"=",
"False",
")",
":",
"self",
".",
"procedures_run",
"+=",
"1",
"i",
"=",
"0",
"errcode",
"=",
"0",
"caughte",
"=",
"None",
"out_param_indices",
"=",
"[",
"]",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"parameters",
")",
")",
":",
"p",
"=",
"parameters",
"[",
"j",
"]",
"if",
"type",
"(",
"p",
")",
"==",
"type",
"(",
"''",
")",
"and",
"p",
"[",
"0",
"]",
"==",
"'@'",
":",
"assert",
"(",
"p",
".",
"find",
"(",
"' '",
")",
"==",
"-",
"1",
")",
"out_param_indices",
".",
"append",
"(",
"j",
")",
"if",
"procname",
"not",
"in",
"self",
".",
"list_stored_procedures",
"(",
")",
":",
"raise",
"Exception",
"(",
"\"The stored procedure '%s' does not exist.\"",
"%",
"procname",
")",
"if",
"not",
"re",
".",
"match",
"(",
"\"^\\s*\\w+\\s*$\"",
",",
"procname",
")",
":",
"raise",
"Exception",
"(",
"\"Expected a stored procedure name in callproc but received '%s'.\"",
"%",
"procname",
")",
"while",
"i",
"<",
"self",
".",
"numTries",
":",
"i",
"+=",
"1",
"try",
":",
"self",
".",
"_get_connection",
"(",
")",
"cursor",
"=",
"self",
".",
"connection",
".",
"cursor",
"(",
")",
"if",
"type",
"(",
"parameters",
")",
"!=",
"type",
"(",
"(",
")",
")",
":",
"parameters",
"=",
"(",
"parameters",
",",
")",
"errcode",
"=",
"cursor",
".",
"callproc",
"(",
"procname",
",",
"parameters",
")",
"self",
".",
"lastrowid",
"=",
"int",
"(",
"cursor",
".",
"lastrowid",
")",
"cursor",
".",
"close",
"(",
")",
"# Get the out parameters",
"out_param_results",
"=",
"[",
"]",
"if",
"out_param_indices",
":",
"out_param_results",
"=",
"self",
".",
"execute",
"(",
"'SELECT %s'",
"%",
"\", \"",
".",
"join",
"(",
"[",
"'@_%s_%d AS %s'",
"%",
"(",
"procname",
",",
"pindex",
",",
"parameters",
"[",
"pindex",
"]",
"[",
"1",
":",
"]",
")",
"for",
"pindex",
"in",
"out_param_indices",
"]",
")",
")",
"return",
"out_param_results",
"except",
"MySQLdb",
".",
"OperationalError",
",",
"e",
":",
"self",
".",
"_close_connection",
"(",
")",
"errcode",
"=",
"e",
"[",
"0",
"]",
"caughte",
"=",
"e",
"continue",
"except",
":",
"self",
".",
"_close_connection",
"(",
")",
"traceback",
".",
"print_exc",
"(",
")",
"break",
"if",
"not",
"quiet",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"\\nSQL execution error call stored procedure %s at %s:\"",
"%",
"(",
"procname",
",",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"\"%Y-%m-%d %H:%M:%S\"",
")",
")",
")",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"\\nErrorcode/Error: %d - '%s'.\\n\"",
"%",
"(",
"errcode",
",",
"str",
"(",
"caughte",
")",
")",
")",
"sys",
".",
"stderr",
".",
"flush",
"(",
")",
"raise",
"MySQLdb",
".",
"OperationalError",
"(",
"caughte",
")"
] | 42.773585 | 20.132075 |
def detach_from_all(self, bIgnoreExceptions = False):
"""
Detaches from all processes currently being debugged.
        @note: To better handle the last debugging event, call L{stop} instead.
@type bIgnoreExceptions: bool
@param bIgnoreExceptions: C{True} to ignore any exceptions that may be
raised when detaching.
@raise WindowsError: Raises an exception on error, unless
C{bIgnoreExceptions} is C{True}.
"""
for pid in self.get_debugee_pids():
self.detach(pid, bIgnoreExceptions = bIgnoreExceptions)
|
[
"def",
"detach_from_all",
"(",
"self",
",",
"bIgnoreExceptions",
"=",
"False",
")",
":",
"for",
"pid",
"in",
"self",
".",
"get_debugee_pids",
"(",
")",
":",
"self",
".",
"detach",
"(",
"pid",
",",
"bIgnoreExceptions",
"=",
"bIgnoreExceptions",
")"
] | 38.666667 | 19.6 |
def write_string(value, buff, byteorder='big'):
"""Write a string to a file-like object."""
data = value.encode('utf-8')
write_numeric(USHORT, len(data), buff, byteorder)
buff.write(data)
|
[
"def",
"write_string",
"(",
"value",
",",
"buff",
",",
"byteorder",
"=",
"'big'",
")",
":",
"data",
"=",
"value",
".",
"encode",
"(",
"'utf-8'",
")",
"write_numeric",
"(",
"USHORT",
",",
"len",
"(",
"data",
")",
",",
"buff",
",",
"byteorder",
")",
"buff",
".",
"write",
"(",
"data",
")"
] | 40.6 | 9.8 |
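
The `USHORT` and `write_numeric` helpers are not shown in this record; a plausible `struct`-based stand-in makes the length-prefixed wire format visible:

```python
import io
import struct

def write_numeric(fmt, value, buff, byteorder='big'):
    # Hypothetical stand-in: pack one value with the requested endianness.
    prefix = '>' if byteorder == 'big' else '<'
    buff.write(struct.pack(prefix + fmt, value))

USHORT = 'H'  # assumption: an unsigned 16-bit (2-byte) length prefix

def write_string(value, buff, byteorder='big'):
    data = value.encode('utf-8')
    write_numeric(USHORT, len(data), buff, byteorder)
    buff.write(data)

buff = io.BytesIO()
write_string('hello', buff)
print(buff.getvalue())  # b'\x00\x05hello': big-endian length, then UTF-8 bytes
```
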
def to_vec4(self, isPoint):
"""Converts this vector3 into a vector4 instance."""
vec4 = Vector4()
vec4.x = self.x
vec4.y = self.y
vec4.z = self.z
if isPoint:
vec4.w = 1
else:
vec4.w = 0
return vec4
|
[
"def",
"to_vec4",
"(",
"self",
",",
"isPoint",
")",
":",
"vec4",
"=",
"Vector4",
"(",
")",
"vec4",
".",
"x",
"=",
"self",
".",
"x",
"vec4",
".",
"y",
"=",
"self",
".",
"y",
"vec4",
".",
"z",
"=",
"self",
".",
"z",
"if",
"isPoint",
":",
"vec4",
".",
"w",
"=",
"1",
"else",
":",
"vec4",
".",
"w",
"=",
"0",
"return",
"vec4"
] | 22.916667 | 18.75 |
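
A minimal check of the homogeneous-coordinate convention above (`w=1` marks a point, `w=0` a direction, so a translation matrix moves points but leaves directions unchanged); `Vector3`/`Vector4` are stand-in dataclasses, not the record's originals:

```python
from dataclasses import dataclass

@dataclass
class Vector4:
    x: float = 0.0
    y: float = 0.0
    z: float = 0.0
    w: float = 0.0

@dataclass
class Vector3:
    x: float
    y: float
    z: float

    def to_vec4(self, isPoint):
        # w encodes point-ness: 1 for positions, 0 for directions
        return Vector4(self.x, self.y, self.z, 1 if isPoint else 0)

print(Vector3(1, 2, 3).to_vec4(isPoint=True))   # Vector4(x=1, y=2, z=3, w=1)
print(Vector3(1, 2, 3).to_vec4(isPoint=False))  # Vector4(x=1, y=2, z=3, w=0)
```
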
def peakdelta(v, delta, x=None):
"""
Returns two arrays
function [maxtab, mintab]=peakdelta(v, delta, x)
%PEAKDET Detect peaks in a vector
% [MAXTAB, MINTAB] = peakdelta(V, DELTA) finds the local
% maxima and minima ("peaks") in the vector V.
    % MAXTAB and MINTAB consist of two columns. Column 1
% contains indices in V, and column 2 the found values.
%
% With [MAXTAB, MINTAB] = peakdelta(V, DELTA, X) the indices
% in MAXTAB and MINTAB are replaced with the corresponding
% X-values.
%
% A point is considered a maximum peak if it has the maximal
% value, and was preceded (to the left) by a value lower by
% DELTA.
% Eli Billauer, 3.4.05 (Explicitly not copyrighted).
% This function is released to the public domain; Any use is allowed.
"""
maxtab = []
mintab = []
if x is None:
x = arange(len(v))
v = asarray(v)
if len(v) != len(x):
sys.exit('Input vectors v and x must have same length')
if not isscalar(delta):
sys.exit('Input argument delta must be a scalar')
if delta <= 0:
sys.exit('Input argument delta must be positive')
mn, mx = Inf, -Inf
mnpos, mxpos = NaN, NaN
lookformax = True
for i in arange(len(v)):
this = v[i]
if this > mx:
mx = this
mxpos = x[i]
if this < mn:
mn = this
mnpos = x[i]
if lookformax:
if this < mx - delta:
maxtab.append((mxpos, mx))
mn = this
mnpos = x[i]
lookformax = False
else:
if this > mn + delta:
mintab.append((mnpos, mn))
mx = this
mxpos = x[i]
lookformax = True
return array(maxtab), array(mintab)
|
[
"def",
"peakdelta",
"(",
"v",
",",
"delta",
",",
"x",
"=",
"None",
")",
":",
"maxtab",
"=",
"[",
"]",
"mintab",
"=",
"[",
"]",
"if",
"x",
"is",
"None",
":",
"x",
"=",
"arange",
"(",
"len",
"(",
"v",
")",
")",
"v",
"=",
"asarray",
"(",
"v",
")",
"if",
"len",
"(",
"v",
")",
"!=",
"len",
"(",
"x",
")",
":",
"sys",
".",
"exit",
"(",
"'Input vectors v and x must have same length'",
")",
"if",
"not",
"isscalar",
"(",
"delta",
")",
":",
"sys",
".",
"exit",
"(",
"'Input argument delta must be a scalar'",
")",
"if",
"delta",
"<=",
"0",
":",
"sys",
".",
"exit",
"(",
"'Input argument delta must be positive'",
")",
"mn",
",",
"mx",
"=",
"Inf",
",",
"-",
"Inf",
"mnpos",
",",
"mxpos",
"=",
"NaN",
",",
"NaN",
"lookformax",
"=",
"True",
"for",
"i",
"in",
"arange",
"(",
"len",
"(",
"v",
")",
")",
":",
"this",
"=",
"v",
"[",
"i",
"]",
"if",
"this",
">",
"mx",
":",
"mx",
"=",
"this",
"mxpos",
"=",
"x",
"[",
"i",
"]",
"if",
"this",
"<",
"mn",
":",
"mn",
"=",
"this",
"mnpos",
"=",
"x",
"[",
"i",
"]",
"if",
"lookformax",
":",
"if",
"this",
"<",
"mx",
"-",
"delta",
":",
"maxtab",
".",
"append",
"(",
"(",
"mxpos",
",",
"mx",
")",
")",
"mn",
"=",
"this",
"mnpos",
"=",
"x",
"[",
"i",
"]",
"lookformax",
"=",
"False",
"else",
":",
"if",
"this",
">",
"mn",
"+",
"delta",
":",
"mintab",
".",
"append",
"(",
"(",
"mnpos",
",",
"mn",
")",
")",
"mx",
"=",
"this",
"mxpos",
"=",
"x",
"[",
"i",
"]",
"lookformax",
"=",
"True",
"return",
"array",
"(",
"maxtab",
")",
",",
"array",
"(",
"mintab",
")"
] | 28.179104 | 21.313433 |
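
A condensed, self-contained version of the detector (input validation dropped for brevity), exercised on two periods of a sine wave; the record's bare `arange`/`Inf`/`NaN` names suggest a star-import from numpy, made explicit here:

```python
import numpy as np

def peakdelta(v, delta, x=None):
    maxtab, mintab = [], []
    x = np.arange(len(v)) if x is None else np.asarray(x)
    v = np.asarray(v)
    mn, mx = np.inf, -np.inf
    mnpos = mxpos = np.nan
    lookformax = True
    for i, this in enumerate(v):
        if this > mx:
            mx, mxpos = this, x[i]
        if this < mn:
            mn, mnpos = this, x[i]
        # a candidate extremum is only committed once the signal has
        # retreated from it by at least `delta`
        if lookformax and this < mx - delta:
            maxtab.append((mxpos, mx))
            mn, mnpos = this, x[i]
            lookformax = False
        elif not lookformax and this > mn + delta:
            mintab.append((mnpos, mn))
            mx, mxpos = this, x[i]
            lookformax = True
    return np.array(maxtab), np.array(mintab)

t = np.linspace(0, 4 * np.pi, 200)
maxima, minima = peakdelta(np.sin(t), 0.5, t)
print(len(maxima), len(minima))  # 2 2: one peak and one trough per period
```
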
def _to_numpy(Z):
"""Converts a None, list, np.ndarray, or torch.Tensor to np.ndarray;
also handles converting sparse input to dense."""
if Z is None:
return Z
elif issparse(Z):
return Z.toarray()
elif isinstance(Z, np.ndarray):
return Z
elif isinstance(Z, list):
return np.array(Z)
elif isinstance(Z, torch.Tensor):
return Z.cpu().numpy()
else:
msg = (
f"Expected None, list, numpy.ndarray or torch.Tensor, "
f"got {type(Z)} instead."
)
raise Exception(msg)
|
[
"def",
"_to_numpy",
"(",
"Z",
")",
":",
"if",
"Z",
"is",
"None",
":",
"return",
"Z",
"elif",
"issparse",
"(",
"Z",
")",
":",
"return",
"Z",
".",
"toarray",
"(",
")",
"elif",
"isinstance",
"(",
"Z",
",",
"np",
".",
"ndarray",
")",
":",
"return",
"Z",
"elif",
"isinstance",
"(",
"Z",
",",
"list",
")",
":",
"return",
"np",
".",
"array",
"(",
"Z",
")",
"elif",
"isinstance",
"(",
"Z",
",",
"torch",
".",
"Tensor",
")",
":",
"return",
"Z",
".",
"cpu",
"(",
")",
".",
"numpy",
"(",
")",
"else",
":",
"msg",
"=",
"(",
"f\"Expected None, list, numpy.ndarray or torch.Tensor, \"",
"f\"got {type(Z)} instead.\"",
")",
"raise",
"Exception",
"(",
"msg",
")"
] | 33.263158 | 13 |
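
A runnable sketch, assuming `issparse` comes from `scipy.sparse` and that `torch` is installed; the generic `Exception` is also swapped for the more idiomatic `TypeError`:

```python
import numpy as np
import torch
from scipy.sparse import csr_matrix, issparse  # assumed source of issparse

def to_numpy(Z):
    if Z is None:
        return Z
    elif issparse(Z):
        return Z.toarray()       # densify sparse input
    elif isinstance(Z, np.ndarray):
        return Z
    elif isinstance(Z, list):
        return np.array(Z)
    elif isinstance(Z, torch.Tensor):
        return Z.cpu().numpy()   # move off the GPU first, then convert
    raise TypeError(f"Expected None, list, numpy.ndarray or torch.Tensor, got {type(Z)}")

print(to_numpy([1, 2, 3]))                    # [1 2 3]
print(to_numpy(torch.tensor([1.0, 2.0])))     # [1. 2.]
print(to_numpy(csr_matrix(np.eye(2))).shape)  # (2, 2)
```
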
def xarrayfunc(func):
"""Make a function compatible with xarray.DataArray.
This function is intended to be used as a decorator like::
>>> @dc.xarrayfunc
>>> def func(array):
... # do something
... return newarray
>>>
>>> result = func(array)
Args:
func (function): Function to be wrapped. The first argument
of the function must be an array to be processed.
Returns:
wrapper (function): Wrapped function.
"""
@wraps(func)
def wrapper(*args, **kwargs):
if any(isinstance(arg, xr.DataArray) for arg in args):
newargs = []
for arg in args:
if isinstance(arg, xr.DataArray):
newargs.append(arg.values)
else:
newargs.append(arg)
return dc.full_like(args[0], func(*newargs, **kwargs))
else:
return func(*args, **kwargs)
return wrapper
|
[
"def",
"xarrayfunc",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"any",
"(",
"isinstance",
"(",
"arg",
",",
"xr",
".",
"DataArray",
")",
"for",
"arg",
"in",
"args",
")",
":",
"newargs",
"=",
"[",
"]",
"for",
"arg",
"in",
"args",
":",
"if",
"isinstance",
"(",
"arg",
",",
"xr",
".",
"DataArray",
")",
":",
"newargs",
".",
"append",
"(",
"arg",
".",
"values",
")",
"else",
":",
"newargs",
".",
"append",
"(",
"arg",
")",
"return",
"dc",
".",
"full_like",
"(",
"args",
"[",
"0",
"]",
",",
"func",
"(",
"*",
"newargs",
",",
"*",
"*",
"kwargs",
")",
")",
"else",
":",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapper"
] | 28.029412 | 19.588235 |
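
The unwrap-then-rewrap pattern in this decorator can be demonstrated without the unknown `dc` module; here the rewrap step builds a plain `xr.DataArray` from the first argument's metadata instead of calling `dc.full_like`:

```python
from functools import wraps
import numpy as np
import xarray as xr

def xarrayfunc(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        if any(isinstance(arg, xr.DataArray) for arg in args):
            # unwrap DataArrays to bare ndarrays before calling func
            newargs = [a.values if isinstance(a, xr.DataArray) else a for a in args]
            result = func(*newargs, **kwargs)
            # rewrap on the first argument's coords/dims (dc.full_like stand-in)
            return xr.DataArray(result, coords=args[0].coords, dims=args[0].dims)
        return func(*args, **kwargs)
    return wrapper

@xarrayfunc
def double(array):
    return array * 2

da = xr.DataArray(np.arange(4.0), dims='t')
print(double(da).values)       # [0. 2. 4. 6.], with dims/coords preserved
print(double(np.arange(4.0)))  # plain ndarray [0. 2. 4. 6.]
```
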
def stratify_s(self):
"""
Stratifies the sample based on propensity score using the
bin selection procedure suggested by [1]_.
The bin selection algorithm is based on a sequence of
two-sample t tests performed on the log-odds ratio.
This method should only be executed after the propensity score
has been estimated.
References
----------
.. [1] Imbens, G. & Rubin, D. (2015). Causal Inference in
Statistics, Social, and Biomedical Sciences: An
Introduction.
"""
pscore_order = self.raw_data['pscore'].argsort()
pscore = self.raw_data['pscore'][pscore_order]
D = self.raw_data['D'][pscore_order]
logodds = np.log(pscore / (1-pscore))
K = self.raw_data['K']
blocks_uniq = set(select_blocks(pscore, logodds, D, K, 0, 1))
self.blocks = sorted(blocks_uniq)
self.stratify()
|
[
"def",
"stratify_s",
"(",
"self",
")",
":",
"pscore_order",
"=",
"self",
".",
"raw_data",
"[",
"'pscore'",
"]",
".",
"argsort",
"(",
")",
"pscore",
"=",
"self",
".",
"raw_data",
"[",
"'pscore'",
"]",
"[",
"pscore_order",
"]",
"D",
"=",
"self",
".",
"raw_data",
"[",
"'D'",
"]",
"[",
"pscore_order",
"]",
"logodds",
"=",
"np",
".",
"log",
"(",
"pscore",
"/",
"(",
"1",
"-",
"pscore",
")",
")",
"K",
"=",
"self",
".",
"raw_data",
"[",
"'K'",
"]",
"blocks_uniq",
"=",
"set",
"(",
"select_blocks",
"(",
"pscore",
",",
"logodds",
",",
"D",
",",
"K",
",",
"0",
",",
"1",
")",
")",
"self",
".",
"blocks",
"=",
"sorted",
"(",
"blocks_uniq",
")",
"self",
".",
"stratify",
"(",
")"
] | 28.214286 | 19.642857 |
def __send_command(self, command, args=[]):
'''Send a raw command.'''
self.ws.send(json.dumps({"op": command, "args": args}))
|
[
"def",
"__send_command",
"(",
"self",
",",
"command",
",",
"args",
"=",
"[",
"]",
")",
":",
"self",
".",
"ws",
".",
"send",
"(",
"json",
".",
"dumps",
"(",
"{",
"\"op\"",
":",
"command",
",",
"\"args\"",
":",
"args",
"}",
")",
")"
] | 46.333333 | 11 |
def stop_trace(frame=None, close_on_exit=False):
"""Stop tracing"""
log.info('Stopping trace')
    wdb = Wdb.get(True)  # Do not create an instance if there's None
if wdb and (not wdb.stepping or close_on_exit):
log.info('Stopping trace')
wdb.stop_trace(frame or sys._getframe().f_back)
if close_on_exit:
wdb.die()
return wdb
|
[
"def",
"stop_trace",
"(",
"frame",
"=",
"None",
",",
"close_on_exit",
"=",
"False",
")",
":",
"log",
".",
"info",
"(",
"'Stopping trace'",
")",
"wdb",
"=",
"Wdb",
".",
"get",
"(",
"True",
")",
"# Do not create an istance if there's None",
"if",
"wdb",
"and",
"(",
"not",
"wdb",
".",
"stepping",
"or",
"close_on_exit",
")",
":",
"log",
".",
"info",
"(",
"'Stopping trace'",
")",
"wdb",
".",
"stop_trace",
"(",
"frame",
"or",
"sys",
".",
"_getframe",
"(",
")",
".",
"f_back",
")",
"if",
"close_on_exit",
":",
"wdb",
".",
"die",
"(",
")",
"return",
"wdb"
] | 36.7 | 13.7 |
def hdd_disk_interface(self, hdd_disk_interface):
"""
Sets the hdd disk interface for this QEMU VM.
:param hdd_disk_interface: QEMU hdd disk interface
"""
self._hdd_disk_interface = hdd_disk_interface
log.info('QEMU VM "{name}" [{id}] has set the QEMU hdd disk interface to {interface}'.format(name=self._name,
id=self._id,
interface=self._hdd_disk_interface))
|
[
"def",
"hdd_disk_interface",
"(",
"self",
",",
"hdd_disk_interface",
")",
":",
"self",
".",
"_hdd_disk_interface",
"=",
"hdd_disk_interface",
"log",
".",
"info",
"(",
"'QEMU VM \"{name}\" [{id}] has set the QEMU hdd disk interface to {interface}'",
".",
"format",
"(",
"name",
"=",
"self",
".",
"_name",
",",
"id",
"=",
"self",
".",
"_id",
",",
"interface",
"=",
"self",
".",
"_hdd_disk_interface",
")",
")"
] | 54.727273 | 34.545455 |
def set_euk_hmm(self, args):
'Set the hmm used by graftM to cross check for euks.'
if hasattr(args, 'euk_hmm_file'):
pass
elif not hasattr(args, 'euk_hmm_file'):
# set to path based on the location of bin/graftM, which has
# a more stable relative path to the HMM when installed through
# pip.
setattr(args, 'euk_hmm_file', os.path.join(os.path.dirname(inspect.stack()[-1][1]),'..','share', '18S.hmm'))
else:
raise Exception('Programming Error: setting the euk HMM')
|
[
"def",
"set_euk_hmm",
"(",
"self",
",",
"args",
")",
":",
"if",
"hasattr",
"(",
"args",
",",
"'euk_hmm_file'",
")",
":",
"pass",
"elif",
"not",
"hasattr",
"(",
"args",
",",
"'euk_hmm_file'",
")",
":",
"# set to path based on the location of bin/graftM, which has",
"# a more stable relative path to the HMM when installed through",
"# pip.",
"setattr",
"(",
"args",
",",
"'euk_hmm_file'",
",",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"inspect",
".",
"stack",
"(",
")",
"[",
"-",
"1",
"]",
"[",
"1",
"]",
")",
",",
"'..'",
",",
"'share'",
",",
"'18S.hmm'",
")",
")",
"else",
":",
"raise",
"Exception",
"(",
"'Programming Error: setting the euk HMM'",
")"
] | 50.909091 | 26.363636 |
def reset(self, source):
""" Reset scanner's state.
:param source: Source for parsing
"""
self.tokens = []
self.source = source
self.pos = 0
|
[
"def",
"reset",
"(",
"self",
",",
"source",
")",
":",
"self",
".",
"tokens",
"=",
"[",
"]",
"self",
".",
"source",
"=",
"source",
"self",
".",
"pos",
"=",
"0"
] | 20.222222 | 16.111111 |
def tradingStatusSSE(symbols=None, on_data=None, token='', version=''):
'''The Trading status message is used to indicate the current trading status of a security.
For IEX-listed securities, IEX acts as the primary market and has the authority to institute a trading halt or trading pause in a security due to news dissemination or regulatory reasons.
For non-IEX-listed securities, IEX abides by any regulatory trading halts and trading pauses instituted by the primary or listing market, as applicable.
IEX disseminates a full pre-market spin of Trading status messages indicating the trading status of all securities.
In the spin, IEX will send out a Trading status message with “T” (Trading) for all securities that are eligible for trading at the start of the Pre-Market Session.
If a security is absent from the dissemination, firms should assume that the security is being treated as operationally halted in the IEX Trading System.
After the pre-market spin, IEX will use the Trading status message to relay changes in trading status for an individual security. Messages will be sent when a security is:
Halted
Paused*
Released into an Order Acceptance Period*
Released for trading
*The paused and released into an Order Acceptance Period status will be disseminated for IEX-listed securities only. Trading pauses on non-IEX-listed securities will be treated simply as a halt.
https://iexcloud.io/docs/api/#deep-trading-status
Args:
symbols (string); Tickers to request
on_data (function): Callback on data
token (string); Access token
version (string); API version
'''
return _runSSE('trading-status', symbols, on_data, token, version)
|
[
"def",
"tradingStatusSSE",
"(",
"symbols",
"=",
"None",
",",
"on_data",
"=",
"None",
",",
"token",
"=",
"''",
",",
"version",
"=",
"''",
")",
":",
"return",
"_runSSE",
"(",
"'trading-status'",
",",
"symbols",
",",
"on_data",
",",
"token",
",",
"version",
")"
] | 63.222222 | 51.814815 |
def node_rank(self):
"""
Returns the maximum rank for each **topological node** in the
``DictGraph``. The rank of a node is defined as the number of edges
between the node and a node which has rank 0. A **topological node**
has rank 0 if it has no incoming edges.
"""
nodes = self.postorder()
node_rank = {}
for node in nodes:
max_rank = 0
for child in self[node].nodes():
some_rank = node_rank[child] + 1
max_rank = max(max_rank, some_rank)
node_rank[node] = max_rank
return node_rank
|
[
"def",
"node_rank",
"(",
"self",
")",
":",
"nodes",
"=",
"self",
".",
"postorder",
"(",
")",
"node_rank",
"=",
"{",
"}",
"for",
"node",
"in",
"nodes",
":",
"max_rank",
"=",
"0",
"for",
"child",
"in",
"self",
"[",
"node",
"]",
".",
"nodes",
"(",
")",
":",
"some_rank",
"=",
"node_rank",
"[",
"child",
"]",
"+",
"1",
"max_rank",
"=",
"max",
"(",
"max_rank",
",",
"some_rank",
")",
"node_rank",
"[",
"node",
"]",
"=",
"max_rank",
"return",
"node_rank"
] | 37 | 15.235294 |
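
The rank computation works because `postorder()` guarantees children are visited before their parents; a standalone illustration on a hypothetical adjacency dict (not the `DictGraph` class itself):

```python
graph = {'a': ['b', 'c'], 'b': ['d'], 'c': [], 'd': []}

def postorder(graph):
    seen, order = set(), []
    def visit(node):
        if node in seen:
            return
        seen.add(node)
        for child in graph[node]:
            visit(child)
        order.append(node)  # appended only after all children
    for node in graph:
        visit(node)
    return order

node_rank = {}
for node in postorder(graph):
    # each node sits one edge above its highest-ranked child
    node_rank[node] = max((node_rank[c] + 1 for c in graph[node]), default=0)

print(node_rank)  # {'d': 0, 'b': 1, 'c': 0, 'a': 2}
```
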
def load_watch():
'''
Loads some of the 6-axis inertial sensor data from my smartwatch project. The sensor data was
recorded as study subjects performed sets of 20 shoulder exercise repetitions while wearing a
smartwatch. It is a multivariate time series.
The study can be found here: https://arxiv.org/abs/1802.01489
Returns
-------
data : dict
data['X'] : list, length 140
| inertial sensor data, each element with shape [n_samples, 6]
| sampled at 50 Hz
data['y'] : array, length 140
target vector (exercise type)
data['side'] : array, length 140
the extremity side, 1 = right, 0 = left
data['subject'] : array, length 140
the subject (participant) number
data['X_labels'] : str list, length 6
ordered labels for the sensor data variables
        data['y_labels'] : str list, length 7
ordered labels for the target (exercise type)
Examples
--------
>>> from seglearn.datasets import load_watch
>>> data = load_watch()
>>> print(data.keys())
'''
module_path = dirname(__file__)
data = np.load(module_path + "/data/watch_dataset.npy").item()
return data
|
[
"def",
"load_watch",
"(",
")",
":",
"module_path",
"=",
"dirname",
"(",
"__file__",
")",
"data",
"=",
"np",
".",
"load",
"(",
"module_path",
"+",
"\"/data/watch_dataset.npy\"",
")",
".",
"item",
"(",
")",
"return",
"data"
] | 35.735294 | 20.558824 |
def GetItemContainerLink(link):
"""Gets the document collection link
:param str link:
Resource link
:return:
Document collection link.
:rtype: str
"""
link = TrimBeginningAndEndingSlashes(link) + '/'
index = IndexOfNth(link, '/', 4)
if index != -1:
return link[0:index]
else:
raise ValueError('Unable to parse document collection link from ' + link)
|
[
"def",
"GetItemContainerLink",
"(",
"link",
")",
":",
"link",
"=",
"TrimBeginningAndEndingSlashes",
"(",
"link",
")",
"+",
"'/'",
"index",
"=",
"IndexOfNth",
"(",
"link",
",",
"'/'",
",",
"4",
")",
"if",
"index",
"!=",
"-",
"1",
":",
"return",
"link",
"[",
"0",
":",
"index",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"'Unable to parse document collection link from '",
"+",
"link",
")"
] | 21.473684 | 22.368421 |
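
A self-contained sketch with `TrimBeginningAndEndingSlashes` and `IndexOfNth` reimplemented under their apparent contracts (strip outer slashes; find the n-th occurrence), showing that the function keeps the first four path segments of a resource link:

```python
def trim_slashes(link):
    # assumed contract of TrimBeginningAndEndingSlashes
    return link.strip('/')

def index_of_nth(s, value, n):
    # assumed contract of IndexOfNth: index of the n-th occurrence, or -1
    index = -1
    for _ in range(n):
        index = s.find(value, index + 1)
        if index == -1:
            return -1
    return index

def get_item_container_link(link):
    link = trim_slashes(link) + '/'
    index = index_of_nth(link, '/', 4)
    if index != -1:
        return link[0:index]
    raise ValueError('Unable to parse document collection link from ' + link)

print(get_item_container_link('/dbs/mydb/colls/mycoll/docs/doc1/'))
# dbs/mydb/colls/mycoll
```
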
def get_request_token(self, request, callback):
"""Fetch the OAuth request token. Only required for OAuth 1.0."""
callback = force_text(request.build_absolute_uri(callback))
try:
response = self.request('post', self.request_token_url, oauth_callback=callback)
response.raise_for_status()
except RequestException as e:
logger.error('Unable to fetch request token: {0}'.format(e))
return None
else:
return response.text
|
[
"def",
"get_request_token",
"(",
"self",
",",
"request",
",",
"callback",
")",
":",
"callback",
"=",
"force_text",
"(",
"request",
".",
"build_absolute_uri",
"(",
"callback",
")",
")",
"try",
":",
"response",
"=",
"self",
".",
"request",
"(",
"'post'",
",",
"self",
".",
"request_token_url",
",",
"oauth_callback",
"=",
"callback",
")",
"response",
".",
"raise_for_status",
"(",
")",
"except",
"RequestException",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"'Unable to fetch request token: {0}'",
".",
"format",
"(",
"e",
")",
")",
"return",
"None",
"else",
":",
"return",
"response",
".",
"text"
] | 46.090909 | 18.363636 |
def _create_get_request(self, resource, billomat_id='', command=None, params=None):
"""
Creates a get request and return the response data
"""
if not params:
params = {}
if not command:
command = ''
else:
command = '/' + command
assert (isinstance(resource, str))
if billomat_id:
assert (isinstance(billomat_id, int) or isinstance(billomat_id, str))
if isinstance(billomat_id, int):
billomat_id = str(billomat_id)
response = self.session.get(
url=self.api_url + resource + ('/' + billomat_id if billomat_id else '') + command,
params=params,
)
return self._handle_response(response)
|
[
"def",
"_create_get_request",
"(",
"self",
",",
"resource",
",",
"billomat_id",
"=",
"''",
",",
"command",
"=",
"None",
",",
"params",
"=",
"None",
")",
":",
"if",
"not",
"params",
":",
"params",
"=",
"{",
"}",
"if",
"not",
"command",
":",
"command",
"=",
"''",
"else",
":",
"command",
"=",
"'/'",
"+",
"command",
"assert",
"(",
"isinstance",
"(",
"resource",
",",
"str",
")",
")",
"if",
"billomat_id",
":",
"assert",
"(",
"isinstance",
"(",
"billomat_id",
",",
"int",
")",
"or",
"isinstance",
"(",
"billomat_id",
",",
"str",
")",
")",
"if",
"isinstance",
"(",
"billomat_id",
",",
"int",
")",
":",
"billomat_id",
"=",
"str",
"(",
"billomat_id",
")",
"response",
"=",
"self",
".",
"session",
".",
"get",
"(",
"url",
"=",
"self",
".",
"api_url",
"+",
"resource",
"+",
"(",
"'/'",
"+",
"billomat_id",
"if",
"billomat_id",
"else",
"''",
")",
"+",
"command",
",",
"params",
"=",
"params",
",",
")",
"return",
"self",
".",
"_handle_response",
"(",
"response",
")"
] | 31.291667 | 20.875 |
def query(self, query):
"""
Query bugzilla and return a list of matching bugs.
query must be a dict with fields like those in in querydata['fields'].
Returns a list of Bug objects.
Also see the _query() method for details about the underlying
implementation.
"""
try:
r = self._proxy.Bug.search(query)
except Fault as e:
# Try to give a hint in the error message if url_to_query
# isn't supported by this bugzilla instance
if ("query_format" not in str(e) or
"RHBugzilla" in str(e.__class__) or
self._check_version(5, 0)):
raise
raise BugzillaError("%s\nYour bugzilla instance does not "
"appear to support API queries derived from bugzilla "
"web URL queries." % e)
log.debug("Query returned %s bugs", len(r['bugs']))
return [Bug(self, dict=b,
autorefresh=self.bug_autorefresh) for b in r['bugs']]
|
[
"def",
"query",
"(",
"self",
",",
"query",
")",
":",
"try",
":",
"r",
"=",
"self",
".",
"_proxy",
".",
"Bug",
".",
"search",
"(",
"query",
")",
"except",
"Fault",
"as",
"e",
":",
"# Try to give a hint in the error message if url_to_query",
"# isn't supported by this bugzilla instance",
"if",
"(",
"\"query_format\"",
"not",
"in",
"str",
"(",
"e",
")",
"or",
"\"RHBugzilla\"",
"in",
"str",
"(",
"e",
".",
"__class__",
")",
"or",
"self",
".",
"_check_version",
"(",
"5",
",",
"0",
")",
")",
":",
"raise",
"raise",
"BugzillaError",
"(",
"\"%s\\nYour bugzilla instance does not \"",
"\"appear to support API queries derived from bugzilla \"",
"\"web URL queries.\"",
"%",
"e",
")",
"log",
".",
"debug",
"(",
"\"Query returned %s bugs\"",
",",
"len",
"(",
"r",
"[",
"'bugs'",
"]",
")",
")",
"return",
"[",
"Bug",
"(",
"self",
",",
"dict",
"=",
"b",
",",
"autorefresh",
"=",
"self",
".",
"bug_autorefresh",
")",
"for",
"b",
"in",
"r",
"[",
"'bugs'",
"]",
"]"
] | 40.8 | 17.92 |
def pickAChannel(self, ra_deg, dec_deg):
"""Returns the channel number closest to a given (ra, dec) coordinate.
"""
# Could improve speed by doing this in the projection plane
# instead of sky coords
cRa = self.currentRaDec[:, 3] # Ra of each channel corner
cDec = self.currentRaDec[:, 4] # dec of each channel corner
dist = cRa * 0
for i in range(len(dist)):
dist[i] = gcircle.sphericalAngSep(cRa[i], cDec[i], ra_deg, dec_deg)
i = np.argmin(dist)
return self.currentRaDec[i, 2]
|
[
"def",
"pickAChannel",
"(",
"self",
",",
"ra_deg",
",",
"dec_deg",
")",
":",
"# Could improve speed by doing this in the projection plane",
"# instead of sky coords",
"cRa",
"=",
"self",
".",
"currentRaDec",
"[",
":",
",",
"3",
"]",
"# Ra of each channel corner",
"cDec",
"=",
"self",
".",
"currentRaDec",
"[",
":",
",",
"4",
"]",
"# dec of each channel corner",
"dist",
"=",
"cRa",
"*",
"0",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"dist",
")",
")",
":",
"dist",
"[",
"i",
"]",
"=",
"gcircle",
".",
"sphericalAngSep",
"(",
"cRa",
"[",
"i",
"]",
",",
"cDec",
"[",
"i",
"]",
",",
"ra_deg",
",",
"dec_deg",
")",
"i",
"=",
"np",
".",
"argmin",
"(",
"dist",
")",
"return",
"self",
".",
"currentRaDec",
"[",
"i",
",",
"2",
"]"
] | 40.071429 | 17.714286 |
def _round_frac(x, precision):
"""
Round the fractional part of the given number
"""
if not np.isfinite(x) or x == 0:
return x
else:
frac, whole = np.modf(x)
if whole == 0:
digits = -int(np.floor(np.log10(abs(frac)))) - 1 + precision
else:
digits = precision
return np.around(x, digits)
|
[
"def",
"_round_frac",
"(",
"x",
",",
"precision",
")",
":",
"if",
"not",
"np",
".",
"isfinite",
"(",
"x",
")",
"or",
"x",
"==",
"0",
":",
"return",
"x",
"else",
":",
"frac",
",",
"whole",
"=",
"np",
".",
"modf",
"(",
"x",
")",
"if",
"whole",
"==",
"0",
":",
"digits",
"=",
"-",
"int",
"(",
"np",
".",
"floor",
"(",
"np",
".",
"log10",
"(",
"abs",
"(",
"frac",
")",
")",
")",
")",
"-",
"1",
"+",
"precision",
"else",
":",
"digits",
"=",
"precision",
"return",
"np",
".",
"around",
"(",
"x",
",",
"digits",
")"
] | 27.538462 | 13.692308 |
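
A worked check of the precision rule: for `|x| < 1` the digit count is extended so that `precision` significant figures of the fraction survive, while numbers with a nonzero whole part get plain decimal rounding:

```python
import numpy as np

def _round_frac(x, precision):
    if not np.isfinite(x) or x == 0:
        return x
    frac, whole = np.modf(x)
    if whole == 0:
        # e.g. x = 0.000123456: floor(log10) = -4, so digits = 4 - 1 + precision
        digits = -int(np.floor(np.log10(abs(frac)))) - 1 + precision
    else:
        digits = precision
    return np.around(x, digits)

print(_round_frac(0.000123456, 3))  # 0.000123 -> 3 significant digits kept
print(_round_frac(12.3456, 3))      # 12.346   -> plain 3-decimal rounding
```
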
def set_image(self, image):
"""Set display buffer to Python Image Library image. Red pixels (r=255,
g=0, b=0) will map to red LEDs, green pixels (r=0, g=255, b=0) will map to
green LEDs, and yellow pixels (r=255, g=255, b=0) will map to yellow LEDs.
All other pixel values will map to an unlit LED value.
"""
imwidth, imheight = image.size
if imwidth != 8 or imheight != 8:
            raise ValueError('Image must be 8x8 pixels in size.')
# Convert image to RGB and grab all the pixels.
pix = image.convert('RGB').load()
# Loop through each pixel and write the display buffer pixel.
for x in [0, 1, 2, 3, 4, 5, 6, 7]:
for y in [0, 1, 2, 3, 4, 5, 6, 7]:
color = pix[(x, y)]
# Handle the color of the pixel.
if color == (255, 0, 0):
self.set_pixel(x, y, RED)
elif color == (0, 255, 0):
self.set_pixel(x, y, GREEN)
elif color == (255, 255, 0):
self.set_pixel(x, y, YELLOW)
else:
# Unknown color, default to LED off.
self.set_pixel(x, y, OFF)
|
[
"def",
"set_image",
"(",
"self",
",",
"image",
")",
":",
"imwidth",
",",
"imheight",
"=",
"image",
".",
"size",
"if",
"imwidth",
"!=",
"8",
"or",
"imheight",
"!=",
"8",
":",
"raise",
"ValueError",
"(",
"'Image must be an 8x8 pixels in size.'",
")",
"# Convert image to RGB and grab all the pixels.",
"pix",
"=",
"image",
".",
"convert",
"(",
"'RGB'",
")",
".",
"load",
"(",
")",
"# Loop through each pixel and write the display buffer pixel.",
"for",
"x",
"in",
"[",
"0",
",",
"1",
",",
"2",
",",
"3",
",",
"4",
",",
"5",
",",
"6",
",",
"7",
"]",
":",
"for",
"y",
"in",
"[",
"0",
",",
"1",
",",
"2",
",",
"3",
",",
"4",
",",
"5",
",",
"6",
",",
"7",
"]",
":",
"color",
"=",
"pix",
"[",
"(",
"x",
",",
"y",
")",
"]",
"# Handle the color of the pixel.",
"if",
"color",
"==",
"(",
"255",
",",
"0",
",",
"0",
")",
":",
"self",
".",
"set_pixel",
"(",
"x",
",",
"y",
",",
"RED",
")",
"elif",
"color",
"==",
"(",
"0",
",",
"255",
",",
"0",
")",
":",
"self",
".",
"set_pixel",
"(",
"x",
",",
"y",
",",
"GREEN",
")",
"elif",
"color",
"==",
"(",
"255",
",",
"255",
",",
"0",
")",
":",
"self",
".",
"set_pixel",
"(",
"x",
",",
"y",
",",
"YELLOW",
")",
"else",
":",
"# Unknown color, default to LED off.",
"self",
".",
"set_pixel",
"(",
"x",
",",
"y",
",",
"OFF",
")"
] | 48.6 | 11.28 |
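
The color dispatch reduces to a lookup table; a standalone sketch with `RED`/`GREEN`/`YELLOW`/`OFF` as placeholder values and the display buffer modeled as a dict (Pillow assumed installed, `set_pixel` not reproduced):

```python
from PIL import Image

RED, GREEN, YELLOW, OFF = 1, 2, 3, 0  # hypothetical LED values
COLOR_MAP = {(255, 0, 0): RED, (0, 255, 0): GREEN, (255, 255, 0): YELLOW}

image = Image.new('RGB', (8, 8))     # all pixels start black -> OFF
image.putpixel((0, 0), (255, 0, 0))  # one red pixel
image.putpixel((1, 0), (0, 255, 0))  # one green pixel

pix = image.convert('RGB').load()
buffer = {(x, y): COLOR_MAP.get(pix[(x, y)], OFF)  # unknown colors -> OFF
          for x in range(8) for y in range(8)}
print(buffer[(0, 0)], buffer[(1, 0)], buffer[(2, 0)])  # 1 2 0
```
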
def _compute_site_term(self, C, vs30):
"""
Compute site term as a function of vs30: 4th, 5th and 6th terms in
equation 2 page 462.
"""
# for rock values the site term is zero
site_term = np.zeros_like(vs30)
# hard soil
site_term[(vs30 >= 360) & (vs30 < 800)] = C['aB']
# medium soil
site_term[(vs30 >= 180) & (vs30 < 360)] = C['aC']
# soft soil
site_term[vs30 < 180] = C['aD']
return site_term
|
[
"def",
"_compute_site_term",
"(",
"self",
",",
"C",
",",
"vs30",
")",
":",
"# for rock values the site term is zero",
"site_term",
"=",
"np",
".",
"zeros_like",
"(",
"vs30",
")",
"# hard soil",
"site_term",
"[",
"(",
"vs30",
">=",
"360",
")",
"&",
"(",
"vs30",
"<",
"800",
")",
"]",
"=",
"C",
"[",
"'aB'",
"]",
"# medium soil",
"site_term",
"[",
"(",
"vs30",
">=",
"180",
")",
"&",
"(",
"vs30",
"<",
"360",
")",
"]",
"=",
"C",
"[",
"'aC'",
"]",
"# soft soil",
"site_term",
"[",
"vs30",
"<",
"180",
"]",
"=",
"C",
"[",
"'aD'",
"]",
"return",
"site_term"
] | 26.888889 | 18.222222 |
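
A tiny numeric check of the vs30 bucketing; the coefficient row `C` below is a made-up placeholder, not values from the GMPE's coefficient table:

```python
import numpy as np

def _compute_site_term(C, vs30):
    site_term = np.zeros_like(vs30)                    # rock: zero
    site_term[(vs30 >= 360) & (vs30 < 800)] = C['aB']  # hard soil
    site_term[(vs30 >= 180) & (vs30 < 360)] = C['aC']  # medium soil
    site_term[vs30 < 180] = C['aD']                    # soft soil
    return site_term

C = {'aB': 0.1, 'aC': 0.2, 'aD': 0.3}  # hypothetical coefficients
vs30 = np.array([900.0, 500.0, 200.0, 100.0])
print(_compute_site_term(C, vs30))  # [0.  0.1 0.2 0.3]
```
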
def num_connected_components(self, unitary_only=False):
"""How many non-entangled subcircuits can the circuit be factored to.
Args:
unitary_only (bool): Compute only unitary part of graph.
Returns:
int: Number of connected components in circuit.
"""
# Convert registers to ints (as done in depth).
reg_offset = 0
reg_map = {}
if unitary_only:
regs = self.qregs
else:
regs = self.qregs+self.cregs
for reg in regs:
reg_map[reg.name] = reg_offset
reg_offset += reg.size
# Start with each qubit or cbit being its own subgraph.
sub_graphs = [[bit] for bit in range(reg_offset)]
num_sub_graphs = len(sub_graphs)
# Here we are traversing the gates and looking to see
# which of the sub_graphs the gate joins together.
for instr, qargs, cargs in self.data:
if unitary_only:
args = qargs
num_qargs = len(args)
else:
args = qargs+cargs
num_qargs = len(args) + (1 if instr.control else 0)
if num_qargs >= 2 and instr.name not in ['barrier', 'snapshot']:
graphs_touched = []
num_touched = 0
# Controls necessarily join all the cbits in the
# register that they use.
if instr.control and not unitary_only:
creg = instr.control[0]
creg_int = reg_map[creg.name]
for coff in range(creg.size):
temp_int = creg_int+coff
for k in range(num_sub_graphs):
if temp_int in sub_graphs[k]:
graphs_touched.append(k)
num_touched += 1
break
for item in args:
reg_int = reg_map[item[0].name]+item[1]
for k in range(num_sub_graphs):
if reg_int in sub_graphs[k]:
if k not in graphs_touched:
graphs_touched.append(k)
num_touched += 1
break
# If the gate touches more than one subgraph
# join those graphs together and return
# reduced number of subgraphs
if num_touched > 1:
connections = []
for idx in graphs_touched:
connections.extend(sub_graphs[idx])
_sub_graphs = []
for idx in range(num_sub_graphs):
if idx not in graphs_touched:
_sub_graphs.append(sub_graphs[idx])
_sub_graphs.append(connections)
sub_graphs = _sub_graphs
num_sub_graphs -= (num_touched-1)
# Cannot go lower than one so break
if num_sub_graphs == 1:
break
return num_sub_graphs
|
[
"def",
"num_connected_components",
"(",
"self",
",",
"unitary_only",
"=",
"False",
")",
":",
"# Convert registers to ints (as done in depth).",
"reg_offset",
"=",
"0",
"reg_map",
"=",
"{",
"}",
"if",
"unitary_only",
":",
"regs",
"=",
"self",
".",
"qregs",
"else",
":",
"regs",
"=",
"self",
".",
"qregs",
"+",
"self",
".",
"cregs",
"for",
"reg",
"in",
"regs",
":",
"reg_map",
"[",
"reg",
".",
"name",
"]",
"=",
"reg_offset",
"reg_offset",
"+=",
"reg",
".",
"size",
"# Start with each qubit or cbit being its own subgraph.",
"sub_graphs",
"=",
"[",
"[",
"bit",
"]",
"for",
"bit",
"in",
"range",
"(",
"reg_offset",
")",
"]",
"num_sub_graphs",
"=",
"len",
"(",
"sub_graphs",
")",
"# Here we are traversing the gates and looking to see",
"# which of the sub_graphs the gate joins together.",
"for",
"instr",
",",
"qargs",
",",
"cargs",
"in",
"self",
".",
"data",
":",
"if",
"unitary_only",
":",
"args",
"=",
"qargs",
"num_qargs",
"=",
"len",
"(",
"args",
")",
"else",
":",
"args",
"=",
"qargs",
"+",
"cargs",
"num_qargs",
"=",
"len",
"(",
"args",
")",
"+",
"(",
"1",
"if",
"instr",
".",
"control",
"else",
"0",
")",
"if",
"num_qargs",
">=",
"2",
"and",
"instr",
".",
"name",
"not",
"in",
"[",
"'barrier'",
",",
"'snapshot'",
"]",
":",
"graphs_touched",
"=",
"[",
"]",
"num_touched",
"=",
"0",
"# Controls necessarily join all the cbits in the",
"# register that they use.",
"if",
"instr",
".",
"control",
"and",
"not",
"unitary_only",
":",
"creg",
"=",
"instr",
".",
"control",
"[",
"0",
"]",
"creg_int",
"=",
"reg_map",
"[",
"creg",
".",
"name",
"]",
"for",
"coff",
"in",
"range",
"(",
"creg",
".",
"size",
")",
":",
"temp_int",
"=",
"creg_int",
"+",
"coff",
"for",
"k",
"in",
"range",
"(",
"num_sub_graphs",
")",
":",
"if",
"temp_int",
"in",
"sub_graphs",
"[",
"k",
"]",
":",
"graphs_touched",
".",
"append",
"(",
"k",
")",
"num_touched",
"+=",
"1",
"break",
"for",
"item",
"in",
"args",
":",
"reg_int",
"=",
"reg_map",
"[",
"item",
"[",
"0",
"]",
".",
"name",
"]",
"+",
"item",
"[",
"1",
"]",
"for",
"k",
"in",
"range",
"(",
"num_sub_graphs",
")",
":",
"if",
"reg_int",
"in",
"sub_graphs",
"[",
"k",
"]",
":",
"if",
"k",
"not",
"in",
"graphs_touched",
":",
"graphs_touched",
".",
"append",
"(",
"k",
")",
"num_touched",
"+=",
"1",
"break",
"# If the gate touches more than one subgraph",
"# join those graphs together and return",
"# reduced number of subgraphs",
"if",
"num_touched",
">",
"1",
":",
"connections",
"=",
"[",
"]",
"for",
"idx",
"in",
"graphs_touched",
":",
"connections",
".",
"extend",
"(",
"sub_graphs",
"[",
"idx",
"]",
")",
"_sub_graphs",
"=",
"[",
"]",
"for",
"idx",
"in",
"range",
"(",
"num_sub_graphs",
")",
":",
"if",
"idx",
"not",
"in",
"graphs_touched",
":",
"_sub_graphs",
".",
"append",
"(",
"sub_graphs",
"[",
"idx",
"]",
")",
"_sub_graphs",
".",
"append",
"(",
"connections",
")",
"sub_graphs",
"=",
"_sub_graphs",
"num_sub_graphs",
"-=",
"(",
"num_touched",
"-",
"1",
")",
"# Cannot go lower than one so break",
"if",
"num_sub_graphs",
"==",
"1",
":",
"break",
"return",
"num_sub_graphs"
] | 39.139241 | 15.64557 |
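
The merging loop above is a hand-rolled connected-components pass; the same bookkeeping can be expressed with a small union-find, sketched here over abstract wire indices rather than the `QuantumCircuit` internals:

```python
def count_components(num_wires, gates):
    # union-find with path halving; each gate merges the wires it touches
    parent = list(range(num_wires))

    def find(i):
        while parent[i] != i:
            parent[i] = parent[parent[i]]
            i = parent[i]
        return i

    def union(i, j):
        ri, rj = find(i), find(j)
        if ri != rj:
            parent[ri] = rj

    for wires in gates:  # each gate is the tuple of wires it touches
        for w in wires[1:]:
            union(wires[0], w)
    return len({find(i) for i in range(num_wires)})

# 4 wires; a 2-wire gate on (0, 1) leaves components {0, 1}, {2}, {3}
print(count_components(4, [(0, 1)]))  # 3
```
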
def _get_cache_key(self, obj):
"""Derive cache key for given object."""
if obj is not None:
# Make sure that key is REALLY unique.
return '{}-{}'.format(id(self), obj.pk)
return "{}-None".format(id(self))
|
[
"def",
"_get_cache_key",
"(",
"self",
",",
"obj",
")",
":",
"if",
"obj",
"is",
"not",
"None",
":",
"# Make sure that key is REALLY unique.",
"return",
"'{}-{}'",
".",
"format",
"(",
"id",
"(",
"self",
")",
",",
"obj",
".",
"pk",
")",
"return",
"\"{}-None\"",
".",
"format",
"(",
"id",
"(",
"self",
")",
")"
] | 35.285714 | 12.142857 |
def _xml_element_value(el: Element, int_tags: list):
"""
Gets XML Element value.
:param el: Element
:param int_tags: List of tags that should be treated as ints
:return: value of the element (int/str)
"""
# None
if el.text is None:
return None
# int
try:
if el.tag in int_tags:
return int(el.text)
except:
pass
# default to str if not empty
s = str(el.text).strip()
return s if s else None
|
[
"def",
"_xml_element_value",
"(",
"el",
":",
"Element",
",",
"int_tags",
":",
"list",
")",
":",
"# None",
"if",
"el",
".",
"text",
"is",
"None",
":",
"return",
"None",
"# int",
"try",
":",
"if",
"el",
".",
"tag",
"in",
"int_tags",
":",
"return",
"int",
"(",
"el",
".",
"text",
")",
"except",
":",
"pass",
"# default to str if not empty",
"s",
"=",
"str",
"(",
"el",
".",
"text",
")",
".",
"strip",
"(",
")",
"return",
"s",
"if",
"s",
"else",
"None"
] | 24.368421 | 16.263158 |
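
A usage sketch for the coercion helper, keeping the record's bare `except` (which silently falls through to the string branch when `int()` fails):

```python
from xml.etree.ElementTree import Element, fromstring

def _xml_element_value(el: Element, int_tags: list):
    if el.text is None:
        return None
    try:
        if el.tag in int_tags:
            return int(el.text)
    except:
        pass
    s = str(el.text).strip()
    return s if s else None

root = fromstring('<row><count> 7 </count><name> Alice </name><empty/></row>')
for el in root:
    print(el.tag, repr(_xml_element_value(el, int_tags=['count'])))
# count 7
# name 'Alice'
# empty None
```
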
def _validate_sections(cls, sections):
"""Validates sections types and uniqueness."""
names = []
for section in sections:
if not hasattr(section, 'name'):
raise ConfigurationError('`sections` attribute requires a list of Section')
name = section.name
if name in names:
raise ConfigurationError('`%s` section name must be unique' % name)
names.append(name)
|
[
"def",
"_validate_sections",
"(",
"cls",
",",
"sections",
")",
":",
"names",
"=",
"[",
"]",
"for",
"section",
"in",
"sections",
":",
"if",
"not",
"hasattr",
"(",
"section",
",",
"'name'",
")",
":",
"raise",
"ConfigurationError",
"(",
"'`sections` attribute requires a list of Section'",
")",
"name",
"=",
"section",
".",
"name",
"if",
"name",
"in",
"names",
":",
"raise",
"ConfigurationError",
"(",
"'`%s` section name must be unique'",
"%",
"name",
")",
"names",
".",
"append",
"(",
"name",
")"
] | 34.615385 | 21.538462 |
def generate(env):
"""Add Builders and construction variables for lib to an Environment."""
SCons.Tool.createStaticLibBuilder(env)
SCons.Tool.createSharedLibBuilder(env)
SCons.Tool.createProgBuilder(env)
env['AR'] = 'mwld'
env['ARCOM'] = '$AR $ARFLAGS -library -o $TARGET $SOURCES'
env['LIBDIRPREFIX'] = '-L'
env['LIBDIRSUFFIX'] = ''
env['LIBLINKPREFIX'] = '-l'
env['LIBLINKSUFFIX'] = '.lib'
env['LINK'] = 'mwld'
env['LINKCOM'] = '$LINK $LINKFLAGS -o $TARGET $SOURCES $_LIBDIRFLAGS $_LIBFLAGS'
env['SHLINK'] = '$LINK'
env['SHLINKFLAGS'] = '$LINKFLAGS'
env['SHLINKCOM'] = shlib_action
env['SHLIBEMITTER']= shlib_emitter
env['LDMODULEEMITTER']= shlib_emitter
|
[
"def",
"generate",
"(",
"env",
")",
":",
"SCons",
".",
"Tool",
".",
"createStaticLibBuilder",
"(",
"env",
")",
"SCons",
".",
"Tool",
".",
"createSharedLibBuilder",
"(",
"env",
")",
"SCons",
".",
"Tool",
".",
"createProgBuilder",
"(",
"env",
")",
"env",
"[",
"'AR'",
"]",
"=",
"'mwld'",
"env",
"[",
"'ARCOM'",
"]",
"=",
"'$AR $ARFLAGS -library -o $TARGET $SOURCES'",
"env",
"[",
"'LIBDIRPREFIX'",
"]",
"=",
"'-L'",
"env",
"[",
"'LIBDIRSUFFIX'",
"]",
"=",
"''",
"env",
"[",
"'LIBLINKPREFIX'",
"]",
"=",
"'-l'",
"env",
"[",
"'LIBLINKSUFFIX'",
"]",
"=",
"'.lib'",
"env",
"[",
"'LINK'",
"]",
"=",
"'mwld'",
"env",
"[",
"'LINKCOM'",
"]",
"=",
"'$LINK $LINKFLAGS -o $TARGET $SOURCES $_LIBDIRFLAGS $_LIBFLAGS'",
"env",
"[",
"'SHLINK'",
"]",
"=",
"'$LINK'",
"env",
"[",
"'SHLINKFLAGS'",
"]",
"=",
"'$LINKFLAGS'",
"env",
"[",
"'SHLINKCOM'",
"]",
"=",
"shlib_action",
"env",
"[",
"'SHLIBEMITTER'",
"]",
"=",
"shlib_emitter",
"env",
"[",
"'LDMODULEEMITTER'",
"]",
"=",
"shlib_emitter"
] | 32.227273 | 15.863636 |
def call(cmd, shell=True, cwd=None, universal_newlines=True, stderr=STDOUT):
"""Just execute a specific command."""
return Shell._run(call, cmd, shell=shell, cwd=cwd, stderr=stderr,
universal_newlines=universal_newlines)
|
[
"def",
"call",
"(",
"cmd",
",",
"shell",
"=",
"True",
",",
"cwd",
"=",
"None",
",",
"universal_newlines",
"=",
"True",
",",
"stderr",
"=",
"STDOUT",
")",
":",
"return",
"Shell",
".",
"_run",
"(",
"call",
",",
"cmd",
",",
"shell",
"=",
"shell",
",",
"cwd",
"=",
"cwd",
",",
"stderr",
"=",
"stderr",
",",
"universal_newlines",
"=",
"universal_newlines",
")"
] | 64.75 | 23.25 |
def p_extr_lic_name_1(self, p):
"""extr_lic_name : LICS_NAME extr_lic_name_value"""
try:
self.builder.set_lic_name(self.document, p[2])
except OrderError:
self.order_error('LicenseName', 'LicenseID', p.lineno(1))
except CardinalityError:
self.more_than_one_error('LicenseName', p.lineno(1))
|
[
"def",
"p_extr_lic_name_1",
"(",
"self",
",",
"p",
")",
":",
"try",
":",
"self",
".",
"builder",
".",
"set_lic_name",
"(",
"self",
".",
"document",
",",
"p",
"[",
"2",
"]",
")",
"except",
"OrderError",
":",
"self",
".",
"order_error",
"(",
"'LicenseName'",
",",
"'LicenseID'",
",",
"p",
".",
"lineno",
"(",
"1",
")",
")",
"except",
"CardinalityError",
":",
"self",
".",
"more_than_one_error",
"(",
"'LicenseName'",
",",
"p",
".",
"lineno",
"(",
"1",
")",
")"
] | 43.875 | 16.25 |
def publish(build):
""" publish the package itself """
build.packages.install("wheel")
build.packages.install("twine")
build.executables.run([
"python", "setup.py",
"sdist", "bdist_wheel", "--universal", "--release"
])
build.executables.run([
"twine", "upload", "dist/*"
])
|
[
"def",
"publish",
"(",
"build",
")",
":",
"build",
".",
"packages",
".",
"install",
"(",
"\"wheel\"",
")",
"build",
".",
"packages",
".",
"install",
"(",
"\"twine\"",
")",
"build",
".",
"executables",
".",
"run",
"(",
"[",
"\"python\"",
",",
"\"setup.py\"",
",",
"\"sdist\"",
",",
"\"bdist_wheel\"",
",",
"\"--universal\"",
",",
"\"--release\"",
"]",
")",
"build",
".",
"executables",
".",
"run",
"(",
"[",
"\"twine\"",
",",
"\"upload\"",
",",
"\"dist/*\"",
"]",
")"
] | 28.636364 | 14.454545 |
def gc(cn, ns=None, lo=None, iq=None, ico=None, pl=None):
"""
This function is a wrapper for
:meth:`~pywbem.WBEMConnection.GetClass`.
Retrieve a class.
Parameters:
cn (:term:`string` or :class:`~pywbem.CIMClassName`):
Name of the class to be retrieved (case independent).
If specified as a `CIMClassName` object, its `host` attribute will be
ignored.
ns (:term:`string`):
Name of the CIM namespace to be used (case independent).
If `None`, defaults to the namespace of the `cn` parameter if
specified as a `CIMClassName`, or to the default namespace of the
connection.
lo (:class:`py:bool`):
LocalOnly flag: Exclude inherited properties.
`None` will cause the server default of `True` to be used.
iq (:class:`py:bool`):
IncludeQualifiers flag: Include qualifiers.
`None` will cause the server default of `True` to be used.
ico (:class:`py:bool`):
IncludeClassOrigin flag: Include class origin information for
properties and methods in the retrieved class.
`None` will cause the server default of `False` to be used.
pl (:term:`string` or :term:`py:iterable` of :term:`string`):
PropertyList: Names of properties to be included (if not otherwise
excluded). An empty iterable indicates to include no properties.
If `None`, all properties will be included.
Returns:
:class:`~pywbem.CIMClass`:
The retrieved class.
"""
return CONN.GetClass(cn, ns,
LocalOnly=lo,
IncludeQualifiers=iq,
IncludeClassOrigin=ico,
PropertyList=pl)
|
[
"def",
"gc",
"(",
"cn",
",",
"ns",
"=",
"None",
",",
"lo",
"=",
"None",
",",
"iq",
"=",
"None",
",",
"ico",
"=",
"None",
",",
"pl",
"=",
"None",
")",
":",
"return",
"CONN",
".",
"GetClass",
"(",
"cn",
",",
"ns",
",",
"LocalOnly",
"=",
"lo",
",",
"IncludeQualifiers",
"=",
"iq",
",",
"IncludeClassOrigin",
"=",
"ico",
",",
"PropertyList",
"=",
"pl",
")"
] | 32.166667 | 24.351852 |
def find_slack_bus(sub_network):
"""Find the slack bus in a connected sub-network."""
gens = sub_network.generators()
if len(gens) == 0:
logger.warning("No generators in sub-network {}, better hope power is already balanced".format(sub_network.name))
sub_network.slack_generator = None
sub_network.slack_bus = sub_network.buses_i()[0]
else:
slacks = gens[gens.control == "Slack"].index
if len(slacks) == 0:
sub_network.slack_generator = gens.index[0]
sub_network.network.generators.loc[sub_network.slack_generator,"control"] = "Slack"
logger.debug("No slack generator found in sub-network {}, using {} as the slack generator".format(sub_network.name, sub_network.slack_generator))
elif len(slacks) == 1:
sub_network.slack_generator = slacks[0]
else:
sub_network.slack_generator = slacks[0]
sub_network.network.generators.loc[slacks[1:],"control"] = "PV"
logger.debug("More than one slack generator found in sub-network {}, using {} as the slack generator".format(sub_network.name, sub_network.slack_generator))
sub_network.slack_bus = gens.bus[sub_network.slack_generator]
#also put it into the dataframe
sub_network.network.sub_networks.at[sub_network.name,"slack_bus"] = sub_network.slack_bus
logger.info("Slack bus for sub-network {} is {}".format(sub_network.name, sub_network.slack_bus))
|
[
"def",
"find_slack_bus",
"(",
"sub_network",
")",
":",
"gens",
"=",
"sub_network",
".",
"generators",
"(",
")",
"if",
"len",
"(",
"gens",
")",
"==",
"0",
":",
"logger",
".",
"warning",
"(",
"\"No generators in sub-network {}, better hope power is already balanced\"",
".",
"format",
"(",
"sub_network",
".",
"name",
")",
")",
"sub_network",
".",
"slack_generator",
"=",
"None",
"sub_network",
".",
"slack_bus",
"=",
"sub_network",
".",
"buses_i",
"(",
")",
"[",
"0",
"]",
"else",
":",
"slacks",
"=",
"gens",
"[",
"gens",
".",
"control",
"==",
"\"Slack\"",
"]",
".",
"index",
"if",
"len",
"(",
"slacks",
")",
"==",
"0",
":",
"sub_network",
".",
"slack_generator",
"=",
"gens",
".",
"index",
"[",
"0",
"]",
"sub_network",
".",
"network",
".",
"generators",
".",
"loc",
"[",
"sub_network",
".",
"slack_generator",
",",
"\"control\"",
"]",
"=",
"\"Slack\"",
"logger",
".",
"debug",
"(",
"\"No slack generator found in sub-network {}, using {} as the slack generator\"",
".",
"format",
"(",
"sub_network",
".",
"name",
",",
"sub_network",
".",
"slack_generator",
")",
")",
"elif",
"len",
"(",
"slacks",
")",
"==",
"1",
":",
"sub_network",
".",
"slack_generator",
"=",
"slacks",
"[",
"0",
"]",
"else",
":",
"sub_network",
".",
"slack_generator",
"=",
"slacks",
"[",
"0",
"]",
"sub_network",
".",
"network",
".",
"generators",
".",
"loc",
"[",
"slacks",
"[",
"1",
":",
"]",
",",
"\"control\"",
"]",
"=",
"\"PV\"",
"logger",
".",
"debug",
"(",
"\"More than one slack generator found in sub-network {}, using {} as the slack generator\"",
".",
"format",
"(",
"sub_network",
".",
"name",
",",
"sub_network",
".",
"slack_generator",
")",
")",
"sub_network",
".",
"slack_bus",
"=",
"gens",
".",
"bus",
"[",
"sub_network",
".",
"slack_generator",
"]",
"#also put it into the dataframe",
"sub_network",
".",
"network",
".",
"sub_networks",
".",
"at",
"[",
"sub_network",
".",
"name",
",",
"\"slack_bus\"",
"]",
"=",
"sub_network",
".",
"slack_bus",
"logger",
".",
"info",
"(",
"\"Slack bus for sub-network {} is {}\"",
".",
"format",
"(",
"sub_network",
".",
"name",
",",
"sub_network",
".",
"slack_bus",
")",
")"
] | 45.1875 | 34.4375 |
def get_all_leaves(self, item_ids=None, language=None, forbidden_item_ids=None):
"""
Get all leaves reachable from the given set of items. Leaves having
inactive relations to other items are omitted.
Args:
item_ids (list): items which are taken as roots for the reachability
            language (str): if specified, filter out items which are not
                available in the given language
            forbidden_item_ids (list): if specified, exclude these items from
                the traversal
Returns:
            list: sorted leaf items which are reachable from the given set of items
"""
return sorted(set(flatten(self.get_leaves(item_ids, language=language, forbidden_item_ids=forbidden_item_ids).values())))
|
[
"def",
"get_all_leaves",
"(",
"self",
",",
"item_ids",
"=",
"None",
",",
"language",
"=",
"None",
",",
"forbidden_item_ids",
"=",
"None",
")",
":",
"return",
"sorted",
"(",
"set",
"(",
"flatten",
"(",
"self",
".",
"get_leaves",
"(",
"item_ids",
",",
"language",
"=",
"language",
",",
"forbidden_item_ids",
"=",
"forbidden_item_ids",
")",
".",
"values",
"(",
")",
")",
")",
")"
] | 47.357143 | 30.214286 |
def run_all(logdir, verbose=False):
"""Perform random search over the hyperparameter space.
Arguments:
logdir: The top-level directory into which to write data. This
directory should be empty or nonexistent.
verbose: If true, print out each run's name as it begins.
"""
data = prepare_data()
rng = random.Random(0)
base_writer = tf.summary.create_file_writer(logdir)
with base_writer.as_default():
experiment = hp.Experiment(hparams=HPARAMS, metrics=METRICS)
experiment_string = experiment.summary_pb().SerializeToString()
tf.summary.experimental.write_raw_pb(experiment_string, step=0)
base_writer.flush()
base_writer.close()
sessions_per_group = 2
num_sessions = flags.FLAGS.num_session_groups * sessions_per_group
session_index = 0 # across all session groups
for group_index in xrange(flags.FLAGS.num_session_groups):
hparams = {h: sample_uniform(h.domain, rng) for h in HPARAMS}
hparams_string = str(hparams)
group_id = hashlib.sha256(hparams_string.encode("utf-8")).hexdigest()
for repeat_index in xrange(sessions_per_group):
session_id = str(session_index)
session_index += 1
if verbose:
print(
"--- Running training session %d/%d"
% (session_index, num_sessions)
)
print(hparams_string)
print("--- repeat #: %d" % (repeat_index + 1))
run(
data=data,
base_logdir=logdir,
session_id=session_id,
group_id=group_id,
hparams=hparams,
)
|
[
"def",
"run_all",
"(",
"logdir",
",",
"verbose",
"=",
"False",
")",
":",
"data",
"=",
"prepare_data",
"(",
")",
"rng",
"=",
"random",
".",
"Random",
"(",
"0",
")",
"base_writer",
"=",
"tf",
".",
"summary",
".",
"create_file_writer",
"(",
"logdir",
")",
"with",
"base_writer",
".",
"as_default",
"(",
")",
":",
"experiment",
"=",
"hp",
".",
"Experiment",
"(",
"hparams",
"=",
"HPARAMS",
",",
"metrics",
"=",
"METRICS",
")",
"experiment_string",
"=",
"experiment",
".",
"summary_pb",
"(",
")",
".",
"SerializeToString",
"(",
")",
"tf",
".",
"summary",
".",
"experimental",
".",
"write_raw_pb",
"(",
"experiment_string",
",",
"step",
"=",
"0",
")",
"base_writer",
".",
"flush",
"(",
")",
"base_writer",
".",
"close",
"(",
")",
"sessions_per_group",
"=",
"2",
"num_sessions",
"=",
"flags",
".",
"FLAGS",
".",
"num_session_groups",
"*",
"sessions_per_group",
"session_index",
"=",
"0",
"# across all session groups",
"for",
"group_index",
"in",
"xrange",
"(",
"flags",
".",
"FLAGS",
".",
"num_session_groups",
")",
":",
"hparams",
"=",
"{",
"h",
":",
"sample_uniform",
"(",
"h",
".",
"domain",
",",
"rng",
")",
"for",
"h",
"in",
"HPARAMS",
"}",
"hparams_string",
"=",
"str",
"(",
"hparams",
")",
"group_id",
"=",
"hashlib",
".",
"sha256",
"(",
"hparams_string",
".",
"encode",
"(",
"\"utf-8\"",
")",
")",
".",
"hexdigest",
"(",
")",
"for",
"repeat_index",
"in",
"xrange",
"(",
"sessions_per_group",
")",
":",
"session_id",
"=",
"str",
"(",
"session_index",
")",
"session_index",
"+=",
"1",
"if",
"verbose",
":",
"print",
"(",
"\"--- Running training session %d/%d\"",
"%",
"(",
"session_index",
",",
"num_sessions",
")",
")",
"print",
"(",
"hparams_string",
")",
"print",
"(",
"\"--- repeat #: %d\"",
"%",
"(",
"repeat_index",
"+",
"1",
")",
")",
"run",
"(",
"data",
"=",
"data",
",",
"base_logdir",
"=",
"logdir",
",",
"session_id",
"=",
"session_id",
",",
"group_id",
"=",
"group_id",
",",
"hparams",
"=",
"hparams",
",",
")"
] | 35.023256 | 18.27907 |
def unregister(self, items):
"""
Remove items from registry.
        :param items: item or list of items to remove from the registry
"""
items = _listify(items)
# get all members of Registry except private, special or class
meta_names = (m for m in vars(self).iterkeys()
if (not m.startswith('_') and m not in dir(Registry)))
# check that meta names matches
# FIXME: this is so lame. replace this with something more robust
for m in meta_names:
if m not in self.meta_names:
raise AttributeError('Meta name %s not listed.')
# pop items from Registry and from meta
for it in items:
if it in self:
self.pop(it)
for m in (getattr(self, m_) for m_ in self.meta_names):
if it in m:
m.pop(it)
|
[
"def",
"unregister",
"(",
"self",
",",
"items",
")",
":",
"items",
"=",
"_listify",
"(",
"items",
")",
"# get all members of Registry except private, special or class",
"meta_names",
"=",
"(",
"m",
"for",
"m",
"in",
"vars",
"(",
"self",
")",
".",
"iterkeys",
"(",
")",
"if",
"(",
"not",
"m",
".",
"startswith",
"(",
"'_'",
")",
"and",
"m",
"not",
"in",
"dir",
"(",
"Registry",
")",
")",
")",
"# check that meta names matches",
"# FIXME: this is so lame. replace this with something more robust",
"for",
"m",
"in",
"meta_names",
":",
"if",
"m",
"not",
"in",
"self",
".",
"meta_names",
":",
"raise",
"AttributeError",
"(",
"'Meta name %s not listed.'",
")",
"# pop items from Registry and from meta",
"for",
"it",
"in",
"items",
":",
"if",
"it",
"in",
"self",
":",
"self",
".",
"pop",
"(",
"it",
")",
"for",
"m",
"in",
"(",
"getattr",
"(",
"self",
",",
"m_",
")",
"for",
"m_",
"in",
"self",
".",
"meta_names",
")",
":",
"if",
"it",
"in",
"m",
":",
"m",
".",
"pop",
"(",
"it",
")"
] | 37.681818 | 15.227273 |
def get(self, twig=None, check_visible=True, check_default=True, **kwargs):
"""
Get a single parameter from this ParameterSet. This works exactly the
same as filter except there must be only a single result, and the Parameter
itself is returned instead of a ParameterSet.
Also see :meth:`get_parameter` (which is simply an alias of this method)
:parameter str twig: (optional) the search twig - essentially a single
string with any delimiter (ie '@') that will be parsed
into any of the meta-tags. Example: instead of
b.filter(context='component', component='starA'), you
could do b.filter('starA@component').
:parameter bool check_visible: whether to hide invisible
parameters. These are usually parameters that do not
play a role unless the value of another parameter meets
some condition.
:parameter bool check_default: whether to exclude parameters which
have a _default tag (these are parameters which solely exist
to provide defaults for when new parameters or datasets are
added and the parameter needs to be copied appropriately).
Defaults to True.
:parameter **kwargs: meta-tags to search (ie. 'context', 'component',
'model', etc). See :func:`meta` for all possible options.
:return: the resulting :class:`Parameter`
:raises ValueError: if either 0 or more than 1 results are found
matching the search.
"""
kwargs['check_visible'] = check_visible
kwargs['check_default'] = check_default
# print "***", kwargs
ps = self.filter(twig=twig, **kwargs)
if not len(ps):
# TODO: custom exception?
raise ValueError("0 results found")
elif len(ps) != 1:
# TODO: custom exception?
raise ValueError("{} results found: {}".format(len(ps), ps.twigs))
else:
# then only 1 item, so return the parameter
return ps._params[0]
|
[
"def",
"get",
"(",
"self",
",",
"twig",
"=",
"None",
",",
"check_visible",
"=",
"True",
",",
"check_default",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'check_visible'",
"]",
"=",
"check_visible",
"kwargs",
"[",
"'check_default'",
"]",
"=",
"check_default",
"# print \"***\", kwargs",
"ps",
"=",
"self",
".",
"filter",
"(",
"twig",
"=",
"twig",
",",
"*",
"*",
"kwargs",
")",
"if",
"not",
"len",
"(",
"ps",
")",
":",
"# TODO: custom exception?",
"raise",
"ValueError",
"(",
"\"0 results found\"",
")",
"elif",
"len",
"(",
"ps",
")",
"!=",
"1",
":",
"# TODO: custom exception?",
"raise",
"ValueError",
"(",
"\"{} results found: {}\"",
".",
"format",
"(",
"len",
"(",
"ps",
")",
",",
"ps",
".",
"twigs",
")",
")",
"else",
":",
"# then only 1 item, so return the parameter",
"return",
"ps",
".",
"_params",
"[",
"0",
"]"
] | 50.357143 | 22.357143 |
def find_venv_DST():
"""Find where this package should be installed to in this virtualenv.
For example: ``/path-to-venv/lib/python2.7/site-packages/package-name``
"""
dir_path = os.path.dirname(SRC)
if SYS_NAME == "Windows":
DST = os.path.join(dir_path, "Lib", "site-packages", PKG_NAME)
elif SYS_NAME in ["Darwin", "Linux"]:
python_version = find_linux_venv_py_version()
DST = os.path.join(dir_path, "lib", python_version, "site-packages", PKG_NAME)
return DST
|
[
"def",
"find_venv_DST",
"(",
")",
":",
"dir_path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"SRC",
")",
"if",
"SYS_NAME",
"==",
"\"Windows\"",
":",
"DST",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir_path",
",",
"\"Lib\"",
",",
"\"site-packages\"",
",",
"PKG_NAME",
")",
"elif",
"SYS_NAME",
"in",
"[",
"\"Darwin\"",
",",
"\"Linux\"",
"]",
":",
"python_version",
"=",
"find_linux_venv_py_version",
"(",
")",
"DST",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir_path",
",",
"\"lib\"",
",",
"python_version",
",",
"\"site-packages\"",
",",
"PKG_NAME",
")",
"return",
"DST"
] | 35.928571 | 21.928571 |
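A more portable way to locate site-packages, which avoids the per-OS branching above, is ``sysconfig``; a sketch (``pkg_name`` stands in for the ``PKG_NAME`` constant assumed by the original):

import os
import sysconfig

def find_venv_dst_portable(pkg_name):
    # "purelib" resolves to the active interpreter's site-packages
    # directory on Windows, macOS and Linux alike.
    return os.path.join(sysconfig.get_paths()["purelib"], pkg_name)
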
def titleCounts(readsAlignments):
"""
Count the number of times each title in a readsAlignments instance is
matched. This is useful for rapidly discovering what titles were matched
and with what frequency.
@param readsAlignments: A L{dark.alignments.ReadsAlignments} instance.
@return: A C{dict} whose keys are titles and whose values are the integer
counts of the number of reads that matched that title.
"""
titles = defaultdict(int)
for readAlignments in readsAlignments:
for alignment in readAlignments:
titles[alignment.subjectTitle] += 1
return titles
|
[
"def",
"titleCounts",
"(",
"readsAlignments",
")",
":",
"titles",
"=",
"defaultdict",
"(",
"int",
")",
"for",
"readAlignments",
"in",
"readsAlignments",
":",
"for",
"alignment",
"in",
"readAlignments",
":",
"titles",
"[",
"alignment",
".",
"subjectTitle",
"]",
"+=",
"1",
"return",
"titles"
] | 38.25 | 19 |
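A minimal self-contained sketch of the same counting pattern, with a toy nested list standing in for real ``ReadsAlignments`` objects (hypothetical data):

from collections import defaultdict

reads = [["title-A", "title-B"], ["title-A"]]  # stand-in for readsAlignments

titles = defaultdict(int)
for read_alignments in reads:
    for subject_title in read_alignments:
        titles[subject_title] += 1

print(dict(titles))  # {'title-A': 2, 'title-B': 1}
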
def launch_debugger(frame, stream=None):
"""
Interrupt running process, and provide a python prompt for
interactive debugging.
"""
d = {'_frame': frame} # Allow access to frame object.
d.update(frame.f_globals) # Unless shadowed by global
d.update(frame.f_locals)
import code, traceback
i = code.InteractiveConsole(d)
message = "Signal received : entering python shell.\nTraceback:\n"
message += ''.join(traceback.format_stack(frame))
i.interact(message)
|
[
"def",
"launch_debugger",
"(",
"frame",
",",
"stream",
"=",
"None",
")",
":",
"d",
"=",
"{",
"'_frame'",
":",
"frame",
"}",
"# Allow access to frame object.",
"d",
".",
"update",
"(",
"frame",
".",
"f_globals",
")",
"# Unless shadowed by global",
"d",
".",
"update",
"(",
"frame",
".",
"f_locals",
")",
"import",
"code",
",",
"traceback",
"i",
"=",
"code",
".",
"InteractiveConsole",
"(",
"d",
")",
"message",
"=",
"\"Signal received : entering python shell.\\nTraceback:\\n\"",
"message",
"+=",
"''",
".",
"join",
"(",
"traceback",
".",
"format_stack",
"(",
"frame",
")",
")",
"i",
".",
"interact",
"(",
"message",
")"
] | 30.75 | 17.75 |
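A function like this is typically installed as a signal handler. Since ``signal.signal`` passes ``(signum, frame)`` rather than ``(frame, stream)``, a small adapter is needed; a sketch assuming a POSIX platform:

import signal

# Drop the signum argument and forward the interrupted frame.
signal.signal(signal.SIGUSR1, lambda signum, frame: launch_debugger(frame))

# Then, from another shell, `kill -USR1 <pid>` drops the running
# process into the interactive console.
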
def p_let_arr_substr_in_args(p):
""" statement : LET ARRAY_ID LP arguments TO RP EQ expr
| ARRAY_ID LP arguments TO RP EQ expr
"""
i = 2 if p[1].upper() == 'LET' else 1
id_ = p[i]
arg_list = p[i + 2]
substr = (arg_list.children.pop().value,
make_number(gl.MAX_STRSLICE_IDX, lineno=p.lineno(i + 3)))
expr_ = p[i + 6]
p[0] = make_array_substr_assign(p.lineno(i), id_, arg_list, substr, expr_)
|
[
"def",
"p_let_arr_substr_in_args",
"(",
"p",
")",
":",
"i",
"=",
"2",
"if",
"p",
"[",
"1",
"]",
".",
"upper",
"(",
")",
"==",
"'LET'",
"else",
"1",
"id_",
"=",
"p",
"[",
"i",
"]",
"arg_list",
"=",
"p",
"[",
"i",
"+",
"2",
"]",
"substr",
"=",
"(",
"arg_list",
".",
"children",
".",
"pop",
"(",
")",
".",
"value",
",",
"make_number",
"(",
"gl",
".",
"MAX_STRSLICE_IDX",
",",
"lineno",
"=",
"p",
".",
"lineno",
"(",
"i",
"+",
"3",
")",
")",
")",
"expr_",
"=",
"p",
"[",
"i",
"+",
"6",
"]",
"p",
"[",
"0",
"]",
"=",
"make_array_substr_assign",
"(",
"p",
".",
"lineno",
"(",
"i",
")",
",",
"id_",
",",
"arg_list",
",",
"substr",
",",
"expr_",
")"
] | 37 | 16.666667 |
def seek_file_end(file):
'''Seek to the end of the file.'''
try:
file.seek(0, 2)
except ValueError:
# gzip files don't support seek from end
while True:
data = file.read(4096)
if not data:
break
|
[
"def",
"seek_file_end",
"(",
"file",
")",
":",
"try",
":",
"file",
".",
"seek",
"(",
"0",
",",
"2",
")",
"except",
"ValueError",
":",
"# gzip files don't support seek from end",
"while",
"True",
":",
"data",
"=",
"file",
".",
"read",
"(",
"4096",
")",
"if",
"not",
"data",
":",
"break"
] | 26.1 | 15.5 |
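A quick check of both code paths (recent Pythons can seek gzip streams from the end by decompressing, so the ``ValueError`` fallback mainly matters on older versions):

import gzip
import os
import tempfile

path = os.path.join(tempfile.mkdtemp(), "sample.gz")
with gzip.open(path, "wb") as f:
    f.write(b"hello world")

with gzip.open(path, "rb") as f:
    seek_file_end(f)
    assert f.read() == b""  # positioned at EOF either way
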
def mount_medium(self, name, controller_port, device, medium, force):
"""Mounts a medium (:py:class:`IMedium` , identified
by the given UUID @a id) to the given storage controller
(:py:class:`IStorageController` , identified by @a name),
at the indicated port and device. The device must already exist;
see :py:func:`IMachine.attach_device` for how to attach a new device.
This method is intended only for managing removable media, where the
device is fixed but media is changeable at runtime (such as DVDs
and floppies). It cannot be used for fixed media such as hard disks.
The @a controllerPort and @a device parameters specify the device slot and
        have the same meaning as with :py:func:`IMachine.attach_device`.
The specified device slot can have a medium mounted, which will be
unmounted first. Specifying a zero UUID (or an empty string) for
@a medium does just an unmount.
See :py:class:`IMedium` for more detailed information about
attaching media.
in name of type str
Name of the storage controller to attach the medium to.
in controller_port of type int
Port to attach the medium to.
in device of type int
Device slot in the given port to attach the medium to.
in medium of type :class:`IMedium`
Medium to mount or @c null for an empty drive.
in force of type bool
        Allows forcing the unmount/mount of a medium which is locked
        by the device slot in the given port.
raises :class:`OleErrorInvalidarg`
SATA device, SATA port, IDE port or IDE slot out of range.
raises :class:`VBoxErrorInvalidObjectState`
Attempt to attach medium to an unregistered virtual machine.
raises :class:`VBoxErrorInvalidVmState`
Invalid machine state.
raises :class:`VBoxErrorObjectInUse`
Medium already attached to this or another virtual machine.
"""
if not isinstance(name, basestring):
raise TypeError("name can only be an instance of type basestring")
if not isinstance(controller_port, baseinteger):
raise TypeError("controller_port can only be an instance of type baseinteger")
if not isinstance(device, baseinteger):
raise TypeError("device can only be an instance of type baseinteger")
if not isinstance(medium, IMedium):
raise TypeError("medium can only be an instance of type IMedium")
if not isinstance(force, bool):
raise TypeError("force can only be an instance of type bool")
self._call("mountMedium",
in_p=[name, controller_port, device, medium, force])
|
[
"def",
"mount_medium",
"(",
"self",
",",
"name",
",",
"controller_port",
",",
"device",
",",
"medium",
",",
"force",
")",
":",
"if",
"not",
"isinstance",
"(",
"name",
",",
"basestring",
")",
":",
"raise",
"TypeError",
"(",
"\"name can only be an instance of type basestring\"",
")",
"if",
"not",
"isinstance",
"(",
"controller_port",
",",
"baseinteger",
")",
":",
"raise",
"TypeError",
"(",
"\"controller_port can only be an instance of type baseinteger\"",
")",
"if",
"not",
"isinstance",
"(",
"device",
",",
"baseinteger",
")",
":",
"raise",
"TypeError",
"(",
"\"device can only be an instance of type baseinteger\"",
")",
"if",
"not",
"isinstance",
"(",
"medium",
",",
"IMedium",
")",
":",
"raise",
"TypeError",
"(",
"\"medium can only be an instance of type IMedium\"",
")",
"if",
"not",
"isinstance",
"(",
"force",
",",
"bool",
")",
":",
"raise",
"TypeError",
"(",
"\"force can only be an instance of type bool\"",
")",
"self",
".",
"_call",
"(",
"\"mountMedium\"",
",",
"in_p",
"=",
"[",
"name",
",",
"controller_port",
",",
"device",
",",
"medium",
",",
"force",
"]",
")"
] | 45.935484 | 24.274194 |
def generate_log_between_tags(self, older_tag, newer_tag):
"""
Generate log between 2 specified tags.
:param dict older_tag: All issues before this tag's date will be
                               excluded. May be a special value if the newer
                               tag is the first tag (meaning **older_tag**
                               marks when the repo was created).
:param dict newer_tag: All issues after this tag's date will be
excluded. May be title of unreleased section.
:rtype: str
:return: Generated ready-to-add tag section for newer tag.
"""
filtered_issues, filtered_pull_requests = \
self.filter_issues_for_tags(newer_tag, older_tag)
older_tag_name = older_tag["name"] if older_tag \
else self.detect_since_tag()
if not filtered_issues and not filtered_pull_requests:
# do not generate an unreleased section if it would be empty
return ""
return self.generate_log_for_tag(
filtered_pull_requests, filtered_issues,
newer_tag, older_tag_name)
|
[
"def",
"generate_log_between_tags",
"(",
"self",
",",
"older_tag",
",",
"newer_tag",
")",
":",
"filtered_issues",
",",
"filtered_pull_requests",
"=",
"self",
".",
"filter_issues_for_tags",
"(",
"newer_tag",
",",
"older_tag",
")",
"older_tag_name",
"=",
"older_tag",
"[",
"\"name\"",
"]",
"if",
"older_tag",
"else",
"self",
".",
"detect_since_tag",
"(",
")",
"if",
"not",
"filtered_issues",
"and",
"not",
"filtered_pull_requests",
":",
"# do not generate an unreleased section if it would be empty",
"return",
"\"\"",
"return",
"self",
".",
"generate_log_for_tag",
"(",
"filtered_pull_requests",
",",
"filtered_issues",
",",
"newer_tag",
",",
"older_tag_name",
")"
] | 43.423077 | 21.192308 |
def get_port_switch_bindings(port_id, switch_ip):
"""List all vm/vlan bindings on a Nexus switch port."""
LOG.debug("get_port_switch_bindings() called, "
"port:'%(port_id)s', switch:'%(switch_ip)s'",
{'port_id': port_id, 'switch_ip': switch_ip})
try:
return _lookup_all_nexus_bindings(port_id=port_id,
switch_ip=switch_ip)
except c_exc.NexusPortBindingNotFound:
pass
|
[
"def",
"get_port_switch_bindings",
"(",
"port_id",
",",
"switch_ip",
")",
":",
"LOG",
".",
"debug",
"(",
"\"get_port_switch_bindings() called, \"",
"\"port:'%(port_id)s', switch:'%(switch_ip)s'\"",
",",
"{",
"'port_id'",
":",
"port_id",
",",
"'switch_ip'",
":",
"switch_ip",
"}",
")",
"try",
":",
"return",
"_lookup_all_nexus_bindings",
"(",
"port_id",
"=",
"port_id",
",",
"switch_ip",
"=",
"switch_ip",
")",
"except",
"c_exc",
".",
"NexusPortBindingNotFound",
":",
"pass"
] | 45.9 | 16 |
def _titan_cn_file(cnr_file, work_dir, data):
"""Convert CNVkit or GATK4 normalized input into TitanCNA ready format.
"""
out_file = os.path.join(work_dir, "%s.cn" % (utils.splitext_plus(os.path.basename(cnr_file))[0]))
support_cols = {"cnvkit": ["chromosome", "start", "end", "log2"],
"gatk-cnv": ["CONTIG", "START", "END", "LOG2_COPY_RATIO"]}
cols = support_cols[cnvkit.bin_approach(data)]
if not utils.file_uptodate(out_file, cnr_file):
with file_transaction(data, out_file) as tx_out_file:
iterator = pd.read_table(cnr_file, sep="\t", iterator=True, header=0, comment="@")
with open(tx_out_file, "w") as handle:
for chunk in iterator:
chunk = chunk[cols]
chunk.columns = ["chrom", "start", "end", "logR"]
if cnvkit.bin_approach(data) == "cnvkit":
chunk['start'] += 1
chunk.to_csv(handle, mode="a", sep="\t", index=False)
return out_file
|
[
"def",
"_titan_cn_file",
"(",
"cnr_file",
",",
"work_dir",
",",
"data",
")",
":",
"out_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"\"%s.cn\"",
"%",
"(",
"utils",
".",
"splitext_plus",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"cnr_file",
")",
")",
"[",
"0",
"]",
")",
")",
"support_cols",
"=",
"{",
"\"cnvkit\"",
":",
"[",
"\"chromosome\"",
",",
"\"start\"",
",",
"\"end\"",
",",
"\"log2\"",
"]",
",",
"\"gatk-cnv\"",
":",
"[",
"\"CONTIG\"",
",",
"\"START\"",
",",
"\"END\"",
",",
"\"LOG2_COPY_RATIO\"",
"]",
"}",
"cols",
"=",
"support_cols",
"[",
"cnvkit",
".",
"bin_approach",
"(",
"data",
")",
"]",
"if",
"not",
"utils",
".",
"file_uptodate",
"(",
"out_file",
",",
"cnr_file",
")",
":",
"with",
"file_transaction",
"(",
"data",
",",
"out_file",
")",
"as",
"tx_out_file",
":",
"iterator",
"=",
"pd",
".",
"read_table",
"(",
"cnr_file",
",",
"sep",
"=",
"\"\\t\"",
",",
"iterator",
"=",
"True",
",",
"header",
"=",
"0",
",",
"comment",
"=",
"\"@\"",
")",
"with",
"open",
"(",
"tx_out_file",
",",
"\"w\"",
")",
"as",
"handle",
":",
"for",
"chunk",
"in",
"iterator",
":",
"chunk",
"=",
"chunk",
"[",
"cols",
"]",
"chunk",
".",
"columns",
"=",
"[",
"\"chrom\"",
",",
"\"start\"",
",",
"\"end\"",
",",
"\"logR\"",
"]",
"if",
"cnvkit",
".",
"bin_approach",
"(",
"data",
")",
"==",
"\"cnvkit\"",
":",
"chunk",
"[",
"'start'",
"]",
"+=",
"1",
"chunk",
".",
"to_csv",
"(",
"handle",
",",
"mode",
"=",
"\"a\"",
",",
"sep",
"=",
"\"\\t\"",
",",
"index",
"=",
"False",
")",
"return",
"out_file"
] | 56.833333 | 19.388889 |
def generate(self, output_dir, minimum_size):
"""Generates sequence reports and writes them to the output directory.
:param output_dir: directory to output reports to
:type output_dir: `str`
:param minimum_size: minimum size of n-grams to create sequences for
:type minimum_size: `int`
"""
self._output_dir = output_dir
# Get a list of the files in the matches, grouped by label
# (ordered by number of works).
labels = list(self._matches.groupby([constants.LABEL_FIELDNAME])[
constants.WORK_FIELDNAME].nunique().index)
original_ngrams = self._matches[
self._matches[
constants.SIZE_FIELDNAME] >= minimum_size].sort_values(
by=constants.SIZE_FIELDNAME, ascending=False)[
constants.NGRAM_FIELDNAME].unique()
ngrams = []
for original_ngram in original_ngrams:
ngrams.append(self._get_text(Text(original_ngram,
self._tokenizer)))
# Generate sequences for each witness in every combination of
# (different) labels.
for index, primary_label in enumerate(labels):
for secondary_label in labels[index+1:]:
self._generate_sequences(primary_label, secondary_label,
ngrams)
|
[
"def",
"generate",
"(",
"self",
",",
"output_dir",
",",
"minimum_size",
")",
":",
"self",
".",
"_output_dir",
"=",
"output_dir",
"# Get a list of the files in the matches, grouped by label",
"# (ordered by number of works).",
"labels",
"=",
"list",
"(",
"self",
".",
"_matches",
".",
"groupby",
"(",
"[",
"constants",
".",
"LABEL_FIELDNAME",
"]",
")",
"[",
"constants",
".",
"WORK_FIELDNAME",
"]",
".",
"nunique",
"(",
")",
".",
"index",
")",
"original_ngrams",
"=",
"self",
".",
"_matches",
"[",
"self",
".",
"_matches",
"[",
"constants",
".",
"SIZE_FIELDNAME",
"]",
">=",
"minimum_size",
"]",
".",
"sort_values",
"(",
"by",
"=",
"constants",
".",
"SIZE_FIELDNAME",
",",
"ascending",
"=",
"False",
")",
"[",
"constants",
".",
"NGRAM_FIELDNAME",
"]",
".",
"unique",
"(",
")",
"ngrams",
"=",
"[",
"]",
"for",
"original_ngram",
"in",
"original_ngrams",
":",
"ngrams",
".",
"append",
"(",
"self",
".",
"_get_text",
"(",
"Text",
"(",
"original_ngram",
",",
"self",
".",
"_tokenizer",
")",
")",
")",
"# Generate sequences for each witness in every combination of",
"# (different) labels.",
"for",
"index",
",",
"primary_label",
"in",
"enumerate",
"(",
"labels",
")",
":",
"for",
"secondary_label",
"in",
"labels",
"[",
"index",
"+",
"1",
":",
"]",
":",
"self",
".",
"_generate_sequences",
"(",
"primary_label",
",",
"secondary_label",
",",
"ngrams",
")"
] | 47.172414 | 16.931034 |
def factory(opts, **kwargs):
'''
Creates and returns the cache class.
    If memory caching is enabled by opts, the MemCache class will be instantiated.
    If not, the Cache class will be returned.
'''
if opts.get('memcache_expire_seconds', 0):
cls = MemCache
else:
cls = Cache
return cls(opts, **kwargs)
|
[
"def",
"factory",
"(",
"opts",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"opts",
".",
"get",
"(",
"'memcache_expire_seconds'",
",",
"0",
")",
":",
"cls",
"=",
"MemCache",
"else",
":",
"cls",
"=",
"Cache",
"return",
"cls",
"(",
"opts",
",",
"*",
"*",
"kwargs",
")"
] | 29.545455 | 18.272727 |
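The pattern generalizes to any keyed class selection; a self-contained sketch with stand-in classes (not the real cache implementations):

class Cache:
    def __init__(self, opts, **kwargs):
        self.opts = opts

class MemCache(Cache):
    """Stand-in for the memory-caching variant."""

def make_cache(opts, **kwargs):
    cls = MemCache if opts.get('memcache_expire_seconds', 0) else Cache
    return cls(opts, **kwargs)

assert type(make_cache({'memcache_expire_seconds': 30})) is MemCache
assert type(make_cache({})) is Cache
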
def assertDateTimesLagEqual(self, sequence, lag, msg=None):
'''Fail unless max element in ``sequence`` is separated from
the present by ``lag`` as determined by the '==' operator.
If the max element is a datetime, "present" is defined as
``datetime.now()``; if the max element is a date, "present"
is defined as ``date.today()``.
This is equivalent to
``self.assertEqual(present - max(sequence), lag)``.
Parameters
----------
sequence : iterable
lag : timedelta
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``sequence`` is not iterable.
TypeError
If ``lag`` is not a timedelta object.
TypeError
If max element in ``sequence`` is not a datetime or date
object.
'''
if not isinstance(sequence, collections.Iterable):
raise TypeError('First argument is not iterable')
if not isinstance(lag, timedelta):
raise TypeError('Second argument is not a timedelta object')
# Cannot compare datetime to date, so if dates are provided use
# date.today(), if datetimes are provided use datetime.today()
if isinstance(max(sequence), datetime):
target = datetime.today()
elif isinstance(max(sequence), date):
target = date.today()
else:
raise TypeError('Expected iterable of datetime or date objects')
self.assertEqual(target - max(sequence), lag, msg=msg)
|
[
"def",
"assertDateTimesLagEqual",
"(",
"self",
",",
"sequence",
",",
"lag",
",",
"msg",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"sequence",
",",
"collections",
".",
"Iterable",
")",
":",
"raise",
"TypeError",
"(",
"'First argument is not iterable'",
")",
"if",
"not",
"isinstance",
"(",
"lag",
",",
"timedelta",
")",
":",
"raise",
"TypeError",
"(",
"'Second argument is not a timedelta object'",
")",
"# Cannot compare datetime to date, so if dates are provided use",
"# date.today(), if datetimes are provided use datetime.today()",
"if",
"isinstance",
"(",
"max",
"(",
"sequence",
")",
",",
"datetime",
")",
":",
"target",
"=",
"datetime",
".",
"today",
"(",
")",
"elif",
"isinstance",
"(",
"max",
"(",
"sequence",
")",
",",
"date",
")",
":",
"target",
"=",
"date",
".",
"today",
"(",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'Expected iterable of datetime or date objects'",
")",
"self",
".",
"assertEqual",
"(",
"target",
"-",
"max",
"(",
"sequence",
")",
",",
"lag",
",",
"msg",
"=",
"msg",
")"
] | 36.977273 | 22.295455 |
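Usage sketch inside a test case; the import path and mixin class name are assumed here, not taken from the source:

import unittest
from datetime import date, timedelta

from marbles.mixins import mixins  # module layout assumed

class FreshnessTest(mixins.DateTimeMixins, unittest.TestCase):
    def test_newest_record_is_three_days_old(self):
        observed = [date.today() - timedelta(days=10),
                    date.today() - timedelta(days=3)]
        # max(observed) is 3 days before today, so this passes.
        self.assertDateTimesLagEqual(observed, timedelta(days=3))
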
def on_background_source(self, *args):
"""When I get a new ``background_source``, load it as an
:class:`Image` and store that in ``background_image``.
"""
if self.background_source:
self.background_image = Image(source=self.background_source)
|
[
"def",
"on_background_source",
"(",
"self",
",",
"*",
"args",
")",
":",
"if",
"self",
".",
"background_source",
":",
"self",
".",
"background_image",
"=",
"Image",
"(",
"source",
"=",
"self",
".",
"background_source",
")"
] | 40.142857 | 14.571429 |
def fromTerm(cls, term):
"""Create a functor from a Term or term handle."""
if isinstance(term, Term):
term = term.handle
elif not isinstance(term, (c_void_p, int)):
raise ArgumentTypeError((str(Term), str(int)), str(type(term)))
f = functor_t()
if PL_get_functor(term, byref(f)):
# get args
args = []
arity = PL_functor_arity(f.value)
# let's have all args be consecutive
a0 = PL_new_term_refs(arity)
for i, a in enumerate(range(1, arity + 1)):
if PL_get_arg(a, term, a0 + i):
args.append(getTerm(a0 + i))
return cls(f.value, args=args, a0=a0)
|
[
"def",
"fromTerm",
"(",
"cls",
",",
"term",
")",
":",
"if",
"isinstance",
"(",
"term",
",",
"Term",
")",
":",
"term",
"=",
"term",
".",
"handle",
"elif",
"not",
"isinstance",
"(",
"term",
",",
"(",
"c_void_p",
",",
"int",
")",
")",
":",
"raise",
"ArgumentTypeError",
"(",
"(",
"str",
"(",
"Term",
")",
",",
"str",
"(",
"int",
")",
")",
",",
"str",
"(",
"type",
"(",
"term",
")",
")",
")",
"f",
"=",
"functor_t",
"(",
")",
"if",
"PL_get_functor",
"(",
"term",
",",
"byref",
"(",
"f",
")",
")",
":",
"# get args",
"args",
"=",
"[",
"]",
"arity",
"=",
"PL_functor_arity",
"(",
"f",
".",
"value",
")",
"# let's have all args be consecutive",
"a0",
"=",
"PL_new_term_refs",
"(",
"arity",
")",
"for",
"i",
",",
"a",
"in",
"enumerate",
"(",
"range",
"(",
"1",
",",
"arity",
"+",
"1",
")",
")",
":",
"if",
"PL_get_arg",
"(",
"a",
",",
"term",
",",
"a0",
"+",
"i",
")",
":",
"args",
".",
"append",
"(",
"getTerm",
"(",
"a0",
"+",
"i",
")",
")",
"return",
"cls",
"(",
"f",
".",
"value",
",",
"args",
"=",
"args",
",",
"a0",
"=",
"a0",
")"
] | 35.6 | 15.3 |
def update(self):
"""
Updates the bundle
"""
with self._lock:
# Was it active ?
restart = self._state == Bundle.ACTIVE
# Send the update event
self._fire_bundle_event(BundleEvent.UPDATE_BEGIN)
try:
# Stop the bundle
self.stop()
except:
# Something wrong occurred, notify listeners
self._fire_bundle_event(BundleEvent.UPDATE_FAILED)
raise
# Change the source file age
module_stat = None
module_file = getattr(self.__module, "__file__", None)
if module_file is not None and os.path.isfile(module_file):
try:
module_stat = os.stat(module_file)
# Change modification time to bypass weak time resolution
# of the underlying file system
os.utime(
module_file,
(module_stat.st_atime, module_stat.st_mtime + 1),
)
except OSError:
# Can't touch the file
_logger.warning(
"Failed to update the modification time of '%s'. "
"The bundle update might not reflect the latest "
"changes.",
module_file,
)
# Clean up the module constants (otherwise kept by reload)
# Keep special members (__name__, __file__, ...)
old_content = self.__module.__dict__.copy()
for name in list(self.__module.__dict__):
if not (name.startswith("__") and name.endswith("__")):
del self.__module.__dict__[name]
try:
# Reload the module
reload_module(self.__module)
except (ImportError, SyntaxError) as ex:
# Exception raised if the file is unreadable
_logger.exception("Error updating %s: %s", self.__name, ex)
# Reset module content
self.__module.__dict__.clear()
self.__module.__dict__.update(old_content)
if module_stat is not None:
try:
# Reset times
os.utime(
module_file,
(module_stat.st_atime, module_stat.st_mtime),
)
except OSError:
# Shouldn't occur, since we succeeded before the update
_logger.debug(
"Failed to reset the modification time of '%s'",
module_file,
)
if restart:
try:
# Re-start the bundle
self.start()
except:
# Something wrong occurred, notify listeners
self._fire_bundle_event(BundleEvent.UPDATE_FAILED)
raise
# Bundle update finished
self._fire_bundle_event(BundleEvent.UPDATED)
|
[
"def",
"update",
"(",
"self",
")",
":",
"with",
"self",
".",
"_lock",
":",
"# Was it active ?",
"restart",
"=",
"self",
".",
"_state",
"==",
"Bundle",
".",
"ACTIVE",
"# Send the update event",
"self",
".",
"_fire_bundle_event",
"(",
"BundleEvent",
".",
"UPDATE_BEGIN",
")",
"try",
":",
"# Stop the bundle",
"self",
".",
"stop",
"(",
")",
"except",
":",
"# Something wrong occurred, notify listeners",
"self",
".",
"_fire_bundle_event",
"(",
"BundleEvent",
".",
"UPDATE_FAILED",
")",
"raise",
"# Change the source file age",
"module_stat",
"=",
"None",
"module_file",
"=",
"getattr",
"(",
"self",
".",
"__module",
",",
"\"__file__\"",
",",
"None",
")",
"if",
"module_file",
"is",
"not",
"None",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"module_file",
")",
":",
"try",
":",
"module_stat",
"=",
"os",
".",
"stat",
"(",
"module_file",
")",
"# Change modification time to bypass weak time resolution",
"# of the underlying file system",
"os",
".",
"utime",
"(",
"module_file",
",",
"(",
"module_stat",
".",
"st_atime",
",",
"module_stat",
".",
"st_mtime",
"+",
"1",
")",
",",
")",
"except",
"OSError",
":",
"# Can't touch the file",
"_logger",
".",
"warning",
"(",
"\"Failed to update the modification time of '%s'. \"",
"\"The bundle update might not reflect the latest \"",
"\"changes.\"",
",",
"module_file",
",",
")",
"# Clean up the module constants (otherwise kept by reload)",
"# Keep special members (__name__, __file__, ...)",
"old_content",
"=",
"self",
".",
"__module",
".",
"__dict__",
".",
"copy",
"(",
")",
"for",
"name",
"in",
"list",
"(",
"self",
".",
"__module",
".",
"__dict__",
")",
":",
"if",
"not",
"(",
"name",
".",
"startswith",
"(",
"\"__\"",
")",
"and",
"name",
".",
"endswith",
"(",
"\"__\"",
")",
")",
":",
"del",
"self",
".",
"__module",
".",
"__dict__",
"[",
"name",
"]",
"try",
":",
"# Reload the module",
"reload_module",
"(",
"self",
".",
"__module",
")",
"except",
"(",
"ImportError",
",",
"SyntaxError",
")",
"as",
"ex",
":",
"# Exception raised if the file is unreadable",
"_logger",
".",
"exception",
"(",
"\"Error updating %s: %s\"",
",",
"self",
".",
"__name",
",",
"ex",
")",
"# Reset module content",
"self",
".",
"__module",
".",
"__dict__",
".",
"clear",
"(",
")",
"self",
".",
"__module",
".",
"__dict__",
".",
"update",
"(",
"old_content",
")",
"if",
"module_stat",
"is",
"not",
"None",
":",
"try",
":",
"# Reset times",
"os",
".",
"utime",
"(",
"module_file",
",",
"(",
"module_stat",
".",
"st_atime",
",",
"module_stat",
".",
"st_mtime",
")",
",",
")",
"except",
"OSError",
":",
"# Shouldn't occur, since we succeeded before the update",
"_logger",
".",
"debug",
"(",
"\"Failed to reset the modification time of '%s'\"",
",",
"module_file",
",",
")",
"if",
"restart",
":",
"try",
":",
"# Re-start the bundle",
"self",
".",
"start",
"(",
")",
"except",
":",
"# Something wrong occurred, notify listeners",
"self",
".",
"_fire_bundle_event",
"(",
"BundleEvent",
".",
"UPDATE_FAILED",
")",
"raise",
"# Bundle update finished",
"self",
".",
"_fire_bundle_event",
"(",
"BundleEvent",
".",
"UPDATED",
")"
] | 37.059524 | 18.464286 |
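The mtime bump in the middle is the load-bearing trick: per the code's own comment, a coarse filesystem timestamp resolution can make the reloaded file look unchanged, so the file is made to look one second newer. The trick in isolation:

import os
import tempfile

fd, path = tempfile.mkstemp()
os.close(fd)
st = os.stat(path)
# Make the file look one second newer so stale-cache checks see a change.
os.utime(path, (st.st_atime, st.st_mtime + 1))
assert os.stat(path).st_mtime > st.st_mtime
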
def search(self,q):
""" Search. """
import re
pattern = re.compile("%s" % q)
result = {}
for i in self.allstockno:
b = re.search(pattern, self.allstockno[i])
try:
b.group()
result[i] = self.allstockno[i]
except:
pass
return result
|
[
"def",
"search",
"(",
"self",
",",
"q",
")",
":",
"import",
"re",
"pattern",
"=",
"re",
".",
"compile",
"(",
"\"%s\"",
"%",
"q",
")",
"result",
"=",
"{",
"}",
"for",
"i",
"in",
"self",
".",
"allstockno",
":",
"b",
"=",
"re",
".",
"search",
"(",
"pattern",
",",
"self",
".",
"allstockno",
"[",
"i",
"]",
")",
"try",
":",
"b",
".",
"group",
"(",
")",
"result",
"[",
"i",
"]",
"=",
"self",
".",
"allstockno",
"[",
"i",
"]",
"except",
":",
"pass",
"return",
"result"
] | 20.285714 | 19.357143 |
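The bare ``except`` above silently skips non-matching names; an equivalent, tighter sketch (not the library's code) keeps only the entries for which ``re.search`` returns a match:

import re

def search_names(allstockno, q):
    """Return {stock_no: name} for names matching the regex q."""
    pattern = re.compile(q)
    return {no: name for no, name in allstockno.items()
            if pattern.search(name)}
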
def do(self, command, files=None, use_long_polling=False, request_timeout=None, **query):
"""
        Return the request params we would send to the API.
"""
url, params = self._prepare_request(command, query)
return {
"url": url, "params": params, "files": files, "stream": use_long_polling,
"verify": True, # No self signed certificates. Telegram should be trustworthy anyway...
"timeout": request_timeout
}
|
[
"def",
"do",
"(",
"self",
",",
"command",
",",
"files",
"=",
"None",
",",
"use_long_polling",
"=",
"False",
",",
"request_timeout",
"=",
"None",
",",
"*",
"*",
"query",
")",
":",
"url",
",",
"params",
"=",
"self",
".",
"_prepare_request",
"(",
"command",
",",
"query",
")",
"return",
"{",
"\"url\"",
":",
"url",
",",
"\"params\"",
":",
"params",
",",
"\"files\"",
":",
"files",
",",
"\"stream\"",
":",
"use_long_polling",
",",
"\"verify\"",
":",
"True",
",",
"# No self signed certificates. Telegram should be trustworthy anyway...",
"\"timeout\"",
":",
"request_timeout",
"}"
] | 47.7 | 24.9 |
def add_logging_parser(main_parser):
"Build an argparse argument parser to parse the command line."
main_parser.set_defaults(setup_logging=set_logging_level)
verbosity_group = main_parser.add_mutually_exclusive_group(required=False)
verbosity_group.add_argument(
'--verbose',
'-v',
action='count',
help='Output more verbose logging. Can be specified multiple times.')
verbosity_group.add_argument(
'--quiet',
'-q',
action='count',
help='Output less information to the console during operation. Can be \
specified multiple times.')
main_parser.add_argument(
'--silence-urllib3',
action='store_true',
help='Silence urllib3 warnings. See '
'https://urllib3.readthedocs.org/en/latest/security.html for details.')
return verbosity_group
|
[
"def",
"add_logging_parser",
"(",
"main_parser",
")",
":",
"main_parser",
".",
"set_defaults",
"(",
"setup_logging",
"=",
"set_logging_level",
")",
"verbosity_group",
"=",
"main_parser",
".",
"add_mutually_exclusive_group",
"(",
"required",
"=",
"False",
")",
"verbosity_group",
".",
"add_argument",
"(",
"'--verbose'",
",",
"'-v'",
",",
"action",
"=",
"'count'",
",",
"help",
"=",
"'Output more verbose logging. Can be specified multiple times.'",
")",
"verbosity_group",
".",
"add_argument",
"(",
"'--quiet'",
",",
"'-q'",
",",
"action",
"=",
"'count'",
",",
"help",
"=",
"'Output less information to the console during operation. Can be \\\n specified multiple times.'",
")",
"main_parser",
".",
"add_argument",
"(",
"'--silence-urllib3'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Silence urllib3 warnings. See '",
"'https://urllib3.readthedocs.org/en/latest/security.html for details.'",
")",
"return",
"verbosity_group"
] | 33.88 | 22.52 |
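The counting -v/-q pattern in isolation (a standalone sketch, independent of ``set_logging_level``):

import argparse

parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
group.add_argument("-v", "--verbose", action="count", default=0)
group.add_argument("-q", "--quiet", action="count", default=0)

args = parser.parse_args(["-vv"])
assert args.verbose == 2 and args.quiet == 0
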
def get_config(self):
"""Save configurations of metric. Can be recreated
from configs with metric.create(``**config``)
"""
config = self._kwargs.copy()
config.update({
'metric': self.__class__.__name__,
'name': self.name,
'output_names': self.output_names,
'label_names': self.label_names})
return config
|
[
"def",
"get_config",
"(",
"self",
")",
":",
"config",
"=",
"self",
".",
"_kwargs",
".",
"copy",
"(",
")",
"config",
".",
"update",
"(",
"{",
"'metric'",
":",
"self",
".",
"__class__",
".",
"__name__",
",",
"'name'",
":",
"self",
".",
"name",
",",
"'output_names'",
":",
"self",
".",
"output_names",
",",
"'label_names'",
":",
"self",
".",
"label_names",
"}",
")",
"return",
"config"
] | 35.454545 | 9 |
def add(self, *args):
"""
This function adds strings to the keyboard, while not exceeding row_width.
E.g. ReplyKeyboardMarkup#add("A", "B", "C") yields the json result {keyboard: [["A"], ["B"], ["C"]]}
when row_width is set to 1.
When row_width is set to 2, the following is the result of this function: {keyboard: [["A", "B"], ["C"]]}
See https://core.telegram.org/bots/api#replykeyboardmarkup
:param args: KeyboardButton to append to the keyboard
"""
i = 1
row = []
for button in args:
row.append(button.to_dic())
if i % self.row_width == 0:
self.keyboard.append(row)
row = []
i += 1
if len(row) > 0:
self.keyboard.append(row)
|
[
"def",
"add",
"(",
"self",
",",
"*",
"args",
")",
":",
"i",
"=",
"1",
"row",
"=",
"[",
"]",
"for",
"button",
"in",
"args",
":",
"row",
".",
"append",
"(",
"button",
".",
"to_dic",
"(",
")",
")",
"if",
"i",
"%",
"self",
".",
"row_width",
"==",
"0",
":",
"self",
".",
"keyboard",
".",
"append",
"(",
"row",
")",
"row",
"=",
"[",
"]",
"i",
"+=",
"1",
"if",
"len",
"(",
"row",
")",
">",
"0",
":",
"self",
".",
"keyboard",
".",
"append",
"(",
"row",
")"
] | 41.368421 | 19.894737 |
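The row logic is simply "group buttons into rows of at most ``row_width``"; a standalone sketch with plain strings in place of ``KeyboardButton`` objects:

def chunk_rows(buttons, row_width):
    # Slice the flat button list into rows of at most row_width items.
    return [buttons[i:i + row_width]
            for i in range(0, len(buttons), row_width)]

assert chunk_rows(["A", "B", "C"], 1) == [["A"], ["B"], ["C"]]
assert chunk_rows(["A", "B", "C"], 2) == [["A", "B"], ["C"]]
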
def _process_newline(self, char):
""" Process a newline character.
"""
state = self._state
# inside string, just append char to token
if state == self.ST_STRING:
self._token_chars.append(char)
else:
# otherwise, add new token
self._new_token()
self._line_no += 1 # update line counter
# finished with comment
if state == self.ST_COMMENT:
self._state = self.ST_TOKEN
|
[
"def",
"_process_newline",
"(",
"self",
",",
"char",
")",
":",
"state",
"=",
"self",
".",
"_state",
"# inside string, just append char to token",
"if",
"state",
"==",
"self",
".",
"ST_STRING",
":",
"self",
".",
"_token_chars",
".",
"append",
"(",
"char",
")",
"else",
":",
"# otherwise, add new token",
"self",
".",
"_new_token",
"(",
")",
"self",
".",
"_line_no",
"+=",
"1",
"# update line counter",
"# finished with comment",
"if",
"state",
"==",
"self",
".",
"ST_COMMENT",
":",
"self",
".",
"_state",
"=",
"self",
".",
"ST_TOKEN"
] | 26.5 | 14.444444 |
def files_have_same_point_format_id(las_files):
""" Returns true if all the files have the same points format id
"""
point_format_found = {las.header.point_format_id for las in las_files}
return len(point_format_found) == 1
|
[
"def",
"files_have_same_point_format_id",
"(",
"las_files",
")",
":",
"point_format_found",
"=",
"{",
"las",
".",
"header",
".",
"point_format_id",
"for",
"las",
"in",
"las_files",
"}",
"return",
"len",
"(",
"point_format_found",
")",
"==",
"1"
] | 47 | 8.4 |
def set_mask_selection(self, selection, value, fields=None):
"""Modify a selection of individual items, by providing a Boolean array of the
same shape as the array against which the selection is being made, where True
values indicate a selected item.
Parameters
----------
selection : ndarray, bool
A Boolean array of the same shape as the array against which the selection is
being made.
value : scalar or array-like
Value to be stored into the array.
fields : str or sequence of str, optional
For arrays with a structured dtype, one or more fields can be specified to set
data for.
Examples
--------
Setup a 2-dimensional array::
>>> import zarr
>>> import numpy as np
>>> z = zarr.zeros((5, 5), dtype=int)
Set data for a selection of items::
>>> sel = np.zeros_like(z, dtype=bool)
>>> sel[1, 1] = True
>>> sel[4, 4] = True
>>> z.set_mask_selection(sel, 1)
>>> z[...]
array([[0, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 1]])
For convenience, this functionality is also available via the `vindex` property.
E.g.::
>>> z.vindex[sel] = 2
>>> z[...]
array([[0, 0, 0, 0, 0],
[0, 2, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 2]])
Notes
-----
Mask indexing is a form of vectorized or inner indexing, and is equivalent to
coordinate indexing. Internally the mask array is converted to coordinate
arrays by calling `np.nonzero`.
See Also
--------
get_basic_selection, set_basic_selection, get_mask_selection,
get_orthogonal_selection, set_orthogonal_selection, get_coordinate_selection,
set_coordinate_selection, vindex, oindex, __getitem__, __setitem__
"""
# guard conditions
if self._read_only:
err_read_only()
# refresh metadata
if not self._cache_metadata:
self._load_metadata_nosync()
# setup indexer
indexer = MaskIndexer(selection, self)
self._set_selection(indexer, value, fields=fields)
|
[
"def",
"set_mask_selection",
"(",
"self",
",",
"selection",
",",
"value",
",",
"fields",
"=",
"None",
")",
":",
"# guard conditions",
"if",
"self",
".",
"_read_only",
":",
"err_read_only",
"(",
")",
"# refresh metadata",
"if",
"not",
"self",
".",
"_cache_metadata",
":",
"self",
".",
"_load_metadata_nosync",
"(",
")",
"# setup indexer",
"indexer",
"=",
"MaskIndexer",
"(",
"selection",
",",
"self",
")",
"self",
".",
"_set_selection",
"(",
"indexer",
",",
"value",
",",
"fields",
"=",
"fields",
")"
] | 32.648649 | 20.310811 |
def _compute_mean(self, C, g, mag, hypo_depth, dists, imt):
"""
Compute mean according to equation on Table 2, page 2275.
"""
delta = 0.00750 * 10 ** (0.507 * mag)
# computing R for different values of mag
if mag < 6.5:
R = np.sqrt(dists.rhypo ** 2 + delta ** 2)
else:
R = np.sqrt(dists.rrup ** 2 + delta ** 2)
mean = (
# 1st term
C['c1'] + C['c2'] * mag +
# 2nd term
C['c3'] * R -
# 3rd term
C['c4'] * np.log10(R) +
# 4th term
C['c5'] * hypo_depth
)
# convert from base 10 to base e
if imt == PGV():
mean = np.log(10 ** mean)
else:
# convert from cm/s**2 to g
mean = np.log((10 ** mean) * 1e-2 / g)
return mean
|
[
"def",
"_compute_mean",
"(",
"self",
",",
"C",
",",
"g",
",",
"mag",
",",
"hypo_depth",
",",
"dists",
",",
"imt",
")",
":",
"delta",
"=",
"0.00750",
"*",
"10",
"**",
"(",
"0.507",
"*",
"mag",
")",
"# computing R for different values of mag",
"if",
"mag",
"<",
"6.5",
":",
"R",
"=",
"np",
".",
"sqrt",
"(",
"dists",
".",
"rhypo",
"**",
"2",
"+",
"delta",
"**",
"2",
")",
"else",
":",
"R",
"=",
"np",
".",
"sqrt",
"(",
"dists",
".",
"rrup",
"**",
"2",
"+",
"delta",
"**",
"2",
")",
"mean",
"=",
"(",
"# 1st term",
"C",
"[",
"'c1'",
"]",
"+",
"C",
"[",
"'c2'",
"]",
"*",
"mag",
"+",
"# 2nd term",
"C",
"[",
"'c3'",
"]",
"*",
"R",
"-",
"# 3rd term",
"C",
"[",
"'c4'",
"]",
"*",
"np",
".",
"log10",
"(",
"R",
")",
"+",
"# 4th term",
"C",
"[",
"'c5'",
"]",
"*",
"hypo_depth",
")",
"# convert from base 10 to base e",
"if",
"imt",
"==",
"PGV",
"(",
")",
":",
"mean",
"=",
"np",
".",
"log",
"(",
"10",
"**",
"mean",
")",
"else",
":",
"# convert from cm/s**2 to g",
"mean",
"=",
"np",
".",
"log",
"(",
"(",
"10",
"**",
"mean",
")",
"*",
"1e-2",
"/",
"g",
")",
"return",
"mean"
] | 28.166667 | 16.233333 |
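The closing unit conversion deserves a worked check: the regression predicts log10 of acceleration in cm/s², while the caller expects the natural log of acceleration in g (the ``* 1e-2 / g`` factor implies ``g`` is standard gravity in m/s²):

import numpy as np
from scipy.constants import g  # 9.80665 m/s^2

log10_pga_cgs = 2.0                       # model output: 10**2 = 100 cm/s^2
pga_g = (10 ** log10_pga_cgs) * 1e-2 / g  # cm/s^2 -> m/s^2 -> g
print(round(pga_g, 4))           # 0.102
print(round(np.log(pga_g), 3))   # -2.283
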
def request_sensor_list(self, req, msg):
"""Request the list of sensors.
The list of sensors is sent as a sequence of #sensor-list informs.
Parameters
----------
name : str, optional
Name of the sensor to list (the default is to list all sensors).
If name starts and ends with '/' it is treated as a regular
expression and all sensors whose names contain the regular
expression are returned.
Informs
-------
name : str
The name of the sensor being described.
description : str
Description of the named sensor.
units : str
Units for the value of the named sensor.
type : str
Type of the named sensor.
params : list of str, optional
Additional sensor parameters (type dependent). For integer and
float sensors the additional parameters are the minimum and maximum
sensor value. For discrete sensors the additional parameters are
the allowed values. For all other types no additional parameters
are sent.
Returns
-------
success : {'ok', 'fail'}
Whether sending the sensor list succeeded.
informs : int
Number of #sensor-list inform messages sent.
Examples
--------
::
?sensor-list
#sensor-list psu.voltage PSU\_voltage. V float 0.0 5.0
#sensor-list cpu.status CPU\_status. \@ discrete on off error
...
!sensor-list ok 5
?sensor-list cpu.power.on
#sensor-list cpu.power.on Whether\_CPU\_hase\_power. \@ boolean
!sensor-list ok 1
?sensor-list /voltage/
#sensor-list psu.voltage PSU\_voltage. V float 0.0 5.0
#sensor-list cpu.voltage CPU\_voltage. V float 0.0 3.0
!sensor-list ok 2
"""
exact, name_filter = construct_name_filter(msg.arguments[0]
if msg.arguments else None)
sensors = [(name, sensor) for name, sensor in
sorted(self._sensors.iteritems()) if name_filter(name)]
if exact and not sensors:
return req.make_reply("fail", "Unknown sensor name.")
self._send_sensor_value_informs(req, sensors)
return req.make_reply("ok", str(len(sensors)))
|
[
"def",
"request_sensor_list",
"(",
"self",
",",
"req",
",",
"msg",
")",
":",
"exact",
",",
"name_filter",
"=",
"construct_name_filter",
"(",
"msg",
".",
"arguments",
"[",
"0",
"]",
"if",
"msg",
".",
"arguments",
"else",
"None",
")",
"sensors",
"=",
"[",
"(",
"name",
",",
"sensor",
")",
"for",
"name",
",",
"sensor",
"in",
"sorted",
"(",
"self",
".",
"_sensors",
".",
"iteritems",
"(",
")",
")",
"if",
"name_filter",
"(",
"name",
")",
"]",
"if",
"exact",
"and",
"not",
"sensors",
":",
"return",
"req",
".",
"make_reply",
"(",
"\"fail\"",
",",
"\"Unknown sensor name.\"",
")",
"self",
".",
"_send_sensor_value_informs",
"(",
"req",
",",
"sensors",
")",
"return",
"req",
".",
"make_reply",
"(",
"\"ok\"",
",",
"str",
"(",
"len",
"(",
"sensors",
")",
")",
")"
] | 35.731343 | 23.014925 |
def get_vhost(self, name):
"""
Details about an individual vhost.
:param name: The vhost name
:type name: str
"""
return self._api_get('/api/vhosts/{0}'.format(
urllib.parse.quote_plus(name)
))
|
[
"def",
"get_vhost",
"(",
"self",
",",
"name",
")",
":",
"return",
"self",
".",
"_api_get",
"(",
"'/api/vhosts/{0}'",
".",
"format",
"(",
"urllib",
".",
"parse",
".",
"quote_plus",
"(",
"name",
")",
")",
")"
] | 25.3 | 12.3 |
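The ``quote_plus`` call matters for vhost names like RabbitMQ's default ``/``:

import urllib.parse

assert urllib.parse.quote_plus("/") == "%2F"
# so get_vhost("/") requests /api/vhosts/%2F, as the HTTP API expects.
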
def split_string(x: str, n: int) -> List[str]:
"""
Split string into chunks of length n
"""
# https://stackoverflow.com/questions/9475241/split-string-every-nth-character # noqa
return [x[i:i+n] for i in range(0, len(x), n)]
|
[
"def",
"split_string",
"(",
"x",
":",
"str",
",",
"n",
":",
"int",
")",
"->",
"List",
"[",
"str",
"]",
":",
"# https://stackoverflow.com/questions/9475241/split-string-every-nth-character # noqa",
"return",
"[",
"x",
"[",
"i",
":",
"i",
"+",
"n",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"x",
")",
",",
"n",
")",
"]"
] | 40 | 11 |
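A quick check of the chunking behavior (the last chunk may be shorter than ``n``):

assert split_string("abcdefg", 3) == ["abc", "def", "g"]
assert split_string("", 3) == []
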
def set_root(self, index):
"""Set the given index as root index of the combobox
:param index: the new root index
:type index: QtCore.QModelIndex
:returns: None
:rtype: None
:raises: None
"""
if not index.isValid():
self.setCurrentIndex(-1)
return
if self.model() != index.model():
self.setModel(index.model())
self.setRootModelIndex(index)
if self.model().rowCount(index):
self.setCurrentIndex(0)
else:
self.setCurrentIndex(-1)
|
[
"def",
"set_root",
"(",
"self",
",",
"index",
")",
":",
"if",
"not",
"index",
".",
"isValid",
"(",
")",
":",
"self",
".",
"setCurrentIndex",
"(",
"-",
"1",
")",
"return",
"if",
"self",
".",
"model",
"(",
")",
"!=",
"index",
".",
"model",
"(",
")",
":",
"self",
".",
"setModel",
"(",
"index",
".",
"model",
"(",
")",
")",
"self",
".",
"setRootModelIndex",
"(",
"index",
")",
"if",
"self",
".",
"model",
"(",
")",
".",
"rowCount",
"(",
"index",
")",
":",
"self",
".",
"setCurrentIndex",
"(",
"0",
")",
"else",
":",
"self",
".",
"setCurrentIndex",
"(",
"-",
"1",
")"
] | 29.789474 | 9.842105 |
def convert_via_profile(self, data_np, order, inprof_name, outprof_name):
"""Convert the given RGB data from the working ICC profile
to the output profile in-place.
Parameters
----------
data_np : ndarray
RGB image data to be displayed.
order : str
Order of channels in the data (e.g. "BGRA").
inprof_name, outprof_name : str
ICC profile names (see :func:`ginga.util.rgb_cms.get_profiles`).
"""
# get rest of necessary conversion parameters
to_intent = self.t_.get('icc_output_intent', 'perceptual')
proofprof_name = self.t_.get('icc_proof_profile', None)
proof_intent = self.t_.get('icc_proof_intent', 'perceptual')
use_black_pt = self.t_.get('icc_black_point_compensation', False)
try:
rgbobj = RGBMap.RGBPlanes(data_np, order)
arr_np = rgbobj.get_array('RGB')
arr = rgb_cms.convert_profile_fromto(arr_np, inprof_name, outprof_name,
to_intent=to_intent,
proof_name=proofprof_name,
proof_intent=proof_intent,
use_black_pt=use_black_pt,
logger=self.logger)
ri, gi, bi = rgbobj.get_order_indexes('RGB')
out = data_np
out[..., ri] = arr[..., 0]
out[..., gi] = arr[..., 1]
out[..., bi] = arr[..., 2]
self.logger.debug("Converted from '%s' to '%s' profile" % (
inprof_name, outprof_name))
except Exception as e:
self.logger.warning("Error converting output from working profile: %s" % (str(e)))
# TODO: maybe should have a traceback here
self.logger.info("Output left unprofiled")
|
[
"def",
"convert_via_profile",
"(",
"self",
",",
"data_np",
",",
"order",
",",
"inprof_name",
",",
"outprof_name",
")",
":",
"# get rest of necessary conversion parameters",
"to_intent",
"=",
"self",
".",
"t_",
".",
"get",
"(",
"'icc_output_intent'",
",",
"'perceptual'",
")",
"proofprof_name",
"=",
"self",
".",
"t_",
".",
"get",
"(",
"'icc_proof_profile'",
",",
"None",
")",
"proof_intent",
"=",
"self",
".",
"t_",
".",
"get",
"(",
"'icc_proof_intent'",
",",
"'perceptual'",
")",
"use_black_pt",
"=",
"self",
".",
"t_",
".",
"get",
"(",
"'icc_black_point_compensation'",
",",
"False",
")",
"try",
":",
"rgbobj",
"=",
"RGBMap",
".",
"RGBPlanes",
"(",
"data_np",
",",
"order",
")",
"arr_np",
"=",
"rgbobj",
".",
"get_array",
"(",
"'RGB'",
")",
"arr",
"=",
"rgb_cms",
".",
"convert_profile_fromto",
"(",
"arr_np",
",",
"inprof_name",
",",
"outprof_name",
",",
"to_intent",
"=",
"to_intent",
",",
"proof_name",
"=",
"proofprof_name",
",",
"proof_intent",
"=",
"proof_intent",
",",
"use_black_pt",
"=",
"use_black_pt",
",",
"logger",
"=",
"self",
".",
"logger",
")",
"ri",
",",
"gi",
",",
"bi",
"=",
"rgbobj",
".",
"get_order_indexes",
"(",
"'RGB'",
")",
"out",
"=",
"data_np",
"out",
"[",
"...",
",",
"ri",
"]",
"=",
"arr",
"[",
"...",
",",
"0",
"]",
"out",
"[",
"...",
",",
"gi",
"]",
"=",
"arr",
"[",
"...",
",",
"1",
"]",
"out",
"[",
"...",
",",
"bi",
"]",
"=",
"arr",
"[",
"...",
",",
"2",
"]",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Converted from '%s' to '%s' profile\"",
"%",
"(",
"inprof_name",
",",
"outprof_name",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"logger",
".",
"warning",
"(",
"\"Error converting output from working profile: %s\"",
"%",
"(",
"str",
"(",
"e",
")",
")",
")",
"# TODO: maybe should have a traceback here",
"self",
".",
"logger",
".",
"info",
"(",
"\"Output left unprofiled\"",
")"
] | 41.326087 | 23.173913 |
def saveXml(self, xml):
"""
Saves the settings for this edit to the xml parent.
        :param      xml | <xml.etree.ElementTree>
"""
# save grouping
xtree = ElementTree.SubElement(xml, 'tree')
self.uiRecordTREE.saveXml(xtree)
# save the query
query = self.query()
if query:
query.toXml(ElementTree.SubElement(xml, 'query'))
|
[
"def",
"saveXml",
"(",
"self",
",",
"xml",
")",
":",
"# save grouping\r",
"xtree",
"=",
"ElementTree",
".",
"SubElement",
"(",
"xml",
",",
"'tree'",
")",
"self",
".",
"uiRecordTREE",
".",
"saveXml",
"(",
"xtree",
")",
"# save the query\r",
"query",
"=",
"self",
".",
"query",
"(",
")",
"if",
"query",
":",
"query",
".",
"toXml",
"(",
"ElementTree",
".",
"SubElement",
"(",
"xml",
",",
"'query'",
")",
")"
] | 30.714286 | 15 |
def createdb(args):
"""
cldf createdb <DATASET> <SQLITE_DB_PATH>
Load CLDF dataset <DATASET> into a SQLite DB, where <DATASET> may be the path to
- a CLDF metadata file
- a CLDF core data file
"""
if len(args.args) < 2:
raise ParserError('not enough arguments')
ds = _get_dataset(args)
db = Database(ds, fname=args.args[1])
db.write_from_tg()
args.log.info('{0} loaded in {1}'.format(ds, db.fname))
|
[
"def",
"createdb",
"(",
"args",
")",
":",
"if",
"len",
"(",
"args",
".",
"args",
")",
"<",
"2",
":",
"raise",
"ParserError",
"(",
"'not enough arguments'",
")",
"ds",
"=",
"_get_dataset",
"(",
"args",
")",
"db",
"=",
"Database",
"(",
"ds",
",",
"fname",
"=",
"args",
".",
"args",
"[",
"1",
"]",
")",
"db",
".",
"write_from_tg",
"(",
")",
"args",
".",
"log",
".",
"info",
"(",
"'{0} loaded in {1}'",
".",
"format",
"(",
"ds",
",",
"db",
".",
"fname",
")",
")"
] | 31.285714 | 15 |
def _pythonized_comments(tokens):
"""
Similar to tokens but converts strings after a colon (:) to comments.
"""
is_after_colon = True
for token_type, token_text in tokens:
if is_after_colon and (token_type in pygments.token.String):
token_type = pygments.token.Comment
elif token_text == ':':
is_after_colon = True
elif token_type not in pygments.token.Comment:
is_whitespace = len(token_text.rstrip(' \f\n\r\t')) == 0
if not is_whitespace:
is_after_colon = False
yield token_type, token_text
|
[
"def",
"_pythonized_comments",
"(",
"tokens",
")",
":",
"is_after_colon",
"=",
"True",
"for",
"token_type",
",",
"token_text",
"in",
"tokens",
":",
"if",
"is_after_colon",
"and",
"(",
"token_type",
"in",
"pygments",
".",
"token",
".",
"String",
")",
":",
"token_type",
"=",
"pygments",
".",
"token",
".",
"Comment",
"elif",
"token_text",
"==",
"':'",
":",
"is_after_colon",
"=",
"True",
"elif",
"token_type",
"not",
"in",
"pygments",
".",
"token",
".",
"Comment",
":",
"is_whitespace",
"=",
"len",
"(",
"token_text",
".",
"rstrip",
"(",
"' \\f\\n\\r\\t'",
")",
")",
"==",
"0",
"if",
"not",
"is_whitespace",
":",
"is_after_colon",
"=",
"False",
"yield",
"token_type",
",",
"token_text"
] | 39.6 | 10.8 |
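Driven over a pygments token stream, the effect is to re-tag a docstring-like string right after a ``:`` as a comment; a sketch assuming ``pygments`` is installed (the exact token splitting is lexer-dependent, so the asserts are written to be robust to it):

from pygments.lexers import PythonLexer
from pygments.token import Comment, String

code = 'def f():\n    "doc"\n    x = "real"\n'
out = list(_pythonized_comments(PythonLexer().get_tokens(code)))
assert all(tt in Comment for tt, txt in out if txt == "doc")   # re-tagged
assert all(tt in String for tt, txt in out if txt == "real")   # untouched
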
def get_render_data(self, **kwargs):
"""
Because of the way mixin inheritance works
we can't have a default implementation of
    get_context_data on this class, so this
calls that method if available and returns
the resulting context.
"""
if hasattr(self, 'get_context_data'):
data = self.get_context_data(**kwargs)
else:
data = kwargs
return data
|
[
"def",
"get_render_data",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"'get_context_data'",
")",
":",
"data",
"=",
"self",
".",
"get_context_data",
"(",
"*",
"*",
"kwargs",
")",
"else",
":",
"data",
"=",
"kwargs",
"return",
"data"
] | 33.846154 | 10.153846 |
def value_counts(arg, metric_name='count'):
"""
Compute a frequency table for this value expression
Parameters
    ----------
    arg : array expression
    metric_name : str
        Name to give the frequency column in the result
Returns
-------
counts : TableExpr
Aggregated table
"""
base = ir.find_base_table(arg)
metric = base.count().name(metric_name)
try:
arg.get_name()
except com.ExpressionError:
arg = arg.name('unnamed')
return base.group_by(arg).aggregate(metric)
|
[
"def",
"value_counts",
"(",
"arg",
",",
"metric_name",
"=",
"'count'",
")",
":",
"base",
"=",
"ir",
".",
"find_base_table",
"(",
"arg",
")",
"metric",
"=",
"base",
".",
"count",
"(",
")",
".",
"name",
"(",
"metric_name",
")",
"try",
":",
"arg",
".",
"get_name",
"(",
")",
"except",
"com",
".",
"ExpressionError",
":",
"arg",
"=",
"arg",
".",
"name",
"(",
"'unnamed'",
")",
"return",
"base",
".",
"group_by",
"(",
"arg",
")",
".",
"aggregate",
"(",
"metric",
")"
] | 20.190476 | 19.333333 |
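Usage sketch with an unbound ibis table (schema and names are illustrative):

import ibis

t = ibis.table([("species", "string")], name="t")
expr = value_counts(t.species, metric_name="n")
# Roughly equivalent to:
#   t.group_by(t.species).aggregate(t.count().name("n"))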