content (stringlengths 22-815k) | id (int64 0-4.91M) |
---|---|
def run_help_command():
"""Execute the 'help' subcommand."""
if len(sys.argv) < 3:
help.print_help(sys.stdout)
sys.exit(0)
else:
help_command = sys.argv[2]
if help_command == 'review':
help.print_review_help(sys.stdout)
sys.exit(0)
elif help_command == 'decks':
help.print_decks_help(sys.stdout)
sys.exit(0)
elif help_command == 'import':
help.print_import_help(sys.stdout)
sys.exit(0)
elif help_command == 'update-decks':
help.print_update_decks_help(sys.stdout)
sys.exit(0)
elif help_command == 'update-deck':
help.print_update_deck_help(sys.stdout)
sys.exit(0)
elif help_command == 'remove':
help.print_remove_help(sys.stdout)
sys.exit(0)
elif help_command == 'serve':
help.print_serve_help(sys.stdout)
sys.exit(0)
else:
sys.stderr.write(
"Error: no help for unknown command '%s'\n" % help_command
)
exe = os.path.basename(sys.argv[0])
sys.stderr.write("Try '%s help'\n" % exe)
sys.exit(1) | 5,355,900 |
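A more compact, table-driven variant of the same dispatch is sketched below; it assumes the same `help` module functions and `sys`/`os` imports used above and is an illustrative alternative, not the project's actual code.

# Sketch: table-driven dispatch equivalent to run_help_command above.
HELP_PRINTERS = {
    'review': help.print_review_help,
    'decks': help.print_decks_help,
    'import': help.print_import_help,
    'update-decks': help.print_update_decks_help,
    'update-deck': help.print_update_deck_help,
    'remove': help.print_remove_help,
    'serve': help.print_serve_help,
}

def run_help_command_compact():
    """Same behaviour as run_help_command, via a lookup table."""
    if len(sys.argv) < 3:
        help.print_help(sys.stdout)
        sys.exit(0)
    printer = HELP_PRINTERS.get(sys.argv[2])
    if printer is None:
        sys.stderr.write("Error: no help for unknown command '%s'\n" % sys.argv[2])
        sys.stderr.write("Try '%s help'\n" % os.path.basename(sys.argv[0]))
        sys.exit(1)
    printer(sys.stdout)
    sys.exit(0)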
def calc_and_save_eta(steps, time, start, i, epoch, num_epochs, filename):
"""
Estimates the time remaining based on the elapsed time and epochs
:param steps: number of steps in an epoch
:param time: current time
:param start: start time
:param i: iteration through this epoch
:param epoch: epoch number
:param num_epochs: total no. of epochs
:param filename: the filename to save
"""
elap = time - start
progress = epoch * steps + i + 1
rem = num_epochs * steps - progress
ETA = rem / progress * elap
hrs = int(ETA / 3600)
minutes = int((ETA / 3600 % 1) * 60)
# save_res = np.array([epoch, num_epochs, i, steps, hrs, minutes])
# np.save(progress_dir + filename, save_res)
print('[%d/%d][%d/%d]\tETA: %d hrs %d mins'
% (epoch, num_epochs, i, steps,
hrs, minutes)) | 5,355,901 |
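A quick numeric check of the ETA arithmetic above, with purely hypothetical numbers:

# Hypothetical example of the ETA arithmetic used in calc_and_save_eta.
steps, num_epochs = 500, 10           # 500 iterations per epoch, 10 epochs
epoch, i = 2, 249                     # currently at epoch 2, iteration 249
elap = 1800.0                         # seconds elapsed since start
progress = epoch * steps + i + 1      # 1250 iterations completed
rem = num_epochs * steps - progress   # 3750 iterations remaining
eta = rem / progress * elap           # 5400.0 seconds remaining
hrs = int(eta / 3600)                 # 1
minutes = int((eta / 3600 % 1) * 60)  # 30
assert (hrs, minutes) == (1, 30)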
def main():
""" Пример использования bots longpoll
https://vk.com/dev/bots_longpoll
"""
vk_session = vk_api.VkApi(token='your_group_token')
longpoll = VkBotLongPoll(vk_session, 'your_group_id')
for event in longpoll.listen():
if event.type == VkBotEventType.MESSAGE_NEW:
print('New message:')
print('For me, from: ', end='')
print(event.obj.from_id)
print('Text:', event.obj.text)
print()
elif event.type == VkBotEventType.MESSAGE_REPLY:
print('New message:')
print('From me, to: ', end='')
print(event.obj.peer_id)
print('Text:', event.obj.text)
print()
elif event.type == VkBotEventType.MESSAGE_TYPING_STATE:
print('Typing ', end='')
print(event.obj.from_id, end=' ')
print('to ', end='')
print(event.obj.to_id)
print()
elif event.type == VkBotEventType.GROUP_JOIN:
print(event.obj.user_id, end=' ')
print('Joined the group!')
print()
elif event.type == VkBotEventType.GROUP_LEAVE:
print(event.obj.user_id, end=' ')
print('Left the group!')
print()
else:
print(event.type)
print() | 5,355,902 |
def load_json():
"""Load the translation dictionary."""
try:
with open(JSON_FILENAME, "r", encoding="utf8") as file:
known_names = json.load(file)
if "version" in known_names:
if known_names.get("version") < JSON_VERSION:
print("Unkown version: {}, current version: {}".format(
known_names.get("version"), JSON_VERSION))
raise Exception(
"Version mismatch. Backup the file and recreate.")
else:
print("No version number found")
known_names = {}
except FileNotFoundError:
known_names = {}
return known_names | 5,355,903 |
def get_username() -> str:
"""
Prompts the user to enter a username and then returns it
:return: The username entered by the user
"""
while True:
print("Please enter your username (without spaces)")
username = input().strip()
if ' ' not in username:
return username | 5,355,904 |
def test_dissimilarity_fn():
"""
Test the computed dissimilarity function against precomputed values; the dissimilarity function can be either normalized cross-correlation or the sum-of-squared-error function.
"""
# lncc diff images
tensor_true = np.array(range(12)).reshape((2, 1, 2, 3))
tensor_pred = 0.6 * np.ones((2, 1, 2, 3))
tensor_true = tf.convert_to_tensor(tensor_true, dtype=tf.float32)
tensor_pred = tf.convert_to_tensor(tensor_pred, dtype=tf.float32)
name_ncc = "lncc"
get_ncc = image.dissimilarity_fn(tensor_true, tensor_pred, name_ncc)
expect_ncc = [-0.68002254, -0.9608879]
assert is_equal_tf(get_ncc, expect_ncc)
# ssd diff images
tensor_true1 = np.zeros((2, 1, 2, 3))
tensor_pred1 = 0.6 * np.ones((2, 1, 2, 3))
tensor_true1 = tf.convert_to_tensor(tensor_true1, dtype=tf.float32)
tensor_pred1 = tf.convert_to_tensor(tensor_pred1, dtype=tf.float32)
name_ssd = "ssd"
get_ssd = image.dissimilarity_fn(tensor_true1, tensor_pred1, name_ssd)
expect_ssd = [0.36, 0.36]
assert is_equal_tf(get_ssd, expect_ssd)
# TODO gmi diff images
# lncc same image
get_zero_similarity_ncc = image.dissimilarity_fn(
tensor_pred1, tensor_pred1, name_ncc
)
assert is_equal_tf(get_zero_similarity_ncc, [-1, -1])
# ssd same image
get_zero_similarity_ssd = image.dissimilarity_fn(
tensor_true1, tensor_true1, name_ssd
)
assert is_equal_tf(get_zero_similarity_ssd, [0, 0])
# gmi same image
t = tf.ones([4, 3, 3, 3])
get_zero_similarity_gmi = image.dissimilarity_fn(t, t, "gmi")
assert is_equal_tf(get_zero_similarity_gmi, [0, 0, 0, 0])
# unknown func name
with pytest.raises(AssertionError):
image.dissimilarity_fn(
tensor_true1, tensor_pred1, "some random string that isn't ssd or lncc"
) | 5,355,905 |
def fixture_set_token(monkeypatch: typing.Any, token: str) -> None:
"""Set the token environment variable."""
monkeypatch.setenv("LABELS_TOKEN", token) | 5,355,906 |
def remove__defjob(args):
"""
:param argparse.Namespace args: should supply all the command-line options
:rtype:
"""
req_url = apiurl(args, "/machines/{machine_id}/defjob/".format(machine_id=args.id))
# print(req_url);
r = requests.delete(req_url)
if r.status_code == 200:
rj = r.json()
if rj["success"]:
print(
"default instance for machine {machine_id} removed.".format(
machine_id=args.id
)
)
else:
print(rj["msg"])
else:
print(r.text)
print("failed with error {r.status_code}".format(**locals())) | 5,355,907 |
def test_error_repr() -> None:
"""It has a string representation."""
content = ["foo", "bar"]
traceback = error.Traceback(content)
expected_output = f"Traceback(content={content})"
output = repr(traceback)
assert output == expected_output | 5,355,908 |
def condition(f):
"""
Decorator for conditions
"""
@wraps(f)
def try_execute(*args, **kwargs):
try:
res, m = f(*args, **kwargs)
m.conditions_results.append(res)
return m
except Exception as e:
raise ConditionError(e)
return try_execute | 5,355,909 |
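A minimal usage sketch of the decorator above; the Measurement holder and the example condition are hypothetical, only the (result, holder) return contract and the conditions_results attribute come from the code:

class Measurement:                      # hypothetical result holder
    def __init__(self):
        self.conditions_results = []

@condition
def value_is_positive(m, value):
    # A condition returns (boolean_result, holder); the decorator appends
    # the boolean to m.conditions_results and returns the holder.
    return value > 0, m

m = Measurement()
m = value_is_positive(m, 42)
print(m.conditions_results)             # -> [True]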
def get_spatial_anomalies(
coarse_obs_path, fine_obs_rechunked_path, variable, connection_string
) -> xr.Dataset:
"""Calculate the seasonal cycle (12 timesteps) spatial anomaly associated
with aggregating the fine_obs to a given coarsened scale and then reinterpolating
it back to the original spatial resolution. The outputs of this function are
dependent on three parameters:
* a grid (as opposed to a specific GCM since some GCMs run on the same grid)
* the time period which fine_obs (and by construct coarse_obs) cover
* the variable
Parameters
----------
coarse_obs_path : str
Path to observations coarsened to a GCM resolution. Chunked along time.
fine_obs_rechunked_path : str
Path to observations at the original spatial resolution. Chunked along time.
variable: str
The variable included in the dataset.
connection_string: str
Connection string passed through to regrid_dataset.
Returns
-------
seasonal_cycle_spatial_anomalies : xr.Dataset
Spatial anomaly for each month (i.e. of shape (nlat, nlon, 12))
"""
# interpolate coarse_obs back to the original scale
[coarse_obs, fine_obs_rechunked] = load_paths([coarse_obs_path, fine_obs_rechunked_path])
obs_interpolated, _ = regrid_dataset(
ds=coarse_obs,
ds_path=coarse_obs_path,
target_grid_ds=fine_obs_rechunked.isel(time=0),
variable=variable,
connection_string=connection_string,
)
# use rechunked fine_obs from coarsening step above because that is in map chunks so it
# will play nice with the interpolated obs
schema_maps_chunks.validate(fine_obs_rechunked[variable])
# calculate difference between interpolated obs and the original obs
spatial_anomalies = obs_interpolated - fine_obs_rechunked
# calculate seasonal cycle (12 time points)
seasonal_cycle_spatial_anomalies = spatial_anomalies.groupby("time.month").mean()
return seasonal_cycle_spatial_anomalies | 5,355,910 |
def get_pip_package_name(provider_package_id: str) -> str:
"""
Returns PIP package name for the package id.
:param provider_package_id: id of the package
:return: the name of pip package
"""
return "apache-airflow-providers-" + provider_package_id.replace(".", "-") | 5,355,911 |
def test_second_phase(players):
"""Verify that the second phase of the algorithm produces a valid set of
players with appropriate matches."""
players = first_phase(players)
assume(any(len(p.prefs) > 1 for p in players))
with warnings.catch_warnings(record=True) as w:
players = second_phase(players)
for player in players:
if player.prefs:
assert player.prefs[0] == player.matching
else:
message = w[-1].message
assert isinstance(message, NoStableMatchingWarning)
assert str(player.name) in str(message)
assert player.matching is None | 5,355,912 |
def func_asymmetry_f_b(z, flag_z: bool = False):
"""Function F_b(z) for asymmetry factor.
"""
f_a , dder_f_a = func_asymmetry_f_a(z, flag_z=flag_z)
res = 2*(2*numpy.square(z)-3)*f_a
dder = {}
if flag_z:
dder["z"] = 8 * z * f_a + 2*(2*numpy.square(z)-3)*dder_f_a["z"]
return res, dder | 5,355,913 |
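The derivative stored in dder["z"] above is the product rule applied to the definition of F_b; restated as an equation (consistent with the code, not an addition to it):

\frac{dF_b}{dz} = \frac{d}{dz}\left[2\,(2z^2 - 3)\,F_a(z)\right] = 8z\,F_a(z) + 2\,(2z^2 - 3)\,\frac{dF_a}{dz}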
def verify_codegen(
module, num_vitis_ai_modules=1, params=None, target="llvm", dpu_target="DPUCADX8G"
):
"""Check Vitis-AI codegen against a known good output."""
module = build_module(module, target, params=params, dpu_target=dpu_target)
vitis_ai_modules = extract_vitis_ai_modules(module)
assert len(vitis_ai_modules) == num_vitis_ai_modules, (
f"The number of Vitis-AI modules produced ({len(vitis_ai_modules)}) does not "
f"match the expected value ({num_vitis_ai_modules})."
) | 5,355,914 |
def momentum(state):
"""
solve for momentum for taup1
"""
vs = state.variables
"""
time tendency due to Coriolis force
"""
vs.update(tend_coriolisf(state))
"""
wind stress forcing
"""
vs.update(tend_tauxyf(state))
"""
advection
"""
vs.update(momentum_advection(state))
with state.timers["friction"]:
friction.friction(state)
"""
external mode
"""
with state.timers["pressure"]:
streamfunction.solve_streamfunction(state) | 5,355,915 |
def parse_args():
"""Command-line argument parser for generating scenes."""
# New parser
parser = ArgumentParser(description='Monte Carlo rendering generator')
# Rendering parameters
parser.add_argument('-t', '--tungsten', help='tungsten renderer full path', default='tungsten', type=str)
parser.add_argument('-d', '--scene-path', help='scene root path', type=str)
parser.add_argument('-r', '--resolution', help='image resolution (w, h)', nargs='+', type=int)
parser.add_argument('-s', '--spp', help='sample per pixel', default=16, type=int)
parser.add_argument('-n', '--nb-renders', help='number of renders', default=10, type=int)
parser.add_argument('--hdr-buffers', help='save buffers as hdr images', action='store_true')
parser.add_argument('--hdr-targets', help='save targets as hdr images', action='store_true')
parser.add_argument('-o', '--output-dir', help='output directory', default='../../data/renders', type=str)
return parser.parse_args() | 5,355,916 |
def unique_id(token_id):
"""Return a unique ID for a token.
The returned value is useful as the primary key of a database table,
memcache store, or other lookup table.
:returns: Given a PKI token, returns its hashed value. Otherwise, returns
the passed-in value (such as a UUID token ID or an existing
hash).
"""
return cms.cms_hash_token(token_id) | 5,355,917 |
def encode_aval_types(df_param: pd.DataFrame, df_ret: pd.DataFrame, df_var: pd.DataFrame,
df_aval_types: pd.DataFrame):
"""
It encodes the type of parameters and return according to visible type hints
"""
types = df_aval_types['Types'].tolist()
def trans_aval_type(x):
for i, t in enumerate(types):
if x in t:
return i
return len(types) - 1
# If the arg type doesn't exist in top_n available types, we insert n + 1 into the vector as it represents the other type.
df_param['param_aval_enc'] = df_param['arg_type'].progress_apply(trans_aval_type)
df_ret['ret_aval_enc'] = df_ret['return_type'].progress_apply(trans_aval_type)
df_var['var_aval_enc'] = df_var['var_type'].progress_apply(trans_aval_type)
return df_param, df_ret | 5,355,918 |
def render_app_children(node: WxNode, context: WxRenderingContext):
"""Renders App children"""
render_children(node, context, lambda x, n, ctx: WxRenderingContext({
'xml_node': x,
'parent_node': n,
'node_globals': InheritedDict(node.node_globals)
})) | 5,355,919 |
def first(iterable, default=None):
"""
Returns the first item or a default value
>>> first(x for x in [1, 2, 3] if x % 2 == 0)
2
>>> first((x for x in [1, 2, 3] if x > 42), -1)
-1
"""
return next(iter(iterable), default) | 5,355,920 |
def generate_uuid_from_wf_data(wf_data: np.ndarray, decimals: int = 12) -> str:
"""
Creates a unique identifier from the waveform data, using a hash. Identical arrays
yield identical strings within the same process.
Parameters
----------
wf_data:
The data to generate the unique id for.
decimals:
The number of decimal places to consider.
Returns
-------
:
A unique identifier.
"""
waveform_hash = hash(wf_data.round(decimals=decimals).tobytes())
return str(waveform_hash) | 5,355,921 |
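A small check of the determinism claim, assuming the function above (and its numpy import) is in scope; the arrays are arbitrary examples:

import numpy as np

wf_a = np.array([0.25, 0.5])
wf_b = wf_a + 1e-15                      # differs only beyond the 12th decimal
assert generate_uuid_from_wf_data(wf_a) == generate_uuid_from_wf_data(wf_b)
# a change within the first 12 decimals yields a different id
# (hash collisions are astronomically unlikely)
assert generate_uuid_from_wf_data(wf_a) != generate_uuid_from_wf_data(wf_a + 1e-3)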
def get_date_of_x_cases_reached(df, x):
"""
Determines the date on which x cumulative cases were reached
:param df: pandas df
:param x {int}: number of cases
"""
pass | 5,355,922 |
def display_quantiles(prediction_list, prediction_length, target_ts=None):
"""
Display average prediction value with 80% confidence interval compared to
target values.
:param prediction_list: list of predictions for stock prices over time at 0.1, 0.5, 0.9 quantiles
:param prediction_length: how far in the future we are trying to predict
:param target_ts: the target time series to compare against
"""
# show predictions for all input ts
for k in range(len(prediction_list)):
plt.figure(figsize=(12,6))
# get the target month of data
if target_ts is not None:
target = target_ts[k][-prediction_length:]
plt.plot(range(len(target)), target, label='target')
# get the quantile values at 10 and 90%
p10 = prediction_list[k]['0.1']
p90 = prediction_list[k]['0.9']
# fill the 80% confidence interval
plt.fill_between(p10.index, p10, p90, color='y', alpha=0.5, label='80% confidence interval')
# plot the median prediction line
prediction_list[k]['0.5'].plot(label='prediction median')
plt.legend()
plt.show() | 5,355,923 |
def bridge(int, flag, unw, bridge, width, xmin='-', xmax='-', ymin='-', ymax='-', logpath=None, outdir=None,
shellscript=None):
"""
| Phase unwrap new regions with bridges to regions already unwrapped
| Copyright 2010, Gamma Remote Sensing, v1.5 clw 4-Nov-2010
Parameters
----------
int:
(input) interferogram (fcomplex)
flag:
(input) unwrapping flag file
unw:
(input/output) unwrapped phase (float)
bridge:
(input) bridge data file (text format)
width:
number of samples/row
xmin:
starting range pixel offset to unwrap (default = 0)
xmax:
last range pixel offset to unwrap (default=width-1)
ymin:
starting azimuth row offset to unwrap, relative to start (default = 0)
ymax:
last azimuth row offset to unwrap, relative to start (default = nlines-1)
logpath: str or None
a directory to write command logfiles to
outdir: str or None
the directory to execute the command in
shellscript: str or None
a file to write the Gamma commands to in shell format
"""
process(
['/usr/local/GAMMA_SOFTWARE-20180703/ISP/bin/bridge', int, flag, unw, bridge, width, xmin, xmax, ymin, ymax],
logpath=logpath, outdir=outdir, shellscript=shellscript) | 5,355,924 |
def all_codes(number):
"""
:param: number - input integer
Return - list() of all codes possible for this number
TODO: complete this method and return a list with all possible codes for the input number
"""
pass | 5,355,925 |
def aggregate(table, key, aggregation=None, value=None, presorted=False,
buffersize=None, tempdir=None, cache=True):
"""Group rows under the given key then apply aggregation functions.
E.g.::
>>> import petl as etl
>>>
>>> table1 = [['foo', 'bar', 'baz'],
... ['a', 3, True],
... ['a', 7, False],
... ['b', 2, True],
... ['b', 2, False],
... ['b', 9, False],
... ['c', 4, True]]
>>> # aggregate whole rows
... table2 = etl.aggregate(table1, 'foo', len)
>>> table2
+-----+-------+
| foo | value |
+=====+=======+
| 'a' | 2 |
+-----+-------+
| 'b' | 3 |
+-----+-------+
| 'c' | 1 |
+-----+-------+
>>> # aggregate single field
... table3 = etl.aggregate(table1, 'foo', sum, 'bar')
>>> table3
+-----+-------+
| foo | value |
+=====+=======+
| 'a' | 10 |
+-----+-------+
| 'b' | 13 |
+-----+-------+
| 'c' | 4 |
+-----+-------+
>>> # alternative signature using keyword args
... table4 = etl.aggregate(table1, key=('foo', 'bar'),
... aggregation=list, value=('bar', 'baz'))
>>> table4
+-----+-----+-------------------------+
| foo | bar | value |
+=====+=====+=========================+
| 'a' | 3 | [(3, True)] |
+-----+-----+-------------------------+
| 'a' | 7 | [(7, False)] |
+-----+-----+-------------------------+
| 'b' | 2 | [(2, True), (2, False)] |
+-----+-----+-------------------------+
| 'b' | 9 | [(9, False)] |
+-----+-----+-------------------------+
| 'c' | 4 | [(4, True)] |
+-----+-----+-------------------------+
>>> # aggregate multiple fields
... from collections import OrderedDict
>>> import petl as etl
>>>
>>> aggregation = OrderedDict()
>>> aggregation['count'] = len
>>> aggregation['minbar'] = 'bar', min
>>> aggregation['maxbar'] = 'bar', max
>>> aggregation['sumbar'] = 'bar', sum
>>> # default aggregation function is list
... aggregation['listbar'] = 'bar'
>>> aggregation['listbarbaz'] = ('bar', 'baz'), list
>>> aggregation['bars'] = 'bar', etl.strjoin(', ')
>>> table5 = etl.aggregate(table1, 'foo', aggregation)
>>> table5
+-----+-------+--------+--------+--------+-----------+-------------------------------------+-----------+
| foo | count | minbar | maxbar | sumbar | listbar | listbarbaz | bars |
+=====+=======+========+========+========+===========+=====================================+===========+
| 'a' | 2 | 3 | 7 | 10 | [3, 7] | [(3, True), (7, False)] | '3, 7' |
+-----+-------+--------+--------+--------+-----------+-------------------------------------+-----------+
| 'b' | 3 | 2 | 9 | 13 | [2, 2, 9] | [(2, True), (2, False), (9, False)] | '2, 2, 9' |
+-----+-------+--------+--------+--------+-----------+-------------------------------------+-----------+
| 'c' | 1 | 4 | 4 | 4 | [4] | [(4, True)] | '4' |
+-----+-------+--------+--------+--------+-----------+-------------------------------------+-----------+
If `presorted` is True, it is assumed that the data are already sorted by
the given key, and the `buffersize`, `tempdir` and `cache` arguments are
ignored. Otherwise, the data are sorted, see also the discussion of the
`buffersize`, `tempdir` and `cache` arguments under the
:func:`petl.transform.sorts.sort` function.
"""
if callable(aggregation):
return SimpleAggregateView(table, key, aggregation=aggregation,
value=value, presorted=presorted,
buffersize=buffersize, tempdir=tempdir,
cache=cache)
elif aggregation is None or isinstance(aggregation, (list, tuple, dict)):
# ignore value arg
return MultiAggregateView(table, key, aggregation=aggregation,
presorted=presorted, buffersize=buffersize,
tempdir=tempdir, cache=cache)
else:
raise ArgumentError('expected aggregation is callable, list, tuple, dict '
'or None') | 5,355,926 |
def show_mpls_bypass_lsp_name_extensive_rpc(self, show_lsp_input_info=None, api_timeout=''):
"""
This is an auto-generated method for the PySwitchLib.
**Supported Versions**:
* SLXOS: 17r.1.01a, 17r.2.00, 17s.1.02
**Child Instance Keyword Argument Tuple(s)**:
:type show_lsp_input_info: lsp, lsp_wide, lsp_detail, lsp_extensive, lsp_debug, lsp_name, lsp_name_extensive, lsp_name_debug, bypass_lsp, bypass_lsp_wide, bypass_lsp_detail, bypass_lsp_extensive, bypass_lsp_debug, bypass_lsp_name, bypass_lsp_name_extensive, bypass_lsp_name_debug, bypass_lsp_static, bypass_lsp_static_wide, bypass_lsp_static_detail, bypass_lsp_static_extensive, bypass_lsp_static_debug, bypass_lsp_static_name, bypass_lsp_static_name_extensive, bypass_lsp_static_name_debug, bypass_lsp_dynamic, bypass_lsp_dynamic_wide, bypass_lsp_dynamic_detail, bypass_lsp_dynamic_extensive, bypass_lsp_dynamic_debug, bypass_lsp_dynamic_name, bypass_lsp_dynamic_name_extensive, bypass_lsp_dynamic_name_debug, lsp_input_lsp_name, lsp_input_bypass, lsp_input_dynamic, lsp_input_brief, lsp_input_wide, lsp_input_detail, lsp_input_extensive, lsp_input_debug, lsp_input_one, lsp_input_all, lsp_input_more
:param show_lsp_input_info: Keyword argument tuple.
:type lsp: unicode
:param lsp: **show_lsp_input_info** tuple argument.
:type lsp_wide: YANGBool
:param lsp_wide: **show_lsp_input_info** tuple argument.
:type lsp_detail: YANGBool
:param lsp_detail: **show_lsp_input_info** tuple argument.
:type lsp_extensive: YANGBool
:param lsp_extensive: **show_lsp_input_info** tuple argument.
:type lsp_debug: YANGBool
:param lsp_debug: **show_lsp_input_info** tuple argument.
:type lsp_name: unicode
:param lsp_name: **show_lsp_input_info** tuple argument.
:type lsp_name_extensive: YANGBool
:param lsp_name_extensive: **show_lsp_input_info** tuple argument.
:type lsp_name_debug: YANGBool
:param lsp_name_debug: **show_lsp_input_info** tuple argument.
:type bypass_lsp: unicode
:param bypass_lsp: **show_lsp_input_info** tuple argument.
:type bypass_lsp_wide: YANGBool
:param bypass_lsp_wide: **show_lsp_input_info** tuple argument.
:type bypass_lsp_detail: YANGBool
:param bypass_lsp_detail: **show_lsp_input_info** tuple argument.
:type bypass_lsp_extensive: YANGBool
:param bypass_lsp_extensive: **show_lsp_input_info** tuple argument.
:type bypass_lsp_debug: YANGBool
:param bypass_lsp_debug: **show_lsp_input_info** tuple argument.
:type bypass_lsp_name: unicode
:param bypass_lsp_name: **show_lsp_input_info** tuple argument.
:type bypass_lsp_name_extensive: YANGBool
:param bypass_lsp_name_extensive: **show_lsp_input_info** tuple argument.
:type bypass_lsp_name_debug: YANGBool
:param bypass_lsp_name_debug: **show_lsp_input_info** tuple argument.
:type bypass_lsp_static: unicode
:param bypass_lsp_static: **show_lsp_input_info** tuple argument.
:type bypass_lsp_static_wide: YANGBool
:param bypass_lsp_static_wide: **show_lsp_input_info** tuple argument.
:type bypass_lsp_static_detail: YANGBool
:param bypass_lsp_static_detail: **show_lsp_input_info** tuple argument.
:type bypass_lsp_static_extensive: YANGBool
:param bypass_lsp_static_extensive: **show_lsp_input_info** tuple argument.
:type bypass_lsp_static_debug: YANGBool
:param bypass_lsp_static_debug: **show_lsp_input_info** tuple argument.
:type bypass_lsp_static_name: unicode
:param bypass_lsp_static_name: **show_lsp_input_info** tuple argument.
:type bypass_lsp_static_name_extensive: YANGBool
:param bypass_lsp_static_name_extensive: **show_lsp_input_info** tuple argument.
:type bypass_lsp_static_name_debug: YANGBool
:param bypass_lsp_static_name_debug: **show_lsp_input_info** tuple argument.
:type bypass_lsp_dynamic: unicode
:param bypass_lsp_dynamic: **show_lsp_input_info** tuple argument.
:type bypass_lsp_dynamic_wide: YANGBool
:param bypass_lsp_dynamic_wide: **show_lsp_input_info** tuple argument.
:type bypass_lsp_dynamic_detail: YANGBool
:param bypass_lsp_dynamic_detail: **show_lsp_input_info** tuple argument.
:type bypass_lsp_dynamic_extensive: YANGBool
:param bypass_lsp_dynamic_extensive: **show_lsp_input_info** tuple argument.
:type bypass_lsp_dynamic_debug: YANGBool
:param bypass_lsp_dynamic_debug: **show_lsp_input_info** tuple argument.
:type bypass_lsp_dynamic_name: unicode
:param bypass_lsp_dynamic_name: **show_lsp_input_info** tuple argument.
:type bypass_lsp_dynamic_name_extensive: YANGBool
:param bypass_lsp_dynamic_name_extensive: **show_lsp_input_info** tuple argument.
:type bypass_lsp_dynamic_name_debug: YANGBool
:param bypass_lsp_dynamic_name_debug: **show_lsp_input_info** tuple argument.
:type lsp_input_lsp_name: unicode
:param lsp_input_lsp_name: **show_lsp_input_info** tuple argument.
:type lsp_input_bypass: YANGBool
:param lsp_input_bypass: **show_lsp_input_info** tuple argument.
:type lsp_input_dynamic: YANGBool
:param lsp_input_dynamic: **show_lsp_input_info** tuple argument.
:type lsp_input_brief: YANGBool
:param lsp_input_brief: **show_lsp_input_info** tuple argument.
:type lsp_input_wide: YANGBool
:param lsp_input_wide: **show_lsp_input_info** tuple argument.
:type lsp_input_detail: YANGBool
:param lsp_input_detail: **show_lsp_input_info** tuple argument.
:type lsp_input_extensive: YANGBool
:param lsp_input_extensive: **show_lsp_input_info** tuple argument.
:type lsp_input_debug: YANGBool
:param lsp_input_debug: **show_lsp_input_info** tuple argument.
:type lsp_input_one: YANGBool
:param lsp_input_one: **show_lsp_input_info** tuple argument.
:type lsp_input_all: YANGBool
:param lsp_input_all: **show_lsp_input_info** tuple argument.
:type lsp_input_more: YANGBool
:param lsp_input_more: **show_lsp_input_info** tuple argument.
:type api_timeout: long or tuple(long, long)
:param api_timeout: Timeout for connection and response in seconds. If a tuple is specified, then the first value is for the connection timeout and the second value is for the response timeout.
:rtype: (*bool, list*)
:returns: Returns a tuple.
#. **api_success** (*bool*) - The success or failure of the API.
#. **details** (*list*) - List of REST request/response dictionaries, keyed by the asset's ip address.
:raises ConnectionError: If requests module connection or response timeout occurs.
:raises UnsupportedOSError: If firmware version installed on asset is not supported.
:raises RestInterfaceError: If requests module does not get a successful response from the rest URI.
:raises ValueError: If the argument value does not meet type requirements or value restrictions.
"""
operation_type = 'rpc'
compositions_list = []
bindings_list = [('pybind.slxos.v17r_1_01a.brocade_mpls_rpc.show_mpls_bypass_lsp_name_extensive.input', 'pybind.slxos.v17r_1_01a.brocade_mpls_rpc.brocade_mpls', 'pybind.slxos.v17r_1_01a.brocade_mpls_rpc'), ('pybind.slxos.v17r_2_00.brocade_mpls_rpc.show_mpls_bypass_lsp_name_extensive.input', 'pybind.slxos.v17r_2_00.brocade_mpls_rpc.brocade_mpls', 'pybind.slxos.v17r_2_00.brocade_mpls_rpc'), ('pybind.slxos.v17s_1_02.brocade_mpls_rpc.show_mpls_bypass_lsp_name_extensive.input', 'pybind.slxos.v17s_1_02.brocade_mpls_rpc.brocade_mpls', 'pybind.slxos.v17s_1_02.brocade_mpls_rpc')]
composed_child_list = [('pybind.slxos.v17s_1_02.brocade_mpls_rpc.show_mpls_bypass_lsp_name_extensive.input', u'show_lsp_input_info'), ('pybind.slxos.v17r_1_01a.brocade_mpls_rpc.show_mpls_bypass_lsp_name_extensive.input', u'show_lsp_input_info'), ('pybind.slxos.v17r_2_00.brocade_mpls_rpc.show_mpls_bypass_lsp_name_extensive.input', u'show_lsp_input_info')]
compositions_keyval_list = []
bindings_keyval = {'kwargs_key_name': '', 'keyval': '', 'extra_keyval': ''}
composed_child_leafval_list = [{'leafval': 'lsp, lsp_wide, lsp_detail, lsp_extensive, lsp_debug, lsp_name, lsp_name_extensive, lsp_name_debug, bypass_lsp, bypass_lsp_wide, bypass_lsp_detail, bypass_lsp_extensive, bypass_lsp_debug, bypass_lsp_name, bypass_lsp_name_extensive, bypass_lsp_name_debug, bypass_lsp_static, bypass_lsp_static_wide, bypass_lsp_static_detail, bypass_lsp_static_extensive, bypass_lsp_static_debug, bypass_lsp_static_name, bypass_lsp_static_name_extensive, bypass_lsp_static_name_debug, bypass_lsp_dynamic, bypass_lsp_dynamic_wide, bypass_lsp_dynamic_detail, bypass_lsp_dynamic_extensive, bypass_lsp_dynamic_debug, bypass_lsp_dynamic_name, bypass_lsp_dynamic_name_extensive, bypass_lsp_dynamic_name_debug, lsp_input_lsp_name, lsp_input_bypass, lsp_input_dynamic, lsp_input_brief, lsp_input_wide, lsp_input_detail, lsp_input_extensive, lsp_input_debug, lsp_input_one, lsp_input_all, lsp_input_more'}, {'leafval': 'lsp, lsp_wide, lsp_detail, lsp_extensive, lsp_debug, lsp_name, lsp_name_extensive, lsp_name_debug, bypass_lsp, bypass_lsp_wide, bypass_lsp_detail, bypass_lsp_extensive, bypass_lsp_debug, bypass_lsp_name, bypass_lsp_name_extensive, bypass_lsp_name_debug, bypass_lsp_static, bypass_lsp_static_wide, bypass_lsp_static_detail, bypass_lsp_static_extensive, bypass_lsp_static_debug, bypass_lsp_static_name, bypass_lsp_static_name_extensive, bypass_lsp_static_name_debug, bypass_lsp_dynamic, bypass_lsp_dynamic_wide, bypass_lsp_dynamic_detail, bypass_lsp_dynamic_extensive, bypass_lsp_dynamic_debug, bypass_lsp_dynamic_name, bypass_lsp_dynamic_name_extensive, bypass_lsp_dynamic_name_debug, lsp_input_lsp_name, lsp_input_bypass, lsp_input_dynamic, lsp_input_brief, lsp_input_wide, lsp_input_detail, lsp_input_extensive, lsp_input_debug, lsp_input_one, lsp_input_all, lsp_input_more'}, {'leafval': 'lsp, lsp_wide, lsp_detail, lsp_extensive, lsp_debug, lsp_name, lsp_name_extensive, lsp_name_debug, bypass_lsp, bypass_lsp_wide, bypass_lsp_detail, bypass_lsp_extensive, bypass_lsp_debug, bypass_lsp_name, bypass_lsp_name_extensive, bypass_lsp_name_debug, bypass_lsp_static, bypass_lsp_static_wide, bypass_lsp_static_detail, bypass_lsp_static_extensive, bypass_lsp_static_debug, bypass_lsp_static_name, bypass_lsp_static_name_extensive, bypass_lsp_static_name_debug, bypass_lsp_dynamic, bypass_lsp_dynamic_wide, bypass_lsp_dynamic_detail, bypass_lsp_dynamic_extensive, bypass_lsp_dynamic_debug, bypass_lsp_dynamic_name, bypass_lsp_dynamic_name_extensive, bypass_lsp_dynamic_name_debug, lsp_input_lsp_name, lsp_input_bypass, lsp_input_dynamic, lsp_input_brief, lsp_input_wide, lsp_input_detail, lsp_input_extensive, lsp_input_debug, lsp_input_one, lsp_input_all, lsp_input_more'}]
leafval_map = {}
rest_leaf_name = ''
choices_kwargs_map = {}
leaf_os_support_map = {}
self._api_validation(choices_kwargs_map=choices_kwargs_map, leaf_os_support_map=leaf_os_support_map, show_lsp_input_info=show_lsp_input_info)
pybind_object = self._get_pybind_object(operation_type=operation_type, compositions_list=compositions_list, bindings_list=bindings_list, composed_child_list=composed_child_list, compositions_keyval_list=compositions_keyval_list, bindings_keyval=bindings_keyval, composed_child_leafval_list=composed_child_leafval_list, leafval_map=leafval_map, show_lsp_input_info=show_lsp_input_info)
return self._rpc_worker(operation_type=operation_type, pybind_object=pybind_object, resource_depth=1, timeout=api_timeout) | 5,355,927 |
def most_similar(sen, voting_dict):
"""
Input: the last name of a senator, and a dictionary mapping senator names
to lists representing their voting records.
Output: the last name of the senator whose political mindset is most
like the input senator (excluding, of course, the input senator
him/herself). Resolve ties arbitrarily.
Example:
>>> vd = {'Klein': [1,1,1], 'Fox-Epstein': [1,-1,0], 'Ravella': [-1,0,0]}
>>> most_similar('Klein', vd)
'Fox-Epstein'
Note that you can (and are encouraged to) re-use your policy_compare procedure.
"""
most_sim = -1000
most_sim_senator = ""
for key, val in voting_dict.items():
if key != sen:
cmp = policy_compare(sen, key, voting_dict)
if most_sim < cmp:
most_sim = cmp
most_sim_senator = key
return most_sim_senator | 5,355,928 |
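The docstring refers to a policy_compare helper that is not included here; a minimal sketch consistent with the docstring's example (the dot product of two voting records) is shown below, provided it lives in the same namespace as most_similar:

def policy_compare(sen_a, sen_b, voting_dict):
    # Sketch, not the original: dot product of the two voting records.
    return sum(a * b for a, b in zip(voting_dict[sen_a], voting_dict[sen_b]))

vd = {'Klein': [1, 1, 1], 'Fox-Epstein': [1, -1, 0], 'Ravella': [-1, 0, 0]}
assert most_similar('Klein', vd) == 'Fox-Epstein'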
def overlay_spectra(model, dataset):
""" Run a series of diagnostics on the fitted spectra
Parameters
----------
model: model
best-fit Cannon spectral model
dataset: Dataset
original spectra
"""
best_flux, best_ivar = draw_spectra(model, dataset)
coeffs_all, covs, scatters, all_chisqs, pivots, label_vector = model.model
# Overplot original spectra with best-fit spectra
print("Overplotting spectra for ten random stars")
res = dataset.test_flux-best_flux
lambdas = dataset.wl
npix = len(lambdas)
nstars = best_flux.shape[0]
pickstars = []
for i in range(10):
pickstars.append(random.randrange(0, nstars-1))
for i in pickstars:
print("Star %s" % i)
ID = dataset.test_ID[i]
spec_orig = dataset.test_flux[i,:]
bad = dataset.test_flux[i,:] == 0
lambdas = np.ma.array(lambdas, mask=bad, dtype=float)
npix = len(lambdas.compressed())
spec_orig = np.ma.array(dataset.test_flux[i,:], mask=bad)
spec_fit = np.ma.array(best_flux[i,:], mask=bad)
ivars_orig = np.ma.array(dataset.test_ivar[i,:], mask=bad)
ivars_fit = np.ma.array(best_ivar[i,:], mask=bad)
red_chisq = np.sum(all_chisqs[:,i], axis=0) / (npix - coeffs_all.shape[1])
red_chisq = np.round(red_chisq, 2)
fig,axarr = plt.subplots(2)
ax1 = axarr[0]
im = ax1.scatter(lambdas, spec_orig, label="Orig Spec",
c=1 / np.sqrt(ivars_orig), s=10)
ax1.scatter(lambdas, spec_fit, label="Cannon Spec", c='r', s=10)
ax1.errorbar(lambdas, spec_fit,
yerr=1/np.sqrt(ivars_fit), fmt='ro', ms=1, alpha=0.7)
ax1.set_xlabel(r"Wavelength $\lambda (\AA)$")
ax1.set_ylabel("Normalized flux")
ax1.set_title("Spectrum Fit: %s" % ID)
ax1.set_title("Spectrum Fit")
ax1.set_xlim(min(lambdas.compressed())-10, max(lambdas.compressed())+10)
ax1.legend(loc='lower center', fancybox=True, shadow=True)
ax2 = axarr[1]
ax2.scatter(spec_orig, spec_fit, c=1/np.sqrt(ivars_orig), alpha=0.7)
ax2.errorbar(spec_orig, spec_fit, yerr=1 / np.sqrt(ivars_fit),
ecolor='k', fmt="none", ms=1, alpha=0.7)
#fig.subplots_adjust(right=0.8)
#cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
fig.colorbar(im, ax=ax2)
#fig.colorbar(
# im, cax=cbar_ax,
# label="Uncertainties on the Fluxes from the Original Spectrum")
xlims = ax2.get_xlim()
ylims = ax2.get_ylim()
lims = [np.min([xlims, ylims]), np.max([xlims, ylims])]
ax2.plot(lims, lims, 'k-', alpha=0.75)
textstr = "Red Chi Sq: %s" % red_chisq
props = dict(boxstyle='round', facecolor='palevioletred', alpha=0.5)
ax2.text(0.05, 0.95, textstr, transform=ax2.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
ax2.set_xlim(xlims)
ax2.set_ylim(ylims)
ax2.set_xlabel("Orig Fluxes")
ax2.set_ylabel("Fitted Fluxes")
plt.tight_layout()
filename = "best_fit_spec_Star%s.png" % i
print("Saved as %s" % filename)
fig.savefig(filename)
plt.close(fig) | 5,355,929 |
def fixed_ro_bci_edge(ascentlat, lat_fixed_ro_ann,
zero_bounds_guess_range=np.arange(0.1, 90, 5)):
"""Numerically solve fixed-Ro, 2-layer BCI model of HC edge."""
def _solver(lat_a, lat_h):
# Reasonable to start guess at the average of the two given latitudes.
init_guess = 0.5 * (lat_a + lat_h)
return brentq_solver_sweep_param(
_fixed_ro_bci_edge,
lat_a,
init_guess,
zero_bounds_guess_range,
funcargs=(lat_h,),
)
return xr.apply_ufunc(_solver, ascentlat, lat_fixed_ro_ann,
vectorize=True, dask="parallelized") | 5,355,930 |
def delete_projects(
*, projects,
db_name='smartshark',
db_user=None,
db_password=None,
db_hostname='localhost',
db_port=27017,
db_authentication_db=None,
db_ssl=False,
):
"""
Delete a list of project from a database.
:param projects: List of projects that should be copied (required)
:param db_name: name of the source database. Default: 'smartshark'
:param db_user: user name for the source database. Default: None
:param db_password: password for the source database. Default: None
:param db_hostname: host of the source database Default: 'localhost'
:param db_port: port of the source database. Default: 27017
:param db_authentication_db: authentication db of the source database. Default: None
:param db_ssl: whether SSL is used for the connection to the source database. Default: None
"""
project_ref_collections = ['vcs_system', 'issue_system', 'mailing_list', 'pull_request_system']
vcs_ref_collections = ['branch', 'tag', 'file', 'commit', 'travis_build']
commit_ref_collections = ['clone_instance', 'code_entity_state', 'code_group_state',
'commit_changes', 'file_action', 'refactoring']
file_action_ref_collections = ['hunk']
its_ref_collections = ['issue']
issue_ref_collections = ['issue_comment', 'event']
ml_ref_collections = ['message']
travis_ref_collections = ['travis_job']
prsystem_ref_collections = ['pull_request']
pr_ref_collections = ['pull_request_comment', 'pull_request_commit', 'pull_request_event', 'pull_request_file',
'pull_request_file', 'pull_request_review']
prreview_ref_collections = ['pull_request_review_comment']
print("connecting to database")
db_uri = create_mongodb_uri_string(db_user, db_password, db_hostname, db_port, db_authentication_db, db_ssl)
print(db_uri)
db_client = MongoClient(db_uri)
db = db_client[db_name]
for project_name in projects:
print('starting for project %s' % project_name)
project = db['project'].find_one({'name': project_name})
for cur_proref_col in project_ref_collections:
if cur_proref_col == 'vcs_system':
for vcs_system in db.vcs_system.find({'project_id': project['_id']}):
file_id = vcs_system['repository_file']
fs = gridfs.GridFS(db, collection='repository_data')
fs.delete(file_id)
for cur_vcsref_col in vcs_ref_collections:
if cur_vcsref_col == 'commit':
commits = [commit['_id'] for commit in
db.commit.find({'vcs_system_id': vcs_system['_id']}, {'_id': 1})]
print("start copying data that references commit (%i commits total)" % len(commits))
for i in range(0, math.ceil(len(commits) / 100)):
slice_start = i * 100
slice_end = min((i + 1) * 100, len(commits))
cur_commit_slice = commits[slice_start:slice_end]
for cur_commitref_col in commit_ref_collections:
if cur_commitref_col == 'commit_changes': # special case because no field commit_id
print('deleting %s' % cur_commitref_col)
db[cur_commitref_col].delete_many({'old_commit_id': {'$in': cur_commit_slice}})
if cur_commitref_col == 'file_action':
file_actions = [file_action['_id'] for file_action in
db.file_action.find({'commit_id': {'$in': cur_commit_slice}})]
for cur_faref_col in file_action_ref_collections:
print('deleting %s' % cur_faref_col)
db[cur_faref_col].delete_many({'file_action_id': {'$in': file_actions}})
print('deleting %s' % cur_commitref_col)
db[cur_commitref_col].delete_many({'commit_id': {'$in': cur_commit_slice}})
print((i + 1) * 100, 'commits done')
if cur_vcsref_col == 'travis_build':
for cur_travisref_col in travis_ref_collections:
print('deleting %s' % cur_travisref_col)
db[cur_travisref_col].delete_many({'vcs_system_id': vcs_system['_id']})
print('deleting %s' % cur_vcsref_col)
db[cur_vcsref_col].delete_many({'vcs_system_id': vcs_system['_id']})
if cur_proref_col == 'issue_system':
for issue_system in db.issue_system.find({'project_id': project['_id']}):
for cur_itsref_col in its_ref_collections:
if cur_itsref_col == 'issue':
issues = [issue['_id'] for issue in
db.issue.find({'issue_system_id': issue_system['_id']}, {'_id': 1})]
for cur_issueref_col in issue_ref_collections:
print('deleting %s' % cur_issueref_col)
db[cur_issueref_col].delete_many({'issue_id': {'$in': issues}})
print('deleting %s' % cur_itsref_col)
db[cur_itsref_col].delete_many({'issue_system_id': issue_system['_id']})
if cur_proref_col == 'mailing_list':
for mailing_list in db.mailing_list.find({'project_id': project['_id']}):
for cur_mlref_col in ml_ref_collections:
print('deleting %s' % cur_mlref_col)
db[cur_mlref_col].delete_many({'mailing_list_id': mailing_list['_id']})
if cur_proref_col == 'pull_request_system':
for pull_request_system in db.pull_request_system.find({'project_id': project['_id']},
no_cursor_timeout=True):
for cur_prsysref_col in prsystem_ref_collections:
if cur_prsysref_col == 'pull_request':
pull_requests = [pull_request['_id'] for pull_request in
db.pull_request.find({'pull_request_system_id':
pull_request_system['_id']}, {'_id': 1})]
for cur_prref_col in pr_ref_collections:
if cur_prref_col == 'pull_request_review':
pull_request_reviews = [pull_request_review['_id'] for pull_request_review in
db.pull_request_review.find({'pull_request_id':
{'$in': pull_requests}},
{'_id': 1})]
for cur_prreviewref_col in prreview_ref_collections:
print('deleting %s' % cur_prreviewref_col)
db[cur_prreviewref_col].delete_many({'pull_request_review_id':
{'$in': pull_request_reviews}})
print('deleting %s' % cur_prref_col)
db[cur_prref_col].delete_many({'pull_request_id': {'$in': pull_requests}})
print('deleting %s' % cur_prsysref_col)
db[cur_prsysref_col].delete_many({'pull_request_system_id': pull_request_system['_id']})
print('deleting %s' % cur_proref_col)
db[cur_proref_col].delete_many({'project_id': project['_id']})
db['project'].delete_one({'name': project_name}) | 5,355,931 |
def prepend(list, item):
"""
Return a list with the given item added at the beginning. (Not
recursive.)
"""
return [item] + list | 5,355,932 |
def fresh_jwt_required(fn):
"""
A decorator to protect a Flask endpoint.
If you decorate an endpoint with this, it will ensure that the requester
has a valid and fresh access token before allowing the endpoint to be
called.
See also: :func:`~flask_jwt_extended.jwt_required`
"""
@wraps(fn)
def wrapper(*args, **kwargs):
jwt_data = _decode_jwt_from_request(request_type='access')
ctx_stack.top.jwt = jwt_data
if not jwt_data['fresh']:
raise FreshTokenRequired('Fresh token required')
if not verify_token_claims(jwt_data[config.user_claims_key]):
raise UserClaimsVerificationError('User claims verification failed')
_load_user(jwt_data[config.identity_claim_key])
return fn(*args, **kwargs)
return wrapper | 5,355,933 |
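A minimal usage sketch of the decorator, assuming a Flask app with flask_jwt_extended initialized in the usual way (the route and secret are hypothetical):

from flask import Flask, jsonify

app = Flask(__name__)
app.config['JWT_SECRET_KEY'] = 'change-me'   # hypothetical secret
# JWTManager(app) and login/token issuing are assumed to be set up elsewhere.

@app.route('/change-password', methods=['POST'])
@fresh_jwt_required
def change_password():
    # Reachable only with a token issued directly by a login (fresh),
    # not one obtained via a refresh-token exchange.
    return jsonify(msg='password changed'), 200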
def random_small_number():
"""
Generate a random float.
:return: a random float in [0, 1)
"""
return random.random() | 5,355,934 |
def get_sample(df, col_name, n=100, seed=42):
"""Get a sample from a column of a dataframe.
It drops any numpy.nan entries before sampling. The sampling
is performed without replacement.
An example of numpydoc style for those who haven't seen it yet.
Parameters
----------
df : pandas.DataFrame
Source dataframe.
col_name : str
Name of the column to be sampled.
n : int
Sample size. Default is 100.
seed : int
Random seed. Default is 42.
Returns
-------
pandas.Series
Sample of size n from dataframe's column.
"""
np.random.seed(seed)
random_idx = np.random.choice(df[col_name].dropna().index, size=n, replace=False)
return df.loc[random_idx, col_name] | 5,355,935 |
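For example (assuming get_sample above is importable; the data are synthetic):

import numpy as np
import pandas as pd

df = pd.DataFrame({"height": np.random.default_rng(0).normal(170, 10, size=1000)})
sample = get_sample(df, "height", n=50, seed=42)
assert len(sample) == 50
assert sample.index.is_unique      # drawn without replacement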
def gen_mail_content(content, addr_from):
"""
Generate a new mail with a DKIM signature added, based on the mail body.
@param content: string, the mail body content
@return str_mail: the new mail with the DKIM signature added
"""
try:
domain = addr_from.split('@')[-1]
dkim_info = get_dkim_info(domain)
if dkim_info:
content = repalce_mail(content, addr_from)
selector, private = dkim_info
private = private.replace('\r\n', '\n')
dkim_sig = dkim.sign(content, selector, domain, private, include_headers=['From', 'To', 'Subject', 'Date'])
dk_sig = domainkeys(dkim_sig + content, selector, domain, private, include_heads=['From', 'To', 'Subject'])
return dk_sig + dkim_sig + content
else:
return content
except Exception, e:
print >>sys.stderr, e
print >>sys.stderr, traceback.format_exc()
return content | 5,355,936 |
def autocov(ary, axis=-1):
"""Compute autocovariance estimates for every lag for the input array.
Parameters
----------
ary : Numpy array
An array containing MCMC samples
Returns
-------
acov: Numpy array same size as the input array
"""
axis = axis if axis > 0 else len(ary.shape) + axis
n = ary.shape[axis]
m = next_fast_len(2 * n)
ary = ary - ary.mean(axis, keepdims=True)
# added to silence tuple warning for a submodule
with warnings.catch_warnings():
warnings.simplefilter("ignore")
ifft_ary = np.fft.rfft(ary, n=m, axis=axis)
ifft_ary *= np.conjugate(ifft_ary)
shape = tuple(
slice(None) if dim_len != axis else slice(0, n) for dim_len, _ in enumerate(ary.shape)
)
cov = np.fft.irfft(ifft_ary, n=m, axis=axis)[shape]
cov /= n
return cov | 5,355,937 |
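A quick sanity check of the FFT-based estimate: at lag 0 the autocovariance reduces to the biased (1/n) variance. This sketch assumes autocov above, together with its own imports (numpy, scipy's next_fast_len), is importable:

import numpy as np

rng = np.random.default_rng(0)
draws = rng.normal(size=(4, 1000))        # e.g. 4 chains, 1000 samples each
acov = autocov(draws, axis=-1)
# the lag-0 term equals the biased variance of each chain
np.testing.assert_allclose(acov[:, 0], draws.var(axis=-1), rtol=1e-10)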
def get_args(argv=None):
"""Parses given arguments and returns argparse.Namespace object."""
prsr = argparse.ArgumentParser(
description="Perform a conformational search on given molecules."
)
group = prsr.add_mutually_exclusive_group(required=True)
group.add_argument(
'-m', '--molecules', nargs='+',
help='One or more files with molecule specification.'
)
group.add_argument(
'-d', '--directory', help='Directory with .mol files.'
)
prsr.add_argument(
'-o', '--output_dir', default='.\\confsearch', help='Output directory.'
)
prsr.add_argument(
'-n', '--num_confs', type=int, default=10,
help='Number of conformers to generate.'
)
prsr.add_argument(
'-r', '--rms_tresh', type=float, default=1,
help='Maximum RMSD of conformers.'
)
prsr.add_argument(
'-e', '--energy_window', type=float, default=5,
help='Maximum energy difference from lowest-energy conformer '
'in kcal/mol.'
)
prsr.add_argument(
'-c', '--max_cycles', type=int, default=10,
help='Maximum number of energy minimization cycles.'
)
prsr.add_argument(
'-f', '--fixed', type=int, nargs='+', default=(),
help='Indices (starting at 1) of atoms fixed during molecule embedding.'
)
prsr.add_argument(
'-x', '--constraints',
help='File with constraints specified in format '
'"kind a [b [c [d]] rel min] max [const]", one for line. `kind` '
'should be one of: P (position), D (distance), A (angle), T '
'(torsion). Number of required atoms indices depends on `kind` '
'given and should be 1, 2, 3 or 4 respectively. Atoms indices '
'start at 1. `rel` should be 0 or 1 and specifies if `min` and '
'`max` values should be treated as absolute values or relative '
'to current value. `min` and `max` should be floats, representing '
'minimum and maximum value of constrained property in relevant '
'units (angstroms or degrees). `rel` and `min` should be omitted '
'if `kind` is P. `const` is force constant for given constraint, '
'should be integer or float, defaults to 1e5.'
)
prsr.add_argument(
'-V', '--verbose', action='store_true',
help='Sets logging level to INFO.'
)
prsr.add_argument(
'-D', '--debug', action='store_true',
help='Sets logging level to DEBUG.'
)
return prsr.parse_args(argv) | 5,355,938 |
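For example, parsing an explicit argv list (assuming get_args above is importable; the paths are hypothetical):

args = get_args(['-d', './mol_files', '-n', '25', '-e', '3.5', '--verbose'])
assert args.directory == './mol_files'
assert args.num_confs == 25 and args.energy_window == 3.5 and args.verbose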
def _optimize_rule_mip(
set_opt_model_func,
profile,
committeesize,
resolute,
max_num_of_committees,
solver_id,
name="None",
committeescorefct=None,
):
"""Compute rules, which are given in the form of an optimization problem, using Python MIP.
Parameters
----------
set_opt_model_func : callable
sets constraints and objective and adds additional variables, see examples below for its
signature
profile : abcvoting.preferences.Profile
approval sets of voters
committeesize : int
number of chosen alternatives
resolute : bool
max_num_of_committees : int
maximum number of committees this method returns, value can be None
solver_id : str
name : str
name of the model, used for error messages
committeescorefct : callable
a function used to compute the score of a committee
Returns
-------
committees : list of sets
a list of winning committees,
each of them represented as set of integers from `0` to `num_cand`
"""
maxscore = None
committees = []
if solver_id not in ["gurobi", "cbc"]:
raise ValueError(f"Solver {solver_id} not known in Python MIP.")
while True:
model = mip.Model(solver_name=solver_id)
# note: verbose = 1 causes issues with unittests, seems as if output is printed too late
# and anyway the output does not seem to be very helpful
model.verbose = 0
# `in_committee` is a binary variable indicating whether `cand` is in the committee
in_committee = [
model.add_var(var_type=mip.BINARY, name=f"cand{cand}_in_committee")
for cand in profile.candidates
]
set_opt_model_func(
model,
profile,
in_committee,
committeesize,
)
# find a new committee that has not been found yet by excluding previously found committees
for committee in committees:
model += mip.xsum(in_committee[cand] for cand in committee) <= committeesize - 1
# emphasis is optimality:
# activates procedures that produce improved lower bounds, focusing in pruning the search
# tree even if the production of the first feasible solutions is delayed.
model.emphasis = 2
model.opt_tol = ACCURACY
model.max_mip_gap = ACCURACY
model.integer_tol = ACCURACY
status = model.optimize()
if status not in [mip.OptimizationStatus.OPTIMAL, mip.OptimizationStatus.INFEASIBLE]:
raise RuntimeError(
f"Python MIP returned an unexpected status code: {status}"
f"Warning: solutions may be incomplete or not optimal (model {name})."
)
elif status == mip.OptimizationStatus.INFEASIBLE:
if len(committees) == 0:
# we are in the first round of searching for committees
# and the solver didn't find any
raise RuntimeError(f"Python MIP found no solution (INFEASIBLE) (model {name})")
break
committee = set(
cand
for cand in profile.candidates
if in_committee[cand].x >= 0.9
# this should be >= 1 - ACCURACY, but apparently it is not necessarily the case that
# integers are only ACCURACY apart from either 0 or 1
)
if len(committee) != committeesize:
raise RuntimeError(
"_optimize_rule_mip produced a committee with "
"fewer than `committeesize` members (model {name})."
)
if committeescorefct is None:
objective_value = model.objective_value # numeric value from MIP
else:
objective_value = committeescorefct(profile, committee) # exact value
if maxscore is None:
maxscore = objective_value
elif (committeescorefct is not None and objective_value > maxscore) or (
committeescorefct is None and objective_value > maxscore + CMP_ACCURACY
):
raise RuntimeError(
"Python MIP found a solution better than a previous optimum. This "
f"should not happen (previous optimal score: {maxscore}, "
f"new optimal score: {objective_value}, model {name})."
)
elif (committeescorefct is not None and objective_value < maxscore) or (
committeescorefct is None and objective_value < maxscore - CMP_ACCURACY
):
# no longer optimal
break
committees.append(committee)
if resolute:
break
if max_num_of_committees is not None and len(committees) >= max_num_of_committees:
return committees
return committees | 5,355,939 |
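The docstring mentions the expected signature of set_opt_model_func; a minimal sketch for a plain approval-voting objective is shown below. It is illustrative only, not one of abcvoting's actual model builders, and it assumes voters expose their approval sets via a voter.approved attribute:

import mip

def set_opt_model_av(model, profile, in_committee, committeesize):
    # Fix the committee size and maximise the total approval score.
    model += mip.xsum(in_committee) == committeesize
    model.objective = mip.maximize(
        mip.xsum(
            in_committee[cand]
            for voter in profile
            for cand in voter.approved
        )
    )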
def Zuo_fig_3_18(verbose=True):
"""
Input for Figure 3.18 in Zuo and Spence \"Advanced TEM\", 2017
This input acts as an example as well as a reference
Returns:
dictionary: tags is the dictionary of all input and output paramter needed to reproduce that figure.
"""
# INPUT
# Create Silicon structure (Could be produced with Silicon routine)
if verbose:
print('Sample Input for Figure 3.18 in Zuo and Spence \"Advanced TEM\", 2017')
tags = {'crystal_name': 'Silicon'}
if verbose:
print('tags[\'crystal\'] = ', tags['crystal_name'])
a = 0.514 # nm
tags['lattice_parameter_nm'] = a
if verbose:
print('tags[\'lattice_parameter_nm\'] =', tags['lattice_parameter_nm'])
tags['unit_cell'] = [[a, 0, 0], [0, a, 0], [0, 0, a]]
if verbose:
print('tags[\'unit_cell\'] =', tags['unit_cell'])
tags['elements'] = list(itertools.repeat('Si', 8))
if verbose:
print('tags[\'elements\'] =', tags['elements'])
base = [(0., 0., 0.), (0.5, 0.0, 0.5), (0.5, 0.5, 0.), (0., 0.5, 0.5)]
tags['base'] = np.array(base + (np.array(base) + (.25, .25, .25)).tolist())
if verbose:
print('tags[\'base\'] =', tags['base'])
# Define Experimental Conditions
tags['convergence_angle_mrad'] = 7
tags['acceleration_voltage_V'] = 101.6*1000.0 # V
if verbose:
print('tags[\'acceleration_voltage_V\'] =', tags['acceleration_voltage_V'])
tags['convergence_angle_mrad'] = 7.1 # mrad; 0 is parallel illumination
if verbose:
print('tags[\'convergence_angle_mrad\'] =', tags['convergence_angle_mrad'])
tags['zone_hkl'] = np.array([-2, 2, 1])  # incident nearest zone axis: defines Laue Zones!!!!
if verbose:
print('tags[\'zone_hkl\'] =', tags['zone_hkl'])
tags['mistilt'] = np.array([0, 0, 0]) # mistilt in degrees
if verbose:
print('tags[\'mistilt\'] =', tags['mistilt'])
# Define Simulation Parameters
tags['Sg_max'] = .2 # 1/nm maximum allowed excitation error
if verbose:
print('tags[\'Sg_max\'] =', tags['Sg_max'])
tags['hkl_max'] = 9 # Highest evaluated Miller indices
if verbose:
print('tags[\'hkl_max\'] =', tags['hkl_max'])
print('##################')
print('# Output Options #')
print('##################')
# Output options
tags['background'] = 'black' # 'white' 'grey'
if verbose:
print('tags[\'background\'] =', tags['background'], '# \'white\', \'grey\' ')
tags['color map'] = 'plasma'
if verbose:
print('tags[\'color map\'] =', tags['color map'], '#,\'cubehelix\',\'Greys\',\'jet\' ')
tags['plot HOLZ'] = 1
if verbose:
print('tags[\'plot HOLZ\'] =', tags['plot HOLZ'])
tags['plot HOLZ excess'] = 1
if verbose:
print('tags[\'plot HOLZ excess\'] =', tags['plot HOLZ excess'])
tags['plot Kikuchi'] = 1
if verbose:
print('tags[\'plot Kikuchi\'] =', tags['plot Kikuchi'])
tags['plot reflections'] = 1
if verbose:
print('tags[\'plot reflections\'] =', tags['plot reflections'])
tags['label HOLZ'] = 0
if verbose:
print('tags[\'label HOLZ\'] =', tags['label HOLZ'])
tags['label Kikuchi'] = 0
if verbose:
print('tags[\'label Kikuchi\'] =', tags['label Kikuchi'])
tags['label reflections'] = 0
if verbose:
print('tags[\'label reflections\'] =', tags['label reflections'])
tags['label color'] = 'black'
if verbose:
print('tags[\'label color\'] =', tags['label color'])
tags['label size'] = 10
if verbose:
print('tags[\'label size\'] =', tags['label size'])
tags['color Laue Zones'] = ['red', 'blue', 'green', 'blue', 'green'] # , 'green', 'red'] #for OLZ give a sequence
if verbose:
print('tags[\'color Laue Zones\'] =', tags['color Laue Zones'], ' #[\'red\', \'blue\', \'lightblue\']')
tags['color Kikuchi'] = 'green'
if verbose:
print('tags[\'color Kikuchi\'] =', tags['color Kikuchi'])
tags['linewidth HOLZ'] = -1 # -1: linewidth according to intensity (structure factor F^2
if verbose:
print('tags[\'linewidth HOLZ\'] =', tags['linewidth HOLZ'], '# -1: linewidth according to intensity '
'(structure factor F^2)')
tags['linewidth Kikuchi'] = -1 # -1: linewidth according to intensity (structure factor F^2
if verbose:
print('tags[\'linewidth Kikuchi\'] =', tags['linewidth Kikuchi'], '# -1: linewidth according to intensity '
'(structure factor F^2)')
tags['color reflections'] = 'intensity' # 'Laue Zone'
if verbose:
print('tags[\'color reflections\'] =', tags['color reflections'], '#\'Laue Zone\' ')
tags['color zero'] = 'white' # 'None', 'white', 'blue'
if verbose:
print('tags[\'color zero\'] =', tags['color zero'], '#\'None\', \'white\', \'blue\' ')
tags['color ring zero'] = 'None' # 'Red' #'white' #, 'None'
if verbose:
print('tags[\'color ring zero\'] =', tags['color ring zero'], '#\'None\', \'white\', \'Red\' ')
print('########################')
print('# End of Example Input #')
print('########################\n\n')
return tags | 5,355,940 |
def test_sqlcreds_connection(sql_creds):
"""
Simple test to ensure that the generated creds can connect to the database
The sql_creds fixture necessarily uses username and password (no Windows auth)
"""
df = pd.read_sql(con=sql_creds.engine, sql="SELECT TOP 1 * FROM sys.objects")
assert df.shape[0] == 1 | 5,355,941 |
def subscribe_feed(feed_link: str, title: str, parser: str, conn: Conn) -> str:
"""Return the feed_id if nothing wrong."""
feed_id = new_feed_id(conn)
conn.execute(
stmt.Insert_feed,
dict(
id=feed_id,
feed_link=feed_link,
website="",
title=title,
author_name="",
updated=arrow.now().format(RFC3339),
notes="",
parser=parser,
),
)
return feed_id | 5,355,942 |
def MaybeLogReleaseChannelDefaultWarning(args):
"""Logs a release channel default change message for applicable commands."""
if (not _IsSpecified(args, 'cluster_version') and
not _IsSpecified(args, 'release_channel') and
(hasattr(args, 'enable_autoupgrade') and
cmd_util.GetAutoUpgrade(args)) and
(hasattr(args, 'enable_autorepair') and cmd_util.GetAutoRepair(args))):
log.warning('Starting in January 2021, clusters will use the Regular '
'release channel by default when `--cluster-version`, '
'`--release-channel`, `--no-enable-autoupgrade`, and '
'`--no-enable-autorepair` flags are not specified.') | 5,355,943 |
def run(fname):
"""
Create a new C file and H file corresponding to the filename "fname",
and add them to the corresponding include.am.
This function operates on paths relative to the top-level tor directory.
"""
# Make sure we're in the top-level tor directory,
# which contains the src directory
if not os.path.isdir("src"):
raise RuntimeError("Could not find './src/'. "
"Run this script from the top-level tor source "
"directory.")
# And it looks like a tor/src directory
if not os.path.isfile("src/include.am"):
raise RuntimeError("Could not find './src/include.am'. "
"Run this script from the top-level tor source "
"directory.")
# Make the file name relative to the top-level tor directory
tor_fname = tordir_file(fname)
# And check that we're adding files to the "src" directory,
# with canonical paths
if tor_fname[:4] != "src/":
raise ValueError("Requested file path '{}' canonicalized to '{}', "
"but the canonical path did not start with 'src/'. "
"Please add files to the src directory."
.format(fname, tor_fname))
c_tor_fname = makeext(tor_fname, "c")
h_tor_fname = makeext(tor_fname, "h")
if os.path.exists(c_tor_fname):
print("{} already exists".format(c_tor_fname))
return 1
if os.path.exists(h_tor_fname):
print("{} already exists".format(h_tor_fname))
return 1
with open(c_tor_fname, 'w') as f:
f.write(instantiate_template(C_FILE_TEMPLATE, c_tor_fname))
with open(h_tor_fname, 'w') as f:
f.write(instantiate_template(HEADER_TEMPLATE, h_tor_fname))
iam = get_include_am_location(c_tor_fname)
if iam is None or not os.path.exists(iam):
print("Made files successfully but couldn't identify include.am for {}"
.format(c_tor_fname))
return 1
amfile = ParsedAutomake()
cur_chunk = AutomakeChunk()
with open(iam) as f:
for line in f:
if cur_chunk.addLine(line):
amfile.addChunk(cur_chunk)
cur_chunk = AutomakeChunk()
amfile.addChunk(cur_chunk)
amfile.add_file(c_tor_fname, "sources")
amfile.add_file(h_tor_fname, "headers")
with open(iam+".tmp", 'w') as f:
amfile.dump(f)
os.rename(iam+".tmp", iam) | 5,355,944 |
def process_grid(procstatus, dscfg, radar_list=None):
"""
Puts the radar data in a regular grid
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : string. Dataset keyword
The data type where we want to extract the point measurement
gridconfig : dictionary. Dataset keyword
Dictionary containing some or all of this keywords:
xmin, xmax, ymin, ymax, zmin, zmax : floats
minimum and maximum horizontal distance from grid origin [km]
and minimum and maximum vertical distance from grid origin [m]
Defaults -40, 40, -40, 40, 0., 10000.
hres, vres : floats
horizontal and vertical grid resolution [m]
Defaults 1000., 500.
latorig, lonorig, altorig : floats
latitude and longitude of grid origin [deg] and altitude of
grid origin [m MSL]
Defaults the latitude, longitude and altitude of the radar
wfunc : str. Dataset keyword
the weighting function used to combine the radar gates close to a
grid point. Possible values BARNES, BARNES2, CRESSMAN, NEAREST
Default NEAREST
        roi_func : str. Dataset keyword
the function used to compute the region of interest.
Possible values: dist_beam, constant
roi : float. Dataset keyword
the (minimum) radius of the region of interest in m. Default half
the largest resolution
beamwidth : float. Dataset keyword
the radar antenna beamwidth [deg]. If None that of the key
radar_beam_width_h in attribute instrument_parameters of the radar
object will be used. If the key or the attribute are not present
a default 1 deg value will be used
beam_spacing : float. Dataset keyword
the beam spacing, i.e. the ray angle resolution [deg]. If None,
that of the attribute ray_angle_res of the radar object will be
used. If the attribute is None a default 1 deg value will be used
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the gridded data
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
field_names_aux = []
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
field_names_aux.append(get_fieldname_pyart(datatype))
ind_rad = int(radarnr[5:8])-1
if (radar_list is None) or (radar_list[ind_rad] is None):
warn('ERROR: No valid radar')
return None, None
radar = radar_list[ind_rad]
# keep only fields present in radar object
field_names = []
nfields_available = 0
for field_name in field_names_aux:
if field_name not in radar.fields:
warn('Field name '+field_name+' not available in radar object')
continue
field_names.append(field_name)
nfields_available += 1
if nfields_available == 0:
warn("Fields not available in radar data")
return None, None
# default parameters
xmin = -40.
xmax = 40.
ymin = -40.
ymax = 40.
zmin = 0.
zmax = 10000.
hres = 1000.
vres = 500.
lat = float(radar.latitude['data'])
lon = float(radar.longitude['data'])
alt = float(radar.altitude['data'])
if 'gridConfig' in dscfg:
if 'xmin' in dscfg['gridConfig']:
xmin = dscfg['gridConfig']['xmin']
if 'xmax' in dscfg['gridConfig']:
xmax = dscfg['gridConfig']['xmax']
if 'ymin' in dscfg['gridConfig']:
ymin = dscfg['gridConfig']['ymin']
if 'ymax' in dscfg['gridConfig']:
ymax = dscfg['gridConfig']['ymax']
if 'zmin' in dscfg['gridConfig']:
zmin = dscfg['gridConfig']['zmin']
if 'zmax' in dscfg['gridConfig']:
zmax = dscfg['gridConfig']['zmax']
if 'hres' in dscfg['gridConfig']:
hres = dscfg['gridConfig']['hres']
if 'vres' in dscfg['gridConfig']:
vres = dscfg['gridConfig']['vres']
if 'latorig' in dscfg['gridConfig']:
lat = dscfg['gridConfig']['latorig']
if 'lonorig' in dscfg['gridConfig']:
lon = dscfg['gridConfig']['lonorig']
if 'altorig' in dscfg['gridConfig']:
alt = dscfg['gridConfig']['altorig']
wfunc = dscfg.get('wfunc', 'NEAREST')
roi_func = dscfg.get('roi_func', 'dist_beam')
# number of grid points in cappi
nz = int((zmax-zmin)/vres)+1
ny = int((ymax-ymin)*1000./hres)+1
nx = int((xmax-xmin)*1000./hres)+1
min_radius = dscfg.get('roi', np.max([vres, hres])/2.)
# parameters to determine the gates to use for each grid point
beamwidth = dscfg.get('beamwidth', None)
beam_spacing = dscfg.get('beam_spacing', None)
if beamwidth is None:
if (radar.instrument_parameters is not None and
'radar_beam_width_h' in radar.instrument_parameters):
beamwidth = radar.instrument_parameters[
'radar_beam_width_h']['data'][0]
else:
warn('Unknown radar beamwidth. Default 1 deg will be used')
beamwidth = 1
if beam_spacing is None:
if radar.ray_angle_res is not None:
beam_spacing = radar.ray_angle_res['data'][0]
else:
warn('Unknown beam spacing. Default 1 deg will be used')
beam_spacing = 1
# cartesian mapping
grid = pyart.map.grid_from_radars(
(radar,), gridding_algo='map_to_grid',
weighting_function=wfunc,
roi_func=roi_func, h_factor=1.0, nb=beamwidth, bsp=beam_spacing,
min_radius=min_radius, constant_roi=min_radius,
grid_shape=(nz, ny, nx),
grid_limits=((zmin, zmax), (ymin*1000., ymax*1000.),
(xmin*1000., xmax*1000.)),
grid_origin=(lat, lon), grid_origin_alt=alt,
fields=field_names)
new_dataset = {'radar_out': grid}
return new_dataset, ind_rad | 5,355,945 |
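# Hedged configuration sketch for process_grid above: a minimal dscfg dict using
# the keywords the function reads. The 'RADAR001:dBZ' descriptor and the commented
# call are assumptions about the surrounding pyrad framework (get_datatype_fields,
# the radar_list contents), not values mandated by the code itself.
dscfg_example = {
    'datatype': ['RADAR001:dBZ'],
    'gridConfig': {
        'xmin': -40., 'xmax': 40.,
        'ymin': -40., 'ymax': 40.,
        'zmin': 0., 'zmax': 10000.,
        'hres': 1000., 'vres': 500.,
    },
    'wfunc': 'NEAREST',
    'roi_func': 'dist_beam',
}
# new_dataset, ind_rad = process_grid(1, dscfg_example, radar_list=[radar])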
def float_to_16(value):
""" convert float value into fixed exponent (8) number
returns 16 bit integer, as value * 256
"""
value = int(round(value*0x100,0))
return value & 0xffff | 5,355,946 |
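# Hedged usage sketch for float_to_16 above: illustrates the fixed-point
# conversion (value * 256, masked to 16 bits); the sample values are
# illustrative only.
assert float_to_16(1.0) == 0x0100   # 1.0  -> 256
assert float_to_16(0.5) == 0x0080   # 0.5  -> 128
assert float_to_16(-1.0) == 0xff00  # negative values wrap through the 16-bit mask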
def create_keras_one_layer_dense_model(*,
input_size,
output_size,
verbose=False,
**kwargs
):
"""
Notes:
https://www.tensorflow.org/tutorials/keras/save_and_load
"""
# ...................................................
# Create model
model = Sequential()
#.. add fully connected layer
model.add(Dense(
input_dim=input_size, # IE 784 PIXELS, !
units=output_size,
activation=kwargs["out_activation"],
kernel_regularizer=tf.keras.regularizers.l2(0.001),
kernel_initializer=initializers.VarianceScaling(scale=1.0, seed=0)
))
# Print network summary
if verbose==True:
print(model.summary())
else:
pass
# ...................................................
    # Define Loss Function and Training Operation
""" # [option]: Use only default values,
model.compile( optimizer='sgd',
loss='sparse_categorical_crossentropy',
metrics=['acc'])
"""
model.compile(
optimizer= kwargs["optimizer"],
loss= losses.sparse_categorical_crossentropy,
metrics= kwargs["metrics"] # even one arg must be in the list
)
return model | 5,355,947 |
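# Hedged usage sketch for create_keras_one_layer_dense_model above: the keyword
# values (activation, optimizer, metrics) and the 784/10 sizes are illustrative
# assumptions, not values required by the original code.
model = create_keras_one_layer_dense_model(
    input_size=784,            # e.g. flattened 28x28 images
    output_size=10,            # e.g. 10 classes
    verbose=True,
    out_activation="softmax",
    optimizer="adam",
    metrics=["acc"],           # even a single metric must be given in a list
)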
def test_list_base64_binary_enumeration_2_nistxml_sv_iv_list_base64_binary_enumeration_3_4(mode, save_output, output_format):
"""
Type list/base64Binary is restricted by facet enumeration.
"""
assert_bindings(
schema="nistData/list/base64Binary/Schema+Instance/NISTSchema-SV-IV-list-base64Binary-enumeration-3.xsd",
instance="nistData/list/base64Binary/Schema+Instance/NISTXML-SV-IV-list-base64Binary-enumeration-3-4.xml",
class_name="NistschemaSvIvListBase64BinaryEnumeration3",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
) | 5,355,948 |
def API_encrypt(key, in_text, formatting:str = "Base64", nonce_type:str = "Hybrid"):
""" Returns: Input Text 147 Encrypted with Input Key. """
try:
# Ensure an Appropriate Encoding Argument is Provided.
try: encoding = FORMATS[formatting]
except: raise ValueError("Invalid Encoding Argument")
# Generate Nonce Integer Based on Input Argument.
nonce = gen_nonce(nonce_type)
# Encode Text into Specified Encoding and Remove any Padding.
encoded_text = convert_input(in_text, "encode", encoding)
# Encode Key into Decimal Number (Base10).
dec_key = key_convert(key, encoding)
# Substitute Down Input Text.
shifted_text = substitution(dec_key, nonce, encoded_text, encoding, "encrypt", "normal")
# Randomly join Shifted Text and Nonce into one Text.
full_text = pair_function(shifted_text, dec_key, encoding, nonce)
# Substitute Up Input Text.
return substitution(dec_key + 135, 147, full_text, encoding, "encrypt", "reverse")
except: raise ValueError(f"Encryption with Key: {key} Failed for Input: {in_text}") | 5,355,949 |
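# Hedged usage sketch for API_encrypt above: "Base64" and "Hybrid" mirror the
# defaults in the signature; the key and plaintext are made-up values, and the
# resulting ciphertext depends on the helpers (gen_nonce, substitution,
# pair_function, FORMATS) defined elsewhere in the original module.
ciphertext = API_encrypt("my secret key", "hello world")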
def convert_log_dict_to_np(logs):
"""
Take in logs and return params
"""
# Init params
n_samples_after_warmup = len(logs)
n_grid = logs[0]['u'].shape[-1]
u = np.zeros((n_samples_after_warmup, n_grid))
Y = np.zeros((n_samples_after_warmup, n_grid))
k = np.zeros((n_samples_after_warmup, n_grid))
kl_trunc_errs = np.empty((n_samples_after_warmup,1))
n_stoch_disc = logs[0]['coefs'].shape[-1] # e.g., n_alpha_indices for PCE, or kl_dim for KL-E
coefs = np.empty((n_samples_after_warmup, n_grid, n_stoch_disc))
stoch_dim = logs[0]['rand_insts'].shape[-1]
rand_insts = np.empty((n_samples_after_warmup, stoch_dim))
# Copy logs into params
for n, log in enumerate(logs):
k[n,:] = log['rand_param']
Y[n,:] = log['Y']
u[n,:] = log['u']
kl_trunc_errs[n,0] = log['kl_trunc_err']
coefs[n,:,:] = log['coefs']
rand_insts[n,:] = log['rand_insts']
return k, Y, u, kl_trunc_errs, coefs, rand_insts | 5,355,950 |
def get_parent_dir():
"""Returns the root directory of the project."""
return os.path.abspath(os.path.join(os.getcwd(), os.pardir)) | 5,355,951 |
def step_erase_licenses(context):
"""Erases the J-Link's licenses.
Args:
context (Context): the ``Context`` instance
Returns:
``None``
"""
jlink = context.jlink
assert jlink.erase_licenses() | 5,355,952 |
def map_links_in_markdownfile(
filepath: Path,
func: Callable[[Link], None]
) -> bool:
"""Dosyadaki tüm linkler için verilen fonksiyonu uygular
Arguments:
filepath {Path} -- Dosya yolu objesi
func {Callable[[Link], None]} -- Link alan ve değiştiren fonksiyon
Returns:
bool -- Değişim olduysa True
"""
content = filesystem.read_file(filepath)
content = map_links_in_string(content, func)
return filesystem.write_to_file(filepath, content) | 5,355,953 |
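# Hedged usage sketch for map_links_in_markdownfile above: assumes a Link object
# exposing a mutable `path` attribute (an assumption about the Link class defined
# elsewhere in the original module) and a made-up file path.
from pathlib import Path

def strip_leading_slash(link):
    # hypothetical mutation applied to every link in the file
    link.path = link.path.lstrip("/")

changed = map_links_in_markdownfile(Path("docs/index.md"), strip_leading_slash)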
def test_runner_should_iterate_all_steps_in_a_scenario(
hook_registry, default_config, mocker
):
"""The Runner should iterate all Steps in a Scenario"""
# given
runner = Runner(default_config, None, hook_registry)
runner.run_step = mocker.MagicMock()
runner.run_step.return_value = State.PASSED
scenario_mock = mocker.MagicMock(name="Scenario")
scenario_mock.background = None
first_step = mocker.MagicMock(name="First Step")
second_step = mocker.MagicMock(name="Second Step")
scenario_mock.steps = [first_step, second_step]
# when
runner.run_scenario(scenario_mock)
# then
runner.run_step.assert_has_calls([call(first_step), call(second_step)]) | 5,355,954 |
def half_cell_t_2d_triangular_precursor(p, t):
"""Creates a precursor to horizontal transmissibility for prism grids (see notes).
arguments:
p (numpy float array of shape (N, 2 or 3)): the xy(&z) locations of cell vertices
t (numpy int array of shape (M, 3)): the triangulation of p for which the transmissibility
precursor is required
returns:
a pair of numpy float arrays, each of shape (M, 3) being the normal length and flow length
relevant for flow across the face opposite each vertex as defined by t
notes:
this function acts as a precursor to the equivalent of the half cell transmissibility
functions but for prism grids; for a resqpy VerticalPrismGrid, the triangulation can
be shared by many layers with this function only needing to be called once; the first
of the returned values (normal length) is the length of the triangle edge, in xy, when
projected onto the normal of the flow direction; multiplying the normal length by a cell
height will yield the area needed for transmissibility calculations; the second of the
      returned values (flow length) is the distance from the triangle centre to the midpoint of
      the edge and can be used as the distance term for a half cell transmissibility; this
function does not account for dip, it only handles the geometric aspects of half
cell transmissibility in the xy plane
"""
assert p.ndim == 2 and p.shape[1] in [2, 3]
assert t.ndim == 2 and t.shape[1] == 3
# centre points of triangles, in xy
centres = np.mean(p[t], axis = 1)[:, :2]
# midpoints of edges of triangles, in xy
edge_midpoints = np.empty(tuple(list(t.shape) + [2]), dtype = float)
edge_midpoints[:, 0, :] = 0.5 * (p[t[:, 1]] + p[t[:, 2]])[:, :2]
edge_midpoints[:, 1, :] = 0.5 * (p[t[:, 2]] + p[t[:, 0]])[:, :2]
edge_midpoints[:, 2, :] = 0.5 * (p[t[:, 0]] + p[t[:, 1]])[:, :2]
# triangle edge vectors, projected in xy
edge_vectors = np.empty(edge_midpoints.shape, dtype = float)
edge_vectors[:, 0] = (p[t[:, 2]] - p[t[:, 1]])[:, :2]
edge_vectors[:, 1] = (p[t[:, 0]] - p[t[:, 2]])[:, :2]
edge_vectors[:, 2] = (p[t[:, 1]] - p[t[:, 0]])[:, :2]
# vectors from triangle centres to mid points of edges (3 per triangle), in xy plane
cem_vectors = edge_midpoints - centres.reshape((-1, 1, 2))
cem_lengths = vec.naive_lengths(cem_vectors)
# unit length vectors normal to cem_vectors, in the xy plane
normal_vectors = np.zeros(edge_midpoints.shape)
normal_vectors[:, :, 0] = cem_vectors[:, :, 1]
normal_vectors[:, :, 1] = -cem_vectors[:, :, 0]
normal_vectors = vec.unit_vectors(normal_vectors)
# edge lengths projected onto normal vectors (length perpendicular to nominal flow direction)
normal_lengths = np.abs(vec.dot_products(edge_vectors, normal_vectors))
# return normal (cross-sectional) lengths and nominal flow direction lengths
assert normal_lengths.shape == t.shape and cem_lengths.shape == t.shape
return normal_lengths, cem_lengths | 5,355,955 |
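# Hedged usage sketch for half_cell_t_2d_triangular_precursor above: a single
# right triangle in the xy plane; the expected output shapes follow directly from
# the docstring (both returned arrays have shape (M, 3)). Assumes the resqpy `vec`
# helpers used inside the function are importable as in the original module.
import numpy as np

p = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])  # N = 3 vertices, xy only
t = np.array([[0, 1, 2]])                           # M = 1 triangle
normal_lengths, flow_lengths = half_cell_t_2d_triangular_precursor(p, t)
assert normal_lengths.shape == (1, 3) and flow_lengths.shape == (1, 3)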
async def stop_runner() -> None:
"""Stop the runlevel-pursuing runner task."""
daemons.cancel(daemons.Service.RUNLEVEL) | 5,355,956 |
def main():
"""Main function that calls the JCDecaux API and writes to database.
Function retrieves JSON data fromthe JCDecaux API.
The JSON data is parsed and validated.
The data is inserted into a remote database."""
# MySQL connection
conex = mysql.connector.connect(user='root', password='Rugby_777', database='dublinbikes', host='0.0.0.0')
cursor = conex.cursor()
    # OpenWeatherMap API link
link = "http://api.openweathermap.org/data/2.5/forecast?id=524901&APPID=73281f45f2eec1f97e90acdcbacaf4ee"
# MySQL query
sqlDelete = "DELETE FROM weatherForecast"
sqlQuery = "INSERT INTO weatherForecast (dt, temp, humidity, description, pressure, day, hour, icon, dt_txt) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s);"
    # Infinite loop
while True:
# MySQL object
#cursor.execute(sqlDelete)
#conex.commit()
# Retrieve and load JSON data
r = requests.get(link)
jTxt = json.loads(r.text)
        # If the JSON call was successful
if r.status_code == 200:
for row in jTxt['list']:
try:
hour = pd.to_datetime(row['dt_txt']).hour - 1
day = pd.to_datetime(row['dt_txt']).weekday_name
print(hour)
for i in range(0,3):
dt = row['dt']
temp = row['main']['temp']
dt_txt = row['dt_txt']
icon = row['weather'][0]['icon']
humidity = row['main']['humidity']
description = row['weather'][0]['main']
pressure = row['main']['pressure']
hour += 1
args = (int(dt), float(temp), float(humidity), str(description), float(pressure), day, hour, icon, dt_txt)
cursor.execute(sqlQuery, args)
print("executed")
# Commit data to DB
except Exception as e:
print(str(e))
pass
conex.commit()
print("committed")
# Close MySQL object
cursor.close()
time.sleep(432000) | 5,355,957 |
def backup(source, target, update_log_table, use_rsync, verbose, debug, dry_run):
"""
Back up a source directory to a target directory.
This function will accept a source and target directories, most often
on separate external hard drives, and copy all files from the source
to the target that are either:
(1) Not in the target directory
(2) Are in the target directory, but have been updated
Files in the target that have been deleted in the source will also be deleted.
"""
args, result = pydoni.__pydonicli_declare_args__(locals()), dict()
start_ts = time.time()
vb = Verbose(verbose=verbose, debug=debug)
ws = ' '
ignore_files = [
'The Office S09E16 Moving On.mkv',
'The Office S09E20 Paper Airplanes.mkv',
]
if update_log_table:
start_ts_utc = datetime.datetime.utcnow()
pg = pydoni.Postgres()
directory_backup_table_schema = 'pydonicli'
directory_backup_table_name = 'directory_backup'
insert_dict = dict(source=source,
source_size_bytes=stat(source).st_size,
target=target,
target_size_before_bytes=stat(target).st_size,
target_size_after_bytes=None,
start_ts=start_ts_utc,
is_completed=False)
insert_sql = pg.build_insert(schema_name=directory_backup_table_schema,
table_name=directory_backup_table_name,
columns=list(insert_dict.keys()),
values=list(insert_dict.values()),
validate=True)
if not dry_run:
pg.execute(insert_sql)
directory_backup_id = pg.read_sql(f"""
select directory_backup_id
from {directory_backup_table_schema}.{directory_backup_table_name}
order by gen_ts desc
limit 1""").squeeze()
assert source != target, 'Source and target directories must be different'
if use_rsync:
cmd_lst = ['rsync', '--delete-before', '-a', '-h', '-u']
if verbose:
cmd_lst = cmd_lst + ['-v', '--progress']
cmd_lst = cmd_lst + [f'"{source}"'] + [f'"{target}"']
cmd = ' '.join(cmd_lst)
subprocess.call(cmd, shell=True)
# progress_flag = ' --progress' if verbose else ''
# backup_cmd = f'rsync -avhu{progress_flag} --delete-before "{source}" "{target}"'
# subprocess.call(backup_cmd, shell=True)
else:
vb.info(f'Listing files at source: {source}')
files_source = pydoni.listfiles(path=source, recursive=True, full_names=True)
vb.debug('Found files at source: ' + str(len(files_source)))
files_source = [x for x in files_source if x not in ignore_files]
vb.debug(f'Found files at source after filtering out manually ignored files: {len(files_source)}')
vb.info(f'Listing files at target: {target}')
files_target = pydoni.listfiles(path=target, recursive=True, full_names=True)
vb.debug('Found files at target: ' + str(len(files_target)))
files_target = [x for x in files_target if x not in ignore_files]
vb.debug(f'Found files at target after filtering out manually ignored files: {len(files_target)}')
# Scan source files and for each determine whether to do nothing, copy to target,
# or replace at target
copied_files = []
replaced_files = []
vb.info('Scanning for new, updated or deleted files at source')
vb.pbar_init(total=len(files_source), unit='file')
for sourcefile in files_source:
vb.pbar_write(f'Sourcefile: {sourcefile}', refer_debug=True)
vb.pbar.set_postfix({'file': basename(sourcefile)})
targetfile = sourcefile.replace(source, target)
vb.pbar_write(f'{ws}Expected mirrored targetfile: {targetfile}', refer_debug=True)
if not isfile(targetfile):
# Copy file to target. Create parent directory at target if not exists
vb.pbar_write(f'{ws}(Copy) attempting to copy file "{sourcefile}" to "{targetfile}"', refer_debug=True)
targetdpath = dirname(targetfile)
if not isdir(targetdpath):
vb.pbar_write(f'{ws}{ws}Parent directory of targetfile does not exist, creating it at: ' + targetdpath, refer_debug=True)
if not dry_run:
makedirs(targetdpath)
vb.pbar_write(f'{ws}{ws}Successful', refer_debug=True)
if not dry_run:
shutil.copy2(sourcefile, targetfile)
vb.pbar_write(f'{ws}Successful', refer_debug=True)
copied_files.append(sourcefile)
elif isfile(targetfile) and is_file_changed(sourcefile, targetfile):
# Replace file at target (same action as copy, but parent directory must exist)
vb.pbar_write(f'(Replace) attempting to copy file "{sourcefile}" to "{targetfile}"', refer_debug=True)
if not dry_run:
shutil.copy2(sourcefile, targetfile)
vb.pbar_write(f'Successful', refer_debug=True)
replaced_files.append(sourcefile)
else:
vb.pbar_write(f'{ws}Targetfile already exists and is unchanged', refer_debug=True)
vb.pbar_update(1)
vb.pbar_close()
    # Scan target files and for each determine whether that file has since been
# deleted from source
deleted_files = []
vb.info('Scanning for files at target since deleted from source')
vb.pbar_init(total=len(files_target))
for targetfile in files_target:
sourcefile = targetfile.replace(target, source)
vb.pbar.set_postfix({'file': basename(targetfile)})
if not isfile(sourcefile) and not isdir(sourcefile):
vb.pbar_write(f'(Delete) attempting to delete "{targetfile}"', refer_debug=True)
if not dry_run:
send2trash(targetfile)
vb.pbar_write(f'{ws}Successful', refer_debug=True)
deleted_files.append(targetfile)
vb.pbar_update(1)
vb.pbar_close()
# Record number of files copied, replaced and deleted
vb.info(f'Copied {len(copied_files)} files')
vb.info(f'Replaced {len(replaced_files)} files')
vb.info(f'Deleted {len(deleted_files)} files')
vb.info(f'Unchanged {len(files_source) - len(copied_files) - len(replaced_files) - len(deleted_files)} files')
result = dict(copied=len(copied_files),
replaced=len(replaced_files),
deleted=len(deleted_files),
unchanged=len(files_source) - len(copied_files) - len(replaced_files) - len(deleted_files))
if update_log_table:
vb.debug('Attempting to update log table with results...')
update_dict = dict(target_size_after_bytes=pydoni.dirsize(target),
end_ts=datetime.datetime.utcnow(),
is_completed=True)
update_sql = pg.build_update(schema_name=directory_backup_table_schema,
table_name=directory_backup_table_name,
pkey_name='directory_backup_id',
pkey_value=directory_backup_id,
columns=list(update_dict.keys()),
values=list(update_dict.values()),
validate=True)
if not dry_run:
pg.execute(update_sql)
vb.debug(f'{ws}Successful')
vb.program_complete('Backup complete', start_ts=start_ts)
pydoni.__pydonicli_register__(dict(args=args, result=result, command_name='data.backup')) | 5,355,958 |
def count_datavolume(sim_dict):
"""
Extract from the given input the amount of time and the memory you need to
process each simulation through the JWST pipeline
    :param dict sim_dict: Each key represents a set of simulations (a CAR activity for instance),
                          each value is a list of simulations. Each simulation is a dict with detailed info
    :return: Return (mem, time) where mem and time are dictionaries with the same keys as the input dict.
:rtype: Memory is in GB, Time is in hours
"""
mem_volume = {} # Total memory required in GB
time_volume = {} # Pipeline estimated run time in s
for (car, sim_list) in sim_dict.items():
memory = []
times = []
for sim in sim_list:
if "detector" in sim.keys():
if sim["detector"] in ["IMAGER", "ALL"]:
tmp = {
"integrations": sim["ima_integrations"],
"frames": sim["ima_frames"],
"exposures": sim["exposures"],
"subarray": sim["subarray"],
"NDither": sim["NDither"],
}
(ram, time, nb_exps) = get_prediction(tmp)
memory.extend([ram] * nb_exps) # For each exposure we have one identical file to analyse
times.extend([time] * nb_exps) # For each exposure we have one identical file to analyse
if sim["detector"] in ["ALL", "MRS"]:
tmp = {
"integrations": sim["LW_integrations"],
"frames": sim["LW_frames"],
"exposures": sim["exposures"],
"subarray": "FULL",
"NDither": sim["NDither"],
}
(ram, time, nb_exps) = get_prediction(tmp)
memory.extend([ram] * nb_exps) # For each exposure we have one identical file to analyse
times.extend([time] * nb_exps) # For each exposure we have one identical file to analyse
tmp = {
"integrations": sim["SW_integrations"],
"frames": sim["SW_frames"],
"exposures": sim["exposures"],
"subarray": "FULL",
"NDither": sim["NDither"],
}
(ram, time, nb_exps) = get_prediction(tmp)
memory.extend([ram] * nb_exps) # For each exposure we have one identical file to analyse
times.extend([time] * nb_exps) # For each exposure we have one identical file to analyse
else:
(ram, time, nb_exps) = get_prediction(sim)
memory.extend([ram] * nb_exps) # For each exposure we have one identical file to analyse
times.extend([time] * nb_exps) # For each exposure we have one identical file to analyse
mem_volume[car] = np.array(memory)
time_volume[car] = np.array(times)
return mem_volume, time_volume | 5,355,959 |
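# Hedged input sketch for count_datavolume above: one CAR with a single imager
# simulation. The key names follow the branches in the code, but the numeric
# values are illustrative, and the get_prediction helper from the original
# module must be available.
sim_dict = {
    "CAR-007": [
        {
            "detector": "IMAGER",
            "ima_integrations": 10,
            "ima_frames": 50,
            "exposures": 2,
            "subarray": "FULL",
            "NDither": 4,
        },
    ],
}
mem_volume, time_volume = count_datavolume(sim_dict)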
def get_exception_class_by_code(code):
"""Gets exception with the corresponding error code,
otherwise returns UnknownError
:param code: error code
:type code: int
:return: Return Exception class associated with the specified API error.
"""
code = int(code)
module_classes = inspect.getmembers(sys.modules[__name__], inspect.isclass)
exception_classes = (h[1] for h in module_classes
if is_valid_error_cls(h[1]))
exception_cls = None
for e_c in exception_classes:
if e_c.error_code == code:
exception_cls = e_c
break
if exception_cls is None:
exception_cls = UnknownError
return exception_cls | 5,355,960 |
def context():
"""Return an instance of the JIRA tool context."""
return dict() | 5,355,961 |
def plot_decision_boundary_distances(model, X, Y, feat_crosses=None, axis_lines=False, save=False):
"""
Plots decision boundary
Args:
model: neural network layer and activations in lambda function
X: Data in shape (num_of_examples x features)
feat_crosses: list of tuples showing which features to cross
axis_lines: Draw axis lines at x=0 and y=0(bool, default False)
save: flag to save plot image
"""
# first plot the data to see what is the size of the plot
plt.scatter(X[:, 0], X[:, 1], s=200, c=np.squeeze(Y))
# get the x and y range of the plot
x_ticks = plt.xticks()[0]
y_ticks = plt.yticks()[0]
plt.clf() # clear figure after getting size
# Generate a grid of points between min_x_point-0.5 and max_x_point+0.5 with 1000 points in between,
# similarly, for y points
xs = np.linspace(min(x_ticks) - 0.5, max(x_ticks) + 0.5, 1000)
ys = np.linspace(max(y_ticks) + 0.5, min(y_ticks) - 0.5, 1000)
xx, yy = np.meshgrid(xs, ys) # create data points
# Predict the function value for the whole grid
prediction_data = np.c_[xx.ravel(), yy.ravel()]
# add feat_crosses if provided
if feat_crosses:
for feature in feat_crosses:
prediction_data = np.c_[prediction_data, prediction_data[:, feature[0]] * prediction_data[:, feature[1]]]
Z = model(prediction_data)
Z = Z.reshape(xx.shape)
# Plot the contour and training examples
plt.style.use('seaborn-whitegrid')
c = plt.contour(xx, yy, Z, cmap='Blues') # draw a blue colored decision boundary
plt.title('Distances from Decision Boundary', size=18)
plt.xlabel('$x_1$', size=20)
plt.ylabel('$x_2$', size=20)
if axis_lines:
plt.axhline(0, color='black')
plt.axvline(0, color='black')
# color map 'cmap' maps 0 labeled data points to red and 1 labeled points to green
cmap = matplotlib.colors.ListedColormap(["red", "green"], name='from_list', N=None)
plt.scatter(X[:, 0], X[:, 1], s=200, c=np.squeeze(Y), marker='x', cmap=cmap) # s-> size of marker
points = X # data points from to which perpendicular lines are drawn
v = c.collections[0].get_paths()[0].vertices # returns two points from the decision line(visible start & end point)
P1 = np.expand_dims(np.asarray((v[0, 0], v[0, 1])), axis=0) # the visible start point of the line
P2 = np.expand_dims(np.asarray((v[-1, 0], v[-1, 1])), axis=0) # the visible end point of the line
inter_points, distances = point_on_line(P1, P2, points)
# combine the intersection points so that they're in the format required by `plt.plot` so
# each list item is:
# [(x_1,x_2), (y_1, y_2), len_of_line]
perpendicular_line_points = [list(zip(a, b))+[c] for a, b, c in zip(points, inter_points, distances)]
# plot and label perpendicular lines to the decision boundary one by one
# labelLine function comes from https://github.com/cphyc/matplotlib-label-lines/tree/master/labellines/baseline
for line in perpendicular_line_points:
x_points = np.clip(line[0], a_min=-0.5, a_max=1.5) # clip lines going out of bounds of visible area
y_points = np.clip(line[1], a_min=-0.5, a_max=1.5)
        length = line[2]
        plt.plot(x_points, y_points, 'm--', label='{:.2f}'.format(length)) # print label to 2 decimal places
labelLine(plt.gca().get_lines()[-1], x= sum(x_points)/2) # label of the line should be centered, so (x_1+x_2)/2
if save:
plt.savefig('decision_boundary_with_distances.png', bbox_inches='tight')
plt.tight_layout()
plt.show() | 5,355,962 |
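# Hedged usage sketch for plot_decision_boundary_distances above: a toy
# two-feature dataset and a hard-threshold "model" lambda; any callable mapping
# an (n, 2) array to class scores could be passed instead. Relies on the
# point_on_line and labelLine helpers from the original module.
import numpy as np

X = np.array([[0.1, 0.2], [0.9, 0.8], [0.2, 0.9], [0.8, 0.1]])
Y = np.array([[0], [1], [1], [0]])
model = lambda data: (data[:, 0] + data[:, 1] > 1.0).astype(float)
plot_decision_boundary_distances(model, X, Y)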
def dist_to_group(idx: int, group_type: str, lst):
"""
A version of group_count that allows for sorting with solo agents
Sometimes entities don't have immediately adjacent neighbors.
In that case, the value represents the distance to any neighbor, e.g
-1 means that an entity one to the left or right has a neighbor of that type.
Args:
idx (int):index in the list
group_type (str):group type we care about matching
lst ([type]): [description]
"""
    my_count = group_count(idx, group_type, lst)
if my_count > 0:
return my_count
adjacent_counts = []
l_neighbor_count = dist_to_group(idx-1, group_type, lst) if idx > 0 else None
r_neighbor_count = dist_to_group(idx+1, group_type, lst) if idx < len(lst)-1 else None
for neighbor_count in (l_neighbor_count, r_neighbor_count):
        if neighbor_count is not None and neighbor_count != 0:
            if neighbor_count < 0: #The neighbor doesn't have any next directly to it either
                adjacent_counts.append(neighbor_count - 1)
            else: #The neighbor does have one next to it!
                adjacent_counts.append(neighbor_count)
return max(adjacent_counts) | 5,355,963 |
def label_encode(dataset, column):
"""
This will encode a binary categorical variable.
Column needs to be a string
"""
labelencoder_X = LabelEncoder()
dataset[column] = labelencoder_X.fit_transform(dataset[column])
return | 5,355,964 |
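# Hedged usage sketch for label_encode above: encodes a binary categorical column
# in place; the DataFrame is a made-up example and LabelEncoder is assumed to be
# imported (from sklearn.preprocessing) as in the original module.
import pandas as pd

df = pd.DataFrame({"gender": ["male", "female", "female", "male"]})
label_encode(df, "gender")  # "female" -> 0, "male" -> 1 (alphabetical encoding)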
def get_nodes_rating(start: AnyStr,
end: AnyStr,
tenant_id: AnyStr,
namespaces: List[AnyStr]) -> List[Dict]:
"""
Get the rating by node.
:start (AnyStr) A timestamp, as a string, to represent the starting time.
:end (AnyStr) A timestamp, as a string, to represent the ending time.
:tenant_id (AnyStr) A string representing the tenant, only used by decorators.
:namespaces (List[AnyStr]) A list of namespaces accessible by the tenant.
Return the results of the query as a list of dictionary.
"""
qry = sa.text("""
SELECT frame_begin,
sum(frame_price) as frame_price,
node
FROM frames
WHERE frame_begin >= :start
AND frame_end <= :end
AND namespace != 'unspecified'
AND pod != 'unspecified'
AND namespace IN :namespaces
GROUP BY frame_begin, node
ORDER BY frame_begin, node
""").bindparams(bindparam('namespaces', expanding=True))
params = {
'start': start,
'end': end,
'tenant_id': tenant_id,
'namespaces': namespaces
}
return process_query(qry, params) | 5,355,965 |
def f(OPL,R):
""" Restoration function calculated from optical path length (OPL)
and from rational function parameter (R). The rational is multiplied
along all optical path.
"""
x = 1
for ii in range(len(OPL)):
x = x * (OPL[ii] + R[ii][2]) / (R[ii][0] * OPL[ii] + R[ii][1])
return x | 5,355,966 |
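# Hedged usage sketch for f above: two optical path segments, each with a
# rational-function parameter triple R[ii] = [a, b, c] so that each factor is
# (OPL + c) / (a * OPL + b); the numbers are illustrative assumptions only.
OPL = [1.0, 2.0]
R = [[1.0, 0.5, 0.0],   # (1.0 + 0.0) / (1.0 * 1.0 + 0.5)
     [0.5, 1.0, 0.5]]   # (2.0 + 0.5) / (0.5 * 2.0 + 1.0)
x = f(OPL, R)           # product of the two factors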
def test_add():
"""Test required to make the CI pass"""
assert 2 + 2 == 4 | 5,355,967 |
def _str_conv(number, rounded=False):
"""
Convenience tool to convert a number, either float or int into a string.
If the int or float is None, returns empty string.
>>> print(_str_conv(12.3))
12.3
>>> print(_str_conv(12.34546, rounded=1))
12.3
>>> print(_str_conv(None))
<BLANKLINE>
>>> print(_str_conv(1123040))
11.2e5
"""
if not number:
return str(' ')
if not rounded and isinstance(number, (float, int)):
if number < 100000:
string = str(number)
else:
exponant = int('{0:.2E}'.format(number).split('E+')[-1]) - 1
divisor = 10 ** exponant
string = '{0:.1f}'.format(number / divisor) + 'e' + str(exponant)
elif rounded == 2 and isinstance(number, (float, int)):
if number < 100000:
string = '{0:.2f}'.format(number)
else:
exponant = int('{0:.2E}'.format(number).split('E+')[-1]) - 1
divisor = 10 ** exponant
string = '{0:.2f}'.format(number / divisor) + 'e' + str(exponant)
elif rounded == 1 and isinstance(number, (float, int)):
if number < 100000:
string = '{0:.1f}'.format(number)
else:
exponant = int('{0:.2E}'.format(number).split('E+')[-1]) - 1
divisor = 10 ** exponant
string = '{0:.1f}'.format(number / divisor) + 'e' + str(exponant)
else:
return str(number)
return string | 5,355,968 |
def load_img(img: Any):
"""
Load an image, whether it's from a URL, a file, an array, or an already
in-memory image.
"""
raise ValueError(f"Can not load object of type {type(img)} as image.") | 5,355,969 |
def configure_service():
"""Configure the GlusterFS filesystems"""
generate_etc_hosts()
modify_cassandra_yaml()
modify_jvm_options()
create_dirs()
add_cassandra_to_systemd() | 5,355,970 |
def build_eval_graph(input_fn, model_fn, hparams):
"""Build the evaluation computation graph."""
dataset = input_fn(None)
batch = dataset.make_one_shot_iterator().get_next()
batch_holder = {
"transform":
tf.placeholder(
tf.float32,
[1, 1, hparams.n_parts, hparams.n_dims + 1, hparams.n_dims + 1]),
"joint":
tf.placeholder(tf.float32, [1, 1, hparams.n_parts, hparams.n_dims]),
"point":
tf.placeholder(tf.float32, [1, 1, None, hparams.n_dims]),
"label":
tf.placeholder(tf.float32, [1, 1, None, 1]),
}
latent_holder, latent, occ = model_fn(batch_holder, None, None, "gen_mesh")
# Eval Summary
iou_holder = tf.placeholder(tf.float32, [])
best_holder = tf.placeholder(tf.float32, [])
tf.summary.scalar("IoU", iou_holder)
tf.summary.scalar("Best_IoU", best_holder)
return {
"batch_holder": batch_holder,
"latent_holder": latent_holder,
"latent": latent,
"occ": occ,
"batch": batch,
"iou_holder": iou_holder,
"best_holder": best_holder,
"merged_summary": tf.summary.merge_all(),
} | 5,355,971 |
def test_timeout():
"""
    Test whether the timeout exception is raised with the proper message.
"""
lock = get_connection(1, 5, 15)
collection = str(random.random())
with pytest.raises(MongoLockTimeout) as excinfo:
with lock(collection):
for i in range(15):
with lock(collection):
assert lock.isLocked(collection) == False
assert 'timedout' in str(excinfo.value) | 5,355,972 |
def _encodeLength(length):
"""
    Encode a length value using DER length encoding.
    Args:
        length: non-negative integer length to encode
"""
assert length >= 0
if length < hex160:
return chr(length)
s = ("%x" % length).encode()
if len(s) % 2:
s = "0" + s
s = BinaryAscii.binaryFromHex(s)
lengthLen = len(s)
return chr(hex160 | lengthLen) + str(s) | 5,355,973 |
def check_mask(mask):
# language=rst
"""
Check if the 2d boolean mask is valid
:param mask: 2d boolean mask array
"""
if(jnp.any(mask) == False):
assert 0, 'Empty mask! Reduce num'
if(jnp.sum(mask)%2 == 1):
assert 0, 'Need masks with an even number! Choose a different num' | 5,355,974 |
def stemmer(stemmed_sent):
"""
    Stems each word in a tokenized sentence (passed as its string representation)
"""
porter = PorterStemmer()
stemmed_sentence = []
for word in literal_eval(stemmed_sent):
stemmed_word = porter.stem(word)
stemmed_sentence.append(stemmed_word)
return stemmed_sentence | 5,355,975 |
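# Hedged usage sketch for stemmer above: note the argument is the *string
# representation* of a token list (it goes through literal_eval), not a list.
# Assumes PorterStemmer and literal_eval are imported as in the original module.
stems = stemmer("['running', 'flies', 'easily']")
# -> ['run', 'fli', 'easili'] with the NLTK Porter stemmer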
def _queue_number_priority(v):
"""Returns the task's priority.
    There's an overflow of 1 bit, as part of the timestamp overflows on the latter
part of the year, so the result is between 0 and 330. See _gen_queue_number()
for the details.
"""
return int(_queue_number_order_priority(v) >> 22) | 5,355,976 |
def test_generator_aovs(path):
"""Generate a function testing given `path`.
:param path: gproject path to test
:return: function
"""
def test_func(self):
"""test render pass render layer and AOV particularities
"""
assert path in g_parsed
p = g_parsed[path]
aov = grl_util.aov_node(p, 'RenderPass', 'Layer', 'Beauty')
self.assertIsInstance(aov, guerilla_parser.GuerillaNode)
self.assertEqual(aov.path, "|RenderPass|Layer|Input1")
rp_iter = (n for n in p.nodes if n.type == 'RenderPass')
for rp in rp_iter:
rl_iter = (n for n in rp.children if n.type == 'RenderLayer')
for rl in rl_iter:
for aov in rl.children:
self.assertEqual(aov.type, "LayerOut")
aov_2 = grl_util.aov_node(p, rp.name, rl.name,
aov.display_name)
self.assertIs(aov, aov_2)
return test_func | 5,355,977 |
def delimited_list(
expr: Union[str, ParserElement],
delim: Union[str, ParserElement] = ",",
combine: bool = False,
min: OptionalType[int] = None,
max: OptionalType[int] = None,
*,
allow_trailing_delim: bool = False,
) -> ParserElement:
"""Helper to define a delimited list of expressions - the delimiter
defaults to ','. By default, the list elements and delimiters can
have intervening whitespace, and comments, but this can be
overridden by passing ``combine=True`` in the constructor. If
``combine`` is set to ``True``, the matching tokens are
returned as a single token string, with the delimiters included;
otherwise, the matching tokens are returned as a list of tokens,
with the delimiters suppressed.
If ``allow_trailing_delim`` is set to True, then the list may end with
a delimiter.
Example::
delimited_list(Word(alphas)).parse_string("aa,bb,cc") # -> ['aa', 'bb', 'cc']
delimited_list(Word(hexnums), delim=':', combine=True).parse_string("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
"""
if isinstance(expr, str_type):
expr = ParserElement._literalStringClass(expr)
dlName = "{expr} [{delim} {expr}]...{end}".format(
expr=str(expr.copy().streamline()),
delim=str(delim),
end=" [{}]".format(str(delim)) if allow_trailing_delim else "",
)
if not combine:
delim = Suppress(delim)
if min is not None:
if min < 1:
raise ValueError("min must be greater than 0")
min -= 1
if max is not None:
if min is not None and max <= min:
raise ValueError("max must be greater than, or equal to min")
max -= 1
delimited_list_expr = expr + (delim + expr)[min, max]
if allow_trailing_delim:
delimited_list_expr += Opt(delim)
if combine:
return Combine(delimited_list_expr).set_name(dlName)
else:
return delimited_list_expr.set_name(dlName) | 5,355,978 |
def get_minion_node_ips(k8s_conf):
"""
Returns a list IP addresses to all configured minion hosts
:param k8s_conf: the configuration dict
:return: a list IPs
"""
out = list()
node_tuple_3 = get_minion_nodes_ip_name_type(k8s_conf)
for hostname, ip, node_type in node_tuple_3:
out.append(ip)
return out | 5,355,979 |
def main(argv):
"""
Main function to run strategic_svm.py. Set up SVM classifier, perform
    and evaluate attack, deploy defense and perform strategic attack. Results
    and adv. sample images are also saved for each task.
"""
# Parse arguments and store in model_dict
model_dict = svm_model_dict_create()
DR = model_dict['dim_red']
rev_flag = model_dict['rev']
strat_flag = 1
# Load dataset and create data_dict to store metadata
print('Loading data...')
dataset = model_dict['dataset']
if (dataset == 'MNIST') or (dataset == 'GTSRB'):
X_train, y_train, X_val, y_val, X_test, y_test = load_dataset(
model_dict)
img_flag = None
elif dataset == 'HAR':
X_train, y_train, X_test, y_test = load_dataset(model_dict)
img_flag = None
# TODO: 2 classes case
# if model_dict['classes'] == 2:
# X_train = X_train
data_dict = get_data_shape(X_train, X_test)
n_features = data_dict['no_of_features']
# Reshape dataset to have dimensions suitable for SVM
X_train_flat = X_train.reshape(-1, n_features)
X_test_flat = X_test.reshape(-1, n_features)
# Center dataset with mean of training set
mean = np.mean(X_train_flat, axis=0)
X_train_flat -= mean
X_test_flat -= mean
# Create a new model or load an existing one
clf = model_creator(model_dict, X_train_flat, y_train)
model_tester(model_dict, clf, X_test_flat, y_test)
# Assign parameters
n_mag = 25 # No. of deviations to consider
dev_list = np.linspace(0.1, 2.5, n_mag) # A list of deviations mag.
if dataset == 'MNIST':
rd_list = [784, 331, 200, 100, 90, 80, 70, 60, 50, 40, 30, 20, 10] # Reduced dimensions to use
# rd_list = [784]
elif dataset == 'HAR':
rd_list = [561, 200, 100, 90, 80, 70, 60, 50, 40, 30, 20, 10]
# rd_list = [561]
n_rd = len(rd_list)
output_list = []
clear_flag = None
# Clear old output files
if clear_flag ==1:
abs_path_o = resolve_path_o(model_dict)
_, fname = file_create(model_dict)
os.remove(abs_path_o + fname + '.txt')
_, fname = file_create(model_dict, rd=1, strat=strat_flag, rev=rev_flag)
os.remove(abs_path_o + fname + '.txt')
# Test clf against adv. samples
print('Performing attack...')
if model_dict['classes'] != 2:
for i in range(n_mag):
X_adv, y_ini = mult_cls_atk(clf, X_test_flat, mean, dev_list[i])
output_list.append(acc_calc_all(clf, X_adv, y_test, y_ini))
if img_flag != None:
save_svm_images(model_dict, data_dict, X_test, X_adv,
dev_list[i])
fname = print_svm_output(model_dict, output_list, dev_list)
# subprocess.call(["gnuplot -e \"filename='{}.png'; in_name='{}.txt'\" gnu_in_loop.plg".format(fname,fname)], shell=True)
# else:
# # TODO: 2 classes
# print('TODO')
# Retrain defense and strategic attack
print('--------------Retrain Defense & Strategic Attack--------------')
for rd in rd_list:
output_list = []
print('Reduced dimensions: {}'.format(rd))
# Dimension reduce dataset and reshape
X_train_dr, _, dr_alg = dr_wrapper(
X_train_flat, X_test_flat, DR, rd, y_train, rev=rev_flag)
# With dimension reduced dataset, create new model or load existing one
clf = model_creator(model_dict, X_train_dr, y_train, rd, rev_flag)
# Modify classifier to include transformation matrix
clf = model_transform(model_dict, clf, dr_alg)
model_tester(model_dict, clf, X_test_flat, y_test, rd, rev_flag)
# rev_flag = 1
# model_dict['rev'] = rev_flag
# # Dimension reduce dataset and reshape
# X_train_dr, _, dr_alg = dr_wrapper(
# X_train_flat, X_test_flat, DR, rd, y_train, rev=rev_flag)
#
# # With dimension reduced dataset, create new model or load existing one
# clf_1 = model_creator(model_dict, X_train_dr, y_train, rd, rev_flag)
# # Modify classifier to include transformation matrix
# clf_1 = model_transform(model_dict, clf_1, dr_alg)
# # Test model on original data
# model_tester(model_dict, clf_1, X_test_flat, y_test, rd, rev_flag)
#
# print clf_1.coef_[0]-clf.coef_[0]
# print np.linalg.norm(clf_1.coef_[0]), np.linalg.norm(clf.coef_[0])
# print np.dot(clf_1.coef_[0],clf.coef_[0])/(np.linalg.norm(clf_1.coef_[0])*np.linalg.norm(clf.coef_[0]))
# Strategic attack: create new adv samples based on retrained clf
print('Performing strategic attack...')
for i in range(n_mag):
X_adv, y_ini = mult_cls_atk(clf, X_test_flat, mean, dev_list[i])
output_list.append(acc_calc_all(clf, X_adv, y_test, y_ini))
if img_flag != None:
save_svm_images(model_dict, data_dict, X_test_flat, X_adv,
dev_list[i], rd, dr_alg, rev_flag)
fname = print_svm_output(model_dict, output_list, dev_list, rd,
strat_flag, rev_flag)
# fname = dataset +'_' + fname
subprocess.call(
["gnuplot -e \"mname='{}'\" gnu_in_loop.plg".format(fname)], shell=True) | 5,355,980 |
def calculate_ri(column):
"""
Function that calculates radiant intensity
"""
return float(sc.h * sc.c / 1e-9 * np.sum(column)) | 5,355,981 |
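# Hedged usage sketch for calculate_ri above: `column` is assumed to be an array
# of per-bin spectral values that are summed and scaled by h*c/1e-9, and `sc` is
# assumed to be scipy.constants as in the original module.
import numpy as np

column = np.array([0.1, 0.2, 0.3])
ri = calculate_ri(column)  # float(sc.h * sc.c / 1e-9 * 0.6)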
def find_kw_in_lines(kw, lines, addon_str=' = '):
"""
Returns the index of a list of strings that had a kw in it
Args:
kw: Keyword to find in a line
lines: List of strings to search for the keyword
addon_str: String to append to your key word to help filter
Return:
i: Integer of the index of a line containing a kw. -1 otherwise
"""
str_temp = '{}' + addon_str
for i, line in enumerate(lines):
s = str_temp.format(kw)
uncommented = line.strip('#')
if s in uncommented:
if s[0] == uncommented[0]:
break
    else:
        # No match
        i = -1
return i | 5,355,982 |
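# Hedged usage sketch for find_kw_in_lines above: looks for a "density = ..."
# assignment in a small config-style snippet; the lines and keyword are made up.
lines = ["# header", "density = 917.0", "temperature = 263.0"]
idx = find_kw_in_lines("density", lines)  # -> 1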
def delete_group(current_session, groupname):
"""
Deletes a group
"""
projects_to_purge = gp.get_group_projects(current_session, groupname)
remove_projects_from_group(current_session, groupname, projects_to_purge)
gp.clear_users_in_group(current_session, groupname)
gp.clear_projects_in_group(current_session, groupname)
gp.delete_group(current_session, groupname)
return {"result": "success"} | 5,355,983 |
def label_smoothed_nll_loss(lprobs, target, epsilon: float = 1e-8, ignore_index=None):
"""Adapted from fairseq
Parameters
----------
lprobs
Log probabilities of amino acids per position
target
Target amino acids encoded as integer indices
epsilon
Smoothing factor between 0 and 1, by default 1e-8
ignore_index, optional
Amino acid (encoded as integer) to ignore, by default None
Returns
-------
Negative log-likelihood loss
"""
nll_loss = -lprobs.gather(dim=-1, index=target)
smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
if ignore_index is not None:
pad_mask = target.eq(ignore_index)
nll_loss.masked_fill_(pad_mask, 0.0)
smooth_loss.masked_fill_(pad_mask, 0.0)
else:
nll_loss = nll_loss.squeeze(-1)
smooth_loss = smooth_loss.squeeze(-1)
nll_loss = nll_loss.sum()
smooth_loss = smooth_loss.sum()
eps_i = epsilon / lprobs.size(-1)
loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
return loss | 5,355,984 |
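# Hedged usage sketch for label_smoothed_nll_loss above: a tiny batch of two
# positions over a four-letter vocabulary; the shapes follow the gather/sum calls
# in the function (targets carry a trailing dimension of size 1).
import torch
import torch.nn.functional as F

logits = torch.randn(2, 4)          # 2 positions, 4 tokens
lprobs = F.log_softmax(logits, dim=-1)
target = torch.tensor([[1], [3]])   # integer indices, shape (2, 1)
loss = label_smoothed_nll_loss(lprobs, target, epsilon=0.1)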
def ParseFieldDefRequest(post_data, config):
"""Parse the user's HTML form data to update a field definition."""
field_name = post_data.get('name', '')
field_type_str = post_data.get('field_type')
# TODO(jrobbins): once a min or max is set, it cannot be completely removed.
min_value_str = post_data.get('min_value')
try:
min_value = int(min_value_str)
except (ValueError, TypeError):
min_value = None
max_value_str = post_data.get('max_value')
try:
max_value = int(max_value_str)
except (ValueError, TypeError):
max_value = None
regex = post_data.get('regex')
needs_member = 'needs_member' in post_data
needs_perm = post_data.get('needs_perm', '').strip()
grants_perm = post_data.get('grants_perm', '').strip()
notify_on_str = post_data.get('notify_on')
if notify_on_str in config_svc.NOTIFY_ON_ENUM:
notify_on = config_svc.NOTIFY_ON_ENUM.index(notify_on_str)
else:
notify_on = 0
is_required = 'is_required' in post_data
is_multivalued = 'is_multivalued' in post_data
field_docstring = post_data.get('docstring', '')
choices_text = post_data.get('choices', '')
applicable_type = post_data.get('applicable_type', '')
applicable_predicate = '' # TODO(jrobbins): placeholder for future feature
revised_labels = _ParseChoicesIntoWellKnownLabels(
choices_text, field_name, config)
return ParsedFieldDef(
field_name, field_type_str, min_value, max_value, regex,
needs_member, needs_perm, grants_perm, notify_on, is_required,
is_multivalued, field_docstring, choices_text, applicable_type,
applicable_predicate, revised_labels) | 5,355,985 |
async def get_museum_session_key() -> str:
"""
Retrieve a session key for the MuseumPlus service, generating a new
one if necessary.
:returns: Session key
"""
# We might have an active session key stored locally.
key_path = get_session_key_file_path()
try:
session_time = key_path.stat().st_mtime
session_key = key_path.read_text()
except FileNotFoundError:
# Create the parent directories and/or file if they don't exist
os.makedirs(key_path.parent, exist_ok=True)
session_time = time.time()
session_key = await generate_museum_session_key(previous_key=None)
# Regenerate a session key if it *could* have expired.
# This is done because the alternative is to test the session key for
# validity each time a session is created, and this would create
# more useless requests than regenerating a session key after the worker
# has stayed dormant for a while; a far more unlikely scenario.
maybe_expired = time.time() - SESSION_KEY_REGENERATE_TIMEOUT > session_time
if maybe_expired:
session_key = await generate_museum_session_key(
previous_key=session_key
)
return session_key | 5,355,986 |
def sftp_fail_cases(s1, s2):
"""
Test to verify negative scenarios
    Description : Verify the negative scenarios when the source
                  path of the file is invalid and when the destination
                  path is invalid
"""
print("\n############################################\n")
print("Verify SFTP negative test cases")
print("\n############################################\n")
switch1 = s1
switch2 = s2
# opsuccess = False
copy = "copy sftp"
username = "root"
hostip = get_switch_ip(switch2)
srcpath = "/etc/ssh/sshd_config"
destpath = "/home/admin/"
destfile = "trial_file"
invalidsrcpath = "/invalid/src_path"
srcfailmsg = "not found"
invaliddestpath = "/invalid/dest_file"
destfailmsg = "No such file or directory"
# Enable SFTP server on SW2
print("Enable SFTP server on switch2")
sftp_server_config_test(switch2, True)
# Invalid source path test
cmd = copy + " " + username + " " + hostip + " " + \
invalidsrcpath + " " + destpath + destfile
out = switch1(cmd)
assert srcfailmsg in out, \
"Verify invalid source path test - FAILED"
print("Verify invalid source path test - SUCCESS")
# Invalid destination path test
cmd = copy + " " + username + " " + hostip + \
" " + srcpath + " " + invaliddestpath
out = switch1(cmd)
assert destfailmsg in out, \
"Verify invalid destination path test - FAILED"
print("Verify invalid destination path test - SUCCESS") | 5,355,987 |
def parse_date(regexen, date_str):
"""
Parse a messy string into a granular date
`regexen` is of the form [ (regex, (granularity, groups -> datetime)) ]
"""
if date_str:
for reg, (gran, dater) in regexen:
m = re.match(reg, date_str)
if m:
try:
return gran, dater(m.groups())
except ValueError:
return 0, None
return 0, None | 5,355,988 |
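# Hedged usage sketch for parse_date above: a single YYYY-MM-DD pattern; the
# granularity value 3 is an illustrative convention, not one defined by the
# original code.
from datetime import datetime

regexen = [
    (r"(\d{4})-(\d{2})-(\d{2})",
     (3, lambda g: datetime(int(g[0]), int(g[1]), int(g[2])))),
]
gran, dt = parse_date(regexen, "2021-06-15")  # -> (3, datetime(2021, 6, 15))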
def floatScrollBar(*args, **kwargs):
"""
Create a scroll bar control that accepts only float values and is bound by a minimum and maximum value.
Returns: `string` Full path name to the control.
"""
pass | 5,355,989 |
def create_web_config(new_dir, filename):
"""
The function searches for the specified *filename* in *config* directory of this module
and, if that file exists, copies it to the *new_dir* directory.
Args:
new_dir (str): Config file *filename* will be created in this directory.
filename (str): Config file to copy.
"""
with open(new_dir + '/' + filename, 'wb') as f:
f.write(pkg_resources.resource_string(app_name, '/config/' + filename)) | 5,355,990 |
def scrape_inmates(example_url, force):
"""
Scrape Dane County inmate database
"""
d = DaneCountyInmatesDriver()
if example_url:
inmates = [example_url]
else:
inmates = d.inmates()
path = f"./inmates/13"
os.makedirs(path, exist_ok=True)
long_ago = datetime.now() - timedelta(days=7)
for url in inmates:
# The last digits are the "name number", whatever that means
name_number = re.search("\d+", url).group()
inmate_json = f'{path}/{name_number}.json'
failure_json = f'{path}/{name_number}.failure'
if os.path.exists(failure_json) and not force:
click.echo(f"Inmate {failure_json} already failed (use --force to retry)")
elif not os.path.exists(inmate_json) or \
datetime.fromtimestamp(os.path.getmtime(inmate_json)) < long_ago:
details = d.inmate_details(url)
if details:
with open(inmate_json, 'w') as f:
json.dump(details, f)
else:
click.echo(f"Inmate details failed at {url} (use --force to retry)")
Path(failure_json).touch()
d.close() | 5,355,991 |
def pytest_addoption(parser):
"""Add an option to run tests against a real AWS account instead of the Stubber."""
parser.addoption(
"--use-real-aws-may-incur-charges", action="store_true", default=False,
help="Connect to real AWS services while testing. WARNING: THIS MAY INCUR "
"CHARGES ON YOUR ACCOUNT!"
) | 5,355,992 |
def PreNotebook(*args, **kwargs):
"""PreNotebook() -> Notebook"""
val = _controls_.new_PreNotebook(*args, **kwargs)
return val | 5,355,993 |
def handle_cf_removed_obj_types(instance, action, pk_set, **kwargs):
"""
Handle the cleanup of old custom field data when a CustomField is removed from one or more ContentTypes.
"""
if action == 'post_remove':
instance.remove_stale_data(ContentType.objects.filter(pk__in=pk_set)) | 5,355,994 |
def user_voted(message_id: int, user_id: int) -> bool:
"""
    CHECK IF A USER VOTED ON A DETECTION REPORT
"""
return bool(
c.execute(
"""
SELECT *
FROM reports
WHERE message_id=? AND user_id=?
""",
(message_id, user_id),
).fetchone()
) | 5,355,995 |
def test_from_date(months):
"""Test the from_date method.
"""
assert ttcal.Month.from_date(date(2012, 7, 10)) == months[2]
assert ttcal.Month.from_date(date(2012, 10, 20)) == months[1] | 5,355,996 |
def get_values(wsdl_url, site_code, variable_code, start=None, end=None,
suds_cache=("default",), timeout=None, user_cache=False):
"""
Retrieves site values from a WaterOneFlow service using a GetValues request.
Parameters
----------
wsdl_url : str
URL of a service's web service definition language (WSDL) description.
All WaterOneFlow services publish a WSDL description and this url is the
entry point to the service.
site_code : str
Site code of the site you'd like to get values for. Site codes MUST
contain the network and be of the form <network>:<site_code>, as is
required by WaterOneFlow.
variable_code : str
Variable code of the variable you'd like to get values for. Variable
codes MUST contain the network and be of the form
<vocabulary>:<variable_code>, as is required by WaterOneFlow.
start : ``None`` or datetime (see :ref:`dates-and-times`)
Start of the query datetime range. If omitted, data from the start of
the time series to the ``end`` timestamp will be returned (but see caveat,
in note below).
end : ``None`` or datetime (see :ref:`dates-and-times`)
End of the query datetime range. If omitted, data from the ``start``
timestamp to end of the time series will be returned (but see caveat,
in note below).
suds_cache : ``None`` or tuple
SOAP local cache duration for WSDL description and client object.
Pass a cache duration tuple like ('days', 3) to set a custom duration.
Duration may be in months, weeks, days, hours, or seconds.
If unspecified, the default duration (1 day) will be used.
Use ``None`` to turn off caching.
timeout : int or float
suds SOAP URL open timeout (seconds).
If unspecified, the suds default (90 seconds) will be used.
user_cache : bool
If False (default), use the system temp location to store cache WSDL and
other files. Use the default user ulmo directory if True.
Returns
-------
site_values : dict
a python dict containing values
Notes
-----
If both ``start`` and ``end`` parameters are omitted, the entire time series
available will typically be returned. However, some service providers will return
an error if either start or end are omitted; this is specially true for services
hosted or redirected by CUAHSI via the CUAHSI HydroPortal, which have a 'WSDL' url
using the domain http://hydroportal.cuahsi.org. For HydroPortal, a start datetime
of '1753-01-01' has been known to return valid results while catching the oldest
start times, though the response may be broken up into chunks ('paged').
"""
suds_client = _get_client(wsdl_url, suds_cache, timeout, user_cache)
# Note from Emilio:
# Not clear if WOF servers really do handle time zones (time offsets or
# "Z" in the iso8601 datetime strings. In the past, I (Emilio) have
# passed naive strings to GetValues(). if a datetime object is passed to
# this ulmo function, the isodate code above will include it in the
# resulting iso8601 string; if not, no. Test effect of dt_isostr having
# a timezone code or offset, vs not having it (the latter, naive dt
# strings, is what I've been using all along)
# the interpretation of start and end time zone is server-dependent
start_dt_isostr = None
end_dt_isostr = None
if start is not None:
start_datetime = util.convert_datetime(start)
start_dt_isostr = isodate.datetime_isoformat(start_datetime)
if end is not None:
end_datetime = util.convert_datetime(end)
end_dt_isostr = isodate.datetime_isoformat(end_datetime)
waterml_version = _waterml_version(suds_client)
response = suds_client.service.GetValues(
site_code, variable_code, startDate=start_dt_isostr,
endDate=end_dt_isostr)
response_buffer = io.BytesIO(util.to_bytes(response))
if waterml_version == '1.0':
values = waterml.v1_0.parse_site_values(response_buffer)
elif waterml_version == '1.1':
values = waterml.v1_1.parse_site_values(response_buffer)
if not variable_code is None:
return list(values.values())[0]
else:
return values | 5,355,997 |
def GetDot1xInterfaces():
"""Retrieves attributes of all dot1x compatible interfaces.
Returns:
Array of dict or empty array
"""
interfaces = []
for interface in GetNetworkInterfaces():
if interface['type'] == 'IEEE80211' or interface['type'] == 'Ethernet':
if (interface['builtin'] and
'AppleThunderboltIPPort' not in interface['bus']):
interfaces.append(interface)
return interfaces | 5,355,998 |
def get_v_l(mol, at_name, r_ea):
"""
Returns list of the l's, and a nconf x nl array, v_l values for each l: l= 0,1,2,...,-1
"""
vl = generate_ecp_functors(mol._ecp[at_name][1])
v_l = np.zeros([r_ea.shape[0], len(vl)])
for l, func in vl.items(): # -1,0,1,...
v_l[:, l] = func(r_ea)
return vl.keys(), v_l | 5,355,999 |