| content (stringlengths 22-815k) | id (int64 0-4.91M) |
| --- | --- |
def create_output(whole_exome_indel_list, whole_exome_snp_list, coding_exons_indel_list, coding_exons_snp_list):
"""
This function creates the output file.
    :param whole_exome_indel_list: A list of row groups, each group holding rows of values for the output lines.
    :type whole_exome_indel_list: list
    :param whole_exome_snp_list: A list of rows, each row holding the values for a single output line.
    :type whole_exome_snp_list: list
    :param coding_exons_indel_list: A list of row groups, each group holding rows of values for the output lines.
    :type coding_exons_indel_list: list
    :param coding_exons_snp_list: A list of rows, each row holding the values for a single output line.
    :type coding_exons_snp_list: list
    :rtype: None
"""
with open(os.path.join(output_dir, output_file), 'w') as outfile_obj:
header_columns = ['Case', 'Number of bases', 'Truth total', 'TP', 'FP', 'FN',
'TN = TotalBases - (TP + FN + FP)', 'TotalNegative = TN + FP', 'NPA = TN/(Total Negative)',
'Precision', 'Recall']
# SNPs Whole Exome
outfile_obj.write('\tBenchmarking SNPs Whole Exome\n')
outfile_obj.write('\t'.join(header_columns) + '\n')
for snp in whole_exome_snp_list:
outfile_obj.write('\t'.join(snp) + '\n')
# INDELs Whole Exome
outfile_obj.write('\tBenchmarking INDELs Whole Exome\n')
outfile_obj.write('\t'.join(header_columns) + '\n')
for indels in whole_exome_indel_list:
for indel in indels:
outfile_obj.write('\t'.join(indel) + '\n')
# SNPs Coding Exons
outfile_obj.write('\tBenchmarking SNPs Coding Exons\n')
outfile_obj.write('\t'.join(header_columns) + '\n')
for snp in coding_exons_snp_list:
outfile_obj.write('\t'.join(snp) + '\n')
# INDELs Coding Exons
outfile_obj.write('\tBenchmarking INDELs Coding Exons\n')
outfile_obj.write('\t'.join(header_columns) + '\n')
for indels in coding_exons_indel_list:
for indel in indels:
outfile_obj.write('\t'.join(indel) + '\n')
# screen output
    print('Output file created. It can be found at', os.path.join(output_dir, output_file)) | 5,352,700 |
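# Hedged usage sketch (not part of the original dataset row): it assumes the
# module-level globals `output_dir` and `output_file` referenced above are set,
# and illustrates the expected shapes: the SNP lists hold rows directly, while
# the INDEL lists hold groups of rows, matching the nested loop above.
import os
output_dir, output_file = '.', 'benchmark_summary.tsv'
snp_rows = [['case1', '1000', '10', '8', '1', '2', '997', '998', '0.999', '0.889', '0.800']]
indel_groups = [[['case1', '1000', '5', '4', '0', '1', '995', '995', '1.000', '1.000', '0.800']]]
create_output(indel_groups, snp_rows, indel_groups, snp_rows)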
def get_offset(sample_time):
"""
    Find simple offset values.
During the sample time of this function
the BBB with the magnetometer on should be rotated
    along all axes.
sample_time is in seconds
"""
    start = time.monotonic()  # time.clock() was removed in Python 3.8
mag_samples = []
mag_max = [0,0,0]
mag_min = [0,0,0]
offset = [0,0,0]
    while (time.monotonic() - start) < sample_time:
raw_data = get_raw_mag()
mag_samples.append(transform_readable(raw_data))
# blink leds to signify timespan
while mag_samples != []:
a = mag_samples.pop()
# find maximum, minimum Values
for i in range(3):
if (a[i] > mag_max[i]):
mag_max[i] = a[i]
            if (a[i] < mag_min[i]):
                mag_min[i] = a[i]
#print(mag_max)
#print(mag_min)
# calculate offset from Extrema
for i in range(3):
offset[i] = (mag_max[i] + mag_min[i])/2
return offset | 5,352,701 |
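# Hedged usage sketch (not part of the original): the sampled hard-iron offset
# is typically subtracted from each subsequent reading. `get_raw_mag()` and
# `transform_readable()` are the same helpers the function above relies on.
def calibrated_reading(offset):
    # Subtract the per-axis offset from a fresh magnetometer sample.
    raw = transform_readable(get_raw_mag())
    return [raw[i] - offset[i] for i in range(3)]

# offset = get_offset(10)  # rotate the board along all axes for ~10 s
# print(calibrated_reading(offset))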
def get_zips(directory: str) -> list:
"""
    Return the ZIP files from a specified directory after running
some sanity checks
"""
zips = {}
for file in [os.path.join(dp, file) for dp, dn, fn in os.walk(directory) for file in fn]:
if file.split('.')[-1] != 'zip':
continue
zip_name = file.split('/')[-1]
try:
version, buildtype, device, builddate = get_metadata_from_zip(zip_name)
except IndexError:
continue
if buildtype.lower() not in ALLOWED_BUILDTYPES:
continue
if version not in ALLOWED_VERSIONS:
continue
if device in zips:
if get_date_from_zip(zips[device]) > builddate:
continue
zips[device] = zip_name
data = list(zips.values())
data.sort()
return data | 5,352,702 |
def get_multimode_2d_dist(num_modes: int = 1, scale: float = 1.0):
"""Get a multimodal distribution of Gaussians."""
angles = jnp.linspace(0, jnp.pi * 2, num_modes + 1)
angles = angles[:-1]
x, y = jnp.cos(angles) * scale / 2., jnp.sin(angles) * scale / 2.
loc = jnp.array([x, y]).T
scale = jnp.ones((num_modes, 2)) * scale / 10.
return tfd.MixtureSameFamily(
mixture_distribution=tfd.Categorical(
probs=jnp.ones((num_modes,)) / num_modes),
components_distribution=tfd.MultivariateNormalDiag(
loc=loc, scale_diag=scale)) | 5,352,703 |
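# Hedged usage sketch (not part of the original): it assumes `tfd` and `jnp`
# above come from TensorFlow Probability's JAX substrate and jax.numpy.
import jax

dist = get_multimode_2d_dist(num_modes=4, scale=2.0)
samples = dist.sample(8, seed=jax.random.PRNGKey(0))  # shape (8, 2)
log_p = dist.log_prob(samples)                        # shape (8,)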
def residual_squared_error(data_1, data_2):
"""
    Calculate the residual squared error between two arrays.
    Parameters
    ----------
    data_1: numpy array
        Observed data
    data_2: numpy array
        Calculated values
    Return
    ------
    rse: float
        residual squared error, sqrt(RSS / (len(data_1) - 2))
"""
RSS = np.sum(np.square(data_1 - data_2))
rse = np.sqrt(RSS / (len(data_1) - 2))
return rse | 5,352,704 |
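# Hedged worked example (not part of the original): four points each off by
# 0.1 give RSS = 0.04, so the function returns sqrt(0.04 / (4 - 2)) ~= 0.1414.
import numpy as np

observed = np.array([1.0, 2.0, 3.0, 4.0])
predicted = np.array([1.1, 1.9, 3.1, 3.9])
print(residual_squared_error(observed, predicted))  # ~0.1414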
def by_index(pot):
""" Build a new potential where the keys of the potential dictionary
correspond to the indices along values of n-dimensional grids,
rather than, possibly, the coordinate values of the grids themselves.
Key Transformation:
((grid_val_i, grid_val_j, ...)_i,) -> ((i, j, ...)_i,)
:param pot: potential along a coordinate
:type pot: dict[tuple(float)] = float
:rtype: dict[tuple(int)] = float
"""
pot_keys = list(pot.keys())
dim = dimension(pot)
remap_dcts = []
for i in range(dim):
_coords = sorted(list(set(lst[i] for lst in pot_keys)))
_idxs = list(range(len(_coords)))
remap_dcts.append(dict(zip(_coords, _idxs)))
new_dct = {}
for keys in pot_keys:
new_tup = ()
for i, val in enumerate(keys):
new_tup += (remap_dcts[i][val],)
new_dct[new_tup] = pot[keys]
return new_dct | 5,352,705 |
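# Hedged worked example (not part of the original): coordinate keys are mapped
# to their sorted positions along each dimension. It assumes the module's
# `dimension(pot)` helper reports 2 for these 2-tuple keys.
pot = {(0.0, 30.0): 1.5, (0.0, 60.0): 2.5, (0.5, 30.0): 3.5, (0.5, 60.0): 4.5}
print(by_index(pot))
# {(0, 0): 1.5, (0, 1): 2.5, (1, 0): 3.5, (1, 1): 4.5}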
def current_user():
"""Получить текущего пользователя или отредактировать профиль"""
user = get_user_from_request()
if request.method == "POST":
json = request.get_json()
user.email = json.get("email", user.email)
user.name = json.get("name", user.name)
user.about = sanitize(json.get("about", user.about))
user.birthday = json.get("birthday", user.birthday)
if "avatar" in json:
content = Content.get_or_none(Content.id == json["avatar"])
if content:
if not content.is_image:
return errors.user_avatar_is_not_image()
elif content.size > 1024 * 500: # 500kb
return errors.user_avatar_too_large()
else:
user.avatar = content
user.save()
user = User.get(User.id == user.id)
return jsonify({"success": 1, "user": user.to_json_with_email()}) | 5,352,706 |
def writelines_infile(filename, lines):
    """Write the given lines to the specified file."""
    with open(filename, "w") as file:
        file.writelines(lines) | 5,352,707 |
def test_impossible_traveler_det_distance_bad_dest_coords(mocker):
"""
Given:
        Coordinate lists to calculate distances between, when the two are different
When:
Calculating impossible traveler distances
    Then:
Raise an error
"""
with pytest.raises(ValueError) as e:
verify_coords(BAD_ARGS)
if not e:
assert False | 5,352,708 |
async def async_union_polygons(bal_name, geom_list):
"""union a set of polygons & return the resulting multipolygon"""
start_time = datetime.now()
big_poly = unary_union(geom_list)
print(f"\t - {bal_name} : set of polygons unioned: {datetime.now() - start_time}")
return big_poly | 5,352,709 |
async def test_websocket_get_triggers(hass, hass_ws_client, device_reg, entity_reg):
"""Test we get the expected triggers from a light through websocket."""
await async_setup_component(hass, "device_automation", {})
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create("light", "test", "5678", device_id=device_entry.id)
expected_triggers = [
{
"platform": "device",
"domain": "light",
"type": "turned_off",
"device_id": device_entry.id,
"entity_id": "light.test_5678",
},
{
"platform": "device",
"domain": "light",
"type": "turned_on",
"device_id": device_entry.id,
"entity_id": "light.test_5678",
},
]
client = await hass_ws_client(hass)
await client.send_json(
{
"id": 1,
"type": "device_automation/trigger/list",
"device_id": device_entry.id,
}
)
msg = await client.receive_json()
assert msg["id"] == 1
assert msg["type"] == TYPE_RESULT
assert msg["success"]
triggers = msg["result"]
assert _same_lists(triggers, expected_triggers) | 5,352,710 |
def _error_text(because: str, text: str, backend: usertypes.Backend) -> str:
"""Get an error text for the given information."""
other_backend, other_setting = _other_backend(backend)
if other_backend == usertypes.Backend.QtWebKit:
warning = ("<i>Note that QtWebKit hasn't been updated since "
"July 2017 (including security updates).</i>")
suffix = " (not recommended)"
else:
warning = ""
suffix = ""
return ("<b>Failed to start with the {backend} backend!</b>"
"<p>qutebrowser tried to start with the {backend} backend but "
"failed because {because}.</p>{text}"
"<p><b>Forcing the {other_backend.name} backend{suffix}</b></p>"
"<p>This forces usage of the {other_backend.name} backend by "
"setting the <i>backend = '{other_setting}'</i> option "
"(if you have a <i>config.py</i> file, you'll need to set "
"this manually). {warning}</p>".format(
backend=backend.name, because=because, text=text,
other_backend=other_backend, other_setting=other_setting,
warning=warning, suffix=suffix)) | 5,352,711 |
def COUNT(condition: pd.DataFrame, n: int):
"""the number of days fits the 'condition' in the past n days
Args:
condition (pd.DataFrame): dataframe index by date time(level 0) and asset(level 1), containing bool values
n (int): the number of past days
"""
return condition.rolling(n, center=False, min_periods=n).sum() | 5,352,712 |
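# Hedged usage sketch (not part of the original): for a single asset the call
# reduces to a rolling sum of booleans, i.e. how many of the last n rows met
# the condition; the first n-1 rows are NaN because min_periods=n.
import pandas as pd

dates = pd.date_range("2021-01-01", periods=5)
cond = pd.DataFrame({"close_up": [True, False, True, True, False]}, index=dates)
print(COUNT(cond, 3))  # NaN, NaN, 2.0, 2.0, 2.0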
def Mat33_nrow():
"""Mat33_nrow() -> int"""
return _simbody.Mat33_nrow() | 5,352,713 |
def test_build_instru23():
"""USE_PL=0 PL_OVERLOAD_NEW_DELETE=0"""
build_target("testprogram", test_build_instru23.__doc__) | 5,352,714 |
def isstruct(ob):
""" isstruct(ob)
Returns whether the given object is an SSDF struct.
"""
if hasattr(ob, '__is_ssdf_struct__'):
return bool(ob.__is_ssdf_struct__)
else:
return False | 5,352,715 |
def where_between(field_name, start_date, end_date):
"""
Return the bit of query for the dates interval.
"""
str = """ {0} between date_format('{1}', '%%Y-%%c-%%d %%H:%%i:%%S')
and date_format('{2}', '%%Y-%%c-%%d 23:%%i:%%S')
""" .format( field_name,
start_date.strftime("%Y-%m-%d %H:%M:%S"),
end_date.strftime("%Y-%m-%d %H:%M:%S"))
    return query | 5,352,716 |
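# Hedged worked example (not part of the original): the helper only builds the
# SQL fragment; the doubled '%%' is presumably left for a later %-style
# parameter-formatting layer to unescape.
from datetime import datetime

clause = where_between("created_at",
                       datetime(2021, 1, 1, 0, 0, 0),
                       datetime(2021, 1, 31, 23, 59, 59))
print(clause)
# " created_at between date_format('2021-01-01 00:00:00', '%%Y-%%c-%%d %%H:%%i:%%S')
#   and date_format('2021-01-31 23:59:59', '%%Y-%%c-%%d 23:%%i:%%S') "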
def to_console_formatted_string(data: dict) -> str:
"""..."""
def make_line(key: str) -> str:
if key.startswith('__cauldron_'):
return ''
data_class = getattr(data[key], '__class__', data[key])
data_type = getattr(data_class, '__name__', type(data[key]))
value = '{}'.format(data[key])[:250].replace('\n', '\n ')
if value.find('\n') != -1:
value = '\n{}'.format(value)
return '+ {name} ({type}): {value}'.format(
name=key,
type=data_type,
value=value
)
keys = list(data.keys())
keys.sort()
lines = list(filter(
lambda line: len(line) > 0,
[make_line(key) for key in keys]
))
return '\n'.join(lines) | 5,352,717 |
def cluster_instance_get_info_ajax(request, c_id):
"""
get cluster instance status
"""
dic = {"res": True, "info":None, "err":None}
instance_id = request.GET.get("instance_id")
require_vnc = request.GET.get("require_vnc")
if require_vnc == "true":
require_vnc = True
else:
require_vnc = False
if instance_id.isdecimal():
instance_id = int(instance_id)
instance_info = get_cluster_instance_info(request.user, instance_id,require_vnc=require_vnc)
if not instance_info:
raise Http404
dic["info"] = {"status":instance_info["status"], "status_name":instance_info["status_name"], "vnc_url":instance_info["vnc_url"]}
else:
dic["res"] = False
dic["err"] = "Invalid ID"
return JsonResponse(dic) | 5,352,718 |
def timeit(verbose=False):
"""
Time functions via decoration. Optionally output time to stdout.
Parameters:
-----------
verbose : bool
Example Usage:
>>> @timeit(verbose=True)
>>> def foo(*args, **kwargs): pass
"""
def _timeit(f):
@wraps(f)
def wrapper(*args, **kwargs):
if verbose:
start = time.time()
res = f(*args, **kwargs)
runtime = time.time() - start
print(f'{f.__name__!r} in {runtime:.4f} s')
else:
res = f(*args, **kwargs)
return res
return wrapper
return _timeit | 5,352,719 |
async def _iter_reference_values(
client: redis.ResourceClient,
index: redis.ResourceIndex,
key: _RedisKeyT,
*,
window_size: int = DEFAULT_WINDOW_SIZE,
match: typing.Optional[str] = None,
) -> typing.AsyncIterator[typing.Iterator[typing.Optional[bytes]]]:
"""Asynchronously iterate over slices of the values referenced by a REFERENCE set."""
connection = client.get_connection(index)
async for window in _iter_reference_keys(client, key, window_size=window_size, match=match):
yield await connection.mget(*window) | 5,352,720 |
def signal_period(peaks, sampling_rate=1000, desired_length=None,
interpolation_order="cubic"):
"""Calculate signal period from a series of peaks.
Parameters
----------
peaks : list, array, DataFrame, Series or dict
The samples at which the peaks occur. If an array is passed in, it is
assumed that it was obtained with `signal_findpeaks()`. If a DataFrame
is passed in, it is assumed it is of the same length as the input
signal in which occurrences of R-peaks are marked as "1", with such
containers obtained with e.g., ecg_findpeaks() or rsp_findpeaks().
sampling_rate : int
The sampling frequency of the signal that contains peaks (in Hz, i.e.,
samples/second). Defaults to 1000.
desired_length : int
By default, the returned signal rate has the same number of elements as
the raw signal. If set to an integer, the returned signal rate will be
interpolated between peaks over `desired_length` samples. Has no
effect if a DataFrame is passed in as the `signal` argument. Defaults
to None.
interpolation_order : str
Order used to interpolate the rate between peaks. See
`signal_interpolate()`.
Returns
-------
array
A vector containing the period.
See Also
--------
signal_findpeaks, signal_fixpeaks, signal_plot
Examples
--------
>>> import neurokit2 as nk
>>>
>>> signal = nk.signal_simulate(duration=10, sampling_rate=1000,
>>> frequency=1)
>>> info = nk.signal_findpeaks(signal)
>>>
>>> rate = nk.signal_rate(peaks=info["Peaks"])
>>> nk.signal_plot(rate)
"""
peaks, desired_length = _signal_formatpeaks_sanitize(peaks, desired_length)
# Sanity checks.
if len(peaks) <= 3:
print("NeuroKit warning: _signal_formatpeaks(): too few peaks detected"
" to compute the rate. Returning empty vector.")
return np.full(desired_length, np.nan)
# Calculate period in sec, based on peak to peak difference and make sure
# that rate has the same number of elements as peaks (important for
# interpolation later) by prepending the mean of all periods.
period = np.ediff1d(peaks, to_begin=0) / sampling_rate
period[0] = np.mean(period[1:])
# Interpolate all statistics to desired length.
if desired_length != np.size(peaks):
period = signal_interpolate(peaks, period,
desired_length=desired_length,
method=interpolation_order)
return period | 5,352,721 |
def model_co_group_by_key_tuple(email_list, phone_list, output_path):
"""Applying a CoGroupByKey Transform to a tuple.
URL: https://cloud.google.com/dataflow/model/group-by-key
"""
import google.cloud.dataflow as df
from google.cloud.dataflow.utils.options import PipelineOptions
p = df.Pipeline(options=PipelineOptions())
# [START model_group_by_key_cogroupbykey_tuple]
# Each data set is represented by key-value pairs in separate PCollections.
# Both data sets share a common key type (in this example str).
# The email_list contains values such as: ('joe', '[email protected]') with
# multiple possible values for each key.
# The phone_list contains values such as: ('mary': '111-222-3333') with
# multiple possible values for each key.
emails = p | df.Create('email', email_list)
phones = p | df.Create('phone', phone_list)
# The result PCollection contains one key-value element for each key in the
# input PCollections. The key of the pair will be the key from the input and
# the value will be a dictionary with two entries: 'emails' - an iterable of
# all values for the current key in the emails PCollection and 'phones': an
# iterable of all values for the current key in the phones PCollection.
# For instance, if 'emails' contained ('joe', '[email protected]') and
# ('joe', '[email protected]'), then 'result' will contain the element
# ('joe', {'emails': ['[email protected]', '[email protected]'], 'phones': ...})
result = {'emails': emails, 'phones': phones} | df.CoGroupByKey()
def join_info((name, info)):
return '; '.join(['%s' % name,
'%s' % ','.join(info['emails']),
'%s' % ','.join(info['phones'])])
contact_lines = result | df.Map(join_info)
# [END model_group_by_key_cogroupbykey_tuple]
contact_lines | df.io.Write(df.io.TextFileSink(output_path))
p.run() | 5,352,722 |
def get_recommendation(anime_name, cosine_sim, clean_anime, anime_index):
"""
Getting pairwise similarity scores for all anime in the data frame.
The function returns the top 10 most similar anime to the given query.
"""
idx = anime_index[anime_name]
sim_scores = list(enumerate(cosine_sim[idx]))
sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)
sim_scores = sim_scores[0:11]
anime_indices = [i[0] for i in sim_scores]
result = clean_anime[['name']].iloc[anime_indices].drop(idx)
return result | 5,352,723 |
def get_nwb_metadata(experiment_id):
"""
Collects metadata based on the experiment id and converts the weight to a float.
This is needed for further export to nwb_converter.
This function also validates, that all metadata is nwb compatible.
:param experiment_id: The experiment id given by the user.
:return: Final nwb metadata to be passed on.
:rtype: dict
"""
metadata = get_raw_nwb_metadata(experiment_id)
# nwb_converter unfortunately needs the weight to be a float in kg.
metadata["Subject"]["weight"] = convert_weight(metadata["Subject"]["weight"])
if validate_pynwb_data(metadata):
return metadata
else:
raise Exception("Could not validate nwb file.") | 5,352,724 |
def string_to_value_error_mark(string: str) -> Tuple[float, Union[float, None], str]:
"""
    Convert a string such as "1.23(4)" to a value, its error, and a trailing mark.
    Parameters
    ----------
    string : str
        A number, optionally followed by the uncertainty of its last digits in
        parentheses and by a trailing mark.
    Returns
    -------
    value : float
        Value.
    error : float
        Error.
    mark : str
        Any text after the closing parenthesis.
"""
value, error, mark = None, None, ""
ind_1 = string.find("(")
s_sigma = ""
if value == ".":
pass
elif ind_1 != -1:
ind_2 = string.find(")")
if ind_2 > ind_1:
s_sigma = string[(ind_1+1):ind_2]
if not(s_sigma.isdigit()):
s_sigma = ""
str_1 = string.split("(")[0]
value = float(str_1)
mark = string[ind_2+1:].strip()
if s_sigma != "":
s_h = "".join(["0" if _h.isdigit() else _h for _h in
str_1[:-len(s_sigma)]])
error = abs(float(s_h+s_sigma))
else:
error = 0.
else:
try:
value = float(string)
except ValueError:
value = None
return value, error, mark | 5,352,725 |
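# Hedged worked examples (not part of the original): a parenthesised digit
# string is read as the uncertainty in the last digits, and any text after the
# closing parenthesis is returned as the mark.
print(string_to_value_error_mark("1.23(4)"))     # (1.23, 0.04, '')
print(string_to_value_error_mark("0.5612(13)"))  # (0.5612, 0.0013, '')
print(string_to_value_error_mark("7.0"))         # (7.0, None, '')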
def _children_with_tags(element, tags):
"""Returns child elements of the given element whose tag is in a given list.
Args:
element: an ElementTree.Element.
tags: a list of strings that are the tags to look for in child elements.
Returns:
an iterable of ElementTree.Element instances, which are the children of
the input element whose tags matched one of the elements of the list.
"""
return itertools.chain(*(_children_with_tag(element, tag) for tag in tags)) | 5,352,726 |
def train_and_eval():
"""Train and evaluate StackOver NWP task."""
logging.info('Show FLAGS for debugging:')
for f in HPARAM_FLAGS:
logging.info('%s=%s', f, FLAGS[f].value)
hparam_dict = collections.OrderedDict([
(name, FLAGS[name].value) for name in HPARAM_FLAGS
])
if FLAGS.experiment_type == 'private':
# Evaluate on StackOverflow
train_dataset_computation, train_set, validation_set, test_set = (
_preprocess_data('stackoverflow_private', FLAGS.vocab_size,
FLAGS.num_oov_buckets, FLAGS.sequence_length,
FLAGS.num_validation_examples, FLAGS.client_batch_size,
FLAGS.client_epochs_per_round,
FLAGS.max_elements_per_user))
elif FLAGS.experiment_type == 'public_SO' or FLAGS.experiment_type == 'stackoverflow_SGD':
# Evaluate on StackOverflow
_, train_set_private, _, _ = (
_preprocess_data('stackoverflow_private', FLAGS.vocab_size,
FLAGS.num_oov_buckets, FLAGS.sequence_length,
FLAGS.num_validation_examples, FLAGS.client_batch_size,
FLAGS.client_epochs_per_round,
FLAGS.max_elements_per_user))
train_dataset_computation, train_set, validation_set, test_set = (
_preprocess_data('stackoverflow_public', FLAGS.vocab_size,
FLAGS.num_oov_buckets, FLAGS.sequence_length,
FLAGS.num_validation_examples, FLAGS.client_batch_size,
FLAGS.client_epochs_per_round,
FLAGS.max_elements_per_user))
client_ids_size = int(100)
training_set_client_ids = train_set.client_ids[:client_ids_size]
elif FLAGS.experiment_type == 'warmstart':
#Evaluate on StackOverflow
train_dataset_computation, train_set, validation_set, test_set = (
_preprocess_data('stackoverflow_private', FLAGS.vocab_size,
FLAGS.num_oov_buckets, FLAGS.sequence_length,
FLAGS.num_validation_examples, FLAGS.client_batch_size,
FLAGS.client_epochs_per_round,
FLAGS.max_elements_per_user))
input_spec = train_dataset_computation.type_signature.result.element
metrics = _get_metrics(FLAGS.vocab_size, FLAGS.num_oov_buckets)
if FLAGS.use_tff_learning:
iterative_process, evaluate_fn, server_state_update_fn = _build_tff_learning_model_and_process(
input_spec, metrics)
else:
iterative_process, evaluate_fn, server_state_update_fn = _build_custom_model_and_process(
input_spec, metrics)
iterative_process = tff.simulation.compose_dataset_computation_with_iterative_process(
dataset_computation=train_dataset_computation, process=iterative_process)
if FLAGS.total_epochs is None:
# def client_dataset_ids_fn(round_num: int, epoch: int):
# return _sample_client_ids(FLAGS.clients_per_round, train_set, round_num,
# epoch)
def client_dataset_ids_fn(round_num: int, epoch: int):
logging.info("Sampling from subset of public")
return _sample_public_client_ids(FLAGS.clients_per_round, training_set_client_ids, round_num, epoch)
logging.info('Sample clients for max %d rounds', FLAGS.total_rounds)
total_epochs = 0
else:
client_shuffer = training_loop.ClientIDShuffler(FLAGS.clients_per_round,
train_set)
client_dataset_ids_fn = client_shuffer.sample_client_ids
logging.info('Shuffle clients for max %d epochs and %d rounds',
FLAGS.total_epochs, FLAGS.total_rounds)
total_epochs = FLAGS.total_epochs
warmstart_loop.run(
iterative_process,
client_dataset_ids_fn,
warmstart_file=FLAGS.warmstart_file,
validation_fn=functools.partial(evaluate_fn, dataset=validation_set),
total_epochs=total_epochs,
total_rounds=FLAGS.total_rounds,
experiment_name=FLAGS.experiment_name,
train_eval_fn=None,
test_fn=functools.partial(evaluate_fn, dataset=test_set),
root_output_dir=FLAGS.root_output_dir,
hparam_dict=hparam_dict,
rounds_per_eval=FLAGS.rounds_per_eval,
rounds_per_checkpoint=FLAGS.rounds_per_checkpoint,
rounds_per_train_eval=2000,
server_state_epoch_update_fn=server_state_update_fn)
return
else:
    raise ValueError('Experiment type is not supported: {}'.format(
        FLAGS.experiment_type))
input_spec = train_dataset_computation.type_signature.result.element
metrics = _get_metrics(FLAGS.vocab_size, FLAGS.num_oov_buckets)
if FLAGS.use_tff_learning:
iterative_process, evaluate_fn, server_state_update_fn = _build_tff_learning_model_and_process(
input_spec, metrics)
else:
iterative_process, evaluate_fn, server_state_update_fn = _build_custom_model_and_process(
input_spec, metrics)
iterative_process = tff.simulation.compose_dataset_computation_with_iterative_process(
dataset_computation=train_dataset_computation, process=iterative_process)
if FLAGS.total_epochs is None:
def client_dataset_ids_fn(round_num: int, epoch: int):
if FLAGS.experiment_type == 'public_SO' or FLAGS.experiment_type == 'stackoverflow_SGD':
logging.info("Sampling from subset of public")
return _sample_public_client_ids(FLAGS.clients_per_round, training_set_client_ids, round_num, epoch)
else:
return _sample_client_ids(FLAGS.clients_per_round, train_set, round_num,
epoch)
logging.info('Sample clients for max %d rounds', FLAGS.total_rounds)
total_epochs = 0
else:
client_shuffer = training_loop.ClientIDShuffler(FLAGS.clients_per_round,
train_set)
client_dataset_ids_fn = client_shuffer.sample_client_ids
logging.info('Shuffle clients for max %d epochs and %d rounds',
FLAGS.total_epochs, FLAGS.total_rounds)
total_epochs = FLAGS.total_epochs
if FLAGS.experiment_type != 'stackoverflow_SGD':
training_loop.run(
iterative_process,
client_dataset_ids_fn,
validation_fn=functools.partial(evaluate_fn, dataset=validation_set),
total_epochs=total_epochs,
total_rounds=FLAGS.total_rounds,
experiment_name=FLAGS.experiment_name,
train_eval_fn=None,
test_fn=functools.partial(evaluate_fn, dataset=test_set),
root_output_dir=FLAGS.root_output_dir,
hparam_dict=hparam_dict,
rounds_per_eval=FLAGS.rounds_per_eval,
rounds_per_checkpoint=FLAGS.rounds_per_checkpoint,
rounds_per_train_eval=2000,
server_state_epoch_update_fn=server_state_update_fn) | 5,352,727 |
def align_with_known_width(val, width: int, lowerBitCntToAlign: int):
"""
Does same as :func:`~.align` just with the known width of val
"""
return val & (mask(width - lowerBitCntToAlign) << lowerBitCntToAlign) | 5,352,728 |
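# Hedged worked example (not part of the original): assuming the module's
# mask(w) helper returns (1 << w) - 1, aligning an 8-bit value to bit 2 simply
# clears its two least significant bits.
print(bin(align_with_known_width(0b10110111, 8, 2)))  # 0b10110100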
def get_email_from_request(request):
"""Use cpg-utils to extract user from already-authenticated request headers."""
user = get_user_from_headers(request.headers)
if not user:
raise web.HTTPForbidden(reason='Invalid authorization header')
return user | 5,352,729 |
def test_get_authorization_header(authorizer):
"""
Get authorization header, confirms expected value
"""
assert authorizer.get_authorization_header() == "Bearer " + TOKEN | 5,352,730 |
def get_kernels(params: List[Tuple[str, int, int, int, int]]) -> List[np.ndarray]:
"""
Create list of kernels
    :param params: list of tuples with the following format: ("kernel name", param1, param2, multiplier, rotation angle)
:return: list of kernels
"""
kernels = [] # type: List[np.ndarray]
for param in params:
if len(param) < 5:
            print('Number of parameters given must be 5, got', param, 'len(', len(param), ') instead')
return None
if param[0] == 'gauss':
kernels.append(rotate_matrix(get_gauss(param[1], param[2]) * param[3], param[4]))
elif param[0] == 'log':
kernels.append(rotate_matrix(get_log(param[1], param[2]) * param[3], param[4]))
elif param[0] == 'sobel':
kernels.append(rotate_matrix(get_sobel(param[1], param[2]) * param[3], param[4]))
elif param[0] == 'ft0':
kernels.append(rotate_matrix(get_ft0(param[1], param[2]) * param[3], param[4]))
elif param[0] == 'ft1':
kernels.append(rotate_matrix(get_ft1(param[1], param[2]) * param[3], param[4]))
elif param[0] == 'ft2c':
kernels.append(rotate_matrix(get_ft2c(param[1], param[2]) * param[3], param[4]))
if len(kernels) == 1:
return kernels[0]
else:
return kernels | 5,352,731 |
def forward_propagation(propagation_start_node, func, x):
"""A forward propagation starting at the `propagation_start_node` and
wrapping the all the composition operations along the way.
Parameters
----------
propagation_start_node : Node
The node where the gradient function (or anything similar) is requested.
func : function
The function to apply at the node (most likely be a composition of functions).
    x : ndarray
A set of parameters for the function.
Returns
-------
Wrapper
The ending wrapper wrapping the propagation end node.
"""
trace_marker = marker_stack.get_marker()
propagation_start_wrapper = new_wrapper(
x, trace_marker, propagation_start_node)
propagation_end_wrapper = func(propagation_start_wrapper)
marker_stack.release_marker(trace_marker)
if isinstance(propagation_end_wrapper, Wrapper) and propagation_end_wrapper._trace_marker == propagation_start_wrapper.trace_marker:
return propagation_end_wrapper._value, propagation_end_wrapper._node
else:
return propagation_end_wrapper, None | 5,352,732 |
def main():
"""
Main function of the script.
"""
args = parse_args()
if args.version:
print("{v}".format(v=__version__))
return 0
config = ConfigFile(args.config_file, CONFIG_FILE_SCHEMA)
if args.help_config:
print(config.help())
return 0
if args.config_file is None:
print("Error: Config file must be specified.")
return 1
print("Using plexmediafixup config file: {file}".
format(file=args.config_file))
try:
config.load()
except ConfigFileError as exc:
print("Error: {}".format(exc))
return 1
plexapi_config_path = config.data['plexapi_config_path'] # required item
direct_connection = config.data['direct_connection'] # required item
server_name = config.data['server_name'] # optional but defaulted item
fixups = config.data['fixups'] # optional but defaulted item
fixup_mgr = FixupManager()
if not plexapi_config_path:
plexapi_config_path = plexapi.CONFIG_PATH
print("Using PlexAPI config file: {file}".
format(file=plexapi_config_path))
plexapi_config = plexapi.config.PlexConfig(plexapi_config_path)
# Verify that the fixups can be loaded
for fixup in fixups:
name = fixup['name'] # required item
enabled = fixup['enabled'] # required item
if enabled:
print("Loading fixup: {name}".format(name=name))
fixup_mgr.get_fixup(name)
if direct_connection:
server_baseurl = plexapi_config.get('auth.server_baseurl', None)
if server_baseurl is None:
print("Error: Parameter auth.server_baseurl is required for "
"direct connection but is not set in PlexAPI config file "
"{file}".
format(file=plexapi_config_path))
return 1
server_token = plexapi_config.get('auth.server_token', None)
if server_token is None:
print("Error: Parameter auth.server_token is required for "
"direct connection but is not set in PlexAPI config file "
"{file}".
format(file=plexapi_config_path))
return 1
print("Connecting directly to Plex Media Server at {url}".
format(url=server_baseurl))
try:
with Watcher() as w:
# If the PMS is not reachable on the network, this raises
# requests.exceptions.ConnectionError (using max_retries=0 and
# the connect and read timeout configured in the plexapi config
# file as plexapi.timeout).
plex = plexapi.server.PlexServer()
except (plexapi.exceptions.PlexApiException,
requests.exceptions.RequestException) as exc:
print("Error: Cannot connect to Plex server at {url}: {msg} "
"({w.debug_str})".
format(url=server_baseurl, msg=exc, w=w))
return 1
print("Connected directly to Plex Media Server at {url}".
format(url=server_baseurl))
else:
myplex_username = plexapi_config.get('auth.myplex_username', None)
if not myplex_username:
print("Error: Parameter auth.myplex_username is required for "
"indirect connection but is not set in PlexAPI config file "
"{file}".
format(file=plexapi_config_path))
return 1
myplex_password = plexapi_config.get('auth.myplex_password', None)
        if not myplex_password:
print("Error: Parameter auth.myplex_password is required for "
"indirect connection but is not set in PlexAPI config file "
"{file}".
format(file=plexapi_config_path))
return 1
if not server_name:
print("Error: Parameter server_name is required for "
"indirect connection but is not set in plexmediafixup "
"config file {file}".
format(file=config.filepath))
return 1
print("Connecting indirectly to server {srv} of Plex account {user}".
format(srv=server_name, user=myplex_username))
try:
with Watcher() as w:
account = plexapi.myplex.MyPlexAccount(
myplex_username, myplex_password)
except (plexapi.exceptions.PlexApiException,
requests.exceptions.RequestException) as exc:
print("Error: Cannot login to Plex account {user}: {msg} "
"({w.debug_str})".
format(user=myplex_username, msg=exc, w=w))
return 1
try:
with Watcher() as w:
plex = account.resource(server_name).connect()
except (plexapi.exceptions.PlexApiException,
requests.exceptions.RequestException) as exc:
print("Error: Cannot connect to server {srv} of Plex account "
"{user}: {msg} ({w.debug_str})".
format(srv=server_name, user=myplex_username, msg=exc, w=w))
return 1
print("Connected indirectly to server {srv} of Plex account {user}".
format(srv=server_name, user=myplex_username))
for fixup in fixups:
name = fixup['name'] # required item
enabled = fixup['enabled'] # required item
dryrun = args.dryrun
fixup_kwargs = fixup.get('kwargs', dict())
if enabled:
fixup = fixup_mgr.get_fixup(name)
print("Executing fixup: {name} (dryrun={dryrun})".
format(name=name, dryrun=dryrun))
rc = fixup.run(plex=plex, dryrun=dryrun, verbose=args.verbose,
config=config, fixup_kwargs=fixup_kwargs)
if rc:
print("Error: Fixup {name} has encountered errors - aborting".
format(name=name))
return 1
print("Fixup succeeded: {name} (dryrun={dryrun})".
format(name=name, dryrun=dryrun))
return 0 | 5,352,733 |
def read_entities():
"""
    Find the list of entities.
:return:
"""
    entities = Entity.objects.only('name', 'id')
    return build_response.sent_json(entities.to_json()) | 5,352,734 |
def available(unit, item) -> bool:
"""
If any hook reports false, then it is false
"""
for skill in unit.skills:
for component in skill.components:
if component.defines('available'):
if component.ignore_conditional or condition(skill, unit):
if not component.available(unit, item):
return False
return True | 5,352,735 |
def step_impl(context, easting, northing, index):
"""
:type context: behave.runner.Context
:param easting: The easting value from the trajectory at index.
:type easting: float
:param northing: The northing value for the trajectory at index.
:type northing: float
:param index: The index of the well trajectory being sampled.
:type index: int
"""
assert_that(context.easting_array[index], close_to(easting, close_to_delta(context.well)))
assert_that(context.northing_array[index], close_to(northing, close_to_delta(context.well))) | 5,352,736 |
def cleanArray(arr):
"""Clean an array or list from unsupported objects for plotting.
Objects are replaced by None, which is then converted to NaN.
"""
try:
return np.asarray(arr, float)
except ValueError:
return np.array([x if isinstance(x, number_types) else None
for x in arr], float) | 5,352,737 |
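# Hedged worked example (not part of the original): it assumes the module-level
# `number_types` used above covers int and float, so other entries become NaN.
print(cleanArray([1, 'not-a-number', 2.5]))  # [ 1.   nan  2.5]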
def translate_node_in_object_space(node, translation_list, **kwargs):
"""
Translates given node with the given translation vector
:param node: str
:param translation_list: list(float, float, float)
"""
raise NotImplementedError() | 5,352,738 |
def test_datetime(snapshot):
"""Simple test with datetime"""
expect = datetime.datetime(2017, 11, 19)
snapshot.assert_match(expect) | 5,352,739 |
def parse_args():
"""Main function for parsing args. Utilizes the 'check_config'
function from the config module to ensure an API key has been
passed. If not, user is prompted to conduct initial configuration
for pwea.
If a valid configuration is found (there is currently no validity
check for the API key, argparser will look for location and
optional arguments. Location is required. Default weather report
is current, forecast can be specified using -t)
"""
if check_config():
parser = argparse.ArgumentParser(
usage='pwea [location] <optional args>',
description="description: pwea is a simple tool used to retrieve"
"current and forecasted weather information")
parser.add_argument('location', nargs='+',
help="Input a city name or US/UK/Canadian postal code")
parser.add_argument("-t" "--type", dest="report_type", default="current",
help="Acceptable report types are 'current' or 'forecast'. Default is 'current'")
parser.add_argument('--config', dest='config', default=None,
help="Pass your API key for https://weatherapi.com")
args = parser.parse_args()
args.location = ' '.join(args.location)
args.report_type = args.report_type.lower()
else:
parser = argparse.ArgumentParser(
usage='No API key found in ~/.config/pwearc. Please set your API key using pwea --config <API_KEY>',
description="description: pwea is a simple tool used to retrieve"
"current and forecasted weather information")
parser.add_argument('--config', dest='config', required=True,
help="Pass your API key for https://weatherapi.com")
args = parser.parse_args()
return args | 5,352,740 |
def test_tcp_server_ssl(sdc_builder, sdc_executor):
"""Runs a test using the TCP server origin pipeline with Enable TLS set and asserts that the file is received"""
expected_msg = get_expected_message(TCP_SSL_FILE_PATH)
# Start TCP server pipeline.
pipeline_builder = sdc_builder.get_pipeline_builder()
tcp_server = pipeline_builder.add_stage('TCP Server')
tcp_server.set_attributes(data_format='TEXT',
port=[str(TCP_PORT)],
tcp_mode='DELIMITED_RECORDS',
use_tls=True,
keystore_file=TCP_KEYSTORE_FILE_PATH,
keystore_type='JKS',
keystore_password='password',
keystore_key_algorithm='SunX509',
use_default_protocols=True,
use_default_cipher_suites=True)
trash = pipeline_builder.add_stage('Trash')
tcp_server >> trash
tcp_server_ssl_pipeline = pipeline_builder.build(title='TCP Server SSL pipeline')
sdc_executor.add_pipeline(tcp_server_ssl_pipeline)
try:
# Capture snapshot for HTTP Server pipeline.
snapshot_cmd = sdc_executor.capture_snapshot(tcp_server_ssl_pipeline, start_pipeline=True, batches=1,
batch_size=2, wait=False)
# Send twice the data. Even though batch_size = 2, 2 batches are sent (1 for each connection).
send_tcp_ssl_file(sdc_executor)
send_tcp_ssl_file(sdc_executor)
# Wait for snapshot to finish then stop the pipeline in order to get the summary later.
snapshot = snapshot_cmd.wait_for_finished().snapshot
sdc_executor.stop_pipeline(tcp_server_ssl_pipeline)
# Verify the results. First check number of batches received is 2.
history = sdc_executor.get_pipeline_history(tcp_server_ssl_pipeline)
assert history.latest.metrics.counter('pipeline.batchCount.counter').count == 2
# Then check last batch received in snapshot contains the expected message.
origin_data = snapshot[tcp_server.instance_name]
assert len(origin_data.output) == 1
assert str(origin_data.output[0].field['text']) in expected_msg.decode("utf-8")
finally:
if sdc_executor.get_pipeline_status(tcp_server_ssl_pipeline) == 'RUNNING':
sdc_executor.stop_pipeline(tcp_server_ssl_pipeline) | 5,352,741 |
def check_in_federated_context():
"""Checks if the current context is a `tff.program.FederatedContext`."""
context_stack = get_context_stack.get_context_stack()
if not isinstance(context_stack.current, FederatedContext):
raise ValueError(
'Expected the current context to be a `tff.program.FederatedContext`, '
f'found \'{type(context_stack.current)}\'.') | 5,352,742 |
def _ensure_args(G, source, method, directed,
return_predecessors, unweighted, overwrite, indices):
"""
    Ensures the args passed in are usable for the calling API and returns the
args with proper defaults if not specified, or raises TypeError or
ValueError if incorrectly specified.
"""
# checks common to all input types
if (method is not None) and (method != "auto"):
raise ValueError("only 'auto' is currently accepted for method")
if (indices is not None) and (type(indices) == list):
raise ValueError("indices currently cannot be a list-like type")
if (indices is not None) and (source is not None):
raise TypeError("cannot specify both 'source' and 'indices'")
if (indices is None) and (source is None):
raise TypeError("must specify 'source' or 'indices', but not both")
G_type = type(G)
# Check for Graph-type inputs
if (G_type in [Graph, DiGraph]) or is_nx_graph_type(G_type):
exc_value = "'%s' cannot be specified for a Graph-type input"
if directed is not None:
raise TypeError(exc_value % "directed")
if return_predecessors is not None:
raise TypeError(exc_value % "return_predecessors")
if unweighted is not None:
raise TypeError(exc_value % "unweighted")
if overwrite is not None:
raise TypeError(exc_value % "overwrite")
directed = False
# Check for non-Graph-type inputs
else:
if (directed is not None) and (type(directed) != bool):
raise ValueError("'directed' must be a bool")
if (return_predecessors is not None) and \
(type(return_predecessors) != bool):
raise ValueError("'return_predecessors' must be a bool")
if (unweighted is not None) and (unweighted is not True):
raise ValueError("'unweighted' currently must be True if "
"specified")
if (overwrite is not None) and (overwrite is not False):
raise ValueError("'overwrite' currently must be False if "
"specified")
source = source if source is not None else indices
if return_predecessors is None:
return_predecessors = True
return (source, directed, return_predecessors) | 5,352,743 |
def read_siemens_scil_b0():
""" Load Siemens 1.5T b0 image form the scil b0 dataset.
Returns
-------
img : obj,
Nifti1Image
"""
file = pjoin(dipy_home,
'datasets_multi-site_all_companies',
'1.5T',
'Siemens',
'b0.nii.gz')
return nib.load(file) | 5,352,744 |
def twin_primes():
"""
Twin Primes: Primes that are two more or less than another prime\n
OEIS A001097
"""
P = primes()
a,b,c = next(P),next(P),next(P)
while True:
if b-2 == a or b+2 == c:
yield b
a,b,c = b,c,next(P) | 5,352,745 |
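# Hedged usage sketch (not part of the original): assuming the primes()
# generator above yields 2, 3, 5, 7, ..., the first eight twin primes are:
from itertools import islice

print(list(islice(twin_primes(), 8)))  # [3, 5, 7, 11, 13, 17, 19, 29]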
def gdpcleaner(gdpdata: pd.DataFrame):
"""
Author: Gabe Fairbrother
Remove spurious columns, Rename relevant columns, Remove NaNs
Parameters
----------
gdpdata: DataFrame
a loaded dataframe based on a downloaded Open Government GDP at basic prices dataset (https://open.canada.ca/en/open-data)
Returns
-------
DataFrame: A cleaned and simplified DataFrame of the relevant columns for summary and visualization.
Possible columns (dataset dependent) include:
Date: Date of data
Location: Province or Jurisdiction
Scale: Scale of the Value column (Percent, Millions, etc)
Unit: Unit of Measure
Value: Portion of the GDP for the Location and Date
NAICS_Class: North American Industry Classification System ID
Industry: Industry of Record
Sub-sector: Non-profit sub-sector
Special_Industry: Special Industry Aggregate
Examples
--------
>>> result = gdpcleaner(example_data)
"""
#Check for DataFrame input argument
if (isinstance(gdpdata, pd.core.frame.DataFrame)):
pass
else:
raise TypeError("Argument must be a Pandas DataFrame")
cleaned_frame = gdpdata
#Remove spurious columns
spurious = ['DGUID', 'UOM_ID', 'SCALAR_ID', 'VECTOR', 'COORDINATE',
'STATUS', 'SYMBOL', 'TERMINATED', 'DECIMALS', 'Value', 'Seasonal adjustment']
for column in cleaned_frame.columns :
if column in spurious:
cleaned_frame = cleaned_frame.drop(columns=column)
#Drop any rows with null value
cleaned_frame = cleaned_frame.dropna()
#Rename relevant columns
cleaned_frame = cleaned_frame.rename(columns={'REF_DATE': 'Date', 'GEO': 'Location',
'SCALAR_FACTOR': 'Scale', 'VALUE': 'Value', 'UOM': 'Unit'})
for column in cleaned_frame.columns:
if 'NAICS' in column:
cleaned_frame = cleaned_frame.rename(columns={column: 'NAICS_Class'})
if 'aggregat' in column: #Not a spelling mistake, there are multiple similar column headers in different datasets
cleaned_frame = cleaned_frame.rename(columns={column: 'Special_Industry'})
return cleaned_frame | 5,352,746 |
def sort_with_num(path):
"""Extract leading numbers in a file name for numerical sorting."""
fname = path.name
    nums = re.match(r'^\d+', fname)
if nums:
return int(nums[0])
else:
return 0 | 5,352,747 |
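# Hedged usage sketch (not part of the original): names without a leading
# number sort first (key 0), the rest sort by their numeric prefix.
from pathlib import Path

paths = [Path("10_b.png"), Path("2_a.png"), Path("readme.txt")]
print([p.name for p in sorted(paths, key=sort_with_num)])
# ['readme.txt', '2_a.png', '10_b.png']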
def ship_new(name):
"""Creates a new ship."""
click.echo(f"Created ship {name}") | 5,352,748 |
def test_get_representative_trip_for_route(request):
"""
Tests that parameters are read
"""
print("\n--Starting:", request.node.name)
transit_network = Transit.load_all_gtfs_feeds(
path = os.path.join(root_dir, "data", "external", "gtfs", "2015"),
roadway_network= roadway_network,
parameters=parameters
)
RanchLogger.info("transit feed has {} routes, they are {}".format(transit_network.feed.routes.route_id.nunique(), transit_network.feed.routes.route_short_name.unique()))
transit_network.get_representative_trip_for_route()
transit_network.feed.trips.to_csv(
os.path.join(scratch_dir, 'test_trips.txt'),
index = False
) | 5,352,749 |
def prob8(cur: sqlite3.Cursor) -> pd.DataFrame:
"""Give a list of the services which connect the stops 'Craiglockhart' and
'Tollcross'.
Parameters
----------
cur (sqlite3.Cursor) : The cursor for the database we're accessing.
Returns
-------
(pd.DataFrame) : Table with the solution.
"""
cur.execute("""SELECT DISTINCT r1.company, r1.num
FROM route AS r1
JOIN route AS r2 ON (r1.company = r2.company AND r1.num = r2.num)
JOIN stops AS stops1 ON stops1.id = r1.stop
JOIN stops as stops2 ON stops2.id = r2.stop
WHERE stops1.name = 'Craiglockhart'
AND stops2.name = 'Tollcross';
""")
return pd.DataFrame(cur.fetchall()) | 5,352,750 |
def get_command(name):
""" return command represented by name """
_rc = COMMANDS[name]()
return _rc | 5,352,751 |
def start_adash(self):
"""Start Adash in background inside VM."""
adash_start_command = (
"./adash-linux-x86_64 --bindhost 0.0.0.0 -bindport 8081 -notoken &"
)
with When(
"Connect to VM and run the Adash in background",
description=f"{adash_start_command}",
):
bash(adash_start_command, self.context.vm_terminal) | 5,352,752 |
def _str_struct(a):
"""converts the structure to a string for logging purposes."""
shape_dtype = lambda x: (jnp.asarray(x).shape, str(jnp.asarray(x).dtype))
return str(jax.tree_map(shape_dtype, a)) | 5,352,753 |
def get_moved_files(dir_path: str) -> Set:
"""
    Get the files (and folders) to be moved, including:
    - folders
    - corrupted images
    - non-image files
    - duplicate images
"""
removed_files = set()
file_map = {}
for file in os.listdir(dir_path):
file_path = os.path.join(dir_path, file)
        # Only process regular files here
if os.path.isfile(file_path):
            # Group by file size: images of different sizes can never be identical,
            # so only images of the same size need to be compared, narrowing the search
size = os.path.getsize(file_path)
file_map.setdefault(size, []).append(file_path)
else:
removed_files.add(file_path)
for files in file_map.values():
duplicate_files = set()
m = len(files)
for i in range(m):
if files[i] in duplicate_files:
continue
            # Handle corrupted image files / non-image files
try:
img1 = Image.open(files[i])
except UnidentifiedImageError:
duplicate_files.add(files[i])
continue
image1 = np.array(img1)
for j in range(i + 1, m):
if files[j] in duplicate_files:
continue
                # Handle corrupted image files / non-image files
try:
img2 = Image.open(files[j])
except UnidentifiedImageError:
duplicate_files.add(files[j])
continue
                # Check whether the image dimensions match
if img1.size == img2.size:
                    # Check whether the image contents are identical
image2 = np.array(img2)
if np.array_equal(image1, image2):
duplicate_files.add(files[j])
removed_files = removed_files | duplicate_files
return removed_files | 5,352,754 |
def delete_interface_address(
api_client, interface_id, address_id, **kwargs
): # noqa: E501
"""delete_interface_address # noqa: E501
Delete interface address details # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> response = await api.delete_interface_address(interface_id, address_id, async_req=True)
:param interface_id int: ID of interface
:param address_id int: ID of address
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: APIResponse or awaitable if async
"""
local_var_params = locals()
collection_formats = {}
path_params = {"interface_id": interface_id, "address_id": address_id}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = api_client.select_header_accept(
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiTokenAuth", "basicAuth"] # noqa: E501
return api_client.call_api(
"/interfaces/system/{interface_id}/addresses/{address_id}",
"DELETE",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="object", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
) | 5,352,755 |
async def set_promo(message: types.Message, state: FSMContext):
"""
    Handler for the /setpromo command.
"""
arg = message.get_args()
if not arg:
return await message.answer(_("Укажите аргумент: промокод. Например: <pre>/set_promo my-promo-code</pre>"),
parse_mode="HTML")
arg = arg.strip()
try:
UUID(arg)
except ValueError:
return await message.answer(_("Промокод не найден"))
promo = await models.Promo.get_or_none(code=arg)
if not promo:
return await message.answer(_("Промокод не найден"))
if promo.owner:
return await message.answer(_("Промокод уже использован"))
user, created = await models.User.get_or_create(telegram_id=message.from_user.id)
promo.owner = user
await promo.save(update_fields=["owner_id"])
await message.answer(_("Промокод активирован! Спасибо 🙌")) | 5,352,756 |
def plot_waterfall(*sigObjs, step=10, xLim:list=None,
Pmin=20, Pmax=None, tmin=0, tmax=None, azim=-72, elev=14,
cmap='jet', winPlot=False, waterfallPlot=True, fill=True,
lines=False, alpha=1, figsize=(20, 8), winAlpha=0,
removeGridLines=False, saveFig=False, bar=False, width=0.70,
size=3, lcol=None, filtered=True):
"""
    This function was kindly provided by Rinaldi Polese Petrolli.
# TO DO
Keyword Arguments:
step {int} -- [description] (default: {10})
xLim {list} -- [description] (default: {None})
Pmin {int} -- [description] (default: {20})
Pmax {[type]} -- [description] (default: {None})
tmin {int} -- [description] (default: {0})
tmax {[type]} -- [description] (default: {None})
azim {int} -- [description] (default: {-72})
elev {int} -- [description] (default: {14})
cmap {str} -- [description] (default: {'jet'})
winPlot {bool} -- [description] (default: {False})
waterfallPlot {bool} -- [description] (default: {True})
fill {bool} -- [description] (default: {True})
lines {bool} -- [description] (default: {False})
alpha {int} -- [description] (default: {1})
figsize {tuple} -- [description] (default: {(20, 8)})
winAlpha {int} -- [description] (default: {0})
removeGridLines {bool} -- [description] (default: {False})
saveFig {bool} -- [description] (default: {False})
bar {bool} -- [description] (default: {False})
width {float} -- [description] (default: {0.70})
size {int} -- [description] (default: {3})
lcol {[type]} -- [description] (default: {None})
filtered {bool} -- [description] (default: {True})
Returns:
[type] -- [description]
"""
realSigObjs = \
_remove_non_(SignalObj, sigObjs, msgPrefix='plot_waterfall:')
if len(realSigObjs) > 0:
figs = plot.waterfall(realSigObjs, step, xLim,
Pmin, Pmax, tmin, tmax, azim, elev,
cmap, winPlot, waterfallPlot, fill,
lines, alpha, figsize, winAlpha,
removeGridLines, saveFig, bar, width,
size, lcol, filtered)
return figs
else:
return | 5,352,757 |
def write_output(
payload: Union[dict, List[dict]],
out_handle: TextIO,
fmt: str = "json",
compact: bool = False,
indent: int = 4,
) -> None:
"""Writes the given dictionary as JSON or YAML to the output handle.
:param payload: Payload to write.
:param out_handle: Output handle.
:param fmt: Output format.
    :param compact: Whether to write a compact JSON or not. Ignored if the
        output format is not JSON.
:param indent: Indentation level (ignored if output ``compact`` is true).
"""
if fmt == "json":
if compact:
json.dump(
payload, out_handle, sort_keys=True, indent=None, separators=(",", ":")
)
else:
json.dump(payload, out_handle, sort_keys=True, indent=indent)
out_handle.write(linesep)
else:
out_handle.write(yaml.dump(payload, default_flow_style=False, indent=indent)) | 5,352,758 |
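# Hedged usage sketch (not part of the original): writing compact JSON to an
# in-memory handle; the YAML branch behaves the same way with fmt="yaml".
import io

buf = io.StringIO()
write_output({"b": 2, "a": 1}, buf, fmt="json", compact=True)
print(buf.getvalue())  # {"a":1,"b":2} followed by a line separator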
def play_game(game):
"""Run the checkers game
This is the entry function that runs the entire game.
"""
game.show_rules()
game.get_player_names()
while True:
curr_player = game.black_name if game.blacks_turn else \
game.white_name
# Prompt for player input
move = game.get_player_move_from()
if move == 'r':
print("{} has resigned. Game over!".format(curr_player))
break
elif move == 'd':
game.move_msg = "{} is offering a draw.".format(curr_player)
if game.draw_accepted():
print("Game ended in a draw!")
break
else:
# Clear move message before the next move
game.move_msg = ""
continue
# Here move contains the number of a square. Convert it to integer in
# internal representation
square = int(move) - 1
# Validate that the piece on the selected square is legal to play
if game.validate_pick(square):
# Prompt player for square to move to
while True:
move_to = game.get_player_move_to()
# Convert it to internal representation
square = int(move_to) - 1
# Validate that the selected square is legal to move to
if game.validate_move_to(square):
game.board.move_piece_to(game, square)
else:
continue
if game.turn_is_complete(square):
# It was a simple move or no more jumps are available
game.game_piece_to_move = None
game.must_jump = False
break
# More jumps are available, which must be taken
game.game_piece_to_move = square
continue
else:
# Return to the top of the game loop to prompt the player again
continue
# The current player's turn has completed. Switch turns.
game.switch_turns()
if game.has_player_lost():
game.board.display_board(game)
winner = game.white_name if game.blacks_turn else game.black_name
print("Congratulations {}! You have won.".format(winner))
break
# Game over
print("Good bye!") | 5,352,759 |
def test_pytest_really_fails():
"""Make sure pytest fails due to incorrect expected output in the .md.
Generate a pytest that will assert.
"""
simulator_status = verify.one_example(
"phmdoctest tests/unexpected_output.md --outfile discarded.py",
want_file_name=None,
pytest_options=["--doctest-modules", "-v"],
junit_family=verify.JUNIT_FAMILY,
)
assert simulator_status.pytest_exit_code == 1
# Look at the returned JUnit XML to see that the test failed at the
# point and for the reason we expected.
# Note that the parsed XML values are all strings.
suite, fails = phmdoctest.tool.extract_testsuite(simulator_status.junit_xml)
assert suite.attrib["tests"] == "1"
assert suite.attrib["errors"] == "0"
assert suite.attrib["failures"] == "1"
assert fails[0].attrib["name"] == "test_code_4_output_17" | 5,352,760 |
def indices(input_file):
"""
Parse the index file or target file and return a list of values.
:return:
"""
index_list = []
line_num = 0
index_file = list(csv.reader(open(input_file), delimiter='\t'))
for line in index_file:
line_num += 1
col_count = len(line)
if col_count > 1 and len(line[0].split("#")[0]) > 1: # Skip any lines that are blank or comments.
tmp_line = []
for i in range(col_count):
try:
line[i] = line[i].split("#")[0] # Strip out end of line comments and white space.
except IndexError:
raise SystemExit(
"There is a syntax error in file {0} on line {1}, column {2} "
.format(input_file, str(line_num), str(i)))
line[i] = re.sub(",", '', line[i]) # Strip out any commas.
tmp_line.append(line[i])
index_list.append(tmp_line)
return index_list | 5,352,761 |
def parse_arguments():
"""
Parse the arguments from the user
"""
parser = argparse.ArgumentParser(
description="omeClust visualization script.\n",
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"adist",
help="the input file D*N, Rows: D features and columns: N samples OR \n" +
"a distance matrix file D*D (rows and columns should be the same and in the same order) \n ",
)
parser.add_argument(
"clusters",
help="the input file D*N, Rows: D features and columns: N samples OR \n" +
"a distance matrix file D*D (rows and columns should be the same and in the same order) \n ",
)
parser.add_argument(
"--metadata",
help="metadata",
)
parser.add_argument(
"--shapeby",
type=str,
help="the input file D*N, Rows: D features and columns: N samples OR \n" +
"a distance matrix file D*D (rows and columns should be the same and in the same order) \n ",
)
parser.add_argument(
"-o", "--output",
help="the output directory\n",
required=True)
parser.add_argument(
"--size-to-plot",
type=int,
dest='size_to_plot',
default=3,
help="Minimum size of cluster to be plotted")
parser.add_argument("--fig-size", nargs=2,
# type=int,
dest='fig_size',
default=[3, 2.5], help="width and height of plots")
parser.add_argument("--point-size",
type=int,
dest='point_size',
default=3, help="width and height of plots")
parser.add_argument("--show",
help="show ordination plot before save\n",
action="store_true",
default=False,
dest='show')
return parser.parse_args() | 5,352,762 |
def checkSequences(numl, rowlen, seqlen):
"""In a square of numbers, represented by the list with
the given row length, look for the top product with length
of seqlen"""
listl=len(numl)
collen=math.ceil(listl/rowlen)
seqind=seqlen-1
log.debug("List length, collen: %d, %d", listl, collen)
ret=[0]
def checkProd(mxprod, prodll, i):
r=None
for prodl in prodll:
prod=reduce(lambda x,y: x*y, prodl)
if prod > mxprod:
log.debug("Found a higher product than previous %d: %d at i=%d", mxprod, prod, i)
mxprod=prod
r=(prod, i, prodl)
return r
for i in range(0, listl):
# Check the first and last value are in the same row
checkrow = (i//rowlen)==((i+seqind)//rowlen) and (i+seqind)<listl
checkcol = (i+(rowlen*seqind)) < listl
checkdiagr = checkrow and (i+seqind+(rowlen*seqind)) < listl
checkdiagl = (i//rowlen)==((i-seqind)//rowlen) and (i-seqind+(rowlen*seqind)) < listl
log.debug("i: %d, check (row,col,diagr,diagl): (%d,%d,%d,%d)",
i, checkrow, checkcol, checkdiagr, checkdiagl)
prodll=[]
r=None
if checkrow:
prodll.append([numl[i+j] for j in range(0,seqlen)])
if checkcol:
prodll.append([numl[i+(j*rowlen)] for j in range(0,seqlen)])
if checkdiagr:
prodll.append([numl[i+j+(j*rowlen)] for j in range(0,seqlen)])
if checkdiagl:
prodll.append([numl[i-j+(j*rowlen)] for j in range(0,seqlen)])
r=checkProd(ret[0], prodll, i)
ret = r if r is not None else ret
return ret | 5,352,763 |
def has_same_facts(ruler_intervals1, ruler_intervals2, D):
"""
Check whether the two same-pattern ruler lists have the same facts at each corresponding ruler-interval
Args:
ruler_intervals1: a list of ruler-intervals
ruler_intervals2: a list of ruler-intervals
D: contain all relational facts
Returns:
True or False
"""
for ruler1, ruler2 in zip(ruler_intervals1, ruler_intervals2):
for predicate in D:
for entity in D[predicate]:
if interval_inclusion_intervallist(ruler1, D[predicate][entity]) and \
not interval_inclusion_intervallist(ruler2, D[predicate][entity]):
return False
return True | 5,352,764 |
def listenInput(username):
"""Listens user input and returns formatted messages (PeerMessage) using generators."""
print('Starting listening user input...')
while (True):
msgToSend = input()
if (len(msgToSend) == 0):
continue
timeStr = datetime.datetime.now().strftime('%H:%M:%S')
yield p2p_msg_pb2.PeerMessage(
name = username,
time = timeStr,
text = msgToSend) | 5,352,765 |
def named_keywords_params_func2(name, age, *info, qq, **kwargs):
"""
#如果函数定义中已经有了一个可变参数,后面跟着的命名关键字参数就不再需要一个特殊分隔符*了
"""
print(name, age, *info, qq, kwargs) | 5,352,766 |
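# Example call with hypothetical values: positional arguments fill name/age,
# extra positionals are collected into *info, qq must be passed by keyword,
# and any remaining keyword arguments land in **kwargs.
named_keywords_params_func2('Alice', 30, 'likes tea', 'plays go',
                            qq='12345678', city='Beijing')
# prints: Alice 30 likes tea plays go 12345678 {'city': 'Beijing'}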
def make_example_scripts_docs(spth, npth, rpth):
"""
Generate rst docs from example scripts. Arguments `spth`, `npth`,
and `rpth` are the top-level scripts directory, the top-level
notebooks directory, and the top-level output directory within the
docs respectively.
"""
# Ensure that output directory exists
mkdir(rpth)
# Iterate over index files
for fp in glob(os.path.join(spth, '*.rst')) + \
glob(os.path.join(spth, '*', '*.rst')):
# Index basename
b = os.path.basename(fp)
# Index dirname
dn = os.path.dirname(fp)
# Name of subdirectory of examples directory containing current index
sd = os.path.split(dn)
# Set d to the name of the subdirectory of the root directory
if dn == spth: # fp is the root directory index file
d = ''
else: # fp is a subdirectory index file
d = sd[-1]
# Path to corresponding subdirectory in docs directory
fd = os.path.join(rpth, d)
# Ensure notebook subdirectory exists
mkdir(fd)
# Filename of index file to be constructed
fn = os.path.join(fd, b)
# Process current index file if corresponding notebook file
# doesn't exist, or is older than index file
if update_required(fp, fn):
print('Converting %s ' % os.path.join(d, b),
end='\r')
# Convert script index to docs index
rst_to_docs_rst(fp, fn)
# Iterate over example scripts
for fp in sorted(glob(os.path.join(spth, '*', '*.py'))):
# Name of subdirectory of examples directory containing current script
d = os.path.split(os.path.dirname(fp))[1]
# Script basename
b = os.path.splitext(os.path.basename(fp))[0]
# Path to corresponding notebook
fn = os.path.join(npth, d, b + '.ipynb')
# Path to corresponding sphinx doc file
fr = os.path.join(rpth, d, b + '.rst')
# Only proceed if script and notebook exist
if os.path.exists(fp) and os.path.exists(fn):
# Convert notebook to rst if notebook is newer than rst
# file or if rst file doesn't exist
if update_required(fn, fr):
fnb = os.path.join(d, b + '.ipynb')
print('Processing %s ' % fnb, end='\r')
script_and_notebook_to_rst(fp, fn, fr)
else:
print('WARNING: script %s or notebook %s not found' %
(fp, fn)) | 5,352,767 |
def main():
    """Check incorrect indexes in undetermined fastq data."""
    undet = argument_parse()
    if undet:
        print 'Start\t|\tCheck incorrect index'
        fq_list = split_fastq(undet)
        print 'Process\t|\tAnalysis undetermined data'
        combined_df = multi_process(fq_list)
        sorted_combined_df = combined_df.sort_values(
            by='count',
            ascending=False,
            inplace=False
        )
        print sorted_combined_df.head(10)
        print 'Process\t|\tWrite out result'
        sorted_combined_df.to_csv('undetermined_top_index.csv', header=False)
        for f in fq_list:
            os.system('rm {}'.format(f))
        print 'End\t|\tCheck incorrect index'
        return True
    else:
        print 'End\t|\tCannot analyze index\n'
        return False | 5,352,768 |
def ruleset_delete(p_engine, p_username, rulesetname, envname):
"""
Delete ruleset from Masking engine
    param1: p_engine: engine name from configuration
    param2: p_username: username used to connect to the engine
    param3: rulesetname: ruleset name
    param4: envname: environment name
    return 0 if deleted, non 0 for error
"""
return ruleset_worker(p_engine=p_engine, p_username=p_username, rulesetname=rulesetname,
envname=envname, function_to_call='do_delete') | 5,352,769 |
def tree_gzw(flag, width, mbl='-', xmin='-', xmax='-', ymin='-', ymax='-', logpath=None, outdir=None, shellscript=None):
"""
| Phase unwrapping tree generation (GZW algorithm)
| Copyright 2008, Gamma Remote Sensing, v3.6 5-Sep-2008 clw/uw
Parameters
----------
flag:
(input) phase unwrapping flag file
width:
number of samples/row
mbl:
maximum branch length (default=32)
xmin:
starting range pixel offset (default = 0)
xmax:
last range pixel offset (default = width-1)
ymin:
starting azimuth row, relative to start (default = 0)
ymax:
last azimuth row, relative to start (default = nlines-1)
logpath: str or None
a directory to write command logfiles to
outdir: str or None
the directory to execute the command in
shellscript: str or None
a file to write the Gamma commands to in shell format
"""
process(['/usr/local/GAMMA_SOFTWARE-20180703/ISP/bin/tree_gzw', flag, width, mbl, xmin, xmax, ymin, ymax],
logpath=logpath, outdir=outdir, shellscript=shellscript) | 5,352,770 |
def upload_to_bucket(client, src_path, dest_bucket_name, dest_path):
"""Upload a file or a directory (recursively) from local file system to specified bucket.
Args:
client: Google Cloud storage client object to ask resources.
src_path: Path to the local file or directory you want to send
dest_bucket_name: Destination bucket name
dest_path: Path where you want to store data inner the bucket
"""
bucket = client.get_bucket(dest_bucket_name)
if os.path.isfile(src_path):
blob = bucket.blob(os.path.join(dest_path, os.path.basename(src_path)))
blob.upload_from_filename(src_path)
return
for item in glob.glob(src_path + '/*'):
if os.path.isfile(item):
blob = bucket.blob(os.path.join(dest_path, os.path.basename(item)))
blob.upload_from_filename(item)
else:
upload_to_bucket(client,
item, dest_bucket_name, os.path.join(
dest_path, os.path.basename(item))) | 5,352,771 |
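# Usage sketch with placeholder names; assumes application-default credentials
# and an existing destination bucket.
from google.cloud import storage

client = storage.Client()
upload_to_bucket(client,
                 src_path='local_data',              # a file or a directory
                 dest_bucket_name='my-example-bucket',
                 dest_path='backups/2024-01-01')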
def config_from_file(file_name):
"""Load and return json from file."""
with open(file_name) as config_file:
config = ujson.load(config_file)
return config | 5,352,772 |
def setMinGap(typeID, minGap):
"""setMinGap(string, double) -> None
Sets the offset (gap to front vehicle if halting) of vehicles of this type.
"""
traci._sendDoubleCmd(tc.CMD_SET_VEHICLETYPE_VARIABLE, tc.VAR_MINGAP, typeID, minGap) | 5,352,773 |
def build_yaml_object(
dataset_id: str,
table_id: str,
config: dict,
schema: dict,
metadata: dict = dict(),
columns_schema: dict = dict(),
partition_columns: list = list(),
):
"""Build a dataset_config.yaml or table_config.yaml
Args:
dataset_id (str): The dataset id.
table_id (str): The table id.
config (dict): A dict with the `basedosdados` client configurations.
schema (dict): A dict with the JSON Schema of the dataset or table.
metadata (dict): A dict with the metadata of the dataset or table.
columns_schema (dict): A dict with the JSON Schema of the columns of
the table.
partition_columns (list): A list with the partition columns of the
table.
Returns:
CommentedMap: A YAML object with the dataset or table metadata.
"""
properties: dict = schema["properties"]
definitions: dict = schema["definitions"]
# Drop all properties without yaml_order
properties = {
key: value for key, value in properties.items() if value.get("yaml_order")
}
# Add properties
yaml = add_yaml_property(
yaml=ryaml.CommentedMap(),
properties=properties,
definitions=definitions,
metadata=metadata,
)
# Add columns
if metadata.get("columns"):
yaml["columns"] = []
for metadatum in metadata.get("columns"):
properties = add_yaml_property(
yaml=ryaml.CommentedMap(),
properties=columns_schema["properties"],
definitions=columns_schema["definitions"],
metadata=metadatum,
has_column=True,
)
yaml["columns"].append(properties)
    # Add partitions in case of new dataset/table or local overwriting
if partition_columns and partition_columns != ["[]"]:
yaml["partitions"] = ""
for local_column in partition_columns:
for remote_column in yaml["columns"]:
if remote_column["name"] == local_column:
remote_column["is_partition"] = True
yaml["partitions"] = ", ".join(partition_columns)
# Nullify `partitions` field in case of other-than-None empty values
if yaml.get("partitions") == "":
yaml["partitions"] = None
# Add dataset_id and table_id
yaml["dataset_id"] = dataset_id
if table_id:
yaml["table_id"] = table_id
# Add gcloud config variables
yaml["source_bucket_name"] = str(config.get("bucket_name"))
yaml["project_id_prod"] = str(
config.get("gcloud-projects", {}).get("prod", {}).get("name")
)
yaml["project_id_staging"] = str(
config.get("gcloud-projects", {}).get("staging", {}).get("name")
)
return yaml | 5,352,774 |
def read_candidate_data_list(file, path=IEDC_paths.candidates):
"""
Will read a candidate file and return its data.
:param file: Filename of the file to process
:param path: Path of the file
    :return: Dataframe with the contents of the file's 'Data' sheet
"""
# make it a proper path
file = os.path.join(path, file)
data = pd.read_excel(file, sheet_name='Data')
return data | 5,352,775 |
def make_cumulative(frame, filedate, unit):
"""Create a cumulative graph of cases over time"""
gb = frame.groupby("Accurate_Episode_Date").agg(patients=("Row_ID", "count"))
gb = gb.resample("D").last().fillna(0).reset_index()
max_date = gb["Accurate_Episode_Date"].max().strftime("%Y-%m-%d")
gb["cumulative"] = gb.patients.cumsum().astype(int)
print(gb)
print(gb.info())
ax = sns.lineplot(
data=gb, x="Accurate_Episode_Date", y="cumulative", linewidth=2, color="red"
)
ax.set(
ylabel="Cumulative case count",
xlabel="Date",
title=f"{unit} Cumulative Cases by Episode Date ({max_date})",
)
ax2 = plt.twinx()
sns.lineplot(
data=gb, x="Accurate_Episode_Date", y="patients", ax=ax2, linewidth=0.5
)
ax2.set(ylim=(0, gb["patients"].max() * 2))
plt.gcf().autofmt_xdate()
fname = GRAPHDIR / Path(f"{filedate}-cumulative.png")
ax.figure.savefig(fname)
return fname | 5,352,776 |
def most_common(l):
""" Helper function.
:l: List of strings.
:returns: most common string.
"""
# another way to get max of list?
#from collections import Counter
#data = Counter(your_list_in_here)
#data.most_common() # Returns all unique items and their counts
#data.most_common(1)
count = 0
answer = ''
for element in l:
if l.count(element) > count:
count = l.count(element)
answer = element
return answer | 5,352,777 |
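# Quick check of most_common against the Counter-based alternative mentioned
# in the comments above; on ties the two may pick different (equally common)
# elements.
from collections import Counter

words = ['a', 'b', 'a', 'c', 'a', 'b']
assert most_common(words) == 'a'
assert Counter(words).most_common(1)[0][0] == 'a'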
def reduce_avg(reduce_target, lengths, dim):
"""
Args:
reduce_target : shape(d_0, d_1,..,d_dim, .., d_k)
lengths : shape(d0, .., d_(dim-1))
dim : which dimension to average, should be a python number
"""
shape_of_lengths = lengths.get_shape()
shape_of_target = reduce_target.get_shape()
if len(shape_of_lengths) != dim:
raise ValueError(('Second input tensor should be rank %d, ' +
'while it got rank %d') % (dim, len(shape_of_lengths)))
if len(shape_of_target) < dim+1 :
raise ValueError(('First input tensor should be at least rank %d, ' +
'while it got rank %d') % (dim+1, len(shape_of_target)))
rank_diff = len(shape_of_target) - len(shape_of_lengths) - 1
mxlen = tf.shape(reduce_target)[dim]
mask = mkMask(lengths, mxlen)
if rank_diff!=0:
len_shape = tf.concat(axis=0, values=[tf.shape(lengths), [1]*rank_diff])
mask_shape = tf.concat(axis=0, values=[tf.shape(mask), [1]*rank_diff])
else:
len_shape = tf.shape(lengths)
mask_shape = tf.shape(mask)
lengths_reshape = tf.reshape(lengths, shape=len_shape)
mask = tf.reshape(mask, shape=mask_shape)
mask_target = reduce_target * tf.cast(mask, dtype=reduce_target.dtype)
red_sum = tf.reduce_sum(mask_target, axis=[dim], keep_dims=False)
red_avg = red_sum / (tf.to_float(lengths_reshape) + 1e-30)
return red_avg | 5,352,778 |
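# A NumPy sketch of the masked-average semantics that reduce_avg implements
# (illustration only, not the TensorFlow code above): positions at or beyond
# each sequence length are masked out before averaging over `dim`.
import numpy as np

def masked_mean(x, lengths, dim):
    mxlen = x.shape[dim]
    mask = (np.arange(mxlen) < lengths[..., None]).astype(x.dtype)
    while mask.ndim < x.ndim:          # broadcast over trailing feature dims
        mask = mask[..., None]
    summed = (x * mask).sum(axis=dim)
    denom = lengths.astype(x.dtype)
    while denom.ndim < summed.ndim:
        denom = denom[..., None]
    return summed / (denom + 1e-30)

x = np.arange(2 * 5 * 3, dtype=np.float32).reshape(2, 5, 3)
lengths = np.array([2, 4])
print(masked_mean(x, lengths, dim=1))  # averages only the first 2 and 4 steps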
def _near_mod_2pi(e, t, atol=_DEFAULT_ATOL):
"""Returns whether a value, e, translated by t, is equal to 0 mod 2 * pi."""
return _near_mod_n(e, t, n=2 * np.pi, atol=atol) | 5,352,779 |
def create_test_votes_data(votes_table):
"""
Populates a votes table with test data
:param votes_table: table object
"""
votes_table.put_item(
Item={
"User": "user_1",
"TopicKey": "project_a/topic_aaa",
"ProjectName": "project_a",
"Topic": "topic_aaa",
"LastVote": "1111",
"VoteCount": 10,
}
)
votes_table.put_item(
Item={
"User": "user_1",
"TopicKey": "project_a/topic_bbb",
"ProjectName": "project_a",
"Topic": "topic_bbb",
"LastVote": "2222",
"VoteCount": 20,
}
)
votes_table.put_item(
Item={
"User": "user_1",
"TopicKey": "project_b/topic_ccc",
"ProjectName": "project_b",
"Topic": "topic_ccc",
"LastVote": "3333",
"VoteCount": 30,
"VoteHidden": True,
}
)
votes_table.put_item(
Item={
"User": "user_2",
"TopicKey": "project_c/topic_ddd",
"ProjectName": "project_c",
"Topic": "topic_ddd",
"LastVote": "4444",
"VoteCount": 40,
}
) | 5,352,780 |
def create_returns_similarity(strategy: QFSeries, benchmark: QFSeries, mean_normalization: bool = True,
std_normalization: bool = True, frequency: Frequency = None) -> KDEChart:
"""
Creates a new returns similarity chart. The frequency is determined by the specified returns series.
Parameters
----------
strategy: QFSeries
The strategy series to plot.
benchmark: QFSeries
The benchmark series to plot.
mean_normalization: bool
Whether to perform mean normalization on the series data.
std_normalization: bool
Whether to perform variance normalization on the series data.
frequency: Frequency
Returns can be aggregated in to specific frequency before plotting the chart
Returns
-------
KDEChart
A newly created KDEChart instance.
"""
chart = KDEChart()
colors = Chart.get_axes_colors()
if frequency is not None:
aggregate_strategy = get_aggregate_returns(strategy.to_simple_returns(), frequency)
aggregate_benchmark = get_aggregate_returns(benchmark.to_simple_returns(), frequency)
else:
aggregate_strategy = strategy.to_simple_returns()
aggregate_benchmark = benchmark.to_simple_returns()
scaled_strategy = preprocessing.scale(
aggregate_strategy, with_mean=mean_normalization, with_std=std_normalization)
strategy_data_element = DataElementDecorator(
scaled_strategy, bw="scott", shade=True, label=strategy.name, color=colors[0])
chart.add_decorator(strategy_data_element)
scaled_benchmark = preprocessing.scale(
aggregate_benchmark, with_mean=mean_normalization, with_std=std_normalization)
benchmark_data_element = DataElementDecorator(
scaled_benchmark, bw="scott", shade=True, label=benchmark.name, color=colors[1])
chart.add_decorator(benchmark_data_element)
# Add a title.
title = _get_title(mean_normalization, std_normalization, frequency)
title_decorator = TitleDecorator(title, key="title")
chart.add_decorator(title_decorator)
chart.add_decorator(AxesLabelDecorator("Returns", "Similarity"))
return chart | 5,352,781 |
def info(path):
"""
Debug sideload status.
"""
_echo_table("System Info:", [system_info()])
_echo_table("Word Installation:", [office_installation("word")])
net_shares = get_net_shares()
_echo_table("Net Shares:", net_shares)
title = rf"HKEY_CURRENT_USER\{SUBKEY_OFFICE}\{OFFICE_SUBKEY_PROVIDER}:"
rv = filter(
lambda v: v["attribute"] == "UniqueId",
enum_reg(_safe_open_key(title, OFFICE_SUBKEY_PROVIDER)),
)
_echo_table(title, rv)
title = rf"HKEY_CURRENT_USER\{SUBKEY_OFFICE}\{OFFICE_SUBKEY_CATALOG}:"
rv = enum_reg(_safe_open_key(title, OFFICE_SUBKEY_CATALOG))
_echo_table(title, rv)
urls = []
for item in rv:
if item["attribute"] == "Url":
urls.append(item["value"])
url_to_path = {local_server_url(v["netname"]): v["path"] for v in net_shares}
paths = set()
paths.add(Path(path))
for u in urls:
p = url_to_path.get(u)
if p:
paths.add(Path(p))
for path in paths:
addins = []
for p in path.glob("*.xml"):
d, _ = load_manifest(str(p))
addins.append(d)
_echo_table(f"Office Add-ins in `{path}`:", addins) | 5,352,782 |
def apply_colormap_on_image(org_im, activation, colormap_name='viridis', alpha=.4, thresh=30):
"""
Apply heatmap on image
Args:
        org_im (PIL img or numpy arr): Original image
        activation (numpy arr): Activation map (grayscale) 0-255
        colormap_name (str): Name of the colormap
        alpha (float): Opacity of the heatmap overlay
        thresh (int): Activation values below this threshold stay fully transparent
"""
import matplotlib.cm as mpl_color_map
from PIL import Image
org_im = Image.fromarray(to_img(org_im))
# Get colormap
color_map = mpl_color_map.get_cmap(colormap_name)
no_trans_heatmap = color_map(activation)
# Change alpha channel in colormap to make sure original image is displayed
heatmap = copy.copy(no_trans_heatmap)
heatmap[:, :, 3] = alpha
heatmap[:, :, 3][activation < thresh] = 0
heatmap = Image.fromarray((heatmap * 255).astype(np.uint8))
no_trans_heatmap = Image.fromarray((no_trans_heatmap * 255).astype(np.uint8))
    # Apply heatmap on image
heatmap_on_image = Image.new("RGBA", org_im.size)
heatmap_on_image = Image.alpha_composite(heatmap_on_image, org_im.convert('RGBA'))
heatmap_on_image = Image.alpha_composite(heatmap_on_image, heatmap)
no_trans_heatmap = to_img(no_trans_heatmap)
heatmap_on_image = to_img(heatmap_on_image)
return no_trans_heatmap, heatmap_on_image | 5,352,783 |
def player_stats(cli, nick, chan, rest):
"""Gets the stats for the given player and role or a list of role totals if no role is given."""
if (chan != nick and var.LAST_PSTATS and var.PSTATS_RATE_LIMIT and
var.LAST_PSTATS + timedelta(seconds=var.PSTATS_RATE_LIMIT) >
datetime.now()):
cli.notice(nick, ('This command is rate-limited. Please wait a while '
'before using it again.'))
return
if chan != nick:
var.LAST_PSTATS = datetime.now()
if var.PHASE not in ('none', 'join'):
cli.notice(nick, 'Wait until the game is over to view stats.')
return
params = rest.split()
# Check if we have enough parameters
if params:
user = params[0]
else:
user = nick
# Find the player's account if possible
luser = user.lower()
lusers = {k.lower(): v for k, v in var.USERS.items()}
if luser in lusers:
acc = lusers[luser]['account']
if acc == '*':
if luser == nick.lower():
cli.notice(nick, 'You are not logged in to NickServ.')
else:
cli.notice(nick, user + ' is not logged in to NickServ.')
return
else:
acc = user
# List the player's total games for all roles if no role is given
if len(params) < 2:
if chan == nick:
pm(cli, nick, var.get_player_totals(acc))
else:
cli.msg(chan, var.get_player_totals(acc))
else:
role = ' '.join(params[1:])
# Attempt to find the player's stats
if chan == nick:
pm(cli, nick, var.get_player_stats(acc, role))
else:
cli.msg(chan, var.get_player_stats(acc, role)) | 5,352,784 |
def _cli():
"""
command line interface
:return:
"""
parser = generate_parser()
args = parser.parse_args()
return interface(args.bids_dir,
args.output_dir,
args.aseg,
args.subject_list,
args.session_list,
args.collect,
args.ncpus,
args.stage,
args.bandstop,
args.max_cortical_thickness,
args.check_outputs_only,
args.t1_brain_mask,
args.t2_brain_mask,
args.study_template,
args.t1_reg_method,
args.cleaning_json,
args.print,
args.ignore_expected_outputs,
args.multi_template_dir,
args.norm_method,
args.norm_gm_std_dev_scale,
args.norm_wm_std_dev_scale,
args.norm_csf_std_dev_scale,
args.make_white_from_norm_t1,
args.single_pass_pial,
args.registration_assist,
args.freesurfer_license) | 5,352,785 |
def run_experiment(parpath, subjname, run_id):
"""
    Main function to run the full experiment.
    For each new participant, a new run sequence will be generated
    and saved into the output pickle file.
    run_id is selected from 1-8.
"""
if not os.path.isfile(pjoin(parpath, 'RecordStimuli', subjname+'.pkl')):
output = {}
output['subjname'] = subjname
output['run_ids'] = [run_id]
runsequences = np.arange(8)
np.random.shuffle(runsequences)
output['run_sequence'] = runsequences
else:
with open(pjoin(parpath, 'RecordStimuli', subjname+'.pkl'), 'rb') as f:
output = pickle.load(f)
output['run_ids'].append(run_id)
with open(pjoin(parpath, 'RecordStimuli', subjname+'.pkl'), 'wb') as f:
pickle.dump(output, f)
# Prepare window
win = visual.Window([800,600],
units='pix',
fullscr=False,
color=[0,0,0],
)
# Start stimuli
# Instruction
show_instruction(win)
# Show stimuli
show_imgblocks(win, parpath, output['run_sequence'][run_id-1])
win.close() | 5,352,786 |
def statusize():
"""Posts a status from the web."""
db = get_session(current_app)
user_id = session.get('user_id')
if not user_id:
return forbidden('You must be logged in to statusize!')
user = db.query(User).get(user_id)
message = request.form.get('message', '')
if not message:
return page_not_found('You cannot statusize nothing!')
status = Status(user_id=user.id, content=message, content_html=message)
project = request.form.get('project', '')
if project:
project = db.query(Project).filter_by(id=project).first()
if project:
status.project_id = project.id
# TODO: reply handling
db.add(status)
db.commit()
# Try to go back from where we came.
referer = request.headers.get('referer', url_for('status.index'))
redirect_url = request.form.get('redirect_to', referer)
return redirect(redirect_url) | 5,352,787 |
def savedata():
"""
    Write all collected subdomains to the given output file.
"""
# for item in tldSorting(finalset):
# print(termcolor.colored(item, color='green', attrs=['bold']))
# if ipv4list:
# print(termcolor.colored("\nGot Some IPv4 addresses:\n",
# color='blue', attrs=['bold']))
# for ip in ipv4list:
# if socket.getfqdn(ip) != ip:
# print(termcolor.colored(ip + ' - ' + socket.getfqdn(ip),
# color='green', attrs=['bold']))
print(termcolor.colored(
"\nWriting all the subdomains to given file...\n", color='yellow', attrs=['bold']))
with open(args.output, 'w+') as f:
for item in tldSorting(finalset):
f.write(item + '\n')
print(termcolor.colored("\nWriting Done..\n",
color='yellow', attrs=['bold'])) | 5,352,788 |
def urlsafe_b64decode_nopadding(val):
"""Deal with unpadded urlsafe base64."""
# Yes, it accepts extra = characters.
return base64.urlsafe_b64decode(str(val) + '===') | 5,352,789 |
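# Round-trip sanity check: strip the padding that urlsafe_b64encode produces,
# then decode it again through the padding-tolerant helper.
import base64

encoded = base64.urlsafe_b64encode(b'hello world').rstrip(b'=').decode()
assert urlsafe_b64decode_nopadding(encoded) == b'hello world'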
def _subimg_bbox(img, subimage, xc, yc):
"""
Find the x/y bounding-box pixel coordinates in ``img`` needed to
add ``subimage``, centered at ``(xc, yc)``, to ``img``. Returns
``None`` if the ``subimage`` would extend past the ``img``
boundary.
"""
ys, xs = subimage.shape
y, x = img.shape
y0 = int(yc - (ys - 1) / 2.0)
y1 = y0 + ys
x0 = int(xc - (xs - 1) / 2.0)
x1 = x0 + xs
if (x0 >= 0) and (y0 >= 0) and (x1 < x) and (y1 < y):
return (x0, x1, y0, y1)
else:
return None | 5,352,790 |
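# Example with assumed shapes: place a 3x3 stamp centered at (xc, yc) = (5, 4)
# inside an 8x10 (rows x cols) image; the returned bounds index img directly.
import numpy as np

img = np.zeros((8, 10))
stamp = np.ones((3, 3))
bbox = _subimg_bbox(img, stamp, xc=5, yc=4)
if bbox is not None:            # None means the stamp would fall off the edge
    x0, x1, y0, y1 = bbox       # (4, 7, 3, 6) here
    img[y0:y1, x0:x1] += stamp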
def draw_rectangle(faces, img):
""" Draws the box and text around the intruder's face
Keyword arguments:
faces -- frames that have faces detected
img -- the frame itself, this is what we are drawing on.
"""
for (x, y, w, h) in faces:
cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2) | 5,352,791 |
def _ToDatetimeObject(date_str):
"""Converts a string into datetime object.
Args:
date_str: (str) A date and optional time for the oldest article
allowed. This should be in ISO 8601 format. (yyyy-mm-dd)
Returns:
datetime.datetime Object.
Raises:
ValueError: Invalid date format.
"""
if not date_str:
date_str = datetime.now().strftime('%Y-%m-%d')
if not any(date_.match(date_str) for date_ in DATE_REGEXES):
raise ValueError('Invalid date format %s' % date_str)
return datetime.strptime(date_str, '%Y-%m-%d') | 5,352,792 |
def select_most_uncertain_patch(x_image_pl, y_label_pl, fb_pred, ed_pred, fb_prob_mean_bald, kernel_window, stride_size,
already_select_image_index, previously_selected_binary_mask, num_most_uncert_patch,
method):
"""This function is used to acquire the #most uncertain patches in the pooling set.
Args:
x_image_pl: [Num_Im, Im_h, Im_w,3]
y_label_pl: [Num_Im, Im_h, Im_w,1]
fb_pred: [Num_Im, Im_h, Im_w, 2]
ed_pred: [Num_Im, Im_h, Im_w, 2]
fb_prob_mean_bald: [num_im, imw, imw]
kernel_window: [kh, kw] determine the size of the region
stride_size: int, determine the stride between every two regions
        already_select_image_index: if it's None, this is the first acquisition step;
            otherwise it's the numeric image index for the previously selected patches
previously_selected_binary_mask: [num_already_selected_images, Im_h, Im_w,1]
num_most_uncert_patch: int, number of patches that are selected in each acquisition step
method: acquisition method: 'B', 'C', 'D'
Returns:
        Most_Uncert_Im: [Num_Selected, Im_h, Im_w, 3]
Most_Uncert_FB_GT: [Num_Selected, Im_h, Im_w,1]
Most_Uncert_ED_GT: [Num_Selected, Im_h, Im_w,1]
Most_Uncert_Binary_Mask: [Num_Selected, Im_h, Im_w,1]
Selected_Image_Index: [Num_Selected]
"""
num_im = np.shape(x_image_pl)[0]
uncertainty_map_tot = []
for i in range(num_im):
if method == 'B':
var_stat = get_uncert_heatmap(x_image_pl[i], fb_pred[i])
elif method == 'C':
var_stat = get_entropy_heatmap(fb_pred[i])
elif method == 'D':
var_stat = get_bald_heatmap(fb_prob_mean_bald[i], fb_pred[i])
uncertainty_map_tot.append(var_stat)
uncertainty_map_tot = np.array(uncertainty_map_tot)
if already_select_image_index is None:
print("--------This is the beginning of the selection process-------")
else:
        print(
            "----------Some patches have already been annotated; excluding them from the uncertainty maps")
previously_selected_binary_mask = np.squeeze(previously_selected_binary_mask, axis=-1)
for i in range(np.shape(previously_selected_binary_mask)[0]):
uncertainty_map_single = uncertainty_map_tot[already_select_image_index[i]]
uncertainty_map_updated = uncertainty_map_single * (1 - previously_selected_binary_mask[i])
uncertainty_map_tot[already_select_image_index[i]] = uncertainty_map_updated
selected_numeric_image_index, binary_mask_updated_tot = calculate_score_for_patch(uncertainty_map_tot,
kernel_window, stride_size,
num_most_uncert_patch)
pseudo_fb_la_tot = []
pseudo_ed_la_tot = []
for index, single_selected_image_index in enumerate(selected_numeric_image_index):
pseudo_fb_la, pseudo_ed_la = return_pseudo_label(y_label_pl[single_selected_image_index],
fb_pred[single_selected_image_index],
ed_pred[single_selected_image_index],
binary_mask_updated_tot[index])
pseudo_fb_la_tot.append(pseudo_fb_la)
pseudo_ed_la_tot.append(pseudo_ed_la)
most_uncert_im_tot = x_image_pl[selected_numeric_image_index]
most_uncertain = [most_uncert_im_tot,
pseudo_fb_la_tot,
pseudo_ed_la_tot,
binary_mask_updated_tot,
selected_numeric_image_index]
return most_uncertain | 5,352,793 |
def submit_extraction(connector, host, key, datasetid, extractorname):
"""Submit dataset for extraction by given extractor.
Keyword arguments:
connector -- connector information, used to get missing parameters and send status updates
host -- the clowder host, including http and port, should end with a /
key -- the secret key to login to clowder
datasetid -- the dataset UUID to submit
extractorname -- registered name of extractor to trigger
"""
url = "%sapi/datasets/%s/extractions?key=%s" % (host, datasetid, key)
result = requests.post(url,
headers={'Content-Type': 'application/json'},
data=json.dumps({"extractor": extractorname}),
verify=connector.ssl_verify if connector else True)
result.raise_for_status()
return result.status_code | 5,352,794 |
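# Hypothetical invocation; host, key, dataset id and extractor name are
# placeholders, and passing connector=None simply enables SSL verification.
status = submit_extraction(None,
                           host="https://clowder.example.org/",
                           key="SECRET_KEY",
                           datasetid="5f2b9e0c1a2b3c4d5e6f7a8b",
                           extractorname="ncsa.image.metadata")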
def fix_sys_path():
# XXX - MarkH had the bright idea *after* writing this that we should
# ensure the CVS version of SpamBayes is *not* used to resolve SB imports.
# This would allow us to effectively test the distutils setup script, so
# any modules or files missing from the installed version raise errors.
"""Fix sys.path so that the core SpamBayes package,
*and* the SpamBayes scripts can be imported.
"""
this_dir = os.path.dirname(__file__)
try:
import spambayes.Version
except ImportError:
# Apparently SpamBayes is yet to be "setup.py install"
# We are in 'spambayes\spambayes\test' - 2 parents up should
# do it.
sb_dir = os.path.abspath(
os.path.join(this_dir, "..", ".."))
sys.path.insert(0, sb_dir)
import spambayes.Version
# Now do the same for the sb_* scripts
try:
import sb_server
except ImportError:
# Scripts are usually in "spambayes/scripts" (for an
# installed SpamBayes, they appear to be in
# os.path.join(sys.prefix(), "scripts"), which we may like to
# leverage - however, these test scripts are not currently
# installed).
script_dir = os.path.abspath(
os.path.join(this_dir, "..", "..", "scripts"))
sys.path.insert(0, script_dir)
import sb_server | 5,352,795 |
def fcmp(x, y, precision):
"""fcmp(x, y, precision) -> -1, 0, or 1"""
if math.fabs(x-y) < precision:
return 0
elif x < y:
return -1
return 1 | 5,352,796 |
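# Tolerant float comparison in action: values closer than `precision` compare
# equal, otherwise the sign of the ordering is returned.
assert fcmp(0.1 + 0.2, 0.3, precision=1e-9) == 0
assert fcmp(1.0, 2.0, precision=1e-9) == -1
assert fcmp(2.0, 1.0, precision=1e-9) == 1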
def destroy_nodes(cluster):
"""Call deleteinstance on all nodes in cluster."""
for z in cluster.keys():
for node in cluster[z]:
print "...deleting node %s in zone %s" % (node['name'], node['zone'])
_ = subprocess.call(["gcutil",
"--service_version=%s" % API_VERSION, "deleteinstance",
"-f", "--zone=%s" % node['zone'],
"--delete_boot_pd", node['name']],
stdout=NULL, stderr=NULL) | 5,352,797 |
def parse_vcf_line(line):
"""
Args:
line (str): line in VCF file obj.
Returns:
parsed_line_lst (lst): with tuple elem (chr, pos, ref, alt)
Example:
deletion
pos 123456789012
reference ATTAGTAGATGT
deletion ATTA---GATGT
VCF:
CHROM POS REF ALT
N 4 AGTA A
Bambino:
chr pos ref alt
chr_N 5 GTA -
insertion
pos 1234***56789012
reference ATTA***GTAGATGT
insertion ATTAGTAGTAGATGT
VCF:
CHROM POS REF ALT
N 4 A AGTA
Bambino:
chr pos ref alt
chr_N 5 - GTA
"""
parsed_line_lst = []
# skip header lines
if line.startswith("#"):
return parsed_line_lst
lst = line.rstrip().split("\t")
chr = lst[0]
vcf_pos = int(lst[1])
vcf_ref = lst[3]
vcf_alts = lst[4].split(",") # possibly multi-allelic
if not chr.startswith("chr"):
chr = "chr" + chr
    # skip non-canonical chromosomes
if not is_canonical_chromosome(chr):
return parsed_line_lst
for vcf_alt in vcf_alts:
n = count_padding_bases(vcf_ref, vcf_alt)
pos = vcf_pos + n
if len(vcf_ref) < len(vcf_alt):
ref = "-"
alt = vcf_alt[n:]
parsed_line_lst.append((chr, pos, ref, alt))
elif len(vcf_ref) > len(vcf_alt):
ref = vcf_ref[n:]
alt = "-"
parsed_line_lst.append((chr, pos, ref, alt))
else:
pass # not indel
return parsed_line_lst | 5,352,798 |
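# Demo mirroring the docstring's deletion and insertion cases, but on a
# canonical chromosome so the record is not filtered out; assumes the module's
# count_padding_bases and is_canonical_chromosome helpers are available.
del_line = "1\t4\t.\tAGTA\tA\t.\tPASS\t.\n"
ins_line = "1\t4\t.\tA\tAGTA\t.\tPASS\t.\n"
print(parse_vcf_line(del_line))  # [('chr1', 5, 'GTA', '-')]
print(parse_vcf_line(ins_line))  # [('chr1', 5, '-', 'GTA')]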
def new_auth():
"""
Performs the new authentication song and dance. Waves the dead chicken in the air in just the right way.
@see: https://api-portal.digikey.com/node/188
"""
magic_code = invoke_auth_magic_one()
invoke_auth_magic_two(magic_code)
return | 5,352,799 |