content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M) |
---|---|---|
import torch
import torch.nn as nn
import numpy as np
def sampler(value, percentile):
"""Score based on sampling task model output distribution
Args:
value: The output of the task model
percentile: the (sorted) index of the sample we use
Returns:
The percentile largest distance from the mean of the samples.
"""
softmaxed = nn.functional.softmax(value[0], dim=1)
samples = torch.tensor(
np.array(
list(
torch.utils.data.WeightedRandomSampler(
softmaxed, 10000)))).float()
mean_value = samples.mean(dim=1)
dist_from_mean = torch.abs(((
samples-mean_value.unsqueeze(1).repeat(
1, samples.shape[1]))+180)%360 - 180)
sorted_val = torch.sort(dist_from_mean).values
if percentile == 10000:
percentile = percentile-1
return sorted_val[:, percentile] | 905665d5219df7737adaf2c7fd435cef3f3c7f1d | 3,656,800 |
def gists_by(username, number=-1, etag=None):
"""Iterate over gists created by the provided username.
.. deprecated:: 1.2.0
Use :meth:`github3.github.GitHub.gists_by` instead.
:param str username: (required), if provided, get the gists for this user
instead of the authenticated user.
:param int number: (optional), number of gists to return. Default: -1,
return all of them
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Gist <github3.gists.Gist>`
"""
if username:
return gh.gists_by(username, number, etag)
return iter([]) | b98f478dbac25c0334296da5055952a776af9d39 | 3,656,801 |
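A hedged usage sketch; it assumes the module-level `gh` object is a configured github3 client, as the wrapper implies:

for gist in gists_by('octocat', number=5):  # first five public gists of a user
    print(gist.html_url)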
import unittest
def needs_spark(test_item):
"""
Use as a decorator before test classes or methods to only run them if Spark is usable.
"""
test_item = _mark_test('spark', test_item)
    try:
        # noinspection PyUnresolvedReferences
        import pyspark
    except ImportError:
return unittest.skip("Skipping test. Install PySpark to include this test.")(test_item)
except:
raise
else:
return test_item | f4d40b7119f753162ed5f6377ebef3b42d2bf549 | 3,656,802 |
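Typical decorator usage, assuming the `_mark_test` helper from the same test module is available:

@needs_spark
class SparkPipelineTest(unittest.TestCase):
    def test_noop(self):
        # runs only when PySpark is importable; skipped otherwise
        self.assertTrue(True)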
from typing import Generator
def get_school_years_from_db() -> Generator:
"""Get all school years from the database.
    :return: iterable with all available school years
"""
session: db.orm.session.Session = Session()
return (e[0] for e in set(session.query(Holiday.school_year).all())) | 48651fab2364e03a2d18224c7a798d8754cca911 | 3,656,803 |
def get_api(context=None):
"""
This function tries to detect if the app is running on a K8S cluster or locally
and returns the corresponding API object to be used to query the API server.
"""
if app.config.get("MODE") == "KUBECONFIG":
return client.CustomObjectsApi(config.new_client_from_config(context=context))
elif app.config.get("MODE") == "CLUSTER":
return client.CustomObjectsApi() | 89808a80c3ad4ae1260ffbb9611543b6e33298ee | 3,656,804 |
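A usage sketch assuming the standard kubernetes Python client objects (`client`, `config`) and a Flask-style `app.config`; the CRD group/version/plural values are placeholders, not taken from the original:

api = get_api()
if api is not None:
    # list cluster-scoped custom objects of a hypothetical CRD
    objs = api.list_cluster_custom_object(group='example.com', version='v1', plural='widgets')
    print(len(objs.get('items', [])))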
def is_variant(title) -> bool:
"""
Check if an issue is variant cover.
"""
return "variant" in title.lower() | 5e0bab3030c069d7726bbc8c9909f561ed139cb8 | 3,656,805 |
def _decode_common(hparams):
"""Common graph for decoding."""
features = get_input(hparams, FLAGS.data_files)
decode_features = {}
for key in features:
if key.endswith("_refs"):
continue
decode_features[key] = features[key]
_, _, _, references = seq2act_model.compute_logits(
features, hparams, mode=tf.estimator.ModeKeys.EVAL)
decode_utils.decode_n_step(seq2act_model.compute_logits,
decode_features, references["areas"],
hparams, n=20,
beam_size=FLAGS.beam_size)
decode_mask = generate_action_mask(decode_features)
return decode_features, decode_mask, features | a9eac81d9fe5e0480c679e41a61699b9e281fdd5 | 3,656,806 |
from typing import Tuple
def _lex_single_line_comment(header: str) -> Tuple[str, str]:
"""
>>> _lex_single_line_comment("a=10")
('', 'a=10')
>>> _lex_single_line_comment("//comment\\nb=20")
('', 'b=20')
"""
    if header[:2] != "//":
        return "", header
    line_end_pos = header.find("\n")
    if line_end_pos == -1:  # a trailing comment with no newline consumes the rest of the input
        return "", ""
    return "", header[line_end_pos + 1 :]
from thelper.data.loaders import LoaderFactory as LoaderFactory
import os
import logging
import sys
import pprint
import json
import numpy as np
import thelper
def create_loaders(config, save_dir=None):
"""Prepares the task and data loaders for a model trainer based on a provided data configuration.
This function will parse a configuration dictionary and extract all the information required to
instantiate the requested dataset parsers. Then, combining the task metadata of all these parsers, it
will evenly split the available samples into three sets (training, validation, test) to be handled by
different data loaders. These will finally be returned along with the (global) task object.
The configuration dictionary is expected to contain two fields: ``loaders``, which specifies all
parameters required for establishing the dataset split, shuffling seeds, and batch size (these are
listed and detailed below); and ``datasets``, which lists the dataset parser interfaces to instantiate
as well as their parameters. For more information on the ``datasets`` field, refer to
:func:`thelper.data.utils.create_parsers`.
The parameters expected in the 'loaders' configuration field are the following:
- ``<train_/valid_/test_>batch_size`` (mandatory): specifies the (mini)batch size to use in data
loaders. If you get an 'out of memory' error at runtime, try reducing it.
- ``<train_/valid_/test_>collate_fn`` (optional): specifies the collate function to use in data
loaders. The default one is typically fine, but some datasets might require a custom function.
- ``shuffle`` (optional, default=True): specifies whether the data loaders should shuffle
their samples or not.
- ``test_seed`` (optional): specifies the RNG seed to use when splitting test data. If no seed
is specified, the RNG will be initialized with a device-specific or time-related seed.
- ``valid_seed`` (optional): specifies the RNG seed to use when splitting validation data. If no
seed is specified, the RNG will be initialized with a device-specific or time-related seed.
- ``torch_seed`` (optional): specifies the RNG seed to use for torch-related stochastic operations
(e.g. for data augmentation). If no seed is specified, the RNG will be initialized with a
device-specific or time-related seed.
- ``numpy_seed`` (optional): specifies the RNG seed to use for numpy-related stochastic operations
(e.g. for data augmentation). If no seed is specified, the RNG will be initialized with a
device-specific or time-related seed.
- ``random_seed`` (optional): specifies the RNG seed to use for stochastic operations with python's
'random' package. If no seed is specified, the RNG will be initialized with a device-specific or
time-related seed.
- ``workers`` (optional, default=1): specifies the number of threads to use to preload batches in
parallel; can be 0 (loading will be on main thread), or an integer >= 1.
- ``pin_memory`` (optional, default=False): specifies whether the data loaders will copy tensors
into CUDA-pinned memory before returning them.
- ``drop_last`` (optional, default=False): specifies whether to drop the last incomplete batch
or not if the dataset size is not a multiple of the batch size.
- ``sampler`` (optional): specifies a type of sampler and its constructor parameters to be used
in the data loaders. This can be used for example to help rebalance a dataset based on its
class distribution. See :mod:`thelper.data.samplers` for more information.
- ``augments`` (optional): provides a list of transformation operations used to augment all samples
of a dataset. See :func:`thelper.transforms.utils.load_augments` for more info.
- ``train_augments`` (optional): provides a list of transformation operations used to augment the
training samples of a dataset. See :func:`thelper.transforms.utils.load_augments` for more info.
- ``valid_augments`` (optional): provides a list of transformation operations used to augment the
validation samples of a dataset. See :func:`thelper.transforms.utils.load_augments` for more info.
- ``test_augments`` (optional): provides a list of transformation operations used to augment the
test samples of a dataset. See :func:`thelper.transforms.utils.load_augments` for more info.
- ``eval_augments`` (optional): provides a list of transformation operations used to augment the
validation and test samples of a dataset. See :func:`thelper.transforms.utils.load_augments` for more info.
- ``base_transforms`` (optional): provides a list of transformation operations to apply to all
loaded samples. This list will be passed to the constructor of all instantiated dataset parsers.
See :func:`thelper.transforms.utils.load_transforms` for more info.
- ``train_split`` (optional): provides the proportion of samples of each dataset to hand off to the
training data loader. These proportions are given in a dictionary format (``name: ratio``).
- ``valid_split`` (optional): provides the proportion of samples of each dataset to hand off to the
validation data loader. These proportions are given in a dictionary format (``name: ratio``).
- ``test_split`` (optional): provides the proportion of samples of each dataset to hand off to the
test data loader. These proportions are given in a dictionary format (``name: ratio``).
- ``skip_verif`` (optional, default=True): specifies whether the dataset split should be verified
if resuming a session by parsing the log files generated earlier.
- ``skip_split_norm`` (optional, default=False): specifies whether the question about normalizing
the split ratios should be skipped or not.
- ``skip_class_balancing`` (optional, default=False): specifies whether the balancing of class
labels should be skipped in case the task is classification-related.
Example configuration file::
# ...
"loaders": {
"batch_size": 128, # batch size to use in data loaders
"shuffle": true, # specifies that the data should be shuffled
"workers": 4, # number of threads to pre-fetch data batches with
"train_sampler": { # we can use a data sampler to rebalance classes (optional)
# see e.g. 'thelper.data.samplers.WeightedSubsetRandomSampler'
# ...
},
"train_augments": { # training data augmentation operations
# see 'thelper.transforms.utils.load_augments'
# ...
},
"eval_augments": { # evaluation (valid/test) data augmentation operations
# see 'thelper.transforms.utils.load_augments'
# ...
},
"base_transforms": { # global sample transformation operations
# see 'thelper.transforms.utils.load_transforms'
# ...
},
# optionally indicate how to resolve dataset loader task vs model task incompatibility if any
# leave blank to get more details about each case during runtime if this situation happens
"task_compat_mode": "old|new|compat",
            # finally, we define an 80%-10%-10% split for our data
# (we could instead use one dataset for training and one for testing)
"train_split": {
"dataset_A": 0.8
"dataset_B": 0.8
},
"valid_split": {
"dataset_A": 0.1
"dataset_B": 0.1
},
"test_split": {
"dataset_A": 0.1
"dataset_B": 0.1
}
# (note that the dataset names above are defined in the field below)
},
"datasets": {
"dataset_A": {
# type of dataset interface to instantiate
"type": "...",
"params": {
# ...
}
},
"dataset_B": {
# type of dataset interface to instantiate
"type": "...",
"params": {
# ...
},
# if it does not derive from 'thelper.data.parsers.Dataset', a task is needed:
"task": {
# this type must derive from 'thelper.tasks.Task'
"type": "...",
"params": {
# ...
}
}
},
# ...
},
# ...
Args:
config: a dictionary that provides all required data configuration information under two fields,
namely 'datasets' and 'loaders'.
save_dir: the path to the root directory where the session directory should be saved. Note that
this is not the path to the session directory itself, but its parent, which may also contain
other session directories.
Returns:
A 4-element tuple that contains: 1) the global task object to specialize models and trainers with;
2) the training data loader; 3) the validation data loader; and 4) the test data loader.
.. seealso::
| :func:`thelper.data.utils.create_parsers`
| :func:`thelper.transforms.utils.load_augments`
| :func:`thelper.transforms.utils.load_transforms`
"""
logstamp = thelper.utils.get_log_stamp()
repover = thelper.__version__ + ":" + thelper.utils.get_git_stamp()
session_name = config["name"] if "name" in config else "session"
data_logger_dir = None
if save_dir is not None:
thelper.utils.init_logger() # make sure all logging is initialized before attaching this part
data_logger_dir = os.path.join(save_dir, "logs")
os.makedirs(data_logger_dir, exist_ok=True)
data_logger_path = os.path.join(data_logger_dir, "data.log")
data_logger_format = logging.Formatter("[%(asctime)s - %(process)s] %(levelname)s : %(message)s")
data_logger_fh = logging.FileHandler(data_logger_path)
data_logger_fh.setLevel(logging.NOTSET)
data_logger_fh.setFormatter(data_logger_format)
thelper.data.logger.addHandler(data_logger_fh)
thelper.data.logger.info(f"created data log for session '{session_name}'")
logger.debug("loading data usage config")
# todo: 'data_config' field is deprecated, might be removed later
if "data_config" in config:
logger.warning("using 'data_config' field in configuration dictionary is deprecated; switch it to 'loaders'")
loaders_config = thelper.utils.get_key(["data_config", "loaders"], config)
# noinspection PyProtectedMember
loader_factory = LoaderFactory(loaders_config)
datasets, task = create_parsers(config, loader_factory.get_base_transforms())
assert datasets and task is not None, "invalid dataset configuration (got empty list)"
for dataset_name, dataset in datasets.items():
logger.info(f"parsed dataset: {str(dataset)}")
logger.info(f"task info: {str(task)}")
logger.debug("splitting datasets and creating loaders...")
train_idxs, valid_idxs, test_idxs = loader_factory.get_split(datasets, task)
if save_dir is not None:
with open(os.path.join(data_logger_dir, "task.log"), "a+") as fd:
fd.write(f"session: {session_name}-{logstamp}\n")
fd.write(f"version: {repover}\n")
fd.write(str(task) + "\n")
for dataset_name, dataset in datasets.items():
dataset_log_file = os.path.join(data_logger_dir, dataset_name + ".log")
if not loader_factory.skip_verif and os.path.isfile(dataset_log_file):
logger.info(f"verifying sample list for dataset '{dataset_name}'...")
log_content = thelper.utils.load_config(dataset_log_file, as_json=True, add_name_if_missing=False)
assert isinstance(log_content, dict), "old split data logs no longer supported for verification"
samples_old, samples_new = None, None
if "samples" in log_content:
assert isinstance(log_content["samples"], list), \
"unexpected dataset log content (bad 'samples' field, should be list)"
samples_old = log_content["samples"]
samples_new = dataset.samples if hasattr(dataset, "samples") and dataset.samples is not None \
and len(dataset.samples) == len(dataset) else []
if len(samples_old) != len(samples_new):
query_msg = f"old sample list for dataset '{dataset_name}' mismatch with current list; proceed?"
answer = thelper.utils.query_yes_no(query_msg, bypass="n")
if not answer:
logger.error("sample list mismatch with previous run; user aborted")
sys.exit(1)
break
for set_name, idxs in zip(["train_idxs", "valid_idxs", "test_idxs"],
[train_idxs[dataset_name], valid_idxs[dataset_name], test_idxs[dataset_name]]):
# index values were paired in tuples earlier, 0=idx, 1=label --- we unpack in the miniloop below
if not np.array_equal(np.sort(log_content[set_name]), np.sort([idx for idx, _ in idxs])):
query_msg = f"Old indices list for dataset '{dataset_name}' mismatch with current indices" \
f"list ('{set_name}'); proceed anyway?"
answer = thelper.utils.query_yes_no(query_msg, bypass="n")
if not answer:
logger.error("indices list mismatch with previous run; user aborted")
sys.exit(1)
break
printer = pprint.PrettyPrinter(indent=2)
log_sample_metadata = thelper.utils.get_key_def(["log_samples", "log_samples_metadata"], config, default=False)
for dataset_name, dataset in datasets.items():
dataset_log_file = os.path.join(data_logger_dir, dataset_name + ".log")
samples = dataset.samples if hasattr(dataset, "samples") and dataset.samples is not None \
and len(dataset.samples) == len(dataset) else []
log_content = {
"metadata": {
"session_name": session_name,
"logstamp": logstamp,
"version": repover,
"dataset": str(dataset),
},
# index values were paired in tuples earlier, 0=idx, 1=label
"train_idxs": [int(idx) for idx, _ in train_idxs[dataset_name]],
"valid_idxs": [int(idx) for idx, _ in valid_idxs[dataset_name]],
"test_idxs": [int(idx) for idx, _ in test_idxs[dataset_name]]
}
if log_sample_metadata:
log_content["samples"] = [printer.pformat(sample) for sample in samples]
# now, always overwrite, as it can get too big otherwise
with open(dataset_log_file, "w") as fd:
json.dump(log_content, fd, indent=4, sort_keys=False)
train_loader, valid_loader, test_loader = loader_factory.create_loaders(datasets, train_idxs, valid_idxs, test_idxs)
return task, train_loader, valid_loader, test_loader | 41a40951dc0f5848120cc88ac6abc34ea0367b04 | 3,656,808 |
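A minimal configuration sketch matching the fields documented above; the dataset "type" string and paths are placeholders, not values from the original:

config = {
    "name": "demo-session",
    "loaders": {
        "batch_size": 32,
        "shuffle": True,
        "workers": 2,
        "train_split": {"dataset_A": 0.8},
        "valid_split": {"dataset_A": 0.1},
        "test_split": {"dataset_A": 0.1},
    },
    "datasets": {
        "dataset_A": {"type": "thelper.data.parsers.ImageFolderDataset", "params": {"root": "data/"}},
    },
}
# task, train_loader, valid_loader, test_loader = create_loaders(config, save_dir="sessions/")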
import numpy as np
def in_box(X, box):
"""Get a boolean array indicating whether points X are within a given box
:param X: n_pts x n_dims array of points
:param box: 2 x n_dims box specs (box[0, :] is the min point and box[1, :] is the max point)
:return: n_pts boolean array r where r[idx] is True iff X[idx, :] is within the box
>>> import numpy as np
>>> X = np.arange(12).reshape((4, 3))
>>> print(X)
[[ 0 1 2]
[ 3 4 5]
[ 6 7 8]
[ 9 10 11]]
>>> in_box(X, [[1, 2, 3], [6, 7, 8]])
array([False, True, True, False])
>>> in_box(X, box=[[2] * 3, [7] * 3])
array([False, True, False, False])
"""
MINS_ROW_IDX = 0
MAXS_ROW_IDX = 1
X, box = map(np.array, (X, box))
n_rows_in_box_matrix, ndims = box.shape
assert (
n_rows_in_box_matrix == 2
), 'box must have 2 rows only: [0] the min point and [1] the max point of the box'
assert (
X.shape[1] == ndims
), f"ndims of X should be aligned with box's ({ndims}): Was {X.shape[1]}"
return np.all((box[MINS_ROW_IDX, :] <= X) & (X <= box[MAXS_ROW_IDX, :]), axis=1) | 8ee516937b3a19a27fed81cfee0ca19356cb5249 | 3,656,809 |
import numpy as np
def test_partial_field_square():
"""Fields that do not extend over the whole wall"""
field = np.zeros((40, 40))
field[:10, 0] = 1
fields = {kw_field_map: field}
walls = "L"
assert func(fields, "s", walls=walls) == 0.25
field[:20, 0] = 1
assert func(fields, "s", walls=walls) == 0.5
field[:30, 0] = 1
assert func(fields, "s", walls=walls) == 0.75
print("test_partial_field() passed")
return True | 94af1cf9e500ddddd16c9fea61eeb43874589b68 | 3,656,810 |
import sys
from os import path
from xml.dom.minidom import parseString
def get_countries():
"""
The function to generate a dictionary containing ISO_3166-1 country codes
to names.
Returns:
Dictionary: A dictionary with the country codes as the keys and the
country names as the values.
"""
#Initialize the countries dictionary.
countries = {}
#Set the data directory based on if the script is a frozen executable.
if sys.platform == 'win32' and getattr(sys, 'frozen', False):
data_dir = path.dirname(sys.executable)
else:
data_dir = path.dirname(__file__)
#Create the country codes file object.
f = open(str(data_dir) + '/data/iso_3166-1_list_en.xml', 'r')
#Read the file.
data = f.read()
#Check if there is data.
if not data:
return {}
#Parse the data to get the DOM.
dom = parseString(data)
#Retrieve the country entries.
entries = dom.getElementsByTagName('ISO_3166-1_Entry')
#Iterate through the entries and add to the countries dictionary.
for entry in entries:
#Retrieve the country code and name from the DOM.
code = entry.getElementsByTagName(
'ISO_3166-1_Alpha-2_Code_element')[0].firstChild.data
name = entry.getElementsByTagName(
'ISO_3166-1_Country_name')[0].firstChild.data
#Add to the countries dictionary.
countries[code] = name.title()
return countries | 4491097e1b3f0a5cd4034b4745a7597688a952fd | 3,656,811 |
def get_empty_faceid(current_groupid, uuid, embedding,
img_style, number_people, img_objid, forecast_result):
"""
    Called when softmax yields no result (no model, or prediction confidence too low); falls back to traversing the database for recognition
:param current_groupid:
:param uuid:
:param embedding:
:param img_style:
:param number_people:
:param img_objid:
:return:
"""
json_data = {'detected': True, 'recognized': False}
face_id = img_objid + str(all_face_index).zfill(4)
json_data['recognized'] = False
json_data['face_id'] = face_id
json_data['accuracy'] = 0
json_data['style'] = img_style
forecast_result['face_id'] = face_id
forecast_result['face_accuracy'] = 0
embedding_string = ','.join(str(x) for x in embedding)
forecast_result['embedding_string'] = embedding_string
return json_data, forecast_result | 265250564de0ac160c5f0110293fc52693edaeda | 3,656,812 |
def dz_and_top_to_phis(
top_height: xr.DataArray, dz: xr.DataArray, dim: str = COORD_Z_CENTER
) -> xr.DataArray:
""" Compute surface geopotential from model top height and layer thicknesses"""
return _GRAVITY * (top_height + dz.sum(dim=dim)) | 14e19781cdac7a743d26db7d29317ea33ae94517 | 3,656,813 |
import numpy as np
import scipy.stats
def altPDF(peaks,mu,sigma=None,exc=None,method="RFT"):
"""
altPDF: Returns probability density using a truncated normal
distribution that we define as the distribution of local maxima in a
GRF under the alternative hypothesis of activation
parameters
----------
    peaks: float or list of floats
        list of peak heights
    mu: float
        mean of the peak height distribution under the alternative
    sigma: float
        standard deviation of the peak height distribution
    exc: float
        excursion (cluster-forming) threshold used to truncate the distribution
returns
-------
fa: float or list
probability density of the peaks heights under Ha
"""
#Returns probability density of the alternative peak distribution
peaks = np.asarray(peaks)
if method == "RFT":
# assert type(sigma) is in [float, int]
# assert sigma is not None
ksi = (peaks-mu)/sigma
alpha = (exc-mu)/sigma
num = 1/sigma * scipy.stats.norm.pdf(ksi)
den = 1. - scipy.stats.norm.cdf(alpha)
fa = num/den
elif method == "CS":
fa = [peakdistribution.peakdens3D(y-mu,1) for y in peaks]
return fa | 0346d1efcad2a3f8e1548857c980fb6c92ea07f3 | 3,656,814 |
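A small numeric sketch of the RFT branch with arbitrary values:

# density of two example peak heights under Ha, truncated at the excursion threshold
print(altPDF([3.0, 3.5], mu=3.0, sigma=1.0, exc=2.3, method="RFT"))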
def implicit_quantile_network(num_actions, quantile_embedding_dim,
network_type, state, num_quantiles):
"""The Implicit Quantile ConvNet.
Args:
num_actions: int, number of actions.
quantile_embedding_dim: int, embedding dimension for the quantile input.
network_type: namedtuple, collection of expected values to return.
state: `tf.Tensor`, contains the agent's current state.
num_quantiles: int, number of quantile inputs.
Returns:
net: _network_type object containing the tensors output by the network.
"""
model = atari_lib.ImplicitQuantileNetwork(num_actions, quantile_embedding_dim)
net = model(state, num_quantiles)
return network_type(quantile_values=net.quantile_values,
quantiles=net.quantiles) | 2782f94b2003dca0a8865298dab2dbb17ec4cb45 | 3,656,815 |
def get_waitlist(usercode):
"""
    Handles /api/waitlists/{usercode} - returns the waitlist content for the given usercode
"""
user_by_usercode = (
AppUsers.query.filter(AppUsers.usercode == usercode).one_or_none()
)
if user_by_usercode is None:
abort(
409,
"Usercode {usercode} does not exists".format(
usercode=usercode
),
)
string_array_waitlist = user_by_usercode.waitlist
    # Query by id
try:
array_waitlist = [int(s) for s in string_array_waitlist.split(',')]
except ValueError:
abort(
404,
"Waitlist empty or wrong format. Format of waitlist string should be - 1,2,3,4,5 etc",
)
except AttributeError:
abort(
404,
"Waitlist empty or wrong format. Format of waitlist string should be - 1,2,3,4,5 etc",
)
content = Content.query.filter(Content.content_id.in_(array_waitlist)).all()
    # Check that content with these IDs exists
if content is not None:
        # Serialization
content_schema = ContentSchema(many=True)
data = content_schema.dump(content).data
return data
    # Error if nothing was found
else:
abort(
404,
"Empty show list with this IDs",
) | 0592d188020b967198c1cb052d1e4b3adbc1ed21 | 3,656,816 |
def not_empty(message=None) -> Filter_T:
"""
Validate any object to ensure it's not empty (is None or has no elements).
"""
def validate(value):
if value is None:
_raise_failure(message)
if hasattr(value, '__len__') and value.__len__() == 0:
_raise_failure(message)
return value
return validate | f1ee9b43936978dfbd81550b9931d0cc8800eef2 | 3,656,817 |
import numpy as np
def temporal_affine_forward(x, W, b):
"""
Run a forward pass for temporal affine layer. The dimensions are consistent with RNN/LSTM forward passes.
Arguments:
x: input data with shape (N, T, D)
W: weight matrix for input data with shape (D, M)
b: bias with shape (M,)
Outputs:
out: output data with shape (N, T, M)
cache: cache for back-prop
"""
N, T, D = x.shape
M = b.shape[0]
out = np.dot(x.reshape(N * T, D), W).reshape(N, T, M) + b
cache = x, W, b, out
return out, cache | 2eca1c3ef36eb8bdbcaaad88c3b2f1234227e2d4 | 3,656,818 |
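A quick shape check with random inputs:

x = np.random.randn(2, 3, 4)   # N=2 sequences, T=3 steps, D=4 features
W = np.random.randn(4, 5)      # maps D=4 features to M=5 outputs
b = np.random.randn(5)
out, cache = temporal_affine_forward(x, W, b)
print(out.shape)  # (2, 3, 5)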
import numpy as np
def uniform_regular_knot_vector(n, p, t0=0.0, t1=1.0):
"""
Create a p+1-regular uniform knot vector for
a given number of control points
Throws if n is too small
"""
# The minimum length of a p+1-regular knot vector
# is 2*(p+1)
if n < p+1:
raise RuntimeError("Too small n for a uniform regular knot vector")
# p+1 copies of t0 left and p+1 copies of t1 right
# but one of each in linspace
return [t0]*p + list(np.linspace(t0, t1, n+1-p)) + [t1]*p | e0e1bc9f2e2ea2e74d70c76d35479efebc42d2f7 | 3,656,819 |
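For example, n=5 control points of degree p=2 give a knot vector of length n+p+1 = 8:

print(uniform_regular_knot_vector(5, 2))
# [0.0, 0.0, 0.0, 0.333..., 0.666..., 1.0, 1.0, 1.0]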
import numpy as np
def generateCM(labelValue, predictValue):
    """Generates the confusion matrix and returns it.
Args:
labelValue (np.ndarray): true values.
predictValue (np.ndarray): predicted values.
"""
FPMtx = np.logical_and((labelValue <= 0), (predictValue > 0))
FPIndices = np.argwhere(FPMtx)
FPNum = np.sum(FPMtx)
FNMtx = np.logical_and((labelValue > 0), (predictValue <= 0))
FNIndices = np.argwhere(FNMtx)
FNNum = np.sum(FNMtx)
TPMtx = np.logical_and((labelValue > 0), (predictValue > 0))
TPIndices = np.argwhere(TPMtx)
TPNum = np.sum(TPMtx)
TNMtx = np.logical_and((labelValue <= 0), (predictValue <= 0))
TNIndices = np.argwhere(TNMtx)
TNNum = np.sum(TNMtx)
accuracy = (TPNum+TNNum) / (TPNum+TNNum+FPNum+FNNum)
FPrate = FPNum / (FPNum+TNNum)
FNrate = FNNum / (TPNum+FNNum)
TNrate = TNNum / (FPNum+TNNum)
TPrate = TPNum / (TPNum+FNNum)
print(
"TP: {:.0f}, FN: {:.0f}, FP: {:.0f}, TN: {:.0f}".format(
TPNum, FNNum, FPNum, TNNum
)
)
cm = np.array([[TPrate, FNrate], [FPrate, TNrate]])
return cm, accuracy, TPIndices, FNIndices, FPIndices, TNIndices | 3ea5751bb9c9153edf4fdce12512319b75f80484 | 3,656,820 |
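A toy check where the sign encodes the class (positive scores mean the positive class):

labels = np.array([1.0, 1.0, -1.0, -1.0])
preds = np.array([0.5, -0.2, 0.3, -0.7])
cm, acc, *_ = generateCM(labels, preds)
print(cm)   # [[TPrate, FNrate], [FPrate, TNrate]]
print(acc)  # 0.5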
import numpy as np
from gensim.models import Word2Vec
from sklearn.metrics.pairwise import cosine_similarity
def get_cosine_similarity(word2vec: Word2Vec) -> np.ndarray:
"""Get the cosine similarity matrix from the embedding.
Warning; might be very big!
"""
return cosine_similarity(word2vec.wv.vectors) | b9a976de8faef0cd85265c4afdb80dd8720128f5 | 3,656,821 |
def get_bot_id() -> str:
"""
Gets the app bot ID
Returns:
The app bot ID
"""
response = CLIENT.auth_test()
return response.get('user_id') | 6dcf2121fb11fb4af1615c9d739923e86299cc0a | 3,656,822 |
import os
def extract_rawfile_unique_values(
file: str
) -> list:
"""Extract the unique raw file names from "R.FileName" (Spectronaut output), "Raw file" (MaxQuant output),
"shortname" (AlphaPept output) or "Run" (DIA-NN output) column or from the "Spectral Count" column from the
combined_peptide.tsv file without modifications for the FragPipe.
Args:
file (str): The name of a file.
Raises:
ValueError: if a column with the unique raw file names is not in the file.
Returns:
list: A sorted list of unique raw file names from the file.
"""
file_ext = os.path.splitext(file)[-1]
if file_ext == '.csv':
sep = ','
elif file_ext in ['.tsv', '.txt']:
sep = '\t'
with open(file) as filelines:
i = 0
filename_col_index = None
filename_data = []
for l in filelines:
l = l.split(sep)
# just do it for the first line
if i == 0:
for col in ['R.FileName', 'Raw file', 'Run', 'shortname']:
if col in l:
filename_col_index = l.index(col)
break
if not isinstance(filename_col_index, int):
# to check the case with the FragPipe peptide.tsv file when we don't have the info about the experiment name
if ("Assigned Modifications" in "".join(l)) and ("Protein ID" in "".join(l)) and ("Peptide" in "".join(l)):
return []
# to check the case with the FragPipe combined_peptide.tsv file when the experiment name is included in the "Spectral Count" column
elif ("Sequence" in "".join(l)) and ("Assigned Modifications" in "".join(l)) and ("Protein ID" in "".join(l)):
return sorted(list(set([col.replace('_', '').replace(' Spectral Count', '') for col in l if 'Spectral Count' in col])))
else:
raise ValueError('A column with the raw file names is not in the file.')
else:
filename_data.append(l[filename_col_index])
i += 1
unique_filenames = set(filename_data)
sorted_unique_filenames = sorted(list(unique_filenames))
return sorted_unique_filenames | 594744409537f977530e111826237161e6121d47 | 3,656,823 |
import boto3
def _fetch_from_s3(bucket_name, path):
"""Fetch the contents of an S3 object
Args:
bucket_name (str): The S3 bucket name
path (str): The path to the S3 object
Returns:
str: The content of the S3 object in string format
"""
s3 = boto3.resource('s3')
bucket = s3.Bucket(bucket_name)
obj = bucket.Object(path)
data = obj.get()['Body'].read().decode('utf-8')
return data | 3e284b653c046a826b92e82bf60e7e12547280b2 | 3,656,824 |
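Usage sketch with placeholder bucket/key names; boto3 resolves AWS credentials from the environment:

text = _fetch_from_s3('my-config-bucket', 'configs/app.yaml')  # hypothetical bucket and key
print(text[:200])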
def vrv_getatttype(schema, module, gp, aname, includes_dir = ""):
""" returns the attribut type for element name, or string if not detectable."""
# Look up if there is an override for this type in the current module, and return it
# Note that we do not honor pseudo-hungarian notation
attype, hungarian = vrv_get_att_config_type(module, gp, aname)
if attype:
return (attype, hungarian)
# No override, get it from the schema
# First numbers
el = schema.xpath("//tei:attDef[@ident=$name]/tei:datatype/rng:data/@type", name=aname, namespaces=TEI_NS)
if el:
if el[0] == "nonNegativeInteger" or el[0] == "positiveInteger":
return ("int", "")
elif el[0] == "decimal":
return ("double", "")
# The data types
ref = schema.xpath("//tei:classSpec[@ident=$gp]//tei:attDef[@ident=$name]/tei:datatype/rng:ref/@name", gp=gp, name=aname, namespaces=TEI_NS)
if ref:
return (vrv_getformattedtype("{0}".format(ref[0])), "")
# Finally from val lists
vl = schema.xpath("//tei:classSpec[@ident=$gp]//tei:attDef[@ident=$name]/tei:valList[@type=\"closed\"]", gp=gp, name=aname, namespaces=TEI_NS)
if vl:
element = vl[0].xpath("./ancestor::tei:classSpec", namespaces=TEI_NS)
attName = vl[0].xpath("./parent::tei:attDef/@ident", namespaces=TEI_NS)
if element:
return(vrv_getformattedvallist(element[0].get("ident"),attName[0]), "")
#data_list = "{0}.{1}".format(element[0].get("ident"),attName[0])
#elif attName:
# elName = vl[0].xpath("./ancestor::tei:elementSpec/@ident", namespaces=TEI_NS)
# lg.debug("VALLIST {0} --- {1}".format(elName[0],attName[0]))
# Otherwise as string
return ("std::string", "") | 88d014aedcc17a307e32daebd205f93e9c1e2e89 | 3,656,825 |
from datetime import timedelta
def inc_date(date_obj, num, date_fmt):
"""Increment the date by a certain number and return date object.
as the specific string format.
"""
return (date_obj + timedelta(days=num)).strftime(date_fmt) | 560d82b8e72614b8f9011ab97c10a7612d1c50b0 | 3,656,826 |
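For example:

from datetime import date
print(inc_date(date(2021, 1, 1), 5, '%Y-%m-%d'))  # '2021-01-06'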
from rdkit import Chem
def recombine_edges(output_edges):
"""
Recombine a list of edges based on their rules.
Recombines identical Xe isotopes. Remove isotopes.
:param output_edges:
:return:
"""
mol = Chem.MolFromSmiles(".".join(output_edges))
# Dictionary of atom's to bond together and delete if they come in pairs
iso_dict = {}
for atom in mol.GetAtoms():
if atom.GetAtomicNum() == 54:
# Get the isotope
iso = atom.GetIsotope()
if iso in iso_dict:
iso_dict[iso].append(get_info(atom))
else:
iso_dict[iso] = [get_info(atom)]
mw = Chem.RWMol(mol)
# Add bonds first
del_indices = []
for isotope in iso_dict:
if len(iso_dict[isotope]) > 1:
mw.AddBond(
iso_dict[isotope][0][1], iso_dict[isotope][1][1], Chem.BondType.SINGLE
)
del_indices.append(iso_dict[isotope][0][0])
del_indices.append(iso_dict[isotope][1][0])
# Now delete atoms
del_count = 0
for atom_index in sorted(del_indices):
mw.RemoveAtom(atom_index - del_count)
del_count += 1
Chem.SanitizeMol(mw)
return Chem.MolToSmiles(mw, isomericSmiles=True) | 1bbce0bb315990f758aa47a0dae2dc34bff9fb2a | 3,656,827 |
from typing import List
import os
from lark import Lark
def parse_comments(content: str) -> List[str]:
"""Parses comments in LDF files
:param content: LDF file content as string
:type content: str
:returns: a list of all comments in the LDF file
:rtype: List[str]
"""
comment = os.path.join(os.path.dirname(__file__), 'lark', 'comment.lark')
parser = Lark(grammar=open(comment), parser='lalr')
tree = parser.parse(content)
return CommentCollector().transform(tree) | aed403e1888c0db73f86b172888b95e8320adabc | 3,656,828 |
def excludevars(vdict, filters):
"""
Remove dictionary items by filter
"""
vdict_remove = dict()
for filtr in filters:
a = filtervars_sub(vdict, filtr)
vdict_remove.update(a)
vdict_filtered = vdict.copy()
for key in vdict_remove.keys():
del vdict_filtered[key]
return vdict_filtered | 5050e946454c096a11c664d1a0910d2b2f7d985a | 3,656,829 |
def __updateEntityAttributes(fc, fldList, dom, logFile):
"""For each attribute (field) in fldList,
adds attribute definition and definition source,
classifies as range domain, unrepresentable-value domain or enumerated-value domain, and
for range domains, adds rangemin, rangemax, and units;
for unrepresentable value domains, adds unrepresentable value statement;
for enumerated value domains:
1) Finds all controlled-vocabulary fields in the table sent to it
2) Builds a set of unique terms in each field, ie, the domain
3) Matches each domain value to an entry in the glossary
4) Builds a dictionary of term:(definition, source) items
5) Takes the dictionary items and put them into the metadata
document as Attribute_Domain_Values
Field MapUnit in table DescriptionOfMapUnits is treated as a special case.
"""
cantfindTerm = []
cantfindValue = []
for fld in fldList:
addMsgAndPrint( ' Field: '+ fld)
# if is _ID field or if field definition is available, update definition
if fld.find('_ID') > -1 or fld in attribDict:
dom = __updateAttrDef(fld,dom)
else:
cantfindTerm.append(fld)
#if this is an _ID field
if fld.find('_ID') > -1:
dom = __updateUdom(fld,dom,unrepresentableDomainDict['_ID'])
#if this is another unrepresentable-domain field
if fld in unrepresentableDomainDict:
dom = __updateUdom(fld,dom,unrepresentableDomainDict[fld])
#if this is a defined range-domain field
elif fld in rangeDomainDict:
dom = __updateRdom(fld,dom)
#if this is MapUnit in DMU
elif fld == 'MapUnit' and fc == 'DescriptionOfMapUnits':
dom = __updateUdom(fld,dom,unrepresentableDomainDict['default'])
#if this is a defined Enumerated Value Domain field
elif fld in enumeratedValueDomainFieldList:
valList = []
#create a search cursor on the field
rows = arcpy.SearchCursor(fc,'','', fld)
row = next(rows)
#collect all values/terms in that field
while row:
if not row.getValue(fld) is None:
valList.append(row.getValue(fld))
row = next(rows)
#uniquify the list by converting it to a set object
valList = set(valList)
#create an empty dictionary object to hold the matches between the unique terms
#and their definitions (grabbed from the glossary)
defs = {}
#for each unique term, try to create a search cursor of just one record where the term
#matchs a Term field value from the glossary
if fld == 'MapUnit' and fc != 'DescriptionOfMapUnits':
for t in valList:
query = '"MapUnit" = \'' + t + '\''
rows = arcpy.SearchCursor(DMU, query)
row = next(rows)
#if the searchcursor contains a row
if row:
#create an entry in the dictionary of term:[definition, source] key:value pairs
#this is how we will enumerate through the enumerated_domain section
defs[t] = []
if row.FullName != None:
defs[t].append(row.FullName.encode('utf_8'))
defs[t].append('this report, table DescriptionOfMapUnits')
else:
addMsgAndPrint('MapUnit = '+t+', FullName not defined')
defs[t].append(row.Name.encode('utf_8'))
defs[t].append('this report, table DescriptionOfMapUnits')
else:
if not t in ('',' '): cantfindValue.append([fld,t])
elif fld == 'GeoMaterialConfidence' and fc == 'DescriptionOfMapUnits':
if debug:
addMsgAndPrint('DMU / GeoMaterialsConfidence')
defs = GeoMatConfDict
elif fld == 'GeoMaterial' and fc == 'DescriptionOfMapUnits':
if debug:
addMsgAndPrint('DMU / GeoMaterials!')
for t in valList:
query = '"GeoMaterial" = \'' + t + '\''
if debug:
addMsgAndPrint('query='+query)
rows = arcpy.SearchCursor(gmDict, query)
row = next(rows)
#if the searchcursor contains a row
if row:
if debug:
addMsgAndPrint(row.GeoMaterial+' : '+row.Definition.encode('utf_8'))
#create an entry in the dictionary of term:[definition, source] key:value pairs
#this is how we will enumerate through the enumerated_domain section
defs[t] = []
defs[t].append(row.Definition.encode('utf_8'))
defs[t].append(' GeMS documentation')
else:
addMsgAndPrint('GeoMaterial = '+t+': not defined in GeoMaterialDict')
cantfindValue.append([fld,t])
elif fld.find('SourceID') > -1: # is a source field
for t in valList:
query = '"DataSources_ID" = \'' + t + '\''
rows = arcpy.SearchCursor(dataSources, query)
row = next(rows)
#if the searchcursor contains a row
if row:
#create an entry in the dictionary of term:[definition, source] key:value pairs
#this is how we will enumerate through the enumerated_domain section
defs[t] = []
defs[t].append(row.Source.encode('utf_8'))
defs[t].append('this report, table DataSources')
else:
cantfindValue.append([fld,t])
else:
for t in valList:
query = '"Term" = '+"'"+ t + "'"
if debug:
addMsgAndPrint('query='+query)
rows = arcpy.SearchCursor(gloss, query)
row = next(rows)
#if the searchcursor contains a row
if row:
#create an entry in the dictionary of term:[definition, source] key:value pairs
#this is how we will enumerate through the enumerated_domain section
defs[t] = []
defs[t].append(row.Definition.encode('utf_8'))
defs[t].append(__findInlineRef(row.DefinitionSourceID).encode('utf_8'))
else:
if fld != 'GeoMaterial' and fc != 'GeoMaterialDict':
cantfindValue.append([fld,t])
dom = __updateEdom(fld, defs, dom)
else: #presumed to be an unrepresentable domain
dom = __updateUdom(fld,dom,unrepresentableDomainDict['default'])
if len(cantfindValue) > 0:
logFile.write('Missing enumerated-domain values\n')
logFile.write(' ENTITY TERM VALUE\n')
for term in cantfindValue:
logFile.write(' '+fc+' '+term[0]+' **'+term[1]+'**\n')
if len(cantfindTerm) > 0:
logFile.write('Missing terms\n')
logFile.write(' ENTITY TERM\n')
for term in cantfindTerm:
logFile.write(' '+fc + ' '+term+'\n')
return dom | f304f31ea64dab2b186736af6a7177fee93afefe | 3,656,830 |
def make_laplace_pyramid(x, levels):
"""
Make Laplacian Pyramid
"""
pyramid = []
current = x
for i in range(levels):
pyramid.append(laplacian(current))
current = tensor_resample(
current,
(max(current.shape[2] // 2, 1), max(current.shape[3] // 2, 1)))
pyramid.append(current)
return pyramid | 88b8c94a8f5ca3fda3329c0ac8fa871693c1482f | 3,656,831 |
def create_component(ctx: NVPContext):
"""Create an instance of the component"""
return ToolsManager(ctx) | 24cf48073bb16233046abdde966c28c570cf16c0 | 3,656,832 |
import os
def _load_csv_key(symbol_key):
"""
针对csv存储模式,通过symbol_key字符串找到对应的csv具体文件名称,
如从usTSLA->找到usTSLA_2014-7-26_2016_7_26这个具体csv文件路径
:param symbol_key: str对象,eg. usTSLA
"""
# noinspection PyProtectedMember
csv_dir = ABuEnv.g_project_kl_df_data_example if ABuEnv._g_enable_example_env_ipython \
else ABuEnv.g_project_kl_df_data_csv
if file_exist(csv_dir):
for name in os.listdir(csv_dir):
# 从csv缓存文件夹下进行模糊查询通过fnmatch匹配具体csv文件路径,eg. usTSLA->usTSLA_2014-7-26_2016_7_26
# if fnmatch(name, '{}*'.format(symbol_key)):
"""
这里不能模糊匹配,否则会因为TSL匹配上TSLA导致删除原有的symbol
而且必须要加'_'做为symbol结束匹配标记
"""
if name.startswith(symbol_key + '_'):
# []只是为了配合外面针对不同store统一使用key[0]
return [name]
return None | ebfdb88713e5943863f75455aa9d276d8f780450 | 3,656,833 |
def get_routing_table() -> RouteCommandResult:
"""
Execute route command via subprocess. Blocks while waiting for output.
Returns the routing table in the form of a list of routes.
"""
return list(subprocess_workflow.exec_and_parse_subprocesses(
[RouteCommandParams()],
_get_route_command_args_list,
parse_route_output,
))[0] | 817d8350e7a2af514e3b239ec5d7dbc278fb7649 | 3,656,834 |
from array import array
def xor_arrays(arr1, arr2):
""" Does a XOR on 2 arrays, very slow"""
retarr = array('B')
for i in range(len(arr1)):
retarr.append(arr1[i] ^ arr2[i])
return retarr | 5ff978aa1a48a537a40132a5213b907fb7b14b4b | 3,656,835 |
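For example, XOR-ing two equal-length byte arrays:

a = array('B', [0x0F, 0xF0, 0xAA])
b = array('B', [0xFF, 0xFF, 0xAA])
print(xor_arrays(a, b))  # array('B', [240, 15, 0])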
def delete_category():
"""Delete category specified by id from database"""
category = Category.query.get(request.form['id'])
db.session.delete(category)
db.session.commit()
return '' | 47347299dd39c6afa9fd8d1cd10e1dc0906f6806 | 3,656,836 |
def gen_dd(acc, amt):
"""Generate a DD (low-level)"""
read()
dd_num = dd_no()
while dd_num in dds.keys():
dd_num = dd_no()
dd = {
'ac_no': acc,
'amount': amt
}
return dd_num, dd | 251a36131dae66f4d24dc2ce45db27f81da39845 | 3,656,837 |
import numpy as np
from scipy.spatial import distance
def coranking_matrix(high_data, low_data):
"""Generate a co-ranking matrix from two data frames of high and low
dimensional data.
:param high_data: DataFrame containing the higher dimensional data.
:param low_data: DataFrame containing the lower dimensional data.
:returns: the co-ranking matrix of the two data sets.
"""
n, m = high_data.shape
high_distance = distance.squareform(distance.pdist(high_data))
low_distance = distance.squareform(distance.pdist(low_data))
high_ranking = high_distance.argsort(axis=1).argsort(axis=1)
low_ranking = low_distance.argsort(axis=1).argsort(axis=1)
Q, xedges, yedges = np.histogram2d(high_ranking.flatten(),
low_ranking.flatten(),
bins=n)
Q = Q[1:, 1:] # remove rankings which correspond to themselves
return Q | 7cc77cd5ef70d7adef9020cab6f33a5dbf290557 | 3,656,838 |
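A quick sketch with random data standing in for a high-dimensional set and its 2-D embedding:

high = np.random.rand(50, 5)  # 50 points in 5-D
low = np.random.rand(50, 2)   # a (fake) 2-D embedding of the same points
Q = coranking_matrix(high, low)
print(Q.shape)  # (49, 49)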
import numpy as np
def gaussian_dist_xmu1xmu2_product_x(mu1,Sigma1,mu2,Sigma2):
"""Compute distribution of N(x|mu1,Sigma1)N(x|mu2,Sigma2)"""
InvSigmaHat = np.linalg.inv(Sigma1) + np.linalg.inv(Sigma2)
SigmaHat = np.linalg.inv(InvSigmaHat)
muHat = np.dot(SigmaHat,np.linalg.solve(Sigma1, mu1) + np.linalg.solve(Sigma2,mu2))
logC = gaussian_logprob(mu1,mu2,Sigma1 + Sigma2)
return (logC,muHat,SigmaHat) | 5eb50e98165bc77bc0754a93eef4f62b0665ea30 | 3,656,839 |
def default_marker_size(fmt):
""" Find a default matplotlib marker size such that different marker types
look roughly the same size.
"""
temp = fmt.replace('.-', '')
if '.' in temp:
ms = 10
elif 'D' in temp:
ms = 7
elif set(temp).intersection('<>^vd'):
ms = 9
else:
ms = 8
return ms | feebe9bdda47a2e041636f15c9b9595e5cd6b2cc | 3,656,840 |
def vote_smart_candidate_rating_filter(rating):
"""
Filter down the complete dict from Vote Smart to just the fields we use locally
:param rating:
:return:
"""
rating_filtered = {
'ratingId': rating.ratingId,
'rating': rating.rating,
'timeSpan': rating.timespan, # Seems to be typo with lower case "s"
'ratingName': rating.ratingName,
'ratingText': rating.ratingText,
'sigId': rating.sigId,
}
return rating_filtered | f4fec92e46f58444abb8dab56f28acc7e670aab0 | 3,656,841 |
def get_syntax(view):
""" get_syntax(view : sublime.View) -> str
>>> get_syntax(view)
'newLISP'
>>> get_syntax(view)
'Lisp'
Retuns current file syntax/language
"""
syntax = view.settings().get('syntax')
syntax = syntax.split('/')[-1].replace('.tmLanguage', '')
return syntax | a5be75f51de105af63ce53df7c3b7094537d28f3 | 3,656,842 |
def random_otp():
"""
:return: OTP for Event
:return type: string
"""
try:
all_events = Events.query.all() # Here Error if no Event
all_holded_events = HoldedEvents.query.all()
used_otps = set()
for otp_ in all_events:
used_otps.add(str(otp_.otp))
for otp_ in all_holded_events:
used_otps.add(str(otp_.otp))
total_otps = set()
available_otps = set()
for otp_ in range(0, 999999+1):
otp = str(otp_)
if len(otp)!=6:
diff = 6-len(otp)
otp = '0'*diff + otp
total_otps.add(otp)
available_otps = total_otps - used_otps
if len(available_otps) == 1:
return available_otps.pop()
else:
return 'Fail'
except:
return 'Fail' | e343addc9252de4ca9d69a344beea05254c9ebb0 | 3,656,843 |
def read_config(path):
"""Read the complete INI file and check its version number
if OK, pass values to config-database
"""
return _read_config(path) | bbb95e5e02d54dd831082d556e19307109e1113d | 3,656,844 |
def getPath(file):
"""Get the path of a source file.
Use this to extract the path of a file/directory when the file
could be specified either as a FileTarget, DirectoryTarget or string.
@param file: The object representing the file.
@type file: L{FileTarget}, L{DirectoryTarget} or C{basestring}
"""
assert not isinstance(file, AsyncResult)
if isinstance(file, (FileTarget, DirectoryTarget)):
return file.path
elif isinstance(file, basestring):
return file
else:
return None | b80e5f0ead8be98dd40bbd444bc8ae9201eb54ed | 3,656,845 |
import torch
import cv2
import numpy as np
def optical_flow_to_rgb(flows):
"""
Args:
A tensor with a batch of flow fields of shape [b*num_src, 2, h, w]
"""
flows = flows.cpu().numpy()
_, h, w = flows[0].shape
rgbs = []
for i in range(len(flows)):
mag, ang = cv2.cartToPolar(flows[i, 0, ...], flows[i, 1, ...])
hsv = np.zeros(shape=(h, w, 3), dtype="float32")
# true_angle / 2, hue range [0, 180]
hsv[..., 0] = (ang * 180 / np.pi) / 2
hsv[..., 1] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
hsv[..., 2] = 255
rgb = cv2.cvtColor(hsv.astype("uint8"), cv2.COLOR_HSV2BGR)
rgbs.append(rgb)
rgbs = np.array(rgbs).transpose([0, 3, 1, 2])
return torch.tensor(rgbs) | d27074eab88f0f1181c5e1acae4839cbed984e17 | 3,656,846 |
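A shape-only sketch with random flow fields:

flows = torch.randn(2, 2, 32, 32)  # two random 2-channel flow fields
rgb = optical_flow_to_rgb(flows)
print(rgb.shape)                   # torch.Size([2, 3, 32, 32])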
from re import finditer, MULTILINE
def get_motes_from_simulation(simfile, as_dictionary=True):
"""
This function retrieves motes data from a simulation file (.csc).
:param simfile: path to the simulation file
:param as_dictionary: flag to indicate that the output has to be formatted as a dictionary
:return: the list of motes formatted as dictionaries with 'id', 'x', 'y' and 'motetype_identifier' keys if
short is False or a dictionary with each mote id as the key and its tuple (x, y) as the value
"""
motes = []
with open(simfile) as f:
content = f.read()
iterables, fields = [], ['mote_id']
for it in ['id', 'x', 'y', 'motetype_identifier']:
iterables.append(finditer(r'^\s*<{0}>(?P<{0}>.*)</{0}>\s*$'.format(it), content, MULTILINE))
for matches in zip(*iterables):
mote = {}
for m in matches:
mote.update(m.groupdict())
motes.append(mote)
if as_dictionary:
motes = {int(m['id']): (float(m['x']), float(m['y'])) for m in motes}
return motes | bbf09378a45cee9a96dca136bc7751ea9372eeac | 3,656,847 |
def menu_bar():
"""each mini-game has a menu bar that allows direct access to
the main menu. This allows story mode to be bypassed after
starting war, but the game state will not be saved"""
pygame.draw.rect(SCREEN, TEAL, (0, 460, 640, 40))
menu_font = pygame.font.Font('freesansbold.ttf', 15)
menu_txt = menu_font.render("Menu", True, BLACK, TEAL)
menu_rect = menu_txt.get_rect()
menu_rect.center = (60, 480)
SCREEN.blit(menu_txt, menu_rect)
instr_txt = menu_font.render("Instructions", True, BLACK, TEAL)
instr_rect = instr_txt.get_rect()
instr_rect.center = (150, 480)
SCREEN.blit(instr_txt, instr_rect)
return menu_rect, instr_rect | 0b5b16db2f53c1cbb45236512597d954bb28e7da | 3,656,848 |
def merge_sort(a, p, r):
""" merge sort
    :param a: the array to sort; the slice a[p:r+1] is what gets sorted
    :param p: start index of the range; if p >= r the range has at most one element and is returned as-is
    :param r: end index (inclusive) of the range; if p >= r the range has at most one element and is returned as-is
"""
if p < r:
q = int((p + r) / 2)
# divider
a = merge_sort(a, p, q)
a = merge_sort(a, q + 1, r)
# conquer
merge(a, p, q, r)
return a | 07aab16ea75cb01f2f1fb3ae32f1b1ac31c76cfb | 3,656,849 |
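The snippet relies on a `merge` helper that is not shown; a minimal sketch of one possible in-place implementation, plus a usage line:

def merge(a, p, q, r):
    # merge the sorted runs a[p:q+1] and a[q+1:r+1] back into a, in place
    left, right = a[p:q + 1], a[q + 1:r + 1]
    i = j = 0
    for k in range(p, r + 1):
        if j >= len(right) or (i < len(left) and left[i] <= right[j]):
            a[k] = left[i]
            i += 1
        else:
            a[k] = right[j]
            j += 1

print(merge_sort([5, 2, 4, 6, 1, 3], 0, 5))  # [1, 2, 3, 4, 5, 6]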
def get_flow_graph(limit, period):
"""
:type limit int
:type period int
:rtype: list[dict]
"""
rows = ElasticsearchQuery(
es_host=ELASTICSEARCH_HOST,
period=period,
index_prefix='logstash-other'
).query_by_string(
query='kubernetes.labels.job-name:* AND '
'kubernetes.container_name: "portability-metric" AND ("SELECT" OR "UPDATE")',
fields=[
'log',
'kubernetes.labels.job-name'
],
limit=limit
)
entries = []
for row in rows:
for entry in get_portability_metrics_query(
row['log'], row['kubernetes']['labels']['job-name']):
entries.append(entry)
# print(entries)
# process the logs
def _map(item):
return '{}'.join(item)
def _reduce(items):
# ('MetricArticleProvider.py', 'UPDATE', 'articledata')
first = items[0]
script = 'cron:{}'.format(first[0])
query_type = first[1]
table_name = 'db:{}'.format(first[2])
return {
'source': table_name if query_type == 'SELECT' else script,
'edge': query_type,
'target': table_name if query_type != 'SELECT' else script,
}
return logs_map_and_reduce(entries, _map, _reduce) | f51bb6aa6132303e2cd5ed3090507435739c0452 | 3,656,850 |
import re
import time
def upload(server_ip, share, username, password, domain, remote_path, local_path, verbose=True):
""" Get file and folder on the remote file server.
server_ip (str): This value is the ip smb server's ip.
share (str): This value is the share file name.
username (str): This value is the login username required to connect to smb service.
password (str): This value is the login password required to connect to smb service.
domain (str): This value is the server domain name.
    remote_path (str): This value is the remote path where the file will be uploaded.
    local_path (str): This value is the local path of the file to upload.
verbose (boolean): Print information about function progress.
Returns:
        boolean: True if the upload succeeded, False otherwise.
"""
try:
smb = connect_samba_server(server_ip, share, username, password, domain, verbose=True)
smb.upload(local_path, remote_path)
smb.close()
regex = re.compile("((?:[^/]*/)*)(.*)")
for file in get_remote_dir(server_ip, share, username, password, domain, "/", verbose=True):
if regex.match(remote_path).group(2) in file:
print(Fore.GREEN+" ===> [upload] {"+regex.match(local_path).group(2)+"} -- "+time.strftime("%H:%M:%S", time.localtime())+Style.RESET_ALL)
return True
print(Fore.RED+" ===> [upload] {"+regex.match(local_path).group(2)+"} failed! -- "+time.strftime("%H:%M:%S", time.localtime())+Style.RESET_ALL)
return False
except Exception as e:
print(Fore.RED+" ===> [upload] failed during execution! -- "+time.strftime("%H:%M:%S", time.localtime())+Style.RESET_ALL)
return False | 552079181faa10b50c306b1ee9e02c190b9711a4 | 3,656,851 |
from typing import Union
import platform
def list_directory_command(api_client: CBCloudAPI, device_id: str, directory_path: str, limit: Union[int, str]):
"""
Get list of directory entries in the remote device
:param api_client: The API client
:param device_id: The device id
:param directory_path: Directory to list. This parameter should end with the path separator
:param limit: Limit the result entries count to be the given limit
:return: CommandResult represent the API command result
:rtype: ``CommandResults``
"""
session = api_client.select(platform.Device, device_id).lr_session()
items = [item for item in session.list_directory(directory_path) if item['filename'] not in IGNORED_FILES_IN_DIR]
items, partial_res_msg = get_limited_results(original_results=items, limit=limit)
directories_readable = []
context_entry_items = []
headers = ['name', 'type', 'date_modified', 'size']
for item in items:
context_entry_items.append(item)
directories_readable.append({
'name': item['filename'],
'type': 'Directory' if item['attributes'] and 'DIRECTORY' in item['attributes'] else 'File',
'date_modified': item['last_write_time'],
'size': item['size'],
})
context_entry = dict(content=context_entry_items, device_id=device_id, directory_path=directory_path)
readable_output = tableToMarkdown(f'Directory of {directory_path}{partial_res_msg}',
t=directories_readable,
headers=headers,
headerTransform=string_to_table_header,
removeNull=True)
return CommandResults(
outputs_prefix='CarbonBlackDefenseLR.Directory',
outputs_key_field=['device_id', 'directory_path'],
outputs=context_entry,
readable_output=readable_output,
raw_response=items,
) | 228d4884d2fd4f69e8c7a44d737bfeca7b40f753 | 3,656,852 |
def _hparams(network, random_seed):
"""
Global registry of hyperparams. Each entry is a (default, random) tuple.
New algorithms / networks / etc. should add entries here.
"""
hparams = {}
def _hparam(name, default_val, random_val_fn):
"""Define a hyperparameter. random_val_fn takes a RandomState and
returns a random hyperparameter value."""
random_state = np.random.RandomState(
misc.seed_hash(random_seed, name)
)
hparams[name] = (default_val, random_val_fn(random_state))
# Unconditional hparam definitions.
_hparam('lr', 0.001, lambda r: 10**r.uniform(-5, -2)) #
_hparam('weight_decay', 0, lambda r: 10**r.uniform(-6, -2))
_hparam('batch_size', 16, lambda r: int(r.choice([8,12,16])))
_hparam('epoch', 100, lambda r: int(r.choice([60,90,120,150])))
_hparam('transform_aug', False, lambda r: bool(r.choice([True,False])))
_hparam('lr_schedule', 1, lambda r: int(r.choice([0,1,2,3])))
if network == 'PoseResNet':
_hparam('num_layers', 50, lambda r: int(r.choice([50]))) #[18,34,50,101,152]
_hparam('pretrained', False, lambda r: bool(r.choice([False]))) #True,
return hparams | 34ea9ac295f3150b870e8d1c7ce0f1b867f75122 | 3,656,853 |
def bidding_search(request):
"""
"""
query = ''
form = BiddingSearchForm(shop=request.shop, data=request.GET)
if form.is_valid():
query = form.get_query()
results = form.search()
else:
results = form.all_results()
pager = Paginator(results, PAGE_SEARCH)
try:
page = int(request.GET.get('page','1'))
except:
page = 1
try:
products = pager.page(page)
except (EmptyPage, InvalidPage):
products = pager.page(pager.num_pages)
paged = (pager.num_pages > 1)
t = loader.get_template('bidding/blocks/search.html')
c = RequestContext(request, {'form': form,
'products' : products,
'pages': pager.page_range,
'paged': paged })
block_search = (t.render(c))
getvars = "&q=%s" % form.cleaned_data.get("q")
t = loader.get_template('paginator.html')
filter_params = {'q': form.cleaned_data.get("q", '')}
c = RequestContext(request, {'objects': products,
'getvars': getvars,
'filter_params': filter_params,
'pages': pager.page_range,
'paged': paged})
paginator = (t.render(c))
try:
page = DynamicPageContent.objects.filter(shop=request.shop, page="search").get()
description = striptags(page.meta_content)
except DynamicPageContent.DoesNotExist:
description = "No meta description found"
return HttpResponse(my_render(request, {'results': block_search,
'paginator': paginator,
'page_title': 'Search',
'page_description': description
}, 'search')) | cda6c4ebccec88c40ae714cd81392946165b184f | 3,656,854 |
def clean_code(code, code_type):
""" Returns the provided code string as a List of lines """
if code_type.startswith(BOOTSTRAP):
if code_type.endswith(CLEAN):
return code.split("\n")
code = code.replace("\\", "\\\\")
if code_type.startswith(PERMUTATION):
if code_type.endswith(CLEAN):
return code.split("\n")
if code_type.startswith(FRAGMENT):
if code_type.endswith(CLEAN):
return bytes(code, encoding="ascii").decode('unicode_escape')
code = code.replace("{", "{\\n").replace("}", "\\n}\\n").replace(";", ";\\n")
code = retab(bytes(code, encoding="ascii").decode('unicode_escape'))
return code.split("\n") | fff65103003a202a039fd4683da83735b0342a7a | 3,656,855 |
import numpy as np
def level(arr, l, ax=2, t=None, rounding=False):
    """
    As level1D, but accepts general arrays; the level is taken along a
    specified axis.
"""
return np.apply_along_axis(level1D, ax, arr, l, t, rounding) | 80835140850dbf7883f9b4cb1f92543ee2253845 | 3,656,856 |
def updateStore(request, storeId):
""" view for updating store """
if canViewThisStore(storeId, request.user.id):
# get the corresponding store
store = Store.objects.get(id=storeId)
metadata = getFBEOnboardingDetails(store.id)
if request.method == "POST":
# Create a form instance and populate it with data from the request (binding):
form = UpdateStoreForm(request.POST)
# Check if the form is valid:
if form.is_valid():
store.name = form.cleaned_data["business_name"]
store.save()
return redirect("viewStore", storeId)
form = UpdateStoreForm(initial={"business_name": store.name})
breadcrumbs = [(store.name, "viewStore", store.id)]
context = {
"form": form,
"store": store,
"fb_metadata": metadata,
"page_title": "Update Shop",
"breadcrumbs": breadcrumbs,
"button": "Update",
}
return render(request, "core/update.html", context)
else:
return render(request, "403.html") | 8cb92e31f2dc59a28281c45d698a7d810b573587 | 3,656,857 |
def i(t, T, r, a, b, c):
"""Chicago design storm equation - intensity. Uses ia and ib functions.
Args:
t: time in minutes from storm eginning
T: total storm duration in minutes
r: time to peak ratio (peak time divided by total duration)
a: IDF A parameter - can be calculated from getABC
b: IDF B parameter - can be calculated from getABC
c: IDF C parameter - can be calculated from getABC
Returns:
Returns intensity in mm/hr.
"""
if t < T*r:
return ib(T*r - t, r, a, b, c)
elif t > T*r:
return ia(t - T*r, r, a, b, c)
else:
# Should be infinity, but this does the job
return 1000 | 3d31d64502fc4590b1d83e0d3a32a5de9cae2a56 | 3,656,858 |
def get_primary_id_from_equivalent_ids(equivalent_ids, _type):
"""find primary id from equivalent id dict
params
------
equivalent_ids: a dictionary containing all equivalent ids of a bio-entity
_type: the type of the bio-entity
"""
if not equivalent_ids:
return None
id_rank = [('bts:' + _item) for _item in id_ranks.get(_type)]
# loop through id_rank, if the id is found in equivalent ids, return it
for _item in id_rank:
if equivalent_ids.get(_item):
return (_item[4:] + ':' + equivalent_ids[_item][0])
# if no id found, return a random one from equivalent ids
for k, v in equivalent_ids.items():
if v:
return (k[4:] + ':' + v[0]) | 489f9d431e553772f114f1e4a3a2577c831addda | 3,656,859 |
import numpy as np

def get_L_max_C(L_CS_x_t_i, L_CL_x_t_i):
    """Annual maximum of the daily total (sensible + latent) cooling load (MJ/d) (Eq. 20c)
    Args:
        L_CS_x_t_i(ndarray): Sensible cooling load of heating/cooling zone i at time t on date d (MJ/h)
        L_CL_x_t_i(ndarray): Latent cooling load of heating/cooling zone i at time t on date d (MJ/h)
    Returns:
        float: Annual maximum of the daily total cooling load (MJ/d)
    """
    # Sum over the heating/cooling zone axis (drop the zone dimension)
    L_CS_x_t = np.sum(L_CS_x_t_i, axis=0)
    L_CL_x_t = np.sum(L_CL_x_t_i, axis=0)
    # Add L_CS_x_t and L_CL_x_t element-wise to get the total load
    L_C_x_t = L_CS_x_t + L_CL_x_t
    # Reshape the 1-D hourly array into a 2-D (365 days, 24 hours) array
    L_C_x_t = np.reshape(L_C_x_t, (365, 24))
    # Sum over the time (hour) axis to get daily totals
    L_C_x = np.sum(L_C_x_t, axis=1)
    # Annual maximum of the daily total cooling load (MJ/d) (Eq. 20c)
    L_max_C = np.max(L_C_x)
return L_max_C | 2c5e769baacfec0d75e711b3493b07ab65796690 | 3,656,860 |
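# Usage sketch (editorial addition, not part of the original record): the hourly load
# arrays are assumed to have shape (number_of_zones, 8760).
import numpy as np
rng = np.random.default_rng(0)
example_L_CS = rng.uniform(0.0, 2.0, size=(5, 8760))
example_L_CL = rng.uniform(0.0, 1.0, size=(5, 8760))
print(get_L_max_C(example_L_CS, example_L_CL))  # annual maximum daily cooling load in MJ/d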
def complete_session(session: namedtuple, speeches: list) -> dict:
"""
    This will result in loss of data because content will be reduced to speeches.
HTML_classes, speaker_flow, speaker_role etc. will not be given any longer
since it's assumed that speakers are either members of parliament or
ministers.
Another important reduction is that speeches have been stripped of
annotations like applause or calls.
Updated keys in speeches:
date
protocol_no
agenda_item - topic
speaker
        party - the speaker's party if a member of parliament; the ministry if a minister
speech - complete speech; no hall action, no interruptions
Updated keys in session:
date
period
index
content - all speeches of a single session
Speeches are given as a list of complete sentences.
"""
reduced_data = {}
period = int(session.protocol_no.split('/')[0])
index = int(session.protocol_no.split('/')[-1])
reduced_data["date"] = session.date
reduced_data["period"] = period
reduced_data["index"] = index
reduced_data["content"] = speeches
return reduced_data | 185e1518e48252fcdc222aeddf8f2ba30884c93e | 3,656,861 |
import fileinput
import re
def replace_text_in_file(file_path, replace_this, for_that, case_insensitive=False, is_regex=False, keep_copy=False,
number_of_subs=0):
""" replace a string or regex (if is_regex is set) from a file given in file_path, with another string.
This is a replacement for sed if needed.
@param str file_path: path to the file to be changed
@param str replace_this: string or regex to match and replace
@param str for_that: string that will replace the match
@param bool case_insensitive: flag to indicate if case is important
@param bool is_regex: flag to indicate if replace_this is a regular expression or a plain string
@param bool keep_copy: flag to keep copy of original file or not. The original file will be timestamped
@param int number_of_subs: number of times to do the substitution. A zero means replace all
@rtype: tuple
"""
if not is_regex:
replace_this = re.escape(replace_this)
new_file_path = duplicate_file_with_stamp(file_path) if keep_copy else file_path
    for current_line in fileinput.input(file_path, inplace=True):
        current_line, num_subs_made = re.subn(replace_this, for_that, current_line,
                                              flags=(re.IGNORECASE if case_insensitive else 0), count=number_of_subs)
        # With inplace=True, whatever is printed to stdout replaces the file contents,
        # so the (possibly modified) line must be written back out.
        print(current_line, end="")
        number_of_subs = 0 if not number_of_subs else (number_of_subs - num_subs_made)
return file_path, new_file_path | 12c978759fd5a31bacb396d3068ab762b442dd27 | 3,656,862 |
import mmcv

def load_annotations(ann_file):
"""Load the annotation according to ann_file into video_infos."""
video_infos = []
anno_database = mmcv.load(ann_file)
for video_name in anno_database:
video_info = anno_database[video_name]
video_info['video_name'] = video_name
video_infos.append(video_info)
return video_infos | ac337917f313e5c695a5388c481e12787d7d78a0 | 3,656,863 |
import argparse
def commandline(args):
"""
Settings for the commandline arguments.
Returns the parsed arguments.
"""
parser = argparse.ArgumentParser(description='Checks the timestamps for files in a directory.')
parser.add_argument("-p", "--path", required=True,
help="Path to offline backup list file or directory")
parser.add_argument("-w", "--warning",
help="Threshold for warnings in days. Default: 2 Days")
parser.add_argument("-c", "--critical",
help="Threshold for criticals in days. Default: 5 Days")
parser.add_argument("-f", "--format",
help="Format of the date in the file. Default: Y-m-d")
parser.add_argument("-r", "--regex",
help="Regular Expression to extract date from file. Default: [0-9]{4}-[0-9]{2}-[0-9]{2}")
parser.add_argument("-v", "--verbose",
help="Increase output verbosity",
action="store_true")
parser.set_defaults(verbose=False,
critical=5,
warning=2)
return parser.parse_args(args) | f3c1726e0dfde2bce6cd3e62a2300abbace7900e | 3,656,864 |
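# Usage sketch (editorial addition): parse an explicit argument list instead of sys.argv,
# which is also how the function can be exercised in tests; the path is a placeholder.
example_args = commandline(["--path", "/var/backups/offline.list", "--warning", "3", "--verbose"])
print(example_args.path, example_args.warning, example_args.verbose)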
from typing import List
from typing import Dict
def seq_hist(seq_lens: List[int]) -> Dict[int, int]:
"""Returns a dict of sequence_length/count key/val pairs.
For each entry in the list of sequence lengths, tabulates
the frequency of appearance in the list and returns the
data as a dict. Useful for histogram operations on sequence
length.
"""
seq_count = {}
for slen in seq_lens:
if slen in seq_count:
seq_count[slen] += 1
else:
seq_count[slen] = 1
return seq_count | 5778b7566d1b64e8db0e2dce6bbf53e06cdb196d | 3,656,865 |
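# Usage sketch (editorial addition): tally how often each sequence length occurs.
print(seq_hist([3, 5, 3, 7, 5, 3]))  # {3: 3, 5: 2, 7: 1}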
import os
def make_ms_url( syndicate_host, syndicate_port, no_tls, urlpath="" ):
"""
Make a URL to the MS.
Return the URL.
"""
scheme = "https://"
default_port = 80
if no_tls:
default_port = 443
scheme = "http://"
if syndicate_port != default_port:
return scheme + os.path.join( syndicate_host.strip("/") + ":%s" % syndicate_port, urlpath )
else:
return scheme + os.path.join( syndicate_host.strip("/"), urlpath ) | 4aec60c48285a8e8f8d58b18ea29928e338fa1bc | 3,656,866 |
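# Usage sketch (editorial addition): hostname and path below are placeholders.
print(make_ms_url("ms.example.com", 443, False, "api/volumes"))   # https://ms.example.com/api/volumes
print(make_ms_url("ms.example.com", 8080, True, "api/volumes"))   # http://ms.example.com:8080/api/volumes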
from typing import List
def clifford_canonical_F(
pauli_layer: List[int], gamma: np.ndarray, delta: np.ndarray
) -> Circuit:
"""
Returns a Hadamard free Clifford circuit using the canonical form of elements of the Borel group
introduced in https://arxiv.org/abs/2003.09412. The canonical form has the structure O P CZ CX where
O is a pauli operator, P is a layer of sqrt(Z) gates, CZ is a layer of CZ gates, and CX is a layer of
CX gates. The inputs describe on which qubits the gates in these layers act.
    :param pauli_layer: Description of which Pauli gate should act on each qubit. This is an element of {0,1,2,3}^n
with 0 -> I, 1->X, 2->Y, 3->Z.
:type pauli_layer: List[int]
    :param gamma: Describes on which qubits CX acts. In particular the circuit contains CX_{i,j} if
        gamma[i][j]=1. The gates are ordered such that the control qubit index increases with time.
    :type gamma: List[List[int]]
    :param delta: Describes on which qubits CZ acts. In particular the circuit contains CX_{i,j} if
        delta[i][j]=1. The gates are ordered such that the control qubit index increases with time. The circuit includes S_i
        if delta[i][i]=1.
    :type delta: List[List[int]]
:return: A Hadamard free Clifford circuit.
:rtype: Circuit
"""
circ = Circuit(len(pauli_layer))
# Add layer of CX gates
for j in range(len(delta)):
for i in range(j):
if delta[i][j]:
circ.CX(i, j, opgroup="Clifford 2")
# Add layer of CZ gates
for j in range(len(gamma)):
for i in range(j):
if gamma[i][j]:
circ.CZ(i, j, opgroup="Clifford 2")
# Add layer of S gates
for i in range(len(gamma)):
if gamma[i][i]:
circ.S(i, opgroup="Clifford 1")
# Add Pauli gate
for i, gate in enumerate(pauli_layer):
if gate == 0:
circ.X(i, opgroup="Clifford 1")
elif gate == 1:
circ.Y(i, opgroup="Clifford 1")
elif gate == 2:
circ.Z(i, opgroup="Clifford 1")
return circ | 9818866b3196ccf9608f7ea8a17145bfa9ddb2d2 | 3,656,867 |
def calculate_second_moment_nondegenerate(
mu1: float, mu2: float, sigma1: float, sigma2: float, a: float, alpha: float
) -> float:
"""The second (raw) moment of a random variable :math:`\\min(Y_1, Y_2)`.
Args:
mu1: mean of the first Gaussian random variable :math:`Y_1`
mu2: mean of the second Gaussian random variable :math:`Y_2`
sigma1: standard deviation of the first Gaussian random variable :math:`Y_1`
sigma2: standard deviation of the second Gaussian random variable :math:`Y_2`
a: value of a(X1, X2)
alpha: value of alpha(X1, X2)
Note:
For a Gaussian variable, the relationship between the raw second moment, mean, and the standard deviation
(which is calculated using the *central* moment) is
.. math::
\\nu_2 = \\nu_1^2 + \\sigma^2
"""
# The first, second and third term
first = (mu1 ** 2 + sigma1 ** 2) * numeric.normal_cdf(alpha)
secnd = (mu2 ** 2 + sigma2 ** 2) * numeric.normal_cdf(-alpha)
third = (mu1 + mu2) * a * numeric.normal_pdf(alpha)
return first + secnd - third | 74869b0b461777ae9cce658829c2c90ff9a4adff | 3,656,868 |
from math import sqrt, cos, sin

# Quaternion component indices and a small tolerance; the original module-level
# constants are not included in this snippet, so these values are assumed.
X, Y, Z, W = 0, 1, 2, 3
Q_EPSILON = 1e-10
def q_make( x, y, z, angle):
"""q_make: make a quaternion given an axis and an angle (in radians)
notes:
- rotation is counter-clockwise when rotation axis vector is
pointing at you
- if angle or vector are 0, the identity quaternion is returned.
double x, y, z : axis of rotation
double angle : angle of rotation about axis in radians
"""
length=0
cosA=0
sinA=0
destQuat = [0.0,0.0,0.0,0.0]
#/* normalize vector */
length = sqrt( x*x + y*y + z*z )
#/* if zero vector passed in, just return identity quaternion */
if ( length < Q_EPSILON ) :
destQuat[X] = 0
destQuat[Y] = 0
destQuat[Z] = 0
destQuat[W] = 1
        return destQuat
x /= length
y /= length
z /= length
cosA = cos(angle / 2.0)
sinA = sin(angle / 2.0)
destQuat[W] = cosA
destQuat[X] = sinA * x
destQuat[Y] = sinA * y
destQuat[Z] = sinA * z
return destQuat | bd95a3d6f89599297a089d75882aa319e474a1b7 | 3,656,869 |
def create_mssql_pymssql(username, password, host, port, database, **kwargs): # pragma: no cover
"""
create an engine connected to a mssql database using pymssql.
"""
return create_engine(
_create_mssql_pymssql(username, password, host, port, database),
**kwargs
) | a4d644839879ae374ba091f1b9e79fd210c03e3e | 3,656,870 |
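# Usage sketch (editorial addition): credentials and host are placeholders, and the helper
# _create_mssql_pymssql that builds the connection URL is assumed to live in the same module.
engine = create_mssql_pymssql("dbuser", "secret", "localhost", 1433, "mydb", echo=True)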
def get_right_list_elements(result):
"""Some of the results are empty - therefore, the try-except.
Others are lists with more than one element and only specific
elements are relevant.
Args:
result (dict of lists): result of the xpath elements.
Returns:
dict of strs
"""
for key in ["title", "ort", "merkmale", "weitere_eigenschaften", "beschreibung"]:
try:
result[key] = result[key][0]
except:
pass
for key in ["preis", "anzahl_raeume", "wohnflaeche", "grundstuecksflaeche"]:
try:
result[key] = result[key][1]
except:
pass
return result | b81e80363f82dfe43878b3d8cb319f7129ebfc50 | 3,656,871 |
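# Usage sketch (editorial addition): a minimal stand-in for the xpath result dict,
# where each value is the raw list returned by the scraper.
example_result = {
    "title": ["Nice flat"], "ort": ["Berlin"], "merkmale": ["Balkon"],
    "weitere_eigenschaften": [], "beschreibung": ["Bright rooms"],
    "preis": ["Kaufpreis", "350.000 EUR"], "anzahl_raeume": ["Zimmer", "3"],
    "wohnflaeche": ["Wohnflaeche", "75 m2"], "grundstuecksflaeche": [],
}
print(get_right_list_elements(example_result)["preis"])  # 350.000 EUR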
def gen_pixloc(frame_shape, xgap=0, ygap=0, ysize=1., gen=True):
"""
Generate an array of physical pixel coordinates
Parameters
----------
    frame_shape : tuple
        shape of the uniformly illuminated and normalized flat field frame
xgap : int (optional)
ygap : int (optional)
ysize : float (optional)
gen : bool, optional
Only allows True right now
Returns
-------
locations : ndarray
A 3D array containing the x center, y center, x width and y width of each pixel.
The returned array has a shape: frame.shape + (4,)
"""
#dnum = settings.get_dnum(det)
msgs.info("Deriving physical pixel locations on the detector")
locations = np.zeros((frame_shape[0],frame_shape[1],4))
if gen:
msgs.info("Pixel gap in the dispersion direction = {0:4.3f}".format(xgap))
msgs.info("Pixel size in the dispersion direction = {0:4.3f}".format(1.0))
xs = np.arange(frame_shape[0]*1.0)*xgap
xt = 0.5 + np.arange(frame_shape[0]*1.0) + xs
msgs.info("Pixel gap in the spatial direction = {0:4.3f}".format(ygap))
msgs.info("Pixel size in the spatial direction = {0:4.3f}".format(ysize))
ys = np.arange(frame_shape[1])*ygap*ysize
yt = ysize*(0.5 + np.arange(frame_shape[1]*1.0)) + ys
xloc, yloc = np.meshgrid(xt, yt)
# xwid, ywid = np.meshgrid(xs,ys)
msgs.info("Saving pixel locations")
locations[:,:,0] = xloc.T
locations[:,:,1] = yloc.T
locations[:,:,2] = 1.0
locations[:,:,3] = ysize
else:
msgs.error("Have not yet included an algorithm to automatically generate pixel locations")
return locations | e09bb42cc0b003f6cedf5eed79ee65293aab13e2 | 3,656,872 |
def select(df: pd.DataFrame, time_key,
from_time='00-00-00 00', to_time='99-01-01 00'):
"""
:param df:
:param time_key:
:param from_time:
:param to_time:
:return:
:rtype: pandas.DataFrame
"""
select_index = (df[time_key] >= from_time) & (df[time_key] < to_time)
return df.loc[select_index, :].reset_index(drop=True) | e925c2543bfabf9091fae18d9dc47c01364e1df8 | 3,656,873 |
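# Usage sketch (editorial addition): time values are plain strings, so the comparisons
# follow the lexicographic 'YY-MM-DD HH' ordering implied by the default bounds.
import pandas as pd
example_df = pd.DataFrame({"time": ["21-03-01 00", "21-03-02 12", "21-04-01 00"],
                           "value": [1, 2, 3]})
print(select(example_df, "time", from_time="21-03-01 00", to_time="21-04-01 00"))  # first two rows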
def load_apogee_distances(dr=None, unit='distance', cuts=True, extinction=True, keepdims=False):
"""
Load apogee distances (absolute magnitude from stellar model)
:param dr: Apogee DR
:type dr: int
:param unit: which unit you want to get back
- "absmag" for absolute magnitude
- "fakemag" for fake magnitude
- "distance" for distance in parsec
:type unit: string
:param cuts: Whether to cut bad data (negative parallax and percentage error more than 20%), or a float to set the threshold
:type cuts: Union[boolean, float]
:param extinction: Whether to take extinction into account, only affect when unit is NOT 'distance'
:type extinction: bool
:param keepdims: Whether to preserve indices the same as APOGEE allstar DR14, no effect when cuts=False, set to -9999 for bad indices when cuts=True keepdims=True
:type keepdims: boolean
:return: numpy array of ra, dec, array, err_array
:rtype: ndarrays
:History:
| 2018-Jan-25 - Written - Henry Leung (University of Toronto)
| 2021-Jan-29 - Updated - Henry Leung (University of Toronto)
"""
fullfilename = apogee_distances(dr=dr)
with fits.open(fullfilename) as F:
hdulist = F[1].data
# Convert kpc to pc
distance = hdulist['BPG_dist50'] * 1000
dist_err = (hdulist['BPG_dist84'] - hdulist['BPG_dist16']) * 1000
allstarfullpath = allstar(dr=dr)
with fits.open(allstarfullpath) as F:
k_mag = F[1].data['K']
if extinction:
k_mag = extinction_correction(k_mag, F[1].data['AK_TARG'])
ra = F[1].data['RA']
dec = F[1].data['DEC']
# Bad index refers to nan index
bad_index = np.argwhere(np.isnan(distance))
if unit == 'distance':
# removed astropy units because of -9999. is dimensionless, will have issues
output = distance
output_err = dist_err
elif unit == 'absmag':
absmag, absmag_err = mag_to_absmag(k_mag, 1 / distance * u.arcsec, (1 / distance) * (dist_err / distance))
output = absmag
output_err = absmag_err
elif unit == 'fakemag':
# fakemag requires parallax (mas)
fakemag, fakemag_err = mag_to_fakemag(k_mag, 1000 / distance * u.mas, (1000 / distance) * (dist_err / distance))
output = fakemag
output_err = fakemag_err
else:
raise ValueError('Unknown unit')
# Set the nan index to -9999. as they are bad and unknown. Not magic_number as this is an APOGEE dataset
output[bad_index], output_err[bad_index] = -9999., -9999.
if cuts is False:
pass
else:
distance[bad_index], dist_err[bad_index] = -9999., -9999.
good_idx = ((dist_err / distance < (0.2 if cuts is True else cuts)) & (distance != -9999.))
if not keepdims:
ra = ra[good_idx]
dec = dec[good_idx]
output = output[good_idx]
output_err = output_err[good_idx]
else:
output[(dist_err / distance > (0.2 if cuts is True else cuts))] = -9999.
output_err[(dist_err / distance > (0.2 if cuts is True else cuts))] = -9999.
return ra, dec, output, output_err | 763d646c284cb056295ae57d7a7c9ced87964406 | 3,656,874 |
import json
import logging
from collections import defaultdict
def userstudy(config, data_train):
"""
Update the model based on feedback from user study.
- [config]: hyperparameters for model fine-tuning
- [data_train]: data pool to sample from
"""
def preprocess_data(doc, queries):
"""
Create a new field in [doc] called [antecedent_map] which processes
the user-labeled [antecedents]. Add all labeled spans to [queries].
in queries).
"""
ante_map = {}
for entry in doc['antecedents']:
span = tuple(entry[0])
if entry[1] == -1:
label = None
elif entry[1] == 0:
label = '0'
else:
label = [tuple(entry[1])]
ante_map[span] = label
doc['antecedent_map'] = ante_map
del doc['antecedents']
# update queries to know what has been queried
queries[doc['doc_key']] = list(ante_map.keys())
# return # spans labeled
return len(ante_map)
# preprocess antecedents and get queries
data_fp = config['userstudy'] / 'train_data.jsonl'
data = []
queries = defaultdict(list)
num_queries = 0
with open(data_fp, 'r') as f:
for line in f:
doc = json.loads(line)
# update doc and queries
n = preprocess_data(doc, queries)
num_queries += n
data.append(doc)
# finetune model on data
src_path = config['src_path']
logging.info(
f'Finetuning src model on {num_queries} queries from {len(data)} docs'
)
scores_dev, model = finetune_on_queries(config, data, config['userstudy'], src_path)
# test model
results_fp = config['userstudy'] / 'results_test.json'
scores_test = eval_scores(model, config, "test")
output_results(results_fp, config, 1, scores_test) | d3240142b55b202833364a9583b9c7c7e237bea2 | 3,656,875 |
import re
def clique_create(request):
"""
Creates a new grouping in the database (this integration must be stored in the db to be useful)
Arguments: /group-create "groupname" "@user1 @user2"
"""
requesting_user_id = request.POST.get('user_id')
args = re.findall(DOUBLE_QUOTE_ARG_REGEX, request.POST.get("text"))
# Check to see if everything looks right
if len(args) != 2:
return make_clique_group_error("Error in arguments (Double quotes are required!). Usage:\n"
"`/group-create \"groupName\" \"@user1 @user2\"")
if CliqueGroup.objects.filter(name=args[0]).count() > 0:
return make_clique_group_error("This group <{}> already exists!".format(args[0]))
# Move on to creating the group
raw_group_members = re.findall(SLACK_ID_REGEX, args[1])
group_users = []
for slack_id in raw_group_members:
try:
group_users.append(CliqueUser.objects.get(slack_id=slack_id))
except CliqueUser.DoesNotExist:
# This is the first time that we've seen this user
# we need to add them to the db
new_user = CliqueUser(slack_id=slack_id)
new_user.save()
group_users.append(new_user)
# Case where the owner is 1) new and 2) not in the group
try:
CliqueUser.objects.get(slack_id=requesting_user_id)
except CliqueUser.DoesNotExist:
# This is the first time that we've seen this user
# we need to add them to the db
CliqueUser(slack_id=requesting_user_id).save()
new_group = CliqueGroup(
creator=CliqueUser.objects.get(slack_id=requesting_user_id),
name=args[0]
)
new_group.save()
for clique_user in group_users:
new_group.members.add(clique_user)
new_group.save()
# Testing response string
resp_string = 'Group <{0}> has been created with users:'.format(args[0])
resp_string += ' '.join(format_user(user.slack_id) for user in new_group.members.all())
return JsonResponse({"replace_original": True, "text": resp_string}) | b990079ad0685b8c524dec65166da40f0e664ef7 | 3,656,876 |
import warnings
def cg_atoms(atoms, units, sites, scale, scaleValue, siteMap, keepSingleAtoms,
package):
"""
Get positions for atoms in the coarse-grained structure and the final
bond description. Returns a dictionary of the lattice, fractional
coordinates, and bonds. Also provides the option to scale the lattice.
Args
----
atoms: pymatgen.core.Structure
Pymatgen Structure object.
units: list
List of tuple(atomIndex, Image) for all atoms found in the building unit
so far in the algorithm.
sites: list
Specifying atoms in each site-type. One list per site-type. I.e. for
ZIF-8 (Zn(mIm)2) Zn is an A site, and the C, N, H (imidazolate ring)
are B sites, so you would pass:
scale: str
Scaling method to be used. Currently supported:
"min_xx": minimum bond length between any atoms.
"min_ab": minimum bond length between building units.
"avg_ab": average bond length between building units.
scaleValue: float
Length (Å) to scale the characteristic bond length (defined by
"scale") to.
siteMap: list
A list of atoms to map each building unit to. Should be of the same
length as the number of site-types. E.g. to map Zn(mIm)2 to a
coarse-grained structure,
siteMap = ["Si", "O"]
would map all A sites (Zn) to Si, and all B sites (mIm) to O. If
not set, will default to "Dummy Species" with labels DA, DB, DC, ...
Note if creating an ASE Atoms object, real atoms must be used, and
so siteMap *must* be set.
keepSingleAtoms: bool
If True, the chemical identity of the single atom building units
will be preserved. E.g. for BIF-1-Li ( [LiB(im)]4 ) where Li and B
are A sites, the final coarse-grained structure would keep the Li
and B atoms, but add dummy species for the imidazolate units.
package: str
"pymatgen" or "ase". If set, will return the Structure/Atoms object
of the specified package, respectively. As noted in siteMap, ASE
requires that real elements are set for the Atoms object.
"""
# Extract unit cell.
lattice = atoms.lattice.copy()
# Extract labels, positions, and images for each building unit.
l, p, _ = zip(*[(l,*u.frac_img) for l,u in units.items()])
# Extract bonds in format consistent with TopoCIF specification; i.e.
# node1_label, node2_label, distance, sym_op1, x1, y1, z1, sym_op2,
# x2, y2, z2, link_type, multiplicity. There will be a list of tuples,
# one tuple per unit, and the length of each tuple will be the number of
# bonds stored.
b = [u.unit_bonds for u in units.values()]
# Determine scaling type now, because can avoid calling next section
# twice to calculate the bond distances if it is "min_xx" scaling.
if scale is not None:
scale = scale.lower()
if scale == "min_xx":
# Get all distances (ignoring self-distances along diagonal).
d = lattice.get_all_distances(p,p)
np.fill_diagonal(d, 1000)
# Get scale factor and scale the lattice to the new volume.
sf = ( scaleValue / np.amin(d) )**3
lattice = lattice.scale(lattice.volume * sf)
elif scale in ["min_ab", "avg_ab"]:
# Get the bond distances from the formatted bonds.
_, d = format_bonds(lattice,l,p,b,return_lengths=True)
# Get scale factor and scale the lattice to new volume.
if scale == "min_ab":
sf = ( scaleValue / np.amin(d) )**3
elif scale == "avg_ab":
sf = ( scaleValue / np.mean(d) )**3
lattice = lattice.scale(lattice.volume * sf)
else:
warnings.warn(f"Scale method {scale} is not supported.")
# Get the final TopoCIF-formatted bonds.
b = format_bonds(lattice, l, p, b)
# The atomMap must provide a one-to-one mapping for every site-type
# in the structure.
assert len(siteMap) == len(sites), "Povide a one-to-one " + \
f"mapping of dummy-sites to atomic symbols " + \
f"({len(sites)} != {len(siteMap)})"
# Relabel each atom with a new symbol.
l, symbols, b = relabel(units, siteMap, keepSingleAtoms, b)
# Sort structure information into a dictionary.
s_info = { "lattice": lattice,
"symbols": symbols,
"labels": l,
"frac_coords": p,
"bonds": b }
# If package specified return either a Pymatgen Structure object, or an ASE
# atoms object.
s = py_structure(s_info["lattice"],s_info["symbols"],s_info["frac_coords"])
if package is not None and package.lower() == "ase":
s = AseAtomsAdaptor.get_atoms(s)
return s_info, s | 5cd2a6b8b0ce886b92912eb3dd8b14f0ea14f602 | 3,656,877 |
def read_tickers(video, ocr = None, debug = False, **kwargs):
"""
Reads news stories from sliding tickers on video.
Returns lists of dictionaries which contain:
text: news story text
start time: time when news story shows up
end time: time when news story disappears
Each list corresponds to one ticker.
"""
if debug:
print('Language: ', video.language)
if ocr is None:
ocr = TesseractOCR(**kwargs)
tickers, height, width = get_tickers_hw(video)
kwargs['height'] = height
kwargs['width'] = width
ocr._preprocesses['height'] = height
if debug:
print('tickers')
print(tickers)
stories = []
for ticker in tickers:
stories.append(read_ticker(video, ticker, ocr, **kwargs))
return stories | 9fc853d6af98c67434fcb0d18d09f42a15b7318c | 3,656,878 |
import numpy as np

def compute_msa_weights(msa, threshold=.8):
"""
msa (Bio.Align.MultipleSeqAlignment): alignment for which sequence frequency based weights are to be computed
threshold (float): sequence identity threshold for reweighting
NOTE that columns where both sequences have a gap will not be taken into account when computing identity
"""
weights = np.zeros(len(msa))
seq_identities = np.zeros((len(msa), len(msa)))
for i in range(len(msa)):
for j in range(i+1, len(msa)):
seq_identities[i, j] = _compute_sequence_identity(msa[i], msa[j])
seq_identities = seq_identities + np.diag(np.ones(len(msa)))
ms = np.sum(seq_identities>threshold, 1)
weights = 1./ms
return weights | 13663501f57eef204533795c2271276a99b4a403 | 3,656,879 |
def search_storefront(client, phrase):
"""Execute storefront search on client matching phrase."""
resp = client.get(reverse("search:search"), {"q": phrase})
return [prod for prod, _ in resp.context["results"].object_list] | 0509c39cd9adb0b4c6d0849c8b068dcb3455b807 | 3,656,880 |
def is_repo_in_config(config, repo, rev, hook_id):
"""Get if a repository is defined in a pre-commit configuration.
Parameters
----------
config : dict
Pre-commit configuration dictionary.
repo : str
Repository to search.
rev : str
Repository tag revision.
    hook_id : str
        Hook identifier.
Returns
-------
dict : Information about if the repository and the hook have been found.
"""
response = {"repo_found": False, "hook_found": False, "same_rev": False}
for repo_ in config["repos"]:
if repo_["repo"] == repo:
response["repo_found"] = True
response["hook_found"] = hook_id in [hook["id"] for hook in repo_["hooks"]]
response["same_rev"] = repo_["rev"] == rev
break
return response | 855315c50f4bfe53a4f9b7a5d392bb539e364617 | 3,656,881 |
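# Usage sketch (editorial addition): a minimal pre-commit configuration dict.
example_config = {
    "repos": [
        {
            "repo": "https://github.com/psf/black",
            "rev": "22.3.0",
            "hooks": [{"id": "black"}],
        }
    ]
}
print(is_repo_in_config(example_config, "https://github.com/psf/black", "22.3.0", "black"))
# {'repo_found': True, 'hook_found': True, 'same_rev': True}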
import transforms3d.quaternions

def mat33_to_quat(mat):
"""
Convert matrix to quaternion.
:param mat: 3x3 matrix
:return: list, quaternion [x, y, z, w]
"""
wxyz = transforms3d.quaternions.mat2quat(mat)
return [wxyz[1], wxyz[2], wxyz[3], wxyz[0]] | 1dcedf919674895a1ed647314c328525e4068dfe | 3,656,882 |
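# Usage sketch (editorial addition): the identity rotation maps to the quaternion
# [0, 0, 0, 1] in the [x, y, z, w] convention returned here.
import numpy as np
print(mat33_to_quat(np.eye(3)))  # [0.0, 0.0, 0.0, 1.0]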
def reshape(x, new_shape):
"""
Reshapes a tensor without changing its data.
Args:
x (Tensor): A tensor to be reshaped.
new_shape (Union[int, list(int), tuple(int)]): The new shape should be
compatible with the original shape. If the tuple has only one element,
the result will be a 1-D tensor of that length. One shape dimension
can be :math:`-1`. In this case, the value is inferred from the length of
the tensor and remaining dimensions.
Returns:
Reshaped Tensor. Has the same data type as the original tensor `x`.
Raises:
TypeError: If new_shape is not integer, list or tuple, or `x` is not tensor.
ValueError: If new_shape is not compatible with the original shape.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore.numpy as np
>>> x = np.asarray([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]])
>>> output = np.reshape(x, (3, 2))
>>> print(output)
[[-0.1 0.3]
[ 3.6 0.4]
[ 0.5 -3.2]]
>>> output = np.reshape(x, (3, -1))
>>> print(output)
[[-0.1 0.3]
[ 3.6 0.4]
[ 0.5 -3.2]]
>>> output = np.reshape(x, (6, ))
>>> print(output)
[-0.1 0.3 3.6 0.4 0.5 -3.2]
"""
_check_input_tensor(x)
return x.reshape(new_shape) | 583ffc9e40c328586ec9d21b7a73cbc610eb5c29 | 3,656,883 |
import numpy as np

def update_depth(depth_grid, elapsed_ts, depth_factor):
"""Just in time Update Depth for lake to pond
Parameters
----------
depth_grid: np.array like (float)
grid of current lake depths
elapsed_ts: float
number timesteps since start year
depth_factor: float
Returns
-------
np.array
updated depth grid
"""
new = np.zeros(depth_grid.shape)
for row in range(depth_grid.shape[0]):
        for col in range(depth_grid.shape[1]):  # columns; shape[0] here would only work for square grids
new[row,col] = \
depth_grid[row,col] + (np.sqrt(elapsed_ts) / depth_factor)
return new | e3fe2498421697ce584b385544a7501f54c02b85 | 3,656,884 |
def get_submissions(config, event_name, state='new'):
"""
Retrieve a list of submissions and their associated files
depending on their current status
Parameters
----------
config : dict
configuration
event_name : str
name of the RAMP event
state : str, optional
state of the requested submissions (default is 'new')
Returns
-------
List of tuples (int, List[str]) :
(submission_id, [path to submission files on the db])
Raises
------
ValueError :
        when mandatory connection parameters are missing from config
UnknownStateError :
when the requested state does not exist in the database
"""
if state not in STATES:
raise UnknownStateError("Unrecognized state : '{}'".format(state))
# Create database url
db_url = URL(**config)
db = create_engine(db_url)
# Create a configured "Session" class
Session = sessionmaker(db)
# Link the relational model to the database
Model.metadata.create_all(db)
    # Connect to the database and perform the action
with db.connect() as conn:
session = Session(bind=conn)
submissions = select_submissions_by_state(session, event_name, state)
if not submissions:
return []
subids = [submission.id for submission in submissions]
subfiles = [submission.files for submission in submissions]
filenames = [[f.path for f in files] for files in subfiles]
return list(zip(subids, filenames)) | e724c44b00db489f27c42acf7b21ba06a4ce0def | 3,656,885 |
def split_dataframe(df, size=10*1024*1024):
"""Splits huge dataframes(CSVs) into smaller segments of given size in bytes"""
# size of each row
row_size = df.memory_usage().sum() / len(df)
# maximum number of rows in each segment
row_limit = int(size // row_size)
# number of segments
seg_num = (len(df)+row_limit-1)//row_limit
# split df into segments
segments = [df.iloc[i*row_limit : (i+1)*row_limit] for i in range(seg_num)]
return segments | 46f34d388e6f596bfcf803b4569eb3015344bafb | 3,656,886 |
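# Usage sketch (editorial addition): split a small frame into roughly 1 KB segments.
import pandas as pd
example_df = pd.DataFrame({"a": range(1000), "b": range(1000)})
example_segments = split_dataframe(example_df, size=1024)
print(len(example_segments), len(example_segments[0]))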
from pathlib import Path
from typing import Optional
def convert_table_codes(input_filename: Path, output_filename: Path = None, column: str = 'countryCode',
namespace: Optional[str] = None, fuzzy:int = 0) -> Path:
"""
Adds a 'regionCode' column to the given table containing iso-3 country codes.
Parameters
----------
input_filename: Path
output_filename: Path
    column: str, default 'countryCode'
namespace: {'iso2', 'iso3', 'm49'}; default None
fuzzy: int; default 0
The score to use when fuzzy matching when above 0. If 0, the regular code search is used instead.
Returns
-------
path: Path
Location of the output table.
"""
table = load_table(input_filename)
if column not in table.columns:
message = "'{}' is not a valid column. Expected one of {}".format(column, list(table.columns))
raise ValueError(message)
old_values = table[column].values
if fuzzy:
new_values = [fuzzy_search(i,fuzzy) for i in old_values]
else:
new_values = [get_codes(i, namespace) for i in old_values]
new_values = [(v['iso3'] if v else v) for v in new_values]
table['regionCode'] = new_values
if output_filename is None:
output_filename = input_filename.with_suffix('.edited.tsv')
elif output_filename.is_dir():
output_filename = output_filename / input_filename.name
opath = save_table(table, output_filename)
return opath | 1bba37fda512cf13e99a08a0ab7ebfdf10a4e330 | 3,656,887 |
def allow_view(user):
"""Is the current user allowed to view the user account?
Yes, if current user is admin, staff or self.
"""
if not flask.g.current_user: return False
if flask.g.am_admin: return True
if flask.g.am_staff: return True
if flask.g.current_user['username'] == user['username']: return True
return False | 334e56796235e5bfab6f2220443c80fc5ff68a51 | 3,656,888 |
import datetime
import email.utils as eut
import time
def httptimestamp(inhttpdate):
"""
Return timestamp from RFC1123 (HTTP/1.1).
"""
dat = datetime.datetime(*eut.parsedate(inhttpdate)[:5])
return int(time.mktime(dat.timetuple())) | acfcdbea1a9d331b7623478841c6cd1d45fd45bf | 3,656,889 |
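# Usage sketch (editorial addition): convert an RFC 1123 date into a Unix timestamp
# (note that mktime interprets the parsed fields in local time).
print(httptimestamp("Sun, 06 Nov 1994 08:49:37 GMT"))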
import datetime
def calculate_duration(start_date, end_date=None):
""" Calculate how many years and months have passed between start and end dates """
# If end date not defined, use current date
if not end_date:
end_date = datetime.date.today()
years = end_date.year - start_date.year
months = end_date.month - start_date.month
if months < 0:
years = years - 1
months = months + 12
return years, months | d41c52d4d0274ce3b829b33f07c9730a34ab4cbd | 3,656,890 |
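# Usage sketch (editorial addition): whole years and remaining months between two dates.
print(calculate_duration(datetime.date(2019, 11, 1), datetime.date(2021, 2, 1)))  # (1, 3)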
import os
def GetSourceRoot(filename):
"""Try to determine the root of the package which contains |filename|.
The current heuristic attempts to determine the root of the Chromium source
tree by searching up the directory hierarchy until we find a directory
containing src/.gn.
"""
# If filename is not absolute, then we are going to assume that it is
# relative to the current directory.
if not os.path.isabs(filename):
filename = os.path.abspath(filename)
if not os.path.exists(filename):
raise NoSourceRootError('File not found: {}'.format(filename))
source_root = os.path.dirname(filename)
while True:
gnfile = os.path.join(source_root, 'src', '.gn')
if os.path.exists(gnfile):
return source_root
new_package_root = os.path.dirname(source_root)
if new_package_root == source_root:
raise NoSourceRootError("Can't determine package root")
source_root = new_package_root | cb1956998cef01138a74e3278df7c27651260f73 | 3,656,891 |
import typing
def actives(apikey: str) -> typing.List[typing.Dict]:
"""
Query FMP /actives/ API
:param apikey: Your API key.
:return: A list of dictionaries.
"""
path = f"actives"
query_vars = {"apikey": apikey}
return __return_json_v3(path=path, query_vars=query_vars) | de4eecc6f3006158407efd51d67b6a5ac40d2cfd | 3,656,892 |
def print_unicodeinfo(val: str, key: str) -> str:
"""
    Formats the occurrence count, the unicode character or guideline rule, and additional information
    :param val: count of the occurrences of key
    :param key: key (glyph or guideline rules)
    :return: the formatted string
"""
return f"{val:-{6}} {'{'}{repr(key) if controlcharacter_check(key) else key}{'}'}{addinfo(key)}" | 194bb1d03613e9708f8deea8c233d02dacd3e3b6 | 3,656,893 |
def qx_to_npx(df):
""" Return df with qx converted to npx.
"""
df = 1 - df
out = df.cumprod().shift()
for i in df.index:
out.loc[i, i] = 1
return out | 683a26f57dfb7ae1762df84f74186f0b88cb4688 | 3,656,894 |
def homepage(selenium, config):
"""Get homepage with selenium."""
selenium.get(config.BASE_URL)
selenium.set_window_size(config.WINDOW_WIDTH, config.WINDOW_HEIGHT)
custom_click_cookie_rollbar(selenium, config.MAX_WAIT_TIME)
return selenium | 39217a38ac09d41093070ed06803e36485f04e2b | 3,656,895 |
import torch
def _if_scalar_type_as(g, self, tensor):
"""
Convert self into the same type of tensor, as necessary.
We only support implicit casting for scalars, so we never
actually need to insert an ONNX cast operator here; just
fix up the scalar.
"""
if isinstance(self, torch._C.Value):
return self
elif tensor.type().kind() == "TensorType" or tensor.type().kind() == "CompleteTensorType":
ty = tensor.type().scalarType().lower()
return getattr(self, ty)()
else:
return self | 8e53bf67c8bbc78f142ffcb8027c0876eed951fe | 3,656,896 |
def read_images_text(path):
"""
see: src/base/reconstruction.cc
void Reconstruction::ReadImagesText(const std::string& path)
void Reconstruction::WriteImagesText(const std::string& path)
"""
images = {}
with open(path, "r") as fid:
while True:
line = fid.readline()
if not line:
break
line = line.strip()
if len(line) > 0 and line[0] != "#":
elems = line.split()
image_id = int(elems[0])
qvec = np.array(tuple(map(float, elems[1:5])))
tvec = np.array(tuple(map(float, elems[5:8])))
camera_id = int(elems[8])
image_name = elems[9]
elems = fid.readline().split()
xys = np.column_stack([tuple(map(float, elems[0::3])),
tuple(map(float, elems[1::3]))])
point3D_ids = np.array(tuple(map(int, elems[2::3])))
images[image_id] = Image(
id=image_id, qvec=qvec, tvec=tvec,
camera_id=camera_id, name=image_name,
xys=xys, point3D_ids=point3D_ids)
return images | 2aed7477e43bdcb73ad9eb866960b814278bbf0c | 3,656,897 |
def emailIsValid(email):
"""Return true if email is valid otherwise false"""
return EMAIL_RE.match(email) is not None | d9e28b68e31f1ab95c63aa80cd4a2a461cbac852 | 3,656,898 |
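# Usage sketch (editorial addition): EMAIL_RE is a module-level compiled pattern that is not
# part of this snippet; the simple pattern below is only an assumed stand-in for illustration.
import re
EMAIL_RE = re.compile(r"^[^@\s]+@[^@\s]+\.[^@\s]+$")
print(emailIsValid("user@example.com"))  # True
print(emailIsValid("not-an-email"))      # False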
def calculate_line_number(text):
"""Calculate line numbers in the text"""
return len([line for line in text.split("\n") if line.strip() != ""]) | f35533945203ec2f47a89e7072ddd9b172f5554b | 3,656,899 |
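# Usage sketch (editorial addition): blank and whitespace-only lines are not counted.
print(calculate_line_number("first\n\nsecond\n   \nthird"))  # 3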