content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M)
---|---|---|
def links_at_node(shape):
"""Get link ids for each node.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
Returns
-------
(N, 4) ndarray of int
Array of link ids.
Examples
--------
>>> from landlab.grid.structured_quad.links import links_at_node
>>> links_at_node((4, 3)) # doctest: +NORMALIZE_WHITESPACE
array([[ 0, 2, -1, -1], [ 1, 3, 0, -1], [-1, 4, 1, -1],
[ 5, 7, -1, 2], [ 6, 8, 5, 3], [-1, 9, 6, 4],
[10, 12, -1, 7], [11, 13, 10, 8], [-1, 14, 11, 9],
[15, -1, -1, 12], [16, -1, 15, 13], [-1, -1, 16, 14]])
"""
(south_links, west_links) = _node_in_link_ids(shape)
(north_links, east_links) = _node_out_link_ids(shape)
return (
np.vstack(
(east_links.flat, north_links.flat, west_links.flat, south_links.flat)
)
.transpose()
.copy()
) | 0f354530d5c6b415c886df25e1b15ba2477de8c9 | 3,656,900 |
def manage_addFancyContent(self, id, REQUEST=None):
"""Add the fancy fancy content."""
id = self._setObject(id, FancyContent(id))
return '' | 47efd8df7d0ccc12894729d142a09a8a53562ff5 | 3,656,901 |
def convert_sentences(sentences, tokenizer):
"""
Truncate each sentence to 512 bpes in order to fit on BERT and convert it to bpes.
:param tokenizer: The BERT tokenizer we used in order convert each sentence to ids.
:param sentences: The tokenized sentences of the summary we are processing.
:return: The ids of the summary sentences.
"""
sentences_ids = []
for i, sent in enumerate(sentences):
if len(sent) > 512:
sentences[i] = sentences[i][:511] + ['[SEP]']  # list.append returns None; concatenate instead
sentences_ids.append(tokenizer.convert_tokens_to_ids(sentences[i]))
return sentences_ids | 48cde2cba0af288bff9f49cb2ffc66dd22cfd952 | 3,656,902 |
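A minimal usage sketch for convert_sentences; the stub tokenizer below is hypothetical and only mimics the convert_tokens_to_ids method of a real BERT tokenizer.

# Hypothetical stub standing in for a BERT tokenizer (illustration only).
class _StubTokenizer:
    def convert_tokens_to_ids(self, tokens):
        return [hash(t) % 30522 for t in tokens]  # fake vocabulary ids

long_sent = ['[CLS]'] + ['word'] * 600 + ['[SEP]']
short_sent = ['[CLS]', 'hello', 'world', '[SEP]']
ids = convert_sentences([long_sent, short_sent], _StubTokenizer())
assert len(ids[0]) == 512  # truncated to 511 tokens plus a trailing '[SEP]'
assert len(ids[1]) == 4    # short sentences pass through unchanged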
from typing import Union
from typing import Tuple
def imscale(image: Imagelike, scale: Union[float, Tuple[float, float]],
**kwargs) -> np.ndarray:
"""Scale the given image. The result will be a new image
scaled by the specified scale.
"""
global _resizer
if _resizer is None:
_resizer = ImageResizer()
return _resizer.scale(image, scale, **kwargs) | 1c0949b445620febe1482ea4d32ae2dd4ac44e04 | 3,656,903 |
from typing import Union
def parse_tooltip(spell: Union[ChampionSpell, SummonerSpell], tooltip: str) -> str:
"""
Improved tooltip parser based on the built-in Cassiopeia `Spell.__replace_variables`
"""
for dto in spell._data.values():
try:
costs_burn = dto.costBurn
effects_burn = dto.effectBurn
break
except AttributeError:
pass
else:
costs_burn = effects_burn = "?"
tooltip = tooltip.replace("{{ cost }}", costs_burn)
for x, effect in enumerate(effects_burn):
tooltip = tooltip.replace(f"{{{{ e{x} }}}}", effect)
try:
variables = spell.variables
except:
# Bug in SummonerSpell.variables throws exception
# TODO: submit patch
variables = []
for var in variables:
if var.link in SPELL_SCALINGS:
vals = '/'.join(f'{coeff * 100:g}' for coeff in var.coefficients)
replacement = f"{vals}% {SPELL_SCALINGS[var.link]}"
elif var.link == "@player.level":
replacement = f"{var.coefficients[0]:g}-{var.coefficients[-1]:g} (based on level)"
elif var.link == "@text":
replacement = '/'.join(f'{coeff:g}' for coeff in var.coefficients)
elif var.link == "@stacks":
replacement = f"{spell.name} stacks"
elif var.link == "@special.viw":
replacement = f"1% per {'/'.join(f'{coeff:g}' for coeff in var.coefficients)} **Bonus** AD"
elif var.link in {"@special.jaxrarmor", "@special.jaxrmr", "@special.BraumWArmor", "@special.BraumWMR"}:
# idk why the spell tooltips even have these variables. the actual numbers are static inside the text...
replacement = "bonus"
elif var.link == "@special.nautilusq":
replacement = ""
else:
replacement = f"{var.coefficients} {var.link}"
tooltip = tooltip.replace(f"{{{{ {var.key} }}}}", replacement)
return tooltip | ed729e7ce47b393d64c9f83d6bc8cb8337dee0f7 | 3,656,904 |
def _packages_info() -> dict:
"""Return a dict with installed packages version"""
return Dependencies.installed_packages() | a4095968c7553aad017e97ab88322c616586961f | 3,656,905 |
import warnings
import os
import gzip
import pickle
import numpy as np
def _save_mnist_recreation_indices():
"""Code to find MNIST train, validation and test indices for recreation of
MNIST MAF dataset.
Note this should not be called directly. This is only here for reproducibility."""
warnings.warn('This function should generally not be called because it '
'requires special setup but is kept here in order to reproduce functions if '
'needed.')
# Import maf data
datasets_root = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'..', '..', 'maf', 'data',
)
mnist_path = os.path.join(datasets_root, 'mnist', 'mnist.pkl.gz')
with gzip.open(mnist_path, 'rb') as f:
maf_train, maf_val, maf_test = pickle.load(f)
# Import raw mnist data
data_obj = fetch_mldata('MNIST original') # , data_home=custom_data_home)
# Prepare comparison matrices
X_all = data_obj.data / 256.0
y_all = data_obj.target
maf_data_tuple = (maf_train[0], maf_val[0], maf_test[0])
n_maf = [X.shape[0] for X in maf_data_tuple]
X_maf = np.vstack(maf_data_tuple)
y_maf = np.concatenate((maf_train[1], maf_val[1], maf_test[1]))
# Sort maf using all columns
mnist_ind = np.lexsort(np.hstack((X_all, y_all.reshape(-1, 1))).T)
maf_ind = np.lexsort(np.hstack((X_maf, y_maf.reshape(-1, 1))).T)
rev_maf_ind = np.argsort(maf_ind)
# Show that matrices match when sorted by indices
print('Checking if the datasets are the same (should all be 0)')
def n_diff(X, Y):
"""
Parameters
----------
X :
Y :
Returns
-------
"""
return np.count_nonzero(X - Y)
def print_n_diff(X, Y):
"""
Parameters
----------
X :
Y :
"""
print('Number different = %d' % n_diff(X, Y))
print_n_diff(X_all[mnist_ind], X_maf[maf_ind])
print_n_diff(y_all[mnist_ind], y_maf[maf_ind])
# Retrieve indices and show that they are the same
train_idx, val_idx, test_idx = (
mnist_ind[
rev_maf_ind[np.sum(n_maf[:i], dtype=np.int):np.sum(n_maf[:(i + 1)], dtype=np.int)]]
for i in range(3)
)
for idx, maf in zip((train_idx, val_idx, test_idx), (maf_train, maf_val, maf_test)):
print_n_diff(X_all[idx], maf[0])
print_n_diff(y_all[idx], maf[1])
gzip_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'maf_mnist_splits.txt.gz'
)
with gzip.open(gzip_file, 'wt') as f:  # text mode so the str writes below work on Python 3
f.write('# Indices of MNIST dataset retrieved using '
'sklearn.datasets.fetch_mldata(\'MNIST original\') that correspond to the train, '
'validation and test sets of the MAF paper (one line each).\n')
for i, idx in enumerate([train_idx, val_idx, test_idx]):
s = str(idx.tolist())
s = s[1:-1] # Trim off ends
f.write(s)
if i < 2:
f.write('\n') | ff4956a22067fab3ef539c2460c6b7f779447714 | 3,656,906 |
def _first(root: TreeNode) -> TreeNode:
"""Return a first in "inorder" traversal order
of the `root` subtree
Args:
root (TreeNode): root of subtree
Returns:
TreeNode: first node in subtree
"""
if root.left is None:
return root
return _first(root.left) | 6464b7b920b32d3e3fd309eb1a7de26bd21a5710 | 3,656,907 |
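A small usage sketch; the TreeNode class here is a hypothetical stand-in for whatever node type the surrounding module defines.

class TreeNode:  # assumed minimal node shape: val/left/right
    def __init__(self, val, left=None, right=None):
        self.val, self.left, self.right = val, left, right

#       5
#      / \
#     3   8
#    /
#   1
root = TreeNode(5, TreeNode(3, TreeNode(1)), TreeNode(8))
assert _first(root).val == 1  # leftmost descendant comes first in inorder order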
def get_library_version() -> str:
"""
Returns the version of minecraft-launcher-lib
"""
return __version__ | aaa0703835cb00370bf30e96f2988f4c2e16bb51 | 3,656,908 |
import os
import tarfile
def download_voc_pascal(data_dir='../data'):
"""Download the Pascal VOC2012 Dataset."""
voc_dir = os.path.join(data_dir, 'VOCdevkit/VOC2012')
url = "http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar"
sha1 = '4e443f8a2eca6b1dac8a6c57641b67dd40621a49'
fname = gutils.download(url, data_dir, sha1_hash=sha1)
with tarfile.open(fname, 'r') as f:
f.extractall(data_dir)
return voc_dir | 04d14ac2d5260f2f05ac6223e6efcd2fa0940287 | 3,656,909 |
import cv2
import numpy as np
def load_image(image):
"""Reshape and convert an image to fit the model."""
img = cv2.imread(image) # load image from disk
img = cv2.resize(img, (257, 257), interpolation=cv2.INTER_LINEAR) # resize
img = (np.float32(img) - 127.5) / 127.5 # convert to float and normalize to [-1, 1]
img = img.reshape((1, 257, 257, 3)) # add batch dimension
return img | 642f1da152b7e852e46c57d4c2608e469ba7bddb | 3,656,910 |
import numpy as np
import plotly.graph_objects as go
def hist_trigger_time_diff(df_dev):
"""
Plot a histogram of the log of the time differences (in minutes) between device triggers.
"""
df = devices_trigger_time_diff(df_dev.copy())
fig = go.Figure()
trace = go.Histogram(x=np.log(df['row_duration'].dt.total_seconds()/60),
nbinsx=200,
)
fig.add_trace(trace)
return fig | 43ae70a2ff9a6b7f9927d91c88c2d540f7b8ca24 | 3,656,911 |
def verify_spec(spec_utid, proxy_utid):
"""
For a specific unit test id (utid) compares the spec with the proxy
"""
results=''
for key in spec_utid:
results += '%s: spec=%s, proxy=%s (%s) *** ' % (key,spec_utid[key],proxy_utid[key],(spec_utid.get(key)==proxy_utid.get(key)))
return results | b9854e23f0d88ed4f9abcc0c16236a2d543b9eb0 | 3,656,912 |
def lammps_created_gsd(job):
"""Check if the mdtraj has converted the production to a gsd trajectory for the job."""
return job.isfile("trajectory-npt.gsd") | a66c899a20e9602098150f46067d5505572232c2 | 3,656,913 |
from datetime import datetime, timedelta
def neo4j_data_age(data, max_data_age=None):
"""
Checks the noclook_last_seen property against datetime.datetime.now() and
if the difference is greater than max_data_age (hours)
(django_settings.NEO4J_MAX_DATA_AGE will be used if max_data_age is not specified)
and the noclook_auto_manage is true the data is said to be expired.
Returns noclook_last_seen as a datetime and a "expired" boolean.
"""
if not max_data_age:
max_data_age = django_settings.NEO4J_MAX_DATA_AGE
max_age = timedelta(hours=int(max_data_age))
now = datetime.now()
last_seen = isots_to_dt(data)
expired = False
if last_seen and (now-last_seen) > max_age and data.get('noclook_auto_manage', False):
expired = True
return last_seen, expired | 77f703f972b7b67ec5de48c9f8a0aceef3cd0646 | 3,656,914 |
import optparse
def ProfileOptions(parser):
"""Build option group for profiling chrome.
Args:
parser: OptionParser object for parsing the command-line.
Returns:
Option group that contains profiling chrome options.
"""
profile_options = optparse.OptionGroup(parser, 'Profile Chrome Options')
browsers = sorted(util.get_supported_browsers().keys())
profile_options.add_option('-b',
'--browser',
help='Select among installed browsers. '
'One of ' + ', '.join(browsers) +
'. "stable" is used by '
'default.',
type='choice',
choices=browsers,
default='stable')
profile_options.add_option('-t',
'--time',
help=('Stops tracing after N seconds. '
'Default is 5 seconds'),
default=5,
metavar='N',
type='int',
dest='trace_time')
profile_options.add_option('-e',
'--serial',
help='adb device serial number.',
type='string',
default=util.get_default_serial(),
dest='device_serial_number')
profile_options.add_option('-f',
'--trace_format',
help='Format of saved trace: proto, json, html.'
' Default is proto.',
default='proto',
dest='trace_format')
profile_options.add_option('-p',
'--platform',
help='Device platform. Only Android is supported.',
default='android',
dest='platform')
profile_options.add_option('--buf-size',
help='Use a trace buffer size '
' of N KB.',
type='int',
metavar='N',
dest='trace_buf_size')
profile_options.add_option(
'--enable_profiler',
help='Comma-separated string of '
'profiling options to use. Supports options for memory or '
'cpu or both. Ex: --enable_profiler=memory '
'or --enable_profiler=memory,cpu. ',
dest='enable_profiler')
profile_options.add_option('--chrome_categories',
help='Chrome tracing '
'categories to record.',
type='string',
default=_DEFAULT_CHROME_CATEGORIES)
profile_options.add_option(
'--skip_symbolize',
help='Skips symbolization after recording trace profile, if specified.',
action='store_true',
dest='skip_symbolize')
profile_options.add_option('--compress',
help='Compress the resulting trace '
'with gzip. ',
action='store_true')
# This is kept for backwards compatibility. Help is suppressed because this
# should be specified through the newer |trace_format| flag.
profile_options.add_option('--json',
help=optparse.SUPPRESS_HELP,
dest='write_json')
return profile_options | 57b41cf7a629b566aec995be2d6181357000fc1c | 3,656,915 |
def _clean_unicode(value):
"""Return the value as a unicode."""
if isinstance(value, str):
return value.decode('utf-8')
else:
return unicode(value) | be04bf30cecd7f25d0c39c05f6d5e6d995438c0b | 3,656,916 |
def deslugify_province(prov):
"""
Province slug to name, i.e. dashes to spaces and title case.
KZN is a special case.
"""
if prov == 'kwazulu-natal':
return 'KwaZulu-Natal'
return prov.replace('-', ' ').title() | 8e88ea7325c3b911495780b4437bc02784fbad82 | 3,656,917 |
def color_debug():
"""
Color for debug messages
"""
return read_config_color("COLOR", "debug", "grey") | 87eb9e867c34149b605ade2152dec0f9bf74e6c4 | 3,656,918 |
def replace_sym(data: str) -> str:
"""
Converts currency strings such as ``£5.00`` to ``5.00 GBP`` - or ``10 kr`` to ``10 SEK``
"""
origdata = data
data = data.strip()
for s, r in settings.CUR_SYMBOLS.items():
if data.startswith(s) or data.endswith(s):
log.debug(f"Replacing symbol {s!r} with {r!r}")
return f"{data.replace(s, '').strip()} {r}".strip()
if data.upper().startswith(s) or data.upper().endswith(s):
log.debug(f"Replacing symbol {s!r} with {r!r} (uppercase)")
return f"{data.upper().replace(s, '').strip()} {r}".strip()
return origdata | 75cad28158839618195f7c5b7eefeece9a59e001 | 3,656,919 |
import re
def parse_vectors(vectors):
""" Basic cleanup of vector or vectors
Strip out V from V#s. Similar to parse tables, this by no means guarantees
a valid entry, just helps with some standard input formats
Parameters
----------
vectors : list of str or str
A string or list of strings of vector names to be parsed
Returns
-------
list of str
vectors with unnecessary characters removed
"""
def parse_vector(vector):
"""Strip string to numeric elements only"""
if isinstance(vector, int): # Already parsed earlier
return vector
return int(re.sub(r'\D', '', vector))
if isinstance(vectors, str):
return [parse_vector(vectors)]
return [parse_vector(v) for v in vectors] | d2161e45bae51db21d7668ea6008ddb9ada16c4e | 3,656,920 |
import numpy as np
def sort_slopes(sds):
"""Sort slopes from bottom to top then right to left"""
sds = np.array(sds)
scores = sds[:, 0, 1] + sds[:, 1, 1] * 1e6
inds = np.argsort(scores)
return sds[inds] | 3bb62bf3be98176ae096bfe5f55b203173c3a425 | 3,656,921 |
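A usage sketch, assuming each slope is given as a pair of (x, y) endpoints so that sds[:, 0, 1] and sds[:, 1, 1] pick out the endpoint y-coordinates used in the score.

import numpy as np

slopes = [
    [[0, 50], [10, 60]],  # endpoints higher up (larger y)
    [[0, 10], [10, 20]],  # endpoints lower down (smaller y)
]
ordered = sort_slopes(slopes)
assert ordered[0][0][1] == 10  # the slope with the smaller y-score comes first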
import os
import json
from collections import defaultdict
def _get_mock_dataset(root_dir, base_dir_name):
"""
root_dir: directory to the mocked dataset
"""
base_dir = os.path.join(root_dir, base_dir_name)
os.makedirs(base_dir, exist_ok=True)
if base_dir_name == SQuAD1.__name__:
file_names = ("train-v1.1.json", "dev-v1.1.json")
else:
file_names = ("train-v2.0.json", "dev-v2.0.json")
mocked_data = defaultdict(list)
for file_name in file_names:
txt_file = os.path.join(base_dir, file_name)
with open(txt_file, "w", encoding="utf-8") as f:
mock_json_data = _get_mock_json_data()
f.write(json.dumps(mock_json_data))
split = "train" if "train" in file_name else "dev"
dataset_line = next(
iter(_ParseSQuADQAData([("file_handle", mock_json_data)]))
)
mocked_data[split].append(dataset_line)
return mocked_data | a34432f044c17d2ebb35445a1c1a081114a57058 | 3,656,922 |
def serialize_skycoord(o):
"""
Serializes an :obj:`astropy.coordinates.SkyCoord`, for JSONification.
Args:
o (:obj:`astropy.coordinates.SkyCoord`): :obj:`SkyCoord` to be serialized.
Returns:
A dictionary that can be passed to :obj:`json.dumps`.
"""
representation = o.representation.get_name()
frame = o.frame.name
r = o.represent_as('spherical')
d = dict(
_type='astropy.coordinates.SkyCoord',
frame=frame,
representation=representation,
lon=r.lon,
lat=r.lat)
if len(o.distance.unit.to_string()):
d['distance'] = r.distance
return d | 52830d9243cac36573c358f1579987eb43435892 | 3,656,923 |
def redis_sentinel(create_sentinel, sentinel, loop):
"""Returns Redis Sentinel client instance."""
redis_sentinel = loop.run_until_complete(
create_sentinel([sentinel.tcp_address], timeout=2, loop=loop))
assert loop.run_until_complete(redis_sentinel.ping()) == b'PONG'
return redis_sentinel | 3b779c9ef73e3bc5949afadbace34a9dcca1273a | 3,656,924 |
from typing import Tuple
from typing import Dict
def compute_features(
seq_path: str,
map_features_utils_instance: MapFeaturesUtils,
social_features_utils_instance: SocialFeaturesUtils,
) -> Tuple[np.ndarray, Dict[str, np.ndarray]]:
"""Compute social and map features for the sequence.
Args:
seq_path (str): file path for the sequence whose features are to be computed.
map_features_utils_instance: MapFeaturesUtils instance.
social_features_utils_instance: SocialFeaturesUtils instance.
Returns:
merged_features (numpy array): SEQ_LEN x NUM_FEATURES
map_feature_helpers (dict): Dictionary containing helpers for map features
"""
args = parse_arguments()
df = pd.read_csv(seq_path, dtype={"TIMESTAMP": str})
# Get social and map features for the agent
agent_track = df[df["OBJECT_TYPE"] == "AGENT"].values
# Social features are computed using only the observed trajectory
social_features = social_features_utils_instance.compute_social_features(
df, agent_track, args.obs_len, args.obs_len + args.pred_len,
RAW_DATA_FORMAT)
# agent_track will be used to compute n-t distances for future trajectory,
# using centerlines obtained from observed trajectory
map_features, map_feature_helpers = map_features_utils_instance.compute_map_features(
agent_track,
args.obs_len,
args.obs_len + args.pred_len,
RAW_DATA_FORMAT,
args.mode,
)
# Combine social and map features
# If track is of OBS_LEN (i.e., if it's in test mode), use agent_track of full SEQ_LEN,
# But keep (OBS_LEN+1) to (SEQ_LEN) indexes having None values
if agent_track.shape[0] == args.obs_len:
agent_track_seq = np.full(
(args.obs_len + args.pred_len, agent_track.shape[1]), None)
agent_track_seq[:args.obs_len] = agent_track
merged_features = np.concatenate(
(agent_track_seq, social_features, map_features), axis=1)
else:
merged_features = np.concatenate(
(agent_track, social_features, map_features), axis=1)
return merged_features, map_feature_helpers | bd8414b81bc3b1856773767d4f8db8897436ddf3 | 3,656,925 |
def summarizeTitlesByLength(titlesAlignments, limit=None):
"""
Sort match titles by sequence length.
@param titlesAlignments: A L{dark.titles.TitlesAlignments} instance.
@param limit: An C{int} limit on the number of results to show.
@return: An C{IPython.display.HTML} instance with match titles sorted by
sequence length.
"""
return _sortHTML(titlesAlignments, 'length', limit) | 31f9a358032018b51910148dfaa82d4deb08191f | 3,656,926 |
def _diff_tail(msg):
"""`msg` is an arbitrary length difference "path", which could
be coming from any part of the mapping hierarchy and ending in any kind of
selector tree. The last item is always the change message: add, replace,
delete <blah>. The next to last should always be a selector key of some kind.
Back up from there to find the first mapping tuple.
"""
tail = []
for part in msg[::-1]:
if isinstance(part, tuple) and len(part) == 2 and isinstance(part[0], str) and part[0].endswith("map"):
tail.append(part[1])
break
else:
tail.append(part)
return tuple(reversed(tail)) | 224a4ca5f73b1f147c27599b62f0540480e40a0d | 3,656,927 |
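A usage sketch with a made-up difference path; the ('rulemap', 'some_rule') tuple is hypothetical and only mimics the "*map" selector convention the docstring describes.

msg = [('rulemap', 'some_rule'), 'USEAFTER', '2001-01-01', "replace 'a' with 'b'"]
assert _diff_tail(msg) == ('some_rule', 'USEAFTER', '2001-01-01', "replace 'a' with 'b'")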
def select_standard_name(session, cluster, importance_table_name):
"""
Use cluster members for a WHERE ... IN (...) query
Use SQLAlchemy to handle the escaping
"""
stmt = session.query('name from %s' % importance_table_name) \
.filter(column('name').in_(list(cluster))) \
.order_by('"count" DESC') \
.limit(1)
rv = session.execute(stmt)
res = list(rv)
return res[0][0] | 173113f8abf6b675fefe7279cfa1e28579747085 | 3,656,928 |
def calculate_depth(experiment):
""" Calculate the minor, major, total depth
Args:
experiment (remixt.Experiment): experiment object
Returns:
pandas.DataFrame: read depth table with columns, 'major', 'minor', 'total', 'length'
"""
data = remixt.analysis.experiment.create_segment_table(experiment)
data['segment_length'] = data['end'] - data['start'] + 1
data['length_ratio'] = data['length'] / data['segment_length']
data['allele_readcount'] = data['minor_readcount'] + data['major_readcount']
data['high_quality'] = (
(data['length'] > np.percentile(data['length'].values, 10)) &
(data['allele_readcount'] > np.percentile(data['allele_readcount'].values, 10)) &
(data['length_ratio'] > np.percentile(data['length_ratio'].values, 10)))
phi = remixt.likelihood.estimate_phi(experiment.x)
p = remixt.likelihood.proportion_measureable_matrix(phi)
# Filter segments for which read depth calculation will be nan/inf
data = data[(data['length'] > 0) & np.all(p > 0, axis=1)]
data.rename(columns={
'major_depth': 'major',
'minor_depth': 'minor',
'total_depth': 'total',
}, inplace=True)
data = data[[
'chromosome',
'start',
'end',
'length',
'major',
'minor',
'total',
'high_quality',
]]
return data | d4db665eff37f6590a2362af8896db25b8ae758b | 3,656,929 |
import jax.numpy as np
from jax import random
def checkerboard(key, nsq, size, dtype=np.float32):
"""Create a checkerboard background image with random colors.
NOTE: only supports a single value for nsq (number squares).
Args:
key: JAX PRNGkey.
nsq (int): number of squares per side of the checkerboard.
size (int): size of one side of the checkerboard in pixels.
dtype: desired return data type.
Returns:
canvas (np.array): checkerboard background image.
"""
assert size % nsq == 0
sq = size // nsq
color1, color2 = random.uniform(key, (2, 3), dtype=dtype)
canvas = np.full((nsq, sq, nsq, sq, 3), color1, dtype=dtype)
canvas = canvas.at[::2, :, 1::2, :, :].set(color2)
canvas = canvas.at[1::2, :, ::2, :, :].set(color2)
return canvas.reshape(sq * nsq, sq * nsq, 3) | 4f6428450a05fcb92ba05e22e336d887860fb143 | 3,656,930 |
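A quick usage sketch (shape check only, since the two colors are drawn at random), assuming the jax.numpy / jax.random imports above.

import jax

key = jax.random.PRNGKey(0)
board = checkerboard(key, nsq=4, size=64)
assert board.shape == (64, 64, 3)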
import torch
def choice(x, a):
"""Generate a random sample from an array of given size."""
if torch.is_tensor(x):
return x[torch.randint(len(x), (a,))]
return x | af21321bcd12fe5f1a5eb59b8f0db14096899b5d | 3,656,931 |
def correct_gene_names(df):
""" Fix datetime entries in Gene names
"""
update_symbols = []
for i, gs in enumerate(df.Gene_Symbol):
if (not (isinstance(gs, str))) or (':' in gs):
update_symbols.append(mapping.get_name_from_uniprot(df.Uniprot_Id.iloc[i]))
else:
update_symbols.append(gs)
df.Gene_Symbol = update_symbols
return df | 5ca1aa1da60f238f9c377640b9f1a350658ea9d0 | 3,656,932 |
def process_repl_args(args):
""" Process PANDA replay-related arguments.
"""
assert False, 'Not implemented yet.'
cmd = []
cmd.extend(['-display', 'none'])
return cmd
# p_test "${panda_rr}-rr-snp" f "trace memory snapshot"
# p_test "${panda_rr}-rr-nondet.log" f "trace nondet log"
# -pandalog ${opts[-plog]} -replay $panda_rr | 660495454f3b04f76d9aa0447262cb3a8c06b543 | 3,656,933 |
def choose(n, k):
"""
A fast way to calculate binomial coefficients by Andrew Dalke (contrib).
"""
if 0 <= k <= n:
ntok = 1
ktok = 1
for t in range(1, min(k, n - k) + 1): # changed from xrange
ntok *= n
ktok *= t
n -= 1
return ntok // ktok
else:
return 0 | 22c8639b3e110673164faa1ea84d669d5f8816d4 | 3,656,934 |
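A quick sanity check against the standard library, for illustration (math.comb requires Python 3.8+).

import math

assert choose(10, 3) == math.comb(10, 3) == 120
assert choose(5, 7) == 0  # k outside [0, n] returns 0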
import pickle
def _get_ReaLiSe_dataset(which="15"):
"""
Load the preprocessed SIGHAN data released by ReaLiSe and wrap it as train/eval/test datasets.
"""
print("Loading ReaLiSe Dataset !")
print("Hint: The Data You loading now is the preprocessed sighan from ReaLise, ")
ddp_exec("os.system('date')")
path = "../SE_tmp_back/milestone/ReaLiSe/data/"
train_dataset = pickle.load(open(path + "trainall.times2.pkl", "rb"))
eval_dataset = pickle.load(open(path + "test.sighan" + which + ".pkl", "rb"))
test_dataset = pickle.load(open(path + "test.sighan" + which + ".pkl", "rb"))
print("Hint: Using **SIGHAN" + which + "** for eval & test !")
def trans2mydataset(features):
new = []
for feature in features:
tmp = {}
tmp["input_ids"] = feature["src_idx"][:128]
tmp["labels"] = feature["tgt_idx"][:128]
tmp["attention_mask"] = ([1] * len(tmp["input_ids"]))[:128]#feature["lengths"])[:128]
new.append(tmp)
return mydataset(new)
print("Loaded successfully !")
ddp_exec("os.system('date')")
print("over")
return trans2mydataset(train_dataset), trans2mydataset(eval_dataset), trans2mydataset(test_dataset) | 418f443fa3e2094b5288bcaf3490780632b2922c | 3,656,935 |
def generate_check_phrase() -> bytes:
""" Generate check-phrase for connecting of auxiliary socket.
:return: some array of ATOM_LENGTH bytes.
"""
return get_random_bytes(ATOM_LENGTH) | 9bcd270bd1f9c3a7943d4910c065cc9fdee02141 | 3,656,936 |
import pickle
def load_pickle(filename: str):
"""
Load a file from disk.
Parameters
----------
filename: str
Name of the file that is loaded.
Returns
-------
The unpickled object.
"""
return pickle.load(open(filename, 'rb')) | cae6710ba18664f244c55525c14a6bda0bea314d | 3,656,937 |
import os
def find_pssm_missing_proteins(fasta_dict, pssm_dir):
"""find_pssm_missing_proteins function finds the missing pssm files of the proteins in fasta file.
Args:
fasta_dict (dict): This is a dict of fasta file. The keys of fasta_dict are protein ids and
values are protein sequences.
pssm_dir (str): It is full path to the directory that contains pssm files.
Returns:
list: The list of proteins that does not have pssm file in pssm_dir
"""
set_missing_prots = set()
set_prots_pssm_exists = set()
for file in os.listdir(pssm_dir):
protein_id = file.split(".")[0]
set_prots_pssm_exists.add(protein_id)
for protein_id in set_prots_pssm_exists:
file = protein_id + ".pssm"
flag = False
sequence = ""
with open(pssm_dir+"/"+file, "r") as fp:
for line in fp:
list_line = line.strip().split()
if len(list_line) > 0:
if list_line[0] == '1':
flag = True
if len(list_line) == 0:
flag = False
if flag:
sequence += list_line[1]
if protein_id in fasta_dict:
if sequence != fasta_dict[protein_id]:
set_missing_prots.add(protein_id)
set_missing_prots = set_missing_prots.union(set(fasta_dict.keys()) - set_prots_pssm_exists)
return list(set_missing_prots) | d3ab3011216329ba7dc9a6d7449d930ea3e536c7 | 3,656,938 |
import os
def _format_echo(text):
"""Compose system echo command outputs text"""
quote = '' if os.name == 'nt' else '"'
return 'echo {}{}{}'.format(quote, text, quote) | 65dde7b473b618a957c3eddd4bb205df5d9cb674 | 3,656,939 |
def crop_to_reference(dataset: xr.Dataset, ref_dataset: xr.Dataset) -> xr.Dataset:
""" Crops horizontal coordinates to match reference dataset """
if "longitude" not in dataset.coords.keys():
raise ValueError("Longitude is not a coordinate of dataset.")
if "longitude" not in ref_dataset.coords.keys():
raise ValueError("Longitude is not a coordinate of reference dataset.")
if "latitude" not in dataset.coords.keys():
raise ValueError("Latitude is not a coordinate of dataset.")
if "latitude" not in ref_dataset.coords.keys():
raise ValueError("Latitude is not a coordinate of reference dataset.")
dataset = dataset.where(dataset.latitude == ref_dataset.latitude, drop=True)\
.where(dataset.longitude == ref_dataset.longitude, drop=True)
return dataset | c915ec99dca5cd33531c049447e23e380590b1af | 3,656,940 |
def parse_line(description, inline_comments=_INLINE_COMMENT_PREFIXES):
"""
Parse a line and correctly add the description(s) to a collection
"""
# manually strip out the comments
# py2 cannot ignore comments on a continuation line
# https://stackoverflow.com/q/9110428/1177288
#
# PY3 can do it for you with 'inline_comment_prefixes' = '#;'
if PY2:
for comment_prefix in inline_comments:
pos = description.find(comment_prefix)
if pos != -1:
# comment line or inline comment (after a space)
if pos == 0 or description[pos - 1].isspace():
description = description[:pos]
if not description:
return None
# there can be trailing commas if you copy from source code
descriptions = description.strip(',').split(',')
# strip all the spaces and quotes
descriptions = [desc.strip().strip("'").strip('"').strip()
for desc in descriptions]
return descriptions | 6fc58aef5b103ce429ed82378bce81a4550abb0f | 3,656,941 |
def target_frame():
"""Input frame."""
return 'IAU_ENCELADUS' | 8c57ab924a7b4471ac2f549493ebc176e853c652 | 3,656,942 |
def cards(cs):
"""Parse cards"""
cs = cs.split(' ')
result = np.zeros([len(valueL), len(colorL)], int)
for c in cs:
result[np.where(valueL == c[0])[0][0], np.where(colorL == c[1])[0][0]] = 1
return result | 9db7aa3ae9b42fb7b3fcd67371bca02b455fd8e4 | 3,656,943 |
def _get_max_diag_idx(m, n_A, n_B, diags, start, percentage):
"""
Determine the diag index for when the desired percentage of distances is computed
Parameters
----------
m : int
Window size
n_A : int
The length of the time series or sequence for which to compute the matrix
profile `T_A`
n_B : int
The length of the time series or sequence that contain your query subsequences
of interest `T_B`
diags : ndarray
The diag of diagonals to process and compute
start : int
The (inclusive) diag index from which to start
percentage : float
Approximate percentage completed. The value is between 0.0 and 1.0.
Returns
-------
max_diag_id : int
The diag index that corresponds to desired percentage of distances to compute
n_dist_computed : int
The number of distances computed
"""
max_n_dist = 0
for diag_idx in range(diags.shape[0]):
k = diags[diag_idx]
if k >= 0:
max_n_dist += min(n_A - m + 1 - k, n_B - m + 1)
else:
max_n_dist += min(n_A - m + 1, n_B - m + 1 + k)
n_dist_computed = 0
for diag_idx in range(start, diags.shape[0]):
k = diags[diag_idx]
if k >= 0:
n_dist_computed += min(n_A - m + 1 - k, n_B - m + 1)
else:
n_dist_computed += min(n_A - m + 1, n_B - m + 1 + k)
if n_dist_computed / max_n_dist > percentage: # pragma: no cover
break
max_diag_idx = diag_idx + 1
return max_diag_idx, n_dist_computed | b6f86ee110ae4fa16638f86f2dcf324e7ebfb674 | 3,656,944 |
def get_argument_values(arg_defs, arg_asts, variables):
"""Prepares an object map of argument values given a list of argument
definitions and list of argument AST nodes."""
if arg_asts:
arg_ast_map = {arg.name.value: arg for arg in arg_asts}
else:
arg_ast_map = {}
result = {}
for arg_def in arg_defs:
name = arg_def.name
value_ast = arg_ast_map.get(name)
if value_ast:
value_ast = value_ast.value
value = value_from_ast(
value_ast,
arg_def.type,
variables
)
if value is None:
value = arg_def.default_value
if value is not None:
result[name] = value
return result | 0bad38e7155d04ac297e2112b8f9b70e5fcc18a0 | 3,656,945 |
def get_identifier(positioner_id, command_id, uid=0, response_code=0):
"""Returns a 29 bits identifier with the correct format.
The CAN identifier format for the positioners uses an extended frame with
29-bit encoding so that the 11 higher bits correspond to the positioner
ID, the 8 middle bits are the command number, the following 6 bits are the
unique identifier, and the 4 lower bits are the response code.
Parameters
----------
positioner_id : int
The Id of the positioner to command, or zero for broadcast.
command_id : int
The ID of the command to send.
uid : int
The unique identifier
response_code : int
The response code.
Returns
-------
identifier : `int`
The decimal integer corresponding to the 29-bit identifier.
Examples
--------
::
>>> get_identifier(5, 17, uid=5)
1328128
>>> bin(1328128)
'0b101000100010000000000'
"""
posid_bin = format(positioner_id, "011b")
cid_bin = format(command_id, "08b")
cuid_bin = format(uid, "06b")
response_bin = format(int(response_code), "04b")
identifier = posid_bin + cid_bin + cuid_bin + response_bin
assert len(identifier) == 29
return int(identifier, 2) | 57a1ce7004186e8c1c88c06665311e71010705c4 | 3,656,946 |
def standardized(array):
"""Normalize the values in an array.
Arguments:
array (np.ndarray): Array of values to normalize.
Returns:
array with zero mean and unit standard deviation.
"""
return (array - array.mean()) / max(1e-4, array.std()) | 1764dfd1e4e173d2ca081edeb8b7165a79d63b7d | 3,656,947 |
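A short illustration with numpy.

import numpy as np

out = standardized(np.array([1.0, 2.0, 3.0, 4.0]))
assert abs(out.mean()) < 1e-9 and abs(out.std() - 1.0) < 1e-6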
import json
def newaddress(fn,passphrase,addr_type=0):
"""
getnetaddress
"""
wallet = Wallet(fn).fromFile(passphrase)
# Address Types
# addr_type == 0, deposit
# addr_type == 1, change
# addr_type == 2, staking
# addr_type == 3, Dealer
# Address types aren't programmatically important, but help to organize
if addr_type is None:
addr_type = 0
k = wallet.create_address(save=True,addr_type=addr_type)
d = { "new_address" : (k.address_type(),k.address(),k.address(True)) }
return json.dumps(d, sort_keys=True, indent=4) | 8afca8b83ea8464d3aeb02f5d2e406d2f5bebc53 | 3,656,948 |
import logging
def index(args):
"""Handles the index step of the program."""
if not args.index: # build index
logging.info(" Building index...")
index_list = generate_index(args.input_dir)
if not index_list: # list is empty
logging.error(" Empty index. Exiting...")
return
logging.info(" Index built!")
if not args.no_index: # save index
np.save(args.dump_index, index_list)
logging.info(" Index saved as: {}".format(args.dump_index))
return index_list
else: # load index from file
index_list = load_index(args.index)
return index_list | 5e8e37d387612eb81984c7bff48e747780475f78 | 3,656,949 |
import cv2
import torch
def setup_harness(bsize=16, workers=0):
"""
CommandLine:
python ~/code/netharn/netharn/examples/yolo_voc.py setup_harness
Example:
>>> # DISABLE_DOCTSET
>>> harn = setup_harness()
>>> harn.initialize()
"""
xpu = nh.XPU.cast('argv')
nice = ub.argval('--nice', default='Yolo2Baseline')
batch_size = int(ub.argval('--batch_size', default=bsize))
bstep = int(ub.argval('--bstep', 4))
workers = int(ub.argval('--workers', default=workers))
decay = float(ub.argval('--decay', default=0.0005))
lr = float(ub.argval('--lr', default=0.001))
ovthresh = 0.5
# We will divide the learning rate by the simulated batch size
datasets = {
'train': YoloVOCDataset(years=[2007, 2012], split='trainval'),
'test': YoloVOCDataset(years=[2007], split='test'),
}
loaders = {
key: dset.make_loader(batch_size=batch_size, num_workers=workers,
shuffle=(key == 'train'), pin_memory=True)
for key, dset in datasets.items()
}
if workers > 0:
cv2.setNumThreads(0)
simulated_bsize = bstep * batch_size
hyper = nh.HyperParams(**{
'nice': nice,
'workdir': ub.truepath('~/work/voc_yolo2'),
'datasets': datasets,
# 'xpu': 'distributed(todo: fancy network stuff)',
# 'xpu': 'cpu',
# 'xpu': 'gpu:0,1,2,3',
'xpu': xpu,
# a single dict is applied to all datset loaders
'loaders': loaders,
'model': (light_yolo.Yolo, {
'num_classes': datasets['train'].num_classes,
'anchors': datasets['train'].anchors,
'conf_thresh': 0.001,
# 'nms_thresh': 0.5, # reproduce original yolo
'nms_thresh': 0.4, # reproduce lightnet
}),
'criterion': (light_region_loss.RegionLoss, {
'num_classes': datasets['train'].num_classes,
'anchors': datasets['train'].anchors,
'object_scale': 5.0,
'noobject_scale': 1.0,
'class_scale': 1.0,
'coord_scale': 1.0,
'thresh': 0.6, # iou_thresh
}),
'initializer': (nh.initializers.Pretrained, {
# 'fpath': light_yolo.demo_voc_weights(),
'fpath': light_yolo.initial_imagenet_weights(),
}),
'optimizer': (torch.optim.SGD, {
'lr': lr / 10,
'momentum': 0.9,
'dampening': 0,
# multiplying by batch size was one of those unpublished details
'weight_decay': decay * simulated_bsize,
}),
# Pascal 2007 + 2012 trainval has 16551 images
# Pascal 2007 test has 4952 images
# In the original YOLO, one batch is 64 images,
# so one epoch is 16551 / 64 = 259 iterations.
#
# From the original YOLO VOC v2 config
# https://github.com/pjreddie/darknet/blob/master/cfg/yolov2-voc.cfg
# learning_rate=0.001
# burn_in=1000
# max_batches = 80200
# policy=steps
# steps=40000,60000
# scales=.1,.1
#
# However, the LIGHTNET values are
# LR_STEPS = [250, 25000, 35000]
#
# Based in this, the iter to batch conversion is
#
# ((np.array([250, 25000, 35000, 1000, 40000, 60000, 80200]) / 256) + 1).astype(np.int)
# array([ 1, 98, 137, 4, 157, 235, 314])
'scheduler': (nh.schedulers.ListedLR, {
'points': {
# dividing by batch size was one of those unpublished details
# 0: lr * 0.1 / simulated_bsize, # burnin
# 4: lr * 1.0 / simulated_bsize,
# 157: lr * 0.1 / simulated_bsize,
# 235: lr * 0.001 / simulated_bsize,
0: lr * 0.1 / simulated_bsize,
1: lr * 1.0 / simulated_bsize,
60: lr * 0.1 / simulated_bsize,
90: lr * 0.001 / simulated_bsize,
},
'interpolate': False
}),
'monitor': (nh.Monitor, {
'minimize': ['loss'],
'maximize': ['mAP'],
'patience': 314,
'max_epoch': 314,
}),
'augment': datasets['train'].augmenter,
'dynamics': {
# Controls how many batches to process before taking a step in the
# gradient direction. Effectively simulates a batch_size that is
# `bstep` times bigger.
'batch_step': bstep,
},
'other': {
# Other params are not used internally, so you are free to set any
# extra params specific to your algorithm, and still have them
# logged in the hyperparam structure. For YOLO this is `ovthresh`.
'batch_size': batch_size,
'nice': nice,
'ovthresh': ovthresh, # used in mAP computation
'input_range': 'norm01',
},
})
harn = YoloHarn(hyper=hyper)
harn.config['use_tqdm'] = False
harn.intervals['log_iter_train'] = 1
harn.intervals['log_iter_test'] = None
harn.intervals['log_iter_vali'] = None
return harn | 7ea7841646e4be1f4d776e78fa4e8a7d5e1117c3 | 3,656,950 |
def _output_object_or_file_map_configurator(prerequisites, args):
"""Adds the output file map or single object file to the command line."""
return _output_or_file_map(
output_file_map = prerequisites.output_file_map,
outputs = prerequisites.object_files,
args = args,
) | 7d362be5d6478764810ae3a9013ce1cb807efde3 | 3,656,951 |
def get_file_name():
"""This function asl the user for file and returns it"""
f_name = input('Input your file name: ')
return f_name | 5d3e524ebe423410f721afb070bfba9d804ed19f | 3,656,952 |
import six
import subprocess
def GetMinikubeVersion():
"""Returns the current version of minikube."""
return six.ensure_text(subprocess.check_output([_FindMinikube(), 'version'])) | bee4129bb9d63aa1aad39451290df136977027be | 3,656,953 |
import itertools
def minimum_distance(geo1, geo2):
""" get the minimum distance between atoms in geo1 and those in geo2
"""
xyzs1 = coordinates(geo1)
xyzs2 = coordinates(geo2)
return min(cart.vec.distance(xyz1, xyz2)
for xyz1, xyz2 in itertools.product(xyzs1, xyzs2)) | ce6493d7e12bd3f48db209a01fe85eb4305835d0 | 3,656,954 |
def prepare():
"""
Get the list of filtered tweets by target entity where each item contains the tweet
with its original attributes when downloaded from Twitter
:return:
"""
path = '../../Data.json'
List = loadData(path) # load data
tweets = [List[i]['text'] for i in range(len(List))] # store the text of each tweet in a list
tweets = [process(item, False) for item in tweets] # get the list of processed tweets
filtered_tweets = tweetsEntitiesMapping(tweets) # filter tweets by target entity
ids_list = filtered_tweets[3] # get the list of ids of the filtered tweets in the original list
count = 0
list_tweets = [] # store the filtered tweet objects
for item in List:
if count in ids_list:
list_tweets.append(item)
count = count + 1
return list_tweets | 0707993267bd6e76d432b08e947582f8a151f591 | 3,656,955 |
from typing import Dict
from typing import List
import os
import sys
import re
from itertools import starmap
def get_console_script_specs(console: Dict[str, str]) -> List[str]:
"""
Given the mapping from entrypoint name to callable, return the relevant
console script specs.
"""
# Don't mutate caller's version
console = console.copy()
scripts_to_generate = []
# Special case pip and setuptools to generate versioned wrappers
#
# The issue is that some projects (specifically, pip and setuptools) use
# code in setup.py to create "versioned" entry points - pip2.7 on Python
# 2.7, pip3.3 on Python 3.3, etc. But these entry points are baked into
# the wheel metadata at build time, and so if the wheel is installed with
# a *different* version of Python the entry points will be wrong. The
# correct fix for this is to enhance the metadata to be able to describe
# such versioned entry points, but that won't happen till Metadata 2.0 is
# available.
# In the meantime, projects using versioned entry points will either have
# incorrect versioned entry points, or they will not be able to distribute
# "universal" wheels (i.e., they will need a wheel per Python version).
#
# Because setuptools and pip are bundled with _ensurepip and virtualenv,
# we need to use universal wheels. So, as a stopgap until Metadata 2.0, we
# override the versioned entry points in the wheel and generate the
# correct ones. This code is purely a short-term measure until Metadata 2.0
# is available.
#
# To add the level of hack in this section of code, in order to support
# ensurepip this code will look for an ``ENSUREPIP_OPTIONS`` environment
# variable which will control which version scripts get installed.
#
# ENSUREPIP_OPTIONS=altinstall
# - Only pipX.Y and easy_install-X.Y will be generated and installed
# ENSUREPIP_OPTIONS=install
# - pipX.Y, pipX, easy_install-X.Y will be generated and installed. Note
# that this option is technically if ENSUREPIP_OPTIONS is set and is
# not altinstall
# DEFAULT
# - The default behavior is to install pip, pipX, pipX.Y, easy_install
# and easy_install-X.Y.
pip_script = console.pop("pip", None)
if pip_script:
if "ENSUREPIP_OPTIONS" not in os.environ:
scripts_to_generate.append("pip = " + pip_script)
if os.environ.get("ENSUREPIP_OPTIONS", "") != "altinstall":
scripts_to_generate.append(
"pip{} = {}".format(sys.version_info[0], pip_script)
)
scripts_to_generate.append(f"pip{get_major_minor_version()} = {pip_script}")
# Delete any other versioned pip entry points
pip_ep = [k for k in console if re.match(r"pip(\d(\.\d)?)?$", k)]
for k in pip_ep:
del console[k]
easy_install_script = console.pop("easy_install", None)
if easy_install_script:
if "ENSUREPIP_OPTIONS" not in os.environ:
scripts_to_generate.append("easy_install = " + easy_install_script)
scripts_to_generate.append(
"easy_install-{} = {}".format(
get_major_minor_version(), easy_install_script
)
)
# Delete any other versioned easy_install entry points
easy_install_ep = [
k for k in console if re.match(r"easy_install(-\d\.\d)?$", k)
]
for k in easy_install_ep:
del console[k]
# Generate the console entry points specified in the wheel
scripts_to_generate.extend(starmap("{} = {}".format, console.items()))
return scripts_to_generate | 80d4c71f87ff1af9762c2a8a0bdf99c1efb8a3d7 | 3,656,956 |
import requests
def deletecall(bam_url,api_call,call_parameters,delete_entity,header):
"""API request to delete and return values"""
call_url = "http://"+bam_url+"/Services/REST/v1/"+api_call+"?"
print("You are requesting to delete:")
print(delete_entity)
answer = input("Do you want to proceed (y (yes) or n (no))? ")
try:
if answer.lower() == "y":
response = requests.delete(call_url,params=call_parameters, headers=header)
return response.json()
elif answer.lower() == "n":
return "You aborted deletion"
else:
return "You entered an invalid character"
except requests.exceptions.RequestException as e:
print(e) | f6cffd225b9dd8d4d387b472d5ef522e2a48d738 | 3,656,957 |
def haDecFromAzAlt (azAlt, lat):
"""Converts alt/az position to ha/dec position.
Inputs:
- azAlt (az, alt) (deg)
- lat latitude (degrees);
>0 is north of the equator, <0 is south
Returns a tuple containing:
- haDec (HA, Dec) (deg), a tuple;
HA is in the range (-180, 180]
- atPole true => object near the pole (see Error Conditions)
Error Conditions:
- If converted position is too near the north or south pole,
atPole is set true and HA is some arbitrary value.
Details:
Sign conventions:
- azimuth is 0 south and 90 east
- ha/dec is the usual left-handed coordinate system
History:
3/01 ROwen Converted to Python from TCC's sph_AzAlt2HADec 1-2.
2/02 ROwen Minor tweaks to header.
2002-07-02 ROwen Renamed from azAltToHADec.
2003-05-06 ROwen Changed HA range from [0, 360) to (-180, 180]
"""
# convert spherical az/alt (deg) to direction cosines
azAltDC = dcFromSC (azAlt)
# convert az/alt direction cosines to -ha/dec direction cosines
negHADecDC = Cnv.haDecFromAzAlt (azAltDC, lat)
# convert -ha/dec direction cosines to spherical -ha/dec (deg)
((negHA, dec), atPole) = scFromDC (negHADecDC)
return ((opscore.RO.MathUtil.wrapCtr(-negHA), dec), atPole) | 9387d6771dd3fd4754a874141679902954adbecf | 3,656,958 |
def get_description(expression, options=None):
"""Generates a human readable string for the Cron Expression
Args:
expression: The cron expression string
options: Options to control the output description
Returns:
The cron expression description
"""
descripter = ExpressionDescriptor(expression, options)
return descripter.get_description(DescriptionTypeEnum.FULL) | b52bb4bda67074e5b9270f33f68892e371234dc4 | 3,656,959 |
import os
from distutils.spawn import find_executable
def check_for_firefox():
""" Determine if Firefox is available. """
if os.path.exists('/Applications/Firefox.app/Contents/MacOS/firefox'):
return True
for exe in ('firefox',):
if find_executable(exe):
return True
return False | cf193934b6adafd2bfba44f06848f9d9ca6bbda0 | 3,656,960 |
def midpoint(close, length=None, offset=None, **kwargs):
"""Indicator: Midpoint"""
# Validate arguments
close = verify_series(close)
length = int(length) if length and length > 0 else 1
min_periods = int(kwargs['min_periods']) if 'min_periods' in kwargs and kwargs['min_periods'] is not None else length
offset = get_offset(offset)
# Calculate Result
lowest = close.rolling(length, min_periods=min_periods).min()
highest = close.rolling(length, min_periods=min_periods).max()
midpoint = 0.5 * (lowest + highest)
# Offset
if offset != 0:
midpoint = midpoint.shift(offset)
# Handle fills
if 'fillna' in kwargs:
midpoint.fillna(kwargs['fillna'], inplace=True)
if 'fill_method' in kwargs:
midpoint.fillna(method=kwargs['fill_method'], inplace=True)
# Name and Categorize it
midpoint.name = f"MIDPOINT_{length}"
midpoint.category = 'overlap'
return midpoint | 3b14546715bec61dfd73a70d4a83042366c1ef08 | 3,656,961 |
from ..distributions.baseclass import Dist
import numpy
def quad_fejer(order, domain=(0, 1), growth=False, segments=1):
"""
Generate the quadrature abscissas and weights in Fejer quadrature.
Args:
order (int, numpy.ndarray):
Quadrature order.
domain (chaospy.distributions.baseclass.Dist, numpy.ndarray):
Either distribution or bounding of interval to integrate over.
growth (bool):
If True sets the growth rule for the quadrature rule to only
include orders that enhances nested samples.
segments (int):
Split intervals into N subintervals and create a patched
quadrature based on the segmented quadrature. Can not be lower than
`order`. If 0 is provided, default to square root of `order`.
Nested samples only exist when the number of segments are fixed.
Returns:
(numpy.ndarray, numpy.ndarray):
abscissas:
The quadrature points for where to evaluate the model function
with ``abscissas.shape == (len(dist), N)`` where ``N`` is the
number of samples.
weights:
The quadrature weights with ``weights.shape == (N,)``.
Example:
>>> abscissas, weights = quad_fejer(3, (0, 1))
>>> abscissas.round(4)
array([[0.0955, 0.3455, 0.6545, 0.9045]])
>>> weights.round(4)
array([0.1804, 0.2996, 0.2996, 0.1804])
>>> abscissas, weights = quad_fejer(3, (0, 1), segments=2)
>>> abscissas.round(4)
array([[0.125, 0.375, 0.625, 0.875]])
>>> weights.round(4)
array([0.2222, 0.2222, 0.2222, 0.2222])
"""
if isinstance(domain, Dist):
abscissas, weights = quad_fejer(
order, (domain.lower, domain.upper), growth)
weights *= domain.pdf(abscissas).flatten()
weights /= numpy.sum(weights)
return abscissas, weights
order = numpy.asarray(order, dtype=int).flatten()
lower, upper = numpy.array(domain)
lower = numpy.asarray(lower).flatten()
upper = numpy.asarray(upper).flatten()
dim = max(lower.size, upper.size, order.size)
order = order*numpy.ones(dim, dtype=int)
lower = lower*numpy.ones(dim)
upper = upper*numpy.ones(dim)
segments = segments*numpy.ones(dim, dtype=int)
if growth:
order = numpy.where(order > 0, 2**(order+1)-2, 0)
abscissas, weights = zip(*[_fejer(order_, segment)
for order_, segment in zip(order, segments)])
return combine_quadrature(abscissas, weights, (lower, upper)) | ec2472e134a2adab5cfa42703fdaafde844aee79 | 3,656,962 |
from pathlib import Path
def probe(app: FastFlixApp, file: Path) -> Box:
""" Run FFprobe on a file """
command = [
f"{app.fastflix.config.ffprobe}",
"-v",
"quiet",
"-loglevel",
"panic",
"-print_format",
"json",
"-show_format",
"-show_streams",
f"{file}",
]
result = execute(command)
try:
return Box.from_json(result.stdout)
except BoxError:
logger.error(f"Could not read output: {result.stdout} - {result.stderr}")
raise FlixError(result.stderr) | 055ac6003642bc78d1fcabbbb89765d1cacb3d80 | 3,656,963 |
import numpy as np
def is_standard_time_series(time_series, window=180):
"""
Check the length of time_series. If window = 180, then the length of time_series should be 903.
The mean value of last window should be larger than 0.
:param time_series: the time series to check, like [data_c, data_b, data_a]
:type time_series: pandas.Series
:param window: the length of window
:return: True or False
:return type: boolean
"""
if len(time_series) == 5 * window + 3 and np.mean(time_series[(4 * window + 2):]) > 0:
return True
else:
return False | 7fb3212c69efb076dbab9555cf1eab9698475f9b | 3,656,964 |
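A usage sketch with synthetic data: a window of 180 requires exactly 5 * 180 + 3 = 903 points and a positive mean over the last window.

import numpy as np

window = 180
ts = np.ones(5 * window + 3)               # correct length, positive tail
assert is_standard_time_series(ts, window)
assert not is_standard_time_series(np.ones(100), window)  # wrong length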
def get_comment_type(token, comment_syntax):
"""
Return the SQL-engine-related comment type.
"""
if is_block_comment(token):
return comment_syntax.get_block_comment_type(token)
elif is_line_comment(token):
return comment_syntax.get_line_comment_type(token) | 0ddd68b4cd12909c5689f5620b785ccb8a45cbeb | 3,656,965 |
from typing import Any
from typing import Dict
def edit(project: Any, params: Dict[str, str]) -> Dict[str, str]:
"""
Add a new method to a Python class in its given module.
TODO: See why an <EOF> char is added along with the new method
"""
eng = project.context().pathExpressionEngine()
res = eng.evaluate(project, "/Directory()/File()[@name='"+params['mod_name']+"']/PythonFile()//classdef()[/NAME[@value='"+params['class_name']+"']]")
for match in res.matches():
match.append("\n def "+params['method_name']+"(self):\n print('hey')\n")
return {"status":"OK", "message": "Method added to class"} | 3d5b0778d8c74ced8fb360e47c40648c17629748 | 3,656,966 |
def get_country_code(country_name):
""" Return the Pygal 2-digit country code for the given country."""
for code, name in COUNTRIES.items():
if name == country_name:
return code
# If the country wasn't found, return None.
return None | 485684fe01ade5e2ad558523ca839a468c083686 | 3,656,967 |
def get_divmod(up, down, minute=False, limit=2):
"""
Return the quotient of up divided by down.
:param up: dividend
:param down: divisor
:param minute: if True, convert the result to minutes (divide by 60)
:param limit: number of decimal places to keep
:return: the rounded quotient
"""
if up == 0:
return 0
if down == 0:
return 0
if minute:
return round(up/down/60.0, limit)
return round(float(up)/down, limit) | 253304cde82fd4a3aa70737f4caabb20b5166349 | 3,656,968 |
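A few illustrative calls.

assert get_divmod(7, 2) == 3.5
assert get_divmod(3600, 2, minute=True) == 30.0  # 1800 seconds expressed in minutes
assert get_divmod(1, 3, limit=4) == 0.3333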
def find_kernel_base():
"""Find the kernel base."""
return idaapi.get_fileregion_ea(0) | 20315c1fecc8d2a4ecf7301ccedeca84d4027285 | 3,656,969 |
def get_padding(x, padding_value=0, dtype=tf.float32):
"""Return float tensor representing the padding values in x.
Args:
x: int tensor with any shape
padding_value: int value that
dtype: type of the output
Returns:
float tensor with same shape as x containing values 0 or 1.
0 -> non-padding, 1 -> padding
"""
# print("get_padding", dtype)
with tf.name_scope("padding"):
return tf.cast(tf.equal(x, padding_value), dtype=dtype) | d11650796b980a53a5790588ac123c5323b867bd | 3,656,970 |
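A small TensorFlow illustration: ones mark padding positions, zeros mark real tokens.

import tensorflow as tf

x = tf.constant([[7, 0, 3], [0, 0, 5]])
mask = get_padding(x)   # 1.0 where x equals the padding value (0)
print(mask.numpy())     # [[0. 1. 0.]
                        #  [1. 1. 0.]]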
import typing
def canonical_symplectic_form_inverse (darboux_coordinates_shape:typing.Tuple[int,...], *, dtype:typing.Any) -> np.ndarray:
"""
Returns the inverse of canonical_symplectic_form(dtype=dtype). See documentation for that function for more.
In particular, the inverse of the canonical symplectic form is
[ 0 I ]
[ -I 0 ]
The inverse of the canonical symplectic form is a section of
TM \wedge TM
or can be thought of (as it is used here) as an alternating section of
TM \otimes TM
and therefore "naturally converts" a covector field on M (i.e. a section of T^{*}M) into a vector field on M
(i.e. a section of TM).
This form is what's used in the definition of the symplectic gradient of a function.
"""
validate_darboux_coordinates_shape_or_raise(darboux_coordinates_shape)
assert vorpy.tensor.dimension_of_shape(darboux_coordinates_shape) % 2 == 0
configuration_space_dimension = vorpy.tensor.dimension_of_shape(darboux_coordinates_shape) // 2
omega_inv = vorpy.tensor.contract(
'ik,jl',
canonical_symplectic_form_abstract_inverse(dtype=dtype),
np.eye(configuration_space_dimension, dtype=dtype),
dtype=dtype,
)
assert omega_inv.shape == (2,configuration_space_dimension,2,configuration_space_dimension)
return omega_inv.reshape(darboux_coordinates_shape+darboux_coordinates_shape) | 4ef3c820c7919fcd1bb7fda3fdf2482f3cd70c03 | 3,656,971 |
def update_with_error(a, b, path=None):
"""Merges `b` into `a` like dict.update; however, raises KeyError if values of a
key shared by `a` and `b` conflict.
Adapted from: https://stackoverflow.com/a/7205107
"""
if path is None:
path = []
for key in b:
if key in a:
if isinstance(a[key], dict) and isinstance(b[key], dict):
update_with_error(a[key], b[key], path + [str(key)])
elif a[key] == b[key]:
pass # same leaf value
elif a[key] is None:
a[key] = b[key]
elif (isinstance(a[key], (list, tuple)) and
not isinstance(a[key], str) and
isinstance(b[key], (list, tuple)) and
not isinstance(b[key], str) and
len(a[key]) == len(b[key]) and
all((av is None or av == bv) for av, bv in zip(a[key], b[key]))): # yapf: disable
a[key] = b[key]
else:
raise KeyError('Conflict at {}: {} vs. {}'.format('.'.join(path + [str(key)]), a[key], b[key]))
else:
a[key] = b[key]
return a | 201650bba4fcae21d353f88ff22a9559aea61ff4 | 3,656,972 |
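A usage sketch showing the deep merge, the None-overwrite rule, and the conflict error.

a = {'x': 1, 'nested': {'y': None}}
b = {'nested': {'y': 2}, 'z': 3}
assert update_with_error(a, b) == {'x': 1, 'nested': {'y': 2}, 'z': 3}

try:
    update_with_error({'x': 1}, {'x': 2})
except KeyError:
    pass  # conflicting leaf values raise KeyError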
import re
def tokenize(sent):
"""Return the tokens of a sentence including punctuation.
>>> tokenize("Bob dropped the apple. Where is the apple?")
["Bob", "dropped", "the", "apple", ".", "Where", "is", "the", "apple", "?"]
"""
return [x.strip() for x in re.split(r"(\W+)?", sent) if x and x.strip()] | 09456d2ae7d590ba8d6373a27993a52c0693027b | 3,656,973 |
def tree_unflatten(flat, tree, copy_from_tree=None):
"""Unflatten a list into a tree given the tree shape as second argument.
Args:
flat: a flat list of elements to be assembled into a tree.
tree: a tree with the structure we want to have in the new tree.
copy_from_tree: optional list of elements that we just copy from tree.
This argument is used when the flat version does not contain all elements
of the expected tree but just a subset, while the rest are filled from
the tree itself. It allows to omit "unnecessary" elements. For example,
consider trees (A, (B, X), X) and (X, (A, X), B) where X is some element
we do not care about. Flattening the first tree and removing X will yield
a flat list [A, B] and the second tree can then be reconstructed from this
list and the tree (X, (E, X), E) with copy_from_tree=[X]. One example
where this is used is the weights-tree of a model, where layers with no
weights have () in the tree and we use copy_from_tree=[()] to restore
a model from a file that only has a list of trainable weights.
Returns:
A pair (new_tree, rest_of_flat) where the new tree that has the structure
of tree but with leaves from flat, and the remaining elements of flat if
more were provided than the number of leaves of tree (useful for recursion).
"""
if copy_from_tree is not None and tree in copy_from_tree:
return tree, flat
if isinstance(tree, (list, tuple)):
new_tree, rest = [], flat
for t in tree:
new_t, rest = tree_unflatten(rest, t, copy_from_tree=copy_from_tree)
new_tree.append(new_t)
new_tree = tuple(new_tree) if isinstance(tree, tuple) else new_tree
return new_tree, rest
if isinstance(tree, dict):
new_tree, rest = {}, flat
for k in tree:
new_v, rest = tree_unflatten(rest, tree[k], copy_from_tree=copy_from_tree)
new_tree[k] = new_v
return new_tree, rest
return flat[0], flat[1:] | 711bc67a20835091360d0fbc64e0a8842eec53ba | 3,656,974 |
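A usage sketch mirroring the weights-tree use case from the docstring, where () placeholders are copied from the template tree rather than consumed from the flat list.

template = ('A', ('B', ()), ())
flat = [1, 2]
new_tree, rest = tree_unflatten(flat, template, copy_from_tree=[()])
assert new_tree == (1, (2, ()), ()) and rest == []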
def ByteOffsetToCodepointOffset( line_value, byte_offset ):
"""The API calls for byte offsets into the UTF-8 encoded version of the
buffer. However, ycmd internally uses unicode strings. This means that
when we need to walk 'characters' within the buffer, such as when checking
for semantic triggers and similar, we must use codepoint offsets, rather than
byte offsets.
This method converts the |byte_offset|, which is a utf-8 byte offset, into
a codepoint offset in the unicode string |line_value|."""
byte_line_value = ToBytes( line_value )
return len( ToUnicode( byte_line_value[ : byte_offset - 1 ] ) ) + 1 | 0a826157c43cb73a5dff31c20c906144b4a0eaa6 | 3,656,975 |
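An illustration with hypothetical stand-ins for ycmd's ToBytes/ToUnicode helpers (assumed to be plain UTF-8 encode/decode here).

def ToBytes(s): return s.encode('utf-8')              # hypothetical stub
def ToUnicode(b): return b.decode('utf-8', 'ignore')  # hypothetical stub

# In 'naïve code' the 'ï' takes two UTF-8 bytes, so the 'c' sits at
# 1-based byte offset 8 but 1-based codepoint offset 7.
assert ByteOffsetToCodepointOffset('naïve code', 8) == 7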
def get_authed_tweepy(access_token, token_secret):
"""Returns an authed instance of the twitter api wrapper tweepy for a given user."""
social_app_twitter = get_object_or_404(SocialApp, provider='twitter')
auth = tweepy.OAuthHandler(social_app_twitter.client_id, social_app_twitter.secret)
auth.set_access_token(access_token, token_secret)
return tweepy.API(auth) | 33bbf0cabdf2bbd3fc543efc4d921119d29c7729 | 3,656,976 |
def suffix_for_status(status):
"""Return ``title`` suffix for given status"""
suffix = STATUS_SUFFIXES.get(status)
if not suffix:
return ''
return ' {}'.format(suffix) | a908d28c6e461dcc8277784e82e383642b5ecfa3 | 3,656,977 |
import json
def login():
"""
login an existing user
"""
try:
username = json.loads(request.data.decode())['username'].replace(" ", "")
password = json.loads(request.data.decode())['password'].replace(" ", "")
user = User(username, "", "")
user = user.exists()
if check_password_hash(user.password_hash, password):
"""token if password is correct"""
token = auth_encode(user.user_id)
if token:
response = {'response': 'login successful', 'token': token.decode()}
return jsonify(response), 200
else:
return jsonify({'response': 'invalid username/password'}), 422
except (KeyError, ValueError) as ex:
print('error in login', ex)
return jsonify({'response': 'json body must contain username and password'}), 400
except (psycopg2.DatabaseError, psycopg2.IntegrityError, Exception) as ex:
print('error in login', ex)
return jsonify({'response': 'user not found'}), 404 | 8e09725c37ac897efefd3cd546ce929cdf799716 | 3,656,978 |
def soma_radius(morph):
"""Get the radius of a morphology's soma."""
return morph.soma.radius | 2f9991a2f9240965bdb69a1a14814ed99bf60f86 | 3,656,979 |
async def async_get_authorization_server(hass: HomeAssistant) -> AuthorizationServer:
"""Return authorization server."""
return AuthorizationServer(
authorize_url=AUTHORIZATION_ENDPOINT,
token_url=TOKEN_ENDPOINT,
) | 99d7c0d25168d07d0d27ee95e6ee0b59cb6d48c0 | 3,656,980 |
from typing import Optional
def check_proposal_functions(
model: Model, state: Optional[flow.SamplingState] = None, observed: Optional[dict] = None,
) -> bool:
"""
Check for the non-default proposal generation functions
Parameters
----------
model : pymc4.Model
Model to sample posterior for
state : Optional[flow.SamplingState]
Current state
observed : Optional[Dict[str, Any]]
Observed values (optional)
"""
(_, state, _, _, continuous_distrs, discrete_distrs) = initialize_state(
model, observed=observed, state=state
)
init = state.all_unobserved_values
init_state = list(init.values())
init_keys = list(init.keys())
for i, state_part in enumerate(init_state):
untrs_var, unscoped_tr_var = scope_remove_transformed_part_if_required(
init_keys[i], state.transformed_values
)
# get the distribution for the random variable name
distr = continuous_distrs.get(untrs_var, None)
if distr is None:
distr = discrete_distrs[untrs_var]
func = distr._default_new_state_part
if callable(func):
return True
return False | 3d0d14f800f3d499de0c823dd2df8b852573c56f | 3,656,981 |
def smaller_n(n1, n2):
""" Compare two N_Numbers and returns smaller one. """
p1, s1 = n1
p2, s2 = n2
p1l = len(str(p1)) + s1
p2l = len(str(p2)) + s2
if p1l < p2l:
return n1
elif p1l > p2l:
return n2
p1 = p1.ljust(36, '9')
p2 = p2.ljust(36, '9')
if p1 <= p2:
return n1
else:
return n2 | 1f5922b74bdb8e5ee4dba7a85a9a70efdb024c59 | 3,656,982 |
def sortDict(dictionary: dict):
"""Lambdas made some cringe and stupid thing some times, so this dirty thing was developed"""
sortedDictionary = {}
keys = list(dictionary.keys())
keys.sort()
for key in keys:
sortedDictionary[key] = dictionary[key]
return sortedDictionary | ed61adf95f2b8c1c4414f97d84b8863596681478 | 3,656,983 |
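A one-line usage check:

print(sortDict({'b': 2, 'c': 3, 'a': 1}))   # {'a': 1, 'b': 2, 'c': 3}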
def elina_linexpr0_alloc(lin_discr, size):
"""
Allocate a linear expressions with coefficients by default of type ElinaScalar and c_double.
If sparse representation, corresponding new dimensions are initialized with ELINA_DIM_MAX.
Parameters
----------
lin_discr : c_uint
Enum of type ElinaLinexprDiscr that defines the representation (sparse or dense).
size : c_size_t
Size of the internal array.
Returns
-------
linexpr : ElinaLinexpr0Ptr
Pointer to the newly allocated ElinaLinexpr0
"""
linexpr = None
try:
elina_linexpr0_alloc_c = elina_auxiliary_api.elina_linexpr0_alloc
elina_linexpr0_alloc_c.restype = ElinaLinexpr0Ptr
elina_linexpr0_alloc_c.argtypes = [c_uint, c_size_t]
linexpr = elina_linexpr0_alloc_c(lin_discr, size)
except:
print('Problem with loading/calling "elina_linexpr0_alloc" from "libelinaux.so"')
print('Make sure you are passing c_uint, c_size_t to the function')
return linexpr | 56bbaa01ba3b9bbe657240abdf8fb92daa527f29 | 3,656,984 |
def FrameTag_get_tag():
"""FrameTag_get_tag() -> std::string"""
return _RMF.FrameTag_get_tag() | 21392f22a0b67f86c5a3842ab6befc4b1e3938c6 | 3,656,985 |
def noise4(x: float, y: float, z: float, w: float) -> float:
"""
Generate 4D OpenSimplex noise from X,Y,Z,W coordinates.
"""
return _default.noise4(x, y, z, w) | 75b5911e9b8b4a08abba9540992e812d2a1dee83 | 3,656,986 |
def damerau_levenshtein_distance(word1: str, word2: str) -> int:
"""Calculates the distance between two words."""
inf = len(word1) + len(word2)
table = [[inf for _ in range(len(word1) + 2)] for _ in range(len(word2) + 2)]
for i in range(1, len(word1) + 2):
table[1][i] = i - 1
for i in range(1, len(word2) + 2):
table[i][1] = i - 1
da = {}
for col, c1 in enumerate(word1, 2):
last_row = 0
for row, c2 in enumerate(word2, 2):
last_col = da.get(c2, 0)
addition = table[row - 1][col] + 1
deletion = table[row][col - 1] + 1
substitution = table[row - 1][col - 1] + (0 if c1 == c2 else 1)
transposition = (
table[last_row - 1][last_col - 1]
+ (col - last_col - 1)
+ (row - last_row - 1)
+ 1
)
table[row][col] = min(addition, deletion, substitution, transposition)
if c1 == c2:
last_row = row
da[c1] = col
return table[len(word2) + 1][len(word1) + 1] | 7b75bb94fe66897c1807ac185d8602ea2b3ebd67 | 3,656,987 |
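A few illustrative calls:

print(damerau_levenshtein_distance("kitten", "sitting"))   # 3: two substitutions and one insertion
print(damerau_levenshtein_distance("abcd", "acbd"))        # 1: a single adjacent transposition
print(damerau_levenshtein_distance("", "abc"))             # 3: insertions only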
from typing import Any
def ga_validator(value: Any) -> str | int:
"""Validate that value is parsable as GroupAddress or InternalGroupAddress."""
if isinstance(value, (str, int)):
try:
parse_device_group_address(value)
return value
except CouldNotParseAddress:
pass
raise vol.Invalid(
f"value '{value}' is not a valid KNX group address '<main>/<middle>/<sub>', '<main>/<sub>' "
"or '<free>' (eg.'1/2/3', '9/234', '123'), nor xknx internal address 'i-<string>'."
) | 84845c9dbf5db041e243bf462dec4533ff7e0e3e | 3,656,988 |
import time
import re
from datetime import datetime
def getTime(sim):
"""
Get the network time
@param sim: the SIM serial handle
"""
sim.write(b'AT+CCLK?\n')
line = sim.readline()
res = None
while not line.endswith(b'OK\r\n'):
time.sleep(0.5)
        # Expected reply format: +CCLK: "yy/MM/dd,hh:mm:ss+zz"
        matcher = re.match(br'^\+CCLK: "([^+]+)\+[0-9]+"\r\n', line)
        if matcher:
            ts = matcher.group(1).decode('ascii')
            # The captured group already excludes the "+zz" timezone suffix.
            res = datetime.strptime(ts, "%y/%m/%d,%H:%M:%S")
line = sim.readline()
return res | 77c889a41b214046a5965126927ca7e7ee043129 | 3,656,989 |
import makehuman
def defaultTargetLicense():
"""
Default license for targets, shared for all targets that do not specify
their own custom license, which is useful for saving storage space as this
license is globally referenced by and applies to the majority of targets.
"""
return makehuman.getAssetLicense( {"license": "AGPL3",
"author": "MakeHuman",
"copyright": "2016 Data Collection AB, Joel Palmius, Jonas Hauquier"} ) | a638129f1674b14fbf0d72e5323c1725f6fb5035 | 3,656,990 |
import json
def get_repo_info(main_path):
""" Get the info of repo.
Args:
main_path: the file store location.
Return:
A json object.
"""
with open(main_path + '/repo_info.json') as read_file:
repo_info = json.load(read_file)
return repo_info | f4a538819add0a102f6cbe50be70f2c9a0f969b6 | 3,656,991 |
import yaml
def parse_settings(settings_file: str) -> dict:
"""
The function parses settings file into dict
Parameters
----------
settings_file : str
        File with the model settings; must be in YAML format.
Returns
-------
ydict : dict
Parsed settings used for modeling.
"""
with open(settings_file, 'r') as fstream:
ydict = yaml.safe_load(fstream)
return ydict | 1aec2a8be51376209db81d60115814ddefca7ea6 | 3,656,992 |
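A minimal usage sketch that round-trips a tiny settings file through a temporary path:

import os
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.yaml', delete=False) as fh:
    fh.write("model:\n  layers: 3\n  learning_rate: 0.001\n")
    tmp_path = fh.name

print(parse_settings(tmp_path))   # {'model': {'layers': 3, 'learning_rate': 0.001}}
os.remove(tmp_path)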
def get_mac_address(path):
    """
    input: path to the file containing the MAC address
    output: a string containing the MAC address
    Possible exceptions:
    FileNotFoundError - when the file is not found
    PermissionError - in the absence of access rights to the file
    TypeError - if the function argument is not a string
    """
    if not isinstance(path, str):
        raise TypeError("The path must be a string value")
    # FileNotFoundError and PermissionError propagate to the caller unchanged;
    # the context manager makes sure the file is closed after reading.
    with open(path) as file:
        return file.readline().strip().upper() | 814a530b63896103adcb8fbc84d17939644b9bbe | 3,656,993
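On a typical Linux system the kernel exposes interface MAC addresses under /sys, so a call could look like this (the interface name is illustrative):

print(get_mac_address('/sys/class/net/eth0/address'))   # e.g. 'AA:BB:CC:DD:EE:FF'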
def jwt_get_username_from_payload_handler(payload):
"""
Override this function if username is formatted differently in payload
"""
return payload.get('name') | 92d60ce714632571346e93459729dcf1d764617b | 3,656,994 |
import shlex
def grr_uname(line):
"""Returns certain system infornamtion.
Args:
line: A string representing arguments passed to the magic command.
Returns:
String representing some system information.
Raises:
NoClientSelectedError: Client is not selected to perform this operation.
"""
args = grr_uname.parser.parse_args(shlex.split(line))
return magics_impl.grr_uname_impl(args.machine, args.kernel_release) | 5e671fcffe415397edc3b7c6011cc4e21b72cb5a | 3,656,995 |
import requests
import warnings

import pandas as pd
from io import BytesIO


def stock_szse_summary(date: str = "20200619") -> pd.DataFrame:
    """
    Shenzhen Stock Exchange - market overview - statistics by security category
    http://www.szse.cn/market/overview/index.html
    :param date: the most recent completed trading date
    :type date: str
    :return: statistics by security category
    :rtype: pandas.DataFrame
    """
url = "http://www.szse.cn/api/report/ShowReport"
params = {
"SHOWTYPE": "xlsx",
"CATALOGID": "1803_sczm",
"TABKEY": "tab1",
"txtQueryDate": "-".join([date[:4], date[4:6], date[6:]]),
"random": "0.39339437497296137",
}
r = requests.get(url, params=params)
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
temp_df = pd.read_excel(BytesIO(r.content), engine="openpyxl")
temp_df["证券类别"] = temp_df["证券类别"].str.strip()
temp_df.iloc[:, 2:] = temp_df.iloc[:, 2:].applymap(lambda x: x.replace(",", ""))
temp_df.columns = ["证券类别", "数量", "成交金额", "总市值", "流通市值"]
temp_df["数量"] = pd.to_numeric(temp_df["数量"])
temp_df["成交金额"] = pd.to_numeric(temp_df["成交金额"])
temp_df["总市值"] = pd.to_numeric(temp_df["总市值"], errors="coerce")
temp_df["流通市值"] = pd.to_numeric(temp_df["流通市值"], errors="coerce")
return temp_df | 6544b0d78baa76858c13a001287b35d2a0faf7ba | 3,656,996 |
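A usage sketch; this performs a live HTTP request against the SZSE reporting endpoint, so it only works with network access and while the API keeps its current response format:

df = stock_szse_summary(date="20200619")
print(df.head())           # one row per security category
print(df["总市值"].sum())   # total market value across categories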
def find_all_movies_shows(pms): # pragma: no cover
""" Helper of get all the shows on a server.
Args:
func (callable): Run this function in a threadpool.
Returns: List
"""
all_shows = []
for section in pms.library.sections():
if section.TYPE in ('movie', 'show'):
all_shows += section.all()
return all_shows | ca4a8a5f4b2c1632ea6e427c748ef790c896b3ba | 3,656,997 |
def parse_vars(vars):
"""
Transform a list of NAME=value environment variables into a dict
"""
retval = {}
for var in vars:
key, value = var.split("=", 1)
retval[key] = value
return retval | e2c6ae05cdf0151caaf8589eb7d7df90dcdd99a1 | 3,656,998 |
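A short usage check; note that only the first '=' splits each pair:

print(parse_vars(["FOO=1", "BAR=hello=world"]))
# {'FOO': '1', 'BAR': 'hello=world'}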
from typing import List
import collections
def find_dup_items(values: List) -> List:
"""Find duplicate items in a list
Arguments:
values {List} -- A list of items
Returns:
List -- A list of duplicated items
"""
dup = [t for t, c in collections.Counter(values).items() if c > 1]
return dup | 3a84c2f3b723bed9b7a82dc5f0cfd81d99c2bf48 | 3,656,999 |
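A quick usage check:

print(find_dup_items([1, 2, 2, 3, 3, 3]))   # [2, 3]
print(find_dup_items(["a", "b", "c"]))      # []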