content | sha1 | id
---|---|---
def time_to_text(time):
"""Get a representative text of a time (in s)."""
if time < 0.001:
return str(round(time * 1000000)) + " µs"
elif time < 1:
return str(round(time * 1000)) + " ms"
elif time < 60:
return str(round(time, 1)) + " s"
else:
return str(round(time / 60, 1)) + " min" | f87934f66c82c834f18d94189c67c22f6b8ef45f | 85,051 |
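A quick doctest-style sketch of `time_to_text`, assuming the function above is in scope; each call exercises one branch:
>>> time_to_text(0.0005)
'500 µs'
>>> time_to_text(0.25)
'250 ms'
>>> time_to_text(12.34)
'12.3 s'
>>> time_to_text(90)
'1.5 min'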
def get_results_value(results):
""" Gets the value in the last column of the first row of the ga results
:param results: dict: results from the ga analytics query
:return: int: the value from the results
"""
if results.get('rows', []):
return results.get('rows')[0][-1]
return None | 94089925dafdb1e77b927950fb83e2b90c1349c8 | 382,098 |
def s3_download(s3_bucket, file_key, output_path):
"""Download a file from S3."""
print(f"downloading [{file_key}] from [{s3_bucket.name}] to [{output_path.resolve().as_posix()}]")
s3_bucket.download_file(file_key, output_path.resolve().as_posix())
return output_path | 8a966b6f0230ffe037d054d798e05ff462036dbd | 63,139 |
def centerOfGravity( array ):
    """
    Computes the centroid of the given points.
    Parameters:
    ----------
    array: list
        List of points.
    Returns:
    -----
    tuple
        The X and Y coordinates of the centroid.
    """
    sum_X = 0
    sum_Y = 0
    # Sum up the X and Y coordinates of all points
    for element in array:
        sum_X += element[0]
        sum_Y += element[1]
    # Return the averages.
    return sum_X/len(array), sum_Y/len(array) | a5ef7714b0ea97258530ba3d2d6d0483ca82623a | 686,714 |
def rplog_convert(df, pwave_sonic, shear_sonic):
"""
Convert usec/ft (DT/DTS) to velocity (VP/VS)
Create Impedance logs from velocity and density logs (IP/IS)
Create VP/VS ratio log
"""
try:
df['VP'] = 304800 / df[pwave_sonic]
df["IP"] = df.VP * df.RHOB
df['VS'] = 304800 / df[shear_sonic]
df['VPVS'] = df.VP / df.VS
df['IS'] = df.VS * df.RHOB
except Exception as e:
print(f"Error when creating log: {e}")
return df | 4c2cc1aa79dfc19d9427b9bd35ad594e09b973ee | 112,229 |
def RgbFromHex(color_hex):
"""Returns a RGB color from a color hex.
Args:
color_hex: A string encoding a single color. Example: '8f7358'.
Returns:
A RGB color i.e. a 3-int tuple. Example: (143, 115, 88).
"""
return tuple(int(color_hex[i:i + 2], 16) for i in (0, 2, 4)) | b6c385a334b45aae145008a35f372ee1b565020f | 267,330 |
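A minimal check of `RgbFromHex`, assuming the function above is in scope:
>>> RgbFromHex('ff0080')
(255, 0, 128)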
def ema(df, price, ema, n):
"""
Exponential Moving Average (EMA) is a Weighted Moving Average (WMA) that
gives more weighting to recent price data than Simple Moving Average (SMA)
does.
The EMA formula is based on the previous day EMA value. Since we have to
start our calculation somewhere, the initial value for our first EMA will
actually be an SMA.
Parameters:
df (pd.DataFrame): DataFrame which contain the asset price.
price (string): the column name of the price of the asset.
ema (string): the column name for the n-day exponential moving average results.
n (int): the total number of periods.
Returns:
df (pd.DataFrame): Dataframe with n-day exponential moving average of the asset calculated.
"""
df = df.copy().reset_index(drop=True)
k = 2.0 / (n + 1)
prev_ema = list(df[:n][price].rolling(window=n).mean())[-1]
df.loc[n - 1, ema] = prev_ema
df.loc[n:, ema] = 0.0
emas = [0.0 for i in range(n)]
for row in df.loc[n:, [price]].itertuples(index=False):
emas.append((k * row[0]) + ((1 - k) * prev_ema))
prev_ema = emas[-1]
df[ema] += emas
return df | f8ef73e3c4b20d65b860fbfdc40e4a954b8d6730 | 254,214 |
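A small worked example of `ema`, assuming pandas is installed and the function above is in scope; the column names are hypothetical:
import pandas as pd

prices = pd.DataFrame({'close': [10.0, 11.0, 12.0, 13.0, 14.0]})
out = ema(prices, price='close', ema='ema_3', n=3)
# With n=3 the smoothing factor k is 0.5: rows 0-1 stay NaN, row 2 is
# seeded with the 3-day SMA (11.0), and rows 3-4 hold the recursive
# EMA values 12.0 and 13.0.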
def get_sizes_purities(clusters):
"""Return two lists containing the sizes and purities of `clusters`."""
sizes = []
purities = []
for cluster in clusters:
sizes.append(cluster["size"])
purities.append(cluster["purity"])
return sizes, purities | 91075470d54ef285c49be470eee26b321266e66c | 603,595 |
def collect_driver_info(driver):
"""Build the dictionary that describes this driver."""
info = {'name': driver.class_name,
'version': driver.version,
'fqn': driver.class_fqn,
'description': driver.desc,
'ci_wiki_name': driver.ci_wiki_name}
return info | 41e94ac324d9bfc2248d90e670378cf5e39e3e1d | 74,325 |
import cmath as c
def amp_ph_to_comp(a,ph):
""" Takes the amplitude and phase of the waveform and
computes the compose them together"""
t =[]
for i in range(len(a)):
t.append(a[i]*c.exp(ph[i]*1j))
return t | cd1b6a3135af89acc1785b49ba8a8888b8034618 | 228,604 |
def area(gdf):
"""Returns area of GeoDataFrame geometries in square kilometers."""
return gdf.to_crs(epsg=3035).area.div(1e6) | dfcd35c363699fe27052e2a4c8b3c3d24580513d | 609,188 |
def pronoun_instance_dist(novel, words):
"""
Takes in a novel and list of gender pronouns, returns a list of distances between each
instance of a pronoun in that novel
>>> from gender_novels import novel
>>> summary = "James was his convicted of adultery. "
>>> summary += "which made him very sad, and then his Jane was also sad, and himself everybody was "
>>> summary += "sad and then he died and it was very sad. His Sadness."
>>> novel_metadata = {'author': 'Hawthorne, Nathaniel', 'title': 'Scarlet Letter',
... 'corpus_name': 'sample_novels', 'date': '1966',
... 'filename': None, 'text': summary}
>>> scarlett = novel.Novel(novel_metadata)
>>> pronoun_instance_dist(scarlett, ["his", "him", "he", "himself"])
[6, 5, 6, 6, 7]
    :param novel: Novel object
    :param words: list of gender pronouns to search for
:return: list of distances between instances of pronouns
"""
text = novel.get_tokenized_text()
output = []
count = 0
start = False
for e in text:
e = e.lower()
if not start:
if e in words:
start = True
else:
count += 1
if e in words:
output.append(count)
count = 0
return output | 6bd4cf1b3b968c395b230f1a23d4d07325029659 | 569,678 |
def bvw(sw, phi):
"""
Calculates Bulk Volume Water
Parameters
----------
sw : float
Water saturation (dec)
phi : float
Porosity (dec)
Returns
-------
float
Bulk volume water (dec)
"""
return sw * phi | 27b6e843e8a348488f7150080683d18421a062bf | 167,006 |
def get_default(arr, idx, default_value):
"""get arr[idx] or return default_value
"""
try:
return arr[idx]
except IndexError:
return default_value | 038b943da7fa1d36038444880264160da8e031f4 | 701,544 |
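Doctest-style checks for `get_default`, assuming the function above is in scope:
>>> get_default([1, 2, 3], 1, 'missing')
2
>>> get_default([1, 2, 3], 5, 'missing')
'missing'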
def _read_file(path):
"""Reads the whole file and returns its content.
Args:
path: The file path.
Returns:
The content of the file.
"""
    # Use a context manager so the file handle is closed after reading.
    with open(path, 'r') as f:
        return f.read() | e765cd3d369c92190dd4bb78694b1daf79c74289 | 249,145 |
def jump_to_match(input_file, regex):
"""Jump to regex match in file.
@param input_file: File object
@param regex: Compiled regex object
@return: True if successful, False otherwise
"""
for line in input_file:
if regex.match(line):
return True
return False | 9ecae9a2218d3f334e2183cc1979d839113c995b | 635,465 |
def find_biomass_reaction(
model, biomass_string=["Biomass", "BIOMASS", "biomass"]
):
"""
Identifies the biomass reaction(s) in a metabolic model.
Parameters
----------
model : cobra.Model
Metabolic model.
biomass_string : str or list
String denoting at least a part of the name of the
biomass function of the metabolic model or a list
containing multiple possibilities to be tested.
Preset to `["Biomass", "BIOMASS", "biomass"]`.
Returns
-------
biomass_reaction_ids : list
Reaction(s) containing the input string.
"""
if isinstance(biomass_string, list):
biomass = biomass_string
else:
        # wrap the single string in a list; list(str) would split it into characters
        biomass = [biomass_string]
biomass_reaction_ids = []
for reaction in model.reactions:
for biomass_spelling in biomass:
if biomass_spelling in reaction.id:
biomass_reaction_ids.append(reaction.id)
return biomass_reaction_ids | bbc9232462fb3850be1208e324ab147aa14be086 | 101,557 |
def isNumber(n):
    """
    Checks if n can be interpreted as an integer.
    :param n: value to be checked
    :return: True if n parses as an integer, False otherwise
    """
    try:
        int(n)
        return True
    except (ValueError, TypeError):
        return False | c6aab9655eca55d20c002e2139b8a33a3a2eef57 | 194,691 |
from typing import Optional
def get_number_str(x: Optional[int]) -> str:
"""Get string from number or empty string if number is `None`."""
if x is None:
return ''
return str(x) | f0e7e05ab3db5f190853ac6cdb731ba6bbf64065 | 145,919 |
import six
def is_string(value):
"""Return a boolean value indicating whether the value is a string or not.
This method is compatible with both Python 2.7 and Python 3.x.
NOTE:
1. We can't use isinstance(string_value, str) because strings in Python 2.7 can have "unicode" type.
2. We can't use isinstance(string_value, basestring) because "basestring" type is not available in Python 3.x.
:param value: Value
:type value: Any
:return: Boolean value indicating whether the value is a string or not
:rtype: bool
"""
return isinstance(value, six.string_types) | 809e4d43390219f08cf81e9389b38e045aae0e0c | 642,132 |
def create_attn_masks(input_ids):
"""
Create attention masks to tell model whether attention should be applied to
the input id tokens. Do not want to perform attention on padding tokens.
"""
# Create attention masks
attention_masks = []
# Create a mask of 1s for each token followed by 0s for padding
for seq in input_ids:
seq_mask = [float(i>0) for i in seq]
attention_masks.append(seq_mask)
return attention_masks | 6b55578b9c79410e81a6d2bf7e341bdfd51f2edf | 386,613 |
def lower(review):
"""Lowercase a review
Arguments:
- review: the review of the SemEvalReview class
Returns:
The review with lowercased text
"""
review.text = review.text.lower()
return review | 0f76d959b8258022244ee27c129a7762a9d72af3 | 136,022 |
def get_allowed_tokens(config):
"""Return a list of allowed auth tokens from the application config"""
return [token for token in (config.get('AUTH_TOKENS') or '').split(':') if token] | e046e369ef949501deaf91f6629a768af9416c75 | 83,336 |
def _get_reference_bodyreference(referencebody_element):
"""Parse ReferenceInput BodyReference element
"""
return referencebody_element.attrib.get(
'{http://www.w3.org/1999/xlink}href', '') | c0baeec99e3d9d4a54f17a721d00202e214defcc | 60,038 |
def get_parent_id(org_client, account_id):
    """
    Query the deployed AWS organization for 'account_id'. Return the 'Id' of
    the parent OrganizationalUnit, or raise a RuntimeError if the account
    does not have exactly one parent.
    """
    parents = org_client.list_parents(ChildId=account_id)['Parents']
    if len(parents) == 1:
        return parents[0]['Id']
    raise RuntimeError("API Error: account '%s' has more than one parent: %s"
                       % (account_id, parents)) | 69b42af5989f4672fdbce2c15da5b7290a1465c1 | 573,482 |
def compact(objects):
"""
Filter out any falsey objects in a sequence.
"""
return tuple(filter(bool, objects or [])) | 60af7d26e786113d9af7a29f1e6a46ee2191453d | 653,954 |
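Quick checks for `compact`, assuming the function above is in scope:
>>> compact([0, 1, '', 'a', None])
(1, 'a')
>>> compact(None)
()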
def velocity_exceeded(Rover, max_vel=2.0):
"""
    Return True while the rover's velocity is still below max_vel
    (i.e., the maximum has not yet been exceeded).
Keyword arguments:
max_vel -- maximum velocity in meters/second
"""
return Rover.vel < max_vel | 944a8c1c19c3c080f8fc79364b40555a8ee792e1 | 604,202 |
from collections import OrderedDict
def _Net_blobs(self):
"""
An OrderedDict (bottom to top, i.e., input to output) of network
blobs indexed by name
"""
return OrderedDict([(bl.name, bl) for bl in self._blobs]) | 6c68e91a10fb9eda6c0be9ef03ffdd08823131d8 | 690,620 |
def is_test(t):
"""Returns True if the target is comprised of tests."""
return t.has_label('tests') | 55215c93a773c0c28e6455c39aa60ba133742960 | 487,642 |
from typing import Any
def get_client_region(client: Any) -> str:
"""Get the region from a boto3 client.
Args:
client: The client to get the region from.
Returns:
AWS region string.
"""
return client._client_config.region_name | 1b6d0810d7376bd5ef8ea59d7ae700c39a89e610 | 643,799 |
def parse_scaling(scaling_args):
"""Translate a list of scaling requests to a dict prefix:count."""
scaling_args = scaling_args or []
result = {}
for item in scaling_args:
key, values = item.split('=')
values = values.split(',')
value = int(values[0])
blacklist = frozenset(int(v) for v in values[1:] if v)
result[key + '0'] = value, blacklist
return result | 61d25373093ace840bca120e01a71b6d4309c7f2 | 198,745 |
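A usage sketch for `parse_scaling`, assuming the function above is in scope; the role names are hypothetical:
>>> parse_scaling(['compute=3,1'])
{'compute0': (3, frozenset({1}))}
>>> parse_scaling(['control=2'])
{'control0': (2, frozenset())}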
def local_patch(x, bbox):
"""Crop local patch according to bbox.
Args:
x: input
bbox: (top, left, height, width)
"""
return x[:, :, bbox[0]:bbox[0] + bbox[2], bbox[1]:bbox[1] + bbox[3]] | 017ae96fd8d05e789fd048842da0d16fcbd5634d | 569,593 |
def PrepareSets(args, tokenizer, train_set, dev_set, test_set, first_label=False):
"""
Function that prepares the datasets for usage.
Inputs:
args - Namespace object from the argument parser
tokenizer - BERT tokenizer instance
train_set - Unprepared training set
dev_set - Unprepared development set
test_set - Unprepared test set
first_label - Indicates whether to only use the first label. Default is False
Outputs:
train_set - Prepared training set
dev_set - Prepared development set
test_set - Prepared test set
"""
    # filter out all instances where the emotion is neutral
train_set = train_set.filter(lambda example: not 27 in example['labels'])
dev_set = dev_set.filter(lambda example: not 27 in example['labels'])
test_set = test_set.filter(lambda example: not 27 in example['labels'])
# remove unnecessary columns
train_set = train_set.remove_columns(['text', 'id'])
dev_set = dev_set.remove_columns(['text', 'id'])
test_set = test_set.remove_columns(['text', 'id'])
# function that creates new instances for all labels
def handle_multiple_labels(batch):
new_batch = {'attention_mask': [],
'input_ids': [],
'labels': [],
'token_type_ids': [],
}
for instance_idx, instance in enumerate(batch['labels']):
for label in instance:
new_batch['attention_mask'].append(batch['attention_mask'][instance_idx])
new_batch['input_ids'].append(batch['input_ids'][instance_idx])
new_batch['labels'].append(label)
new_batch['token_type_ids'].append(batch['token_type_ids'][instance_idx])
return new_batch
# function that takes the first label
def handle_first_label(batch):
batch['labels'] = batch['labels'][0]
return batch
# check which label function to use
if first_label:
label_fn = handle_first_label
batched = False
else:
label_fn = handle_multiple_labels
batched = True
# filter the labels
train_set = train_set.map(label_fn, batched=batched)
dev_set = dev_set.map(label_fn, batched=batched)
test_set = test_set.map(label_fn, batched=batched)
# return the prepared datasets
return train_set, dev_set, test_set | fea720f386c4a7819317bd53f35b4268aee43d6e | 343,304 |
def file_to_dict_array(input_file, separator="\t"):
"""
Turns a column based file into an array of dicts, where the keys are the column names
So result[3]["first"] gets the value from the column with the name "first" and the fourth row
"""
result = []
    with open(input_file, 'r') as infile:
        header = None
        for line in infile:
splt = line.rstrip().split(separator)
if not header:
header = splt
continue
result.append({header[i]: splt[i] for i in range(len(splt))})
return result | 70998bbc31274c15d83d47aeb0df09c5cb590ed3 | 272,239 |
def _mock_check_state(_):
"""Mocked check_state method."""
return True | 1db16fd64d93147d1b1eb849e6afb4463b0f6ff3 | 661,191 |
def has22(list_one: list) -> bool:
    """Returns True if the list contains a 2 next to a
    2. Otherwise, the function returns False.
    >>> has22([1, 2, 2, 3])
    True
    >>> has22([1, 2, 3, 4])
    False
    """
    # Scan adjacent pairs directly; matching on str(list) misfires on
    # values like 22, and indexing blindly fails on short lists.
    for i in range(len(list_one) - 1):
        if list_one[i] == 2 and list_one[i + 1] == 2:
            return True
    return False | be5554cc17b760977505120fb965aa7f9014aa6b | 621,252 |
import torch
def make_ent2idx(entities, max_ent_id):
"""Given a tensor with entity IDs, return a tensor indexed with
an entity ID, containing the position of the entity.
Empty positions are filled with -1.
Example:
    > make_ent2idx(torch.tensor([4, 5, 0]), 5)
tensor([ 2, -1, -1, -1, 0, 1])
"""
idx = torch.arange(entities.shape[0])
ent2idx = torch.empty(max_ent_id + 1, dtype=torch.long).fill_(-1)
ent2idx.scatter_(0, entities, idx)
return ent2idx | 627b6366cff49ab22d6e9bab76fcb030e233a92f | 306,618 |
def get_channel_youtube_url(channel_code):
""" Get the Youtube Channel URL given its code
Example:
UC6gsueJf0YTIF3inlGKWLPg
must return:
https://www.youtube.com/channel/UC6gsueJf0YTIF3inlGKWLPg
"""
return "https://www.youtube.com/channel/{:s}".format(channel_code) | 3b94e53de783204e718fc4a938c4ec34224d357e | 599,534 |
def first_index_not_below(arr, t):
"""Return first index of array >= t, or len(arr) if no such found"""
for i, x in enumerate(arr):
if x >= t:
return i
return len(arr) | dc41ea6c2dfa0a4243c180d04553bc1d8d3a83d8 | 185,171 |
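Quick checks for `first_index_not_below`, assuming the function above is in scope; a miss returns len(arr):
>>> first_index_not_below([1, 3, 5, 7], 4)
2
>>> first_index_not_below([1, 3], 10)
2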
from pathlib import Path
def _gen_outdir(outdir):
"""
Coerces `outdir` to `pathlib.Path` and creates it, if it doesn't exist
Parameters
----------
outdir : str
Path to desired output directory
Returns
-------
outdir : pathlib.Path
Path to desired output directory
"""
outdir = Path(outdir).expanduser().resolve()
outdir.mkdir(exist_ok=True)
return outdir | d250e68a347efbb57dd8b69b73680477c81ede54 | 550,783 |
def TOTAL_DEGREE_FREEDOM(N_DOFSNODE, N_NODES):
"""
This function determines the quantity and ID values of the structure's global degrees of freedom.
Input:
    N_DOFSNODE | Number of degrees of freedom per node | Integer
N_NODES | Number of nodes | Integer
Output:
DOF_GLOBAL | ID global degree of freedom | Py list[N_DOFSGLOBAL]
N_DOFSGLOBAL | Total of degrees of freedom | Integer
"""
DOF_GLOBAL = []
N_DOFSGLOBAL = N_NODES * N_DOFSNODE
for I_COUNT in range (N_DOFSGLOBAL):
DOF_GLOBAL.append(I_COUNT)
return DOF_GLOBAL, N_DOFSGLOBAL | b6b374c0d9a18d4efb7077f6758cd78236bbfeeb | 277,114 |
def _join_words(words, delimiter=",", conjunction="and"):
"""Join words together for nice printout.
>>> _join_words(["first", "second", "third"])
'first, second, and third'
>>> _join_words(["first", "second"])
'first and second'
>>> _join_words(["first"])
'first'
"""
if len(words) == 1:
return words[0]
elif len(words) == 2:
return ' {0} '.format(conjunction).join(words)
else:
return '{0} '.format(delimiter).join(words[0:-1]) + \
"%s %s %s" % (delimiter, conjunction, words[-1]) | 4ad92cb027b3edf1442c122719d74d0b74d67cfe | 310,483 |
def fix_name(inp, names_used):
"""Append a number if an autoSql field name is duplicated.
"""
name = inp
i = 2
while name in names_used:
name = "%s%s" % (inp, i)
i += 1
names_used.append(name)
return name | 4021512b7ff815769bdf6b30b9852c4dfd5a17ab | 602,843 |
from typing import Tuple
def default_hyperparameter_denormalizer(n_factors_norm: float,
reg_weight_norm: float
) -> Tuple[int, float]:
"""Takes hyperparameter values normalized from default interval to (0; 1] interval,
and returns actual values for a model.
Implements :py:obj:`HyperparameterDenormalizer` type.
:param n_factors_norm: Number of factors [1; 100] -> [0.01; 1]
    :param reg_weight_norm: Regularization constant [0.001; 0.1] -> [0.01; 1]
:return: Denormalized values of number of factors and regularization constant
"""
return round(n_factors_norm * 100), reg_weight_norm / 10 | e5788cc3e1c8033ea6acccd3f17d273d14f244b0 | 466,506 |
from typing import List
from typing import Union
from pathlib import Path
def cli_args(tmpdir) -> List[Union[Path, str]]:
"""
Fixture simulating a set of CLI arguments.
Returns:
List of args.
"""
in_folder = Path("requirements.in")
assert in_folder.exists()
out_folder = Path(tmpdir).joinpath("fake_requirements")
tld = Path(tmpdir)
ignore = "linting"
return [in_folder, out_folder, tld, ignore] | bbfdc585289dbbdb5cd532b2791bf695cb2f5a91 | 75,892 |
import torch
def test_model(model, dset_loader):
"""
Tests a model on a given data set and returns the accuracy of the model
on the set.
"""
model.train(False)
running_corrects = 0
for inputs, labels in dset_loader:
# wrap them in Variable
# inputs, labels = Variable(inputs.cuda()), \
# Variable(labels.cuda())
# forward
outputs = model.forward_prediction(inputs)
_, preds = torch.max(outputs.data, 1)
running_corrects += torch.sum(preds == labels.data)
return running_corrects/(len(dset_loader) * dset_loader.batch_size) | c5b4a5673e2424bb1274b3988e064cde35385f58 | 164,056 |
def get_hpsearch_call(cmd_args, num_seeds, grid_config, hpsearch_dir=None):
"""Generate the command line for the hpsearch.
Args:
cmd_args: The command line arguments.
num_seeds (int): Number of searches.
grid_config (str): Location of search grid.
hpsearch_dir (str, optional): Where the hpsearch should write its
results to.
Returns:
(str): The command line to be executed.
"""
cluster_cmd_prefix = ''
cluster_cmd_suffix = ''
non_cluster_cmd_suffix = ''
if cmd_args.run_cluster and cmd_args.scheduler == 'lsf':
cluster_cmd_prefix = 'bsub -n 1 -W %s:00 ' % cmd_args.hps_num_hours + \
'-e random_seeds.err -o random_seeds.out -R "%s" ' % \
cmd_args.hps_resources.strip('"')
cluster_cmd_suffix = ' --run_cluster ' + \
'--scheduler=%s ' % cmd_args.scheduler +\
'--num_jobs=%s ' % cmd_args.num_jobs +\
'--num_hours=%s ' % cmd_args.num_hours + \
'--resources="\\"%s\\"" ' % cmd_args.resources.strip('"') + \
'--num_searches=%d ' % num_seeds
elif cmd_args.run_cluster:
assert cmd_args.scheduler == 'slurm'
cluster_cmd_suffix = ' --run_cluster ' + \
'--scheduler=%s ' % cmd_args.scheduler + \
'--num_jobs=%s ' % cmd_args.num_jobs + \
'--num_hours=%s ' % cmd_args.num_hours + \
'--slurm_mem=%s ' % cmd_args.slurm_mem + \
'--slurm_gres=%s ' % cmd_args.slurm_gres + \
'--slurm_partition=%s ' % cmd_args.slurm_partition + \
'--slurm_qos=%s ' % cmd_args.slurm_qos + \
'--slurm_constraint=%s ' % cmd_args.slurm_constraint + \
'--num_searches=%d ' % num_seeds
else:
non_cluster_cmd_suffix = \
'--visible_gpus=%s ' % cmd_args.visible_gpus + \
'--allowed_load=%f ' % cmd_args.allowed_load + \
'--allowed_memory=%f ' % cmd_args.allowed_memory + \
'--sim_startup_time=%d ' % cmd_args.sim_startup_time + \
'--max_num_jobs_per_gpu=%d ' % cmd_args.max_num_jobs_per_gpu
#cmd_str = 'TMP_CUR_DIR="$(pwd -P)" && pushd ../../hpsearch && ' + \
cmd_str = cluster_cmd_prefix + \
'python3 hpsearch.py --grid_module=%s '% cmd_args.grid_module + \
'--grid_config=%s ' % grid_config + \
'--run_cwd=%s ' % cmd_args.run_cwd #'--run_cwd=$TMP_CUR_DIR '
if cmd_args.deterministic_search:
cmd_str += '--deterministic_search '
if cmd_args.dont_generate_full_grid:
cmd_str += '--dont_generate_full_grid '
if hpsearch_dir is not None:
cmd_str += '--out_dir=%s --force_out_dir '% hpsearch_dir + \
'--dont_force_new_dir '
cmd_str += cluster_cmd_suffix + non_cluster_cmd_suffix #+ ' && popd'
return cmd_str | 97525fc5e3e22c5367176996f5b76a9e5482c2fd | 433,357 |
def sign_split(M):
"""Given a matrix M, return two matrices. The first contains the
positive entries of M; the second contains the negative entries of M,
multiplied by -1.
"""
M_plus = M*(M>0).astype(int)
M_minus = -M*(M<0).astype(int)
return M_plus, M_minus | af573a2c68a6156cd92b5f3076c0fa6fb3c3c95e | 633,946 |
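A minimal sketch of `sign_split` on a NumPy array (NumPy assumed, since `M` must support elementwise comparison and `astype`):
import numpy as np

M = np.array([[1, -2], [-3, 4]])
M_plus, M_minus = sign_split(M)
# M_plus  -> [[1, 0], [0, 4]]  (positive entries kept)
# M_minus -> [[0, 2], [3, 0]]  (negative entries, sign-flipped)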
import csv
def read_known_craft(csv_file: str) -> list:
"""Reads the FILTER_CSV file into a `list`"""
all_rows = []
with open(csv_file) as csv_fd:
reader = csv.DictReader(csv_fd)
for row in reader:
all_rows.append(row)
return all_rows | 6afa0566f7a3e00b55e4e588dff52b3be5e196fb | 475,250 |
import re
def is_generate_var(var):
"""
Determine whether one variable is generated var
Args:
var: the variable which model used
Returns:
True if variable is generated var
"""
if re.match("_generated_var_[0-9]+", var) or \
re.match("sequence_pool_[0-9]+.tmp_[0-9]+", var) or \
re.match("cast_[0-9]+.tmp_[0-9]", var) or \
re.match("learning_rate_[0-9]+", var) or \
re.match("embedding_[0-9]+.tmp_[0-9]+", var):
return True
else:
return False | 844c25a89b850a667df230856a9fb402a824fb28 | 455,289 |
from typing import Iterable
def comma_join(items: Iterable[str]) -> str:
"""
Joins an iterable of strings with commas.
"""
return ", ".join(items) | 4ba674593dc0a8afcf766186b496a625ab70d6d0 | 542,987 |
def _unchanged(v1, v2):
"""Check if anything except 'LastSeen' is different between two alerts."""
return ({k: v for k, v in v1.items() if k != 'LastSeen'} ==
{k: v for k, v in v2.items() if k != 'LastSeen'}) | 3fdc0c04532331e6db1e638256ab9fcbc11e45e1 | 366,530 |
def get_topic_name(prefix, table, operation):
"""Create a topic name.
The topic name needs to be synced between the agents.
The agent will send a fanout message to all of the listening agents
so that the agents in turn can perform their updates accordingly.
:param prefix: Common prefix for the agent message queues.
:param table: The table in question (TUNNEL, LOOKUP).
:param operation: The operation that invokes notification (UPDATE)
:returns: The topic name.
"""
return '%s-%s-%s' % (prefix, table, operation) | 277a2b3d193f39630be0fb8722521aa0d3e6c9da | 570,532 |
def stage_title(stage):
"""Helper function for setting the title bar of a stage"""
stage_txt = ("Name", "Vocation", "Character Design and Details",
"Stats and Skills", "All Done: Thank You!")
stage_cmd = ("{w@add/name <character name>{n", "{w@add/vocation <character's vocation>{n",
"{w@add/<field name> <value>", "{w@add/<stat or skill> <stat or skill name>=<+ or -><new value>{n",
"{w@add/submit <application notes>")
msg = "{wStep %s: %s" % (stage, stage_txt[stage - 1])
msg += "\n%s" % (stage_cmd[stage - 1])
return msg | badd7ad6a59c52a0bbc4f186aca7fd4c653a0910 | 679,143 |
def obfuscate_email_address(address):
"""Replace anything looking like an e-mail address (``'@something'``)
with a trailing ellipsis (``'@…'``)
"""
if address:
at = address.find('@')
if at != -1:
return address[:at] + u'@\u2026' + \
('>' if address[-1] == '>' else '')
return address | 4ee71b37a05112d8f68187db57850889cb3191f9 | 535,086 |
def convertToCPM(dose, period):
"""
Converts from milliSieverts/hr to CPM
Parameters:
dose (double): The dosage
period (double): The time period over which the dosage
is administered
Returns the measurement in CPM
"""
conversionFactor = 350000 / 1.0
return conversionFactor * dose / period | 1ec1f1b49c2596496b0b3dc6fe857fdea1e776c0 | 624,694 |
def process_benchmark_df(df_input):
"""Take in the featurized the benchmark dataframe and clean it up"""
# select the relevant columns
df_output = df_input[["formula", "avg_mx_dists", "avg_mm_dists", "iv", "iv_p1",
"v_m", "v_x", "est_hubbard_u", "est_charge_trans"]]
# rename the column names to match those found in torrance_tabulated.xlsx
df_output = df_output.rename(columns={"avg_mx_dists": "d_mo", "avg_mm_dists": "d_mm", "v_x": "v_o",
"est_hubbard_u": "hubbard", "est_charge_trans": "charge_transfer"})
# drop rows containing NA values, sort by the formula and reindex the dataframe
return df_output.dropna().sort_values("formula").reset_index(drop=True) | 1ef6e95adbcfd4185d1597f29e3e218ee8a8eae3 | 128,324 |
def occurrences(string, sub):
""" string count with overlapping occurrences """
count = start = 0
while True:
start = string.find(sub, start) + 1
if start > 0:
count+=1
else:
return count | f586df3f5ffbc3f039a72caf20f6c2c54952f224 | 165,727 |
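A quick check that `occurrences` counts overlapping matches, unlike the built-in str.count:
>>> occurrences('aaaa', 'aa')
3
>>> 'aaaa'.count('aa')
2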
def y(x: float, slope: float, initial_offset: float = 0) -> float:
"""Same function as above, but this time with type annotations!"""
return slope * x + initial_offset | 223f981813e7b7a24d40132aefbc7a546594ccf5 | 513,318 |
import collections
def merge(queries, qp, qc, qrels):
""" Merge queries, qrels, <query, passage> pairs, <query, chunk> pairs into a single dict. """
data = collections.OrderedDict()
for qid in qc:
passage_ids = list()
labels = list()
for passage_id in qp[qid]:
doc_id = passage_id.split("_")[0]
label = 0
if doc_id in qrels[qid]: # leave unjudged documents as non-relevant
label = 1
passage_ids.append(passage_id)
labels.append(label)
assert len(passage_ids) == len(labels)
chunk_id_list = list(qc[qid].keys())
data[qid] = (queries[qid], chunk_id_list, passage_ids, labels)
return data | 24665c962d588c24ab94d48593c52d129a7bab80 | 118,881 |
def filter_short_trajectories(df, threshold):
"""
Filter trajectories that are shorter in timesteps than the threshold
:param df: pandas df with columns=['x', 'y', 'frame', 'trackId', 'sceneId', 'metaId']
:param threshold: int - number of timesteps as threshold, only trajectories over threshold are kept
:return: pd.df with trajectory length over threshold
"""
len_per_id = df.groupby(by='metaId', as_index=False).count() # sequence-length for each unique pedestrian
idx_over_thres = len_per_id[len_per_id['frame'] >= threshold] # rows which are above threshold
idx_over_thres = idx_over_thres['metaId'].unique() # only get metaIdx with sequence-length longer than threshold
df = df[df['metaId'].isin(idx_over_thres)] # filter df to only contain long trajectories
return df | d660eb522d85c1cc3eb3723d03faef0dc604df82 | 212,568 |
import json
def list_of_dicts_to_jsonl(list_input):
"""
takes a list of dict objects and turns it into a jsonl doc
"""
jsonl_contents = ""
for each_entry in list_input:
if len(jsonl_contents) == 0:
jsonl_contents = json.dumps(each_entry)
else:
jsonl_contents = jsonl_contents + "\n" + json.dumps(each_entry)
return jsonl_contents | 4da5583adec02c58e753475fba9e61dd773afcde | 243,160 |
def NPVCost(d):
"""
Parameters
----------
d : dict
Output of PED/statusQ functions with all saved variables and results
of optimization.
Returns
-------
dic : dict
dictonary with all relevant cost component of optimized solution.
"""
dic = {}
dic["fix"] = d["cost_OMfix"]
dic["var"] = d["cost_OMvar"]
#dic["fuel"] = d["cost_fuel"]
#dic["CO2"] = d["cost_CO2"]
#dic["Ext"] = d["cost_Ext"]
dic["rev"] = d["rev"]
dicc = {}
for key in dic:
dicc[key + "_share"] = sum((dic[key][y])*(1/(1.05)**(y+1)) for y in range(len(dic[key])))
dic = {**dic, **dicc}
dic["NPV"] = d["obj"]
dic["I"] = d["cost_I"]
return dic | f436752277196d3f2e42f10c35b220b71ca4440c | 396,637 |
import pkg_resources
def list_main(argv_unused): # pylint: disable=unused-argument
"""
list
List the FILENAMEs that edx_lint can provide.
"""
print("edx_lint knows about these files:")
for filename in pkg_resources.resource_listdir("edx_lint", "files"):
print(filename)
return 0 | 4f0ac77a03d882676c5b35e1726840d0aaeaf789 | 510,949 |
def df_rename_col(data, col, rename_to, destructive=False):
"""Rename a single column
data : pandas DataFrame
Pandas dataframe with the column to be renamed.
col : str
Column to be renamed
rename_to : str
New name for the column to be renamed
destructive : bool
If set to True, will make changes directly to the dataframe which
may be useful with very large dataframes instead of making a copy.
"""
if destructive is False:
data = data.copy(deep=True)
cols = list(data.columns)
loc = cols.index(col)
cols.insert(loc, rename_to)
cols.remove(col)
data.columns = cols
return data | da424f4c7202aa397153a3293f8f24a672c783d9 | 656,263 |
def ReadBBoxPredictFile(file_path):
"""
Args:
file path : str
File format:
image_name:<image_name.jpg>
(percentage) (abs)
<class_name>,<confidence>,<x1>,<y1>,<x2>,<y2>
...
end
example:
image_name:a.jpg
full,98%,19,30,37,50
...
end
Returns:
imgs_bbox : dict
{img_name1: [bbox1, bbox2, ...],
img_name2: [bbox1, bbox2, ...],
...
}
"""
    imgs_bbox = {}
    img_bbox = []
    img_name = None
    with open(file_path, 'r') as f:
        for l in f:
            l = l.strip()
            if l.startswith('image_name:') or l == 'end':
                # flush the bboxes collected for the previous image
                if img_name is not None and img_bbox:
                    img_bbox.sort(key=lambda x: x['conf'], reverse=True)
                    imgs_bbox[img_name] = img_bbox.copy()
                    img_bbox = []
                # record the new image name
                img_name = l.split(':')[-1]
            elif l:
                # Read bboxes!
                parts = l.split(',')
                bbox = dict()
                bbox['label'] = parts[0]
                bbox['conf'] = float(parts[1].split('%')[0])
                bbox['x1'] = int(parts[2])
                bbox['y1'] = int(parts[3])
                bbox['x2'] = int(parts[4])
                bbox['y2'] = int(parts[5])
                img_bbox.append(bbox)
    return imgs_bbox | 086865ca68bd3387f090ffd5cdedac659cd10bbf | 37,333 |
def check_bit_set(value: int, bit: int):
"""
Simple function to determine if a particular bit is set
eg (12 - binary 1100) then positions 3 and 4 are set
:param value: Number to be tested
:param bit: Position to check; >0 (right to left)
:return: Bool: True if bit is set
"""
    return bool(value & (1 << (bit - 1))) | ce78135b1d74cc3da31010765bcc3c32dcb680ab | 230,206 |
def compute_optalpha(norm_r, norm_Fty, epsilon, comp_alpha=True):
"""
Compute optimal alpha for WRI
Parameters
----------
norm_r: Float
Norm of residual
norm_Fty: Float
Norm of adjoint wavefield squared
epsilon: Float
Noise level
comp_alpha: Bool
Whether to compute the optimal alpha or just return 1
"""
if comp_alpha:
if norm_r > epsilon and norm_Fty > 0:
return norm_r * (norm_r - epsilon) / norm_Fty
else:
return 0
else:
return 1 | 7206d14d41df1f9d9c5e9989f4b9fc36b9b8ae31 | 647,409 |
def z_periodicity(periodicity_value):
"""
The periodicity of a bond along the z axis.
"""
return periodicity_value | 49cd0c249bde4dfea56410e0b051d2a4c65585bb | 135,490 |
def highlight_max(s):
"""
highlight the maximum in a Pandas dataframe Series yellow.
"""
is_max = s == s.max()
return ["background-color: yellow" if v else "" for v in is_max] | af0efb38f83511c8368fb174dd400a4d93a9d148 | 93,394 |
from typing import Iterable
from typing import Tuple
from typing import List
def paired_tuple_list_to_two_lists( inlist: Iterable[ Tuple ] ) -> Tuple[ List, List ]:
"""
Splits a list of the form [ ( value1_1, value1_2 ), ( value 2_1, value2_2 ), ... ]
and returns two lists containing values split by indices:
[ value1_1, value2_1, ... ]
[ value1_2, value2_2, ... ]
:param inlist: Paired tuple list
:type inlist: Iterable
:return: Tuple of two lists
:rtype: tuple
"""
xlist, ylist = zip( *inlist )
return list( xlist ), list( ylist ) | d0a4267e65a24951c447ba66bae74dcabe5320ef | 484,719 |
def find_prefix_entry(message, dictionary):
"""
    Find the longest entry in dictionary which is a prefix of the given message.
    Assumes the entries are (prefix, ...) tuples sorted by increasing prefix
    length; returns the index of the match, or -1 if no prefix matches.
"""
for entry in dictionary[::-1]:
if message.startswith(entry[0]):
return dictionary.index(entry)
return -1 | 93b92c46adfb0bcf0086c8626ba6d5cab088abc2 | 197,072 |
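A usage sketch for `find_prefix_entry`, assuming a dictionary of (prefix, code) tuples sorted by increasing prefix length; the entries here are hypothetical:
>>> d = [('a', 1), ('ab', 2), ('abc', 3)]
>>> find_prefix_entry('abcd', d)
2
>>> find_prefix_entry('xyz', d)
-1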
def str2bytes(data):
"""
Converts string to bytes.
>>> str2bytes("Pwning")
b'Pwning'
"""
return bytes(data, encoding="utf-8") | 0e9ef347c245cdf4965e0b594b450ebeebc52a41 | 673,951 |
def P_otc6486(H, D, gamma, c):
""" Returns the uplift resistance of cohesive materials.
OTC6486 - Equation (7)
"""
return gamma * H * D + 2 * H * c | 2644f6df3744717be7f7614207469fbf5e26551a | 244,725 |
def get_invalid_keys(validation_warnings):
"""Get the invalid keys from a validation warnings list.
Args:
validation_warnings (list): A list of two-tuples where the first
item is an iterable of string args keys affected and the second
item is a string error message.
Returns:
A set of the string args keys found across all of the first elements in
the validation tuples.
"""
invalid_keys = set([])
for affected_keys, error_msg in validation_warnings:
for key in affected_keys:
invalid_keys.add(key)
return invalid_keys | 0eb0db053e505f696471e7166051d9b2e025d439 | 661,963 |
def closest_multiple(N, base : int = 16):
"""
Return the closest multiple of 'base' to 'N'
"""
return base * round( N / base ) | 8343e6e43fdf18418e52a07d9f003ae641cf848e | 560,366 |
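Quick checks for `closest_multiple`, assuming the function above is in scope. Note that Python's round() uses banker's rounding, so exact midpoints (e.g. N=40 with base=16) snap to the even multiple:
>>> closest_multiple(100)
96
>>> closest_multiple(23, base=10)
20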
def parse_join_code(join_code):
"""
takes the join code and makes sure it's at least 6 digits long
Args:
join_code (int): the number of join codes sent out so far
Returns:
string: the actual join code
"""
if join_code < 10:
return f"00000{join_code}"
elif join_code < 100:
return f"0000{join_code}"
elif join_code < 1000:
return f"000{join_code}"
elif join_code < 10000:
return f"00{join_code}"
elif join_code < 100000:
return f"0{join_code}"
else:
return f"{join_code}" | a83ddcd31ecbe701ede66118691b5a0cba9fae19 | 74,617 |
def find_largest_hotspot(centroid_list: list) -> int:
"""
Description: Find the size of the cluster with the most data points
:param centroid_list: list of centroid for each final cluster.
each centroid in form of [longitude, latitude, # location]
:return: an integer which is the length of the longest cluster
"""
largest_hotspot = 0 # set the current largest size
for a_centroid in centroid_list: # iterate through each centroid
if a_centroid[2] > largest_hotspot: # if [# location] > the current largest size
largest_hotspot = a_centroid[2] # change the largest size to the size of this centroid
return largest_hotspot | 6841f19e13af38520ab77b063c282f5a42c839ae | 527,415 |
def parse_variable_assignments(assignments):
"""
Parses a list of key=value strings and returns a corresponding dictionary.
Values are tried to be interpreted as float or int, otherwise left as str.
"""
variables = {}
for assignment in assignments or ():
key, value = assignment.replace(' ', '').split('=', 1)
for convert in (int, float, str):
try:
value = convert(value)
except ValueError:
continue
else:
break
variables[key] = value
return variables | de3c10dc1c868423d69a8815b85deced278e1cf4 | 489,015 |
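A usage sketch for `parse_variable_assignments`, assuming the function above is in scope; the keys are hypothetical:
>>> parse_variable_assignments(['n=10', 'lr=0.5', 'name=test'])
{'n': 10, 'lr': 0.5, 'name': 'test'}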
def dotted_name(cls):
"""Return the dotted name of a class."""
return "{0.__module__}.{0.__name__}".format(cls) | 7f0c8eab3769e4b200a9fd0ed5b2df22aad6e2a6 | 363,472 |
def is_equation(text):
"""test if a piece of text is a latex equation, by how it is wrapped"""
text = text.strip()
if any(
[
text.startswith("\\begin{{{0}}}".format(env))
and text.endswith("\\end{{{0}}}".format(env))
for env in [
"equation",
"split",
"equation*",
"align",
"align*",
"multline",
"multline*",
"gather",
"gather*",
]
]
):
return True
elif text.startswith("$") and text.endswith("$"):
return True
else:
return False | 6cfc441dd460833d28b22fd4a6bfdc355d312e73 | 551,716 |
def get_value(x):
"""
Extract value from <v,u> data.
"""
return x.value | d89f25e23b5b28cb3d36a1299b1f1fab01be9896 | 593,378 |
def parse_bool(value, additional_true=None, additional_false=None):
"""Parses a value to a boolean value.
If `value` is a string try to interpret it as a bool:
* ['1', 't', 'y', 'true', 'yes', 'on'] ==> True
* ['0', 'f', 'n', 'false', 'no', 'off'] ==> False
Otherwise raise TypeError.
Args:
value: value to parse to a boolean.
additional_true (list): optional additional string values that stand for True.
additional_false (list): optional additional string values that stand for False.
Returns:
bool: True if `value` is true, False if `value` is false.
Raises:
        TypeError: `value` is a string that does not parse as a boolean.
"""
true_values = ['1', 't', 'y', 'true', 'yes', 'on']
false_values = ['0', 'f', 'n', 'false', 'no', 'off', 'none']
if additional_true:
true_values.extend(additional_true)
if additional_false:
false_values.extend(additional_false)
if isinstance(value, str):
value = value.lower()
if value in true_values:
return True
if value in false_values:
return False
raise TypeError
return bool(value) | ca22044cf5ac0a35a0e42fe16a54c7c96dd8cc17 | 551,153 |
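Doctest-style checks for `parse_bool`, assuming the function above is in scope:
>>> parse_bool('Yes')
True
>>> parse_bool('off')
False
>>> parse_bool(0)
False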
def is_numeric(s):
"""
Return True is the string ``s`` is a numeric string.
Parameters
----------
s : str
A string.
Returns
-------
res : bool
If True, ``s`` is a numeric string and can be converted to an int or a
float. Otherwise False will be returned.
"""
try:
float(s)
except ValueError:
return False
else:
return True | 4a2a9e99dab8c1bdd29e62cf9ddf35b9d8c949fc | 496,097 |
import logging
import codecs
def read_text_from_file(file_name, insert=''):
"""
    Method that reads a message from a text file and optionally adds some
    dynamically generated info.
:param file_name: Name of file to read
:param insert:
:return: message
"""
if not isinstance(file_name, str):
logging.error('Problem with file reading, filename must be a string')
raise TypeError('file_name must be a string')
msg = list()
with codecs.open(file_name, encoding='utf-8', mode='r') as data_file:
for line in data_file:
if not line.startswith('#'): # if not commented line
if line.startswith('<--insert-->'):
if insert:
msg.append(insert)
else:
msg.append(line)
return ''.join(msg) | 5eceae07d196c191d74c975abeee0128597d8c91 | 296,685 |
def subND(v1, v2):
"""Subtracts two nD vectors together, itemwise"""
return [vv1 - vv2 for vv1, vv2 in zip(v1, v2)] | 566b1916ea1b068e062d8e14665a039c1209bfc1 | 237,008 |
def is_in_constraints(trajectory, constraints, costs_by_time):
"""
    Check whether the trajectory complies with each
    constraint.
Inputs:
- trajectory: pandas DataFrame
Trajectory to be checked
- constraints: list
Constraints the trajectory must complain with.
        Each constraint is modeled as a function; if the function
is negative when applied on the right variable, then the
constraint is considered as not satisfied.
ex: [f1, f2] and if f1(trajectory) < 0 then the
constraint is not satisfied
- costs_by_time: ndarray
Optimized trajectory cost (useful when the constraints
depend on the trajectory cost)
Output:
is_in: bool
        Whether or not constraints are satisfied
"""
# Create column for cost
trajectory_and_cost = trajectory
trajectory_and_cost["cost"] = costs_by_time
for constraint in constraints:
evaluation = constraint(trajectory_and_cost)
if (evaluation <= 0).any():
return False
return True | 7d1b03c890a296ae6bf94bb4b90f5bfb1281b852 | 183,538 |
def invert_tree_recursive(root):
"""
Invert binary tree
:param root: root node
:type root: TreeNode
:return: root node of inverted tree
:rtype: TreeNode
"""
# basic case
if root is None:
return None
root.left, root.right = invert_tree_recursive(root.right), invert_tree_recursive(root.left)
return root | 4a655fa39b4c0d1de661b1ae948043c6ea6ca6c3 | 518,701 |
def parse_access_variable(v):
"""
    Parses the accessibility arguments from a variable name.
    Should be structured as <variable_name>_<to|from>_within<travel_time>_<mode>.
For example: `sb_jobs_sector92_to_within20_OpAuto`:
- variable: sb_jobs_sector92
- direction: travel towards the zone
- travel time: 20 minutes
- mode: OpAuto
Returns a dictionary of the parsed arguments.
If the variable name does match the needed pattern a None is returned.
"""
try:
if '_within' not in v:
return None
# parse into left and right of `_within`
split1 = v.split('_within')
if len(split1) != 2:
return None
# parse out the variable and direction from the left
left = split1[0]
if left.endswith('_to'):
var_name = left.split('_to')[0]
to = True
elif left.endswith('_from'):
var_name = left.split('_from')[0]
to = False
else:
return None
# parse out the travel time and mode from the right
split2 = split1[1].split('_')
travel_time = int(split2[0])
travel_mode = split2[1]
return {
'variable': var_name,
'time': travel_time,
'mode': travel_mode,
'to': to
}
except Exception:
return None | 83ac0d8bfda29a18f6785a5bea5dfa7a4fdb004e | 657,765 |
def get_value(self):
"""get value as numpy array
:return: numpy array, or None if value is empty"""
self.__apply_lazy_initializer()
return self._get_npyarr() | 69a3d6c04870695da2e3948cac338833d8209c51 | 76,954 |
def average(number1, number2, number3):
"""
Calculating the average of three given numbers
Parameters:
number1|2|3 (float): three given numbers
Returns:
number (float): Returning the statistical average of these three numbers
"""
return (number1 + number2 + number3) / 3.0 | 04102ee8646b6e5d2cfa9265771c4f4bdbe45d45 | 30,570 |
def trivial(target):
"""
Return True if target is irreducible, False if reducible
or None if undecidable.
This trivial function checks the followings:
(1) if the constant term is zero, the polynomial is reducible.
(2) if not (1) and the degree is <= 1, the polynomial is irreducible.
"""
if not target[0]:
return False
elif target.degree() <= 1:
return True
else:
return None | 5b43e8f19796a1a4075fddef3cf4a52254dedf85 | 68,393 |
import six
import base64
def _Base64EncodeLdap(username, passwd):
"""Base64 Encode Ldap username and password."""
enc = lambda s: six.ensure_text(base64.b64encode(six.ensure_binary(s)))
return enc(username), enc(passwd) | 06ac8a27a9e3e0e0efa045e82cb12a235eaf90de | 244,852 |
def make_move(state, coord, currentplayer):
"""Update the current board using a coordinate pair and the current
player."""
row, col = coord
state[row, col] = currentplayer
return state | 435d4d3e439e622f9441ac8ce7bb0a7222b54397 | 200,731 |
def quotestr(v):
"""Quote a string value to be output."""
if not v:
v = '""'
elif " " in v or "\t" in v or '"' in v or "=" in v:
v = '"%s"' % v.replace(r'"', r"\"")
return v | 9051d54c8716e78c0e684855bba32c2e7531588b | 488,400 |
def user_from_face_id(face_id: str) -> str:
"""Returns the name from a face ID."""
return face_id.split("_")[0].capitalize() | c49cc881b33699775338a0a772276702874029f3 | 69,470 |
def get_commands_file(filename):
"""
This takes a sql script file and breaks it down to commands to be executed separately
returns list(string) of individual commands
"""
with open(filename, "r") as sql_file:
# Split file in list
ret = sql_file.read().split(';')
# drop last empty entry
ret.pop()
return ret | efd475bc5b79b5a1410c2f6b29e5ebf0c42aeda0 | 79,705 |
def get_bruised_limbs(life):
"""Returns list of bruised limbs."""
_bruised = []
for limb in life['body']:
if life['body'][limb]['bruised']:
_bruised.append(limb)
return _bruised | 8218fd689f531bc00b75469aa1be57cc63527c6d | 453,700 |
def convert_to_list_with_index(items: list) -> list:
"""
Prepare simple list to inquirer list.
Convert to dicts list, with 2 keys: index and value from original list
Args:
items (list): Original items list which needs to convert
Return:
list: List of dicts with index key and item
"""
return [{'value': index, 'name': item} for index, item in enumerate(items)] | a8678056e96207caecbca88f25e62c0ca6eb1807 | 143,341 |
def z_score(input_data, axis = None):
""" compute the z score for a given input matrix and axis """
input_mean = input_data.mean(axis = axis, keepdims = True)
input_std = input_data.std(axis = axis, keepdims = True)
return (input_data - input_mean) / input_std | 9dc1807970e753f662499a6d389d0d2c5a62aab2 | 407,311 |
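A minimal sketch of `z_score` on a NumPy array (NumPy assumed, since the input must expose mean/std with keepdims):
import numpy as np

data = np.array([1.0, 2.0, 3.0])
z_score(data)
# -> array([-1.22474487, 0., 1.22474487])
# (mean 2.0, population std sqrt(2/3) ~= 0.8165)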