content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
---|---|---|
def compute_average_oxidation_state(site):
    """Return the occupancy-weighted average oxidation state of a site.

    Args:
        site: Site to compute the average oxidation state for. Either an
            oxidation-state-decorated site (has ``species_and_occu``) or a
            site carrying an explicit ``charge``.

    Returns:
        Average oxidation state of the site.

    Raises:
        ValueError: if the site has neither species oxidation states nor a
            site charge.
    """
    try:
        # Weight each species' oxidation state by its fractional occupancy.
        return sum(species.oxi_state * occupancy
                   for species, occupancy in site.species_and_occu.items()
                   if species is not None)
    except AttributeError:
        pass
    try:
        # Fall back to an explicitly assigned site charge.
        return site.charge
    except AttributeError:
        raise ValueError("Ewald summation can only be performed on structures "
                         "that are either oxidation state decorated or have "
                         "site charges.")
import math
def cos(x, offset=0, period=1, minn=0, maxx=1):
    """A cosine curve scaled to fit in a 0-1 range and 0-1 domain by default.

    offset: how much to slide the curve across the domain (should be 0-1)
    period: the length of one wave
    minn, maxx: the output range
    """
    # Cosine in [0, 1], then rescaled to [minn, maxx].
    unit = math.cos((x / period - offset) * math.pi * 2) / 2 + 0.5
    return minn + unit * (maxx - minn)
def mean_of_list(list_in):
    """Return the arithmetic mean of a list.

    Parameters
    ----------
    list_in : list
        data for analysis

    Returns
    -------
    float
        result of calculation
    """
    total = sum(list_in)
    return total / len(list_in)
from collections import OrderedDict
def read_markers_gmt(filepath):
    """Read a marker file in GMT format.

    Each tab-separated line is ``name<TAB>description<TAB>item1<TAB>item2...``;
    returns an OrderedDict mapping name -> list of items (description dropped).
    """
    markers = OrderedDict()
    with open(filepath) as handle:
        for raw_line in handle:
            fields = raw_line.strip().split('\t')
            # fields[1] is the description column, intentionally skipped.
            markers[fields[0]] = fields[2:]
    return markers
def construct_select_bijlagen_query(bericht_uri):
    """
    Construct a SPARQL query for retrieving all bijlages for a given bericht.

    :param bericht_uri: URI of the bericht for which we want to retrieve bijlagen.
    :returns: string containing SPARQL query
    """
    query = f"""
    PREFIX schema: <http://schema.org/>
    PREFIX nfo: <http://www.semanticdesktop.org/ontologies/2007/03/22/nfo#>
    PREFIX nie: <http://www.semanticdesktop.org/ontologies/2007/01/19/nie#>
    PREFIX dct: <http://purl.org/dc/terms/>
    SELECT DISTINCT ?bijlagenaam ?file ?type WHERE {{
        <{bericht_uri}> a schema:Message;
            nie:hasPart ?bijlage.
        ?bijlage a nfo:FileDataObject;
            nfo:fileName ?bijlagenaam;
            dct:format ?type.
        ?file nie:dataSource ?bijlage.
    }}
    """
    return query
def encode_captions(captions):
    """
    Convert all captions' words into indices.

    Input:
    - captions: dictionary containing image names and list of corresponding captions

    Returns:
    - word_to_idx: dictionary of indices for all words
    - idx_to_word: list containing all words (special tokens first)
    - vocab_size: number of words
    """
    # Count word occurrences, preserving first-seen order for the vocabulary.
    counts = {}
    for caption_list in captions.values():
        for caption in caption_list:
            for token in caption.split():
                counts[token] = counts.get(token, 0) + 1
    specials = ['<START>', '<END>', '<PAD>']
    idx_to_word = specials + [w for w in counts if w not in specials]
    word_to_idx = {word: idx for idx, word in enumerate(idx_to_word)}
    return word_to_idx, idx_to_word, len(idx_to_word)
def load_stop_words(stop_word_file):
    """
    Utility function to load stop words from a file and return as a list of words
    @param stop_word_file Path and file name of a file containing stop words.
    @return list A list of stop words.
    """
    stop_words = []
    # with-statement ensures the file handle is closed (original leaked it)
    with open(stop_word_file) as handle:
        for line in handle:
            if line.strip()[0:1] != "#":  # skip comment lines
                for word in line.split():  # in case more than one per line
                    stop_words.append(word)
    return stop_words
def NullFlagHandler(feature):
    """Flag handler that ignores its input and always returns False."""
    return False
import glob
def datedfile(filename, date):
    """ select file based on observation date and latest version
    Parameters
    ----------
    filename: text file name pattern, including "yyyymmdd_vnn" place holder for date and version
    date: yyyymmdd of observation
    Returns: file name ("" if no candidate file exists)
    """
    # All candidate files on disk, sorted so later dates/versions come last.
    filelist = sorted(glob.glob(filename.replace('yyyymmdd_vnn','????????_v??')))
    if len(filelist)==0: return ""
    # Character offset of the 8-digit date field inside each file name.
    dateoffs = filename.find('yyyymmdd')
    datelist = [file[dateoffs:dateoffs+8] for file in filelist]
    file = filelist[0]
    for (f,fdate) in enumerate(datelist):
        # Only consider files dated on or before the observation date.
        if date < fdate: continue
        # NOTE(review): this inner scan appears intended to pick the latest
        # version among files sharing the date fdate (vdate > fdate is
        # skipped); confirm against callers before restructuring.
        for (v,vdate) in enumerate(datelist[f:]):
            if vdate > fdate: continue
            file = filelist[f+v]
    return file
import random
import string
def random_user(n):
    """generate a random user id of size n"""
    return ''.join(random.choice(string.ascii_lowercase) for _ in range(n))
def _get_parse_input(parse_args, args_in, dict_in):
"""Return default for parse_input.
This is to decide if context_parser should run or not.
To make it easy on an API consumer, default behavior is ALWAYS to run
parser UNLESS dict_in initializes context and there is no args_in.
If dict_in specified, but no args_in: False
If dict_in specified, AND args_in too: True
If no dict_in specified, but args_in is: True
If no dict_in AND no args_in: True
If parse_args explicitly set, always honor its value.
Args:
parse_args (bool): Whether to run context parser.
args_in (list[str]): String arguments as passed from the cli.
dict_in (dict): Initialize context with this dict.
Returns:
Boolean. True if should parse input.
"""
if parse_args is None:
return not (args_in is None and dict_in is not None)
return parse_args | 64dcfd32a3d9f66749a27d4b26bd5fb3a66edf28 | 703,715 |
def _pack_64b_int_arg(arg):
"""Helper function to pack a 64-bit integer argument."""
return ((arg >> 56) & 0xff), ((arg >> 48) & 0xff), ((arg >> 40) & 0xff), ((arg >> 32) & 0xff), \
((arg >> 24) & 0xff), ((arg >> 16) & 0xff), ((arg >> 8) & 0xff), (arg & 0xff) | 274fadb627de9ac47bff34c8e55db545b8e6cf0a | 703,723 |
def list_array_paths(path, array_dict):
    """
    Given a dictionary mapping each directory (experiment folder) to a list
    of array data files (i.e., the output of 'find_arrays'), return a flat
    list of full paths to array files.
    """
    return [path + folder + "/" + file_name
            for folder in array_dict
            for file_name in array_dict[folder]]
def log_simple(n, k):
    """
    Find how many factors of k divide n.
    For example 28 = 2 * 2 * 7, so log_simple(28, 2) returns (7, 2)
    and log_simple(28, 7) returns (4, 1).

    Returns:
        tuple: (remaining factor after dividing out k, exponent of k).

    Fix: uses integer floor division (//=) so the remaining factor stays an
    int; the original true division returned a float (e.g. 7.0).
    """
    log_result = 0
    while n % k == 0:
        log_result += 1
        n //= k
    return n, log_result
def spinChainProductSum(spins):
    """
    Calculate the Ising nearest-neighbor interactions of a spin chain under
    a periodic boundary condition (PBC).

    Parameters
    ----------
    spins : list of ints or floats
        The given spin chain under PBC.

    Returns
    -------
    float
        The sum of nearest-neighbor products, including the wrap-around pair.
    """
    # Pair each spin with its right neighbor; the slice trick wraps the
    # last spin around to the first (periodic boundary).
    neighbors = zip(spins, spins[1:] + spins[:1])
    return float(sum(left * right for left, right in neighbors))
def has_ext_state(section: str, key: str) -> bool:
    """
    Return whether extended state exists for the given section and key.

    Parameters
    ----------
    section : str
        Extended state section.
    key : str
        Extended state key.

    Returns
    -------
    bool
        True if extended state exists.
    """
    return bool(RPR.HasExtState(section, key))  # type:ignore
def translate(value, leftMin, leftMax, rightMin, rightMax):
    """
    Linearly map a value from the range [leftMin, leftMax] into
    [rightMin, rightMax].

    :param value: Value to be normalized
    :param leftMin: original min value
    :param leftMax: original max value
    :param rightMin: final min value
    :param rightMax: final max value
    :return: Normalized value
    """
    # Fraction of the way through the source range, then rescaled.
    scaled = float(value - leftMin) / float(leftMax - leftMin)
    return rightMin + scaled * (rightMax - rightMin)
from typing import Dict
from typing import Optional
def get_gsx_entry_value(entry: Dict[str, Dict[str, str]], field: str) -> Optional[str]:
    """Returns the `entry` value for the given `field`, stripped of
    surrounding whitespace, or None when the entry/field is missing."""
    if not entry or not field:
        return None
    # Google Sheets JSON feed prefixes column names with "gsx$".
    key = f"gsx${field}"
    if key not in entry:
        return None
    return entry[key]["$t"].strip()
from typing import Sequence
import re
def parse_fvalues(fvalues: Sequence) -> frozenset:
    """
    Parse a sequence of fvalues as a frozenset.

    Mostly used for parsing user-provided strings, splitting them on any of
    the accepted delimiters, but accepts any sequence type. Delimiters are
    white space, commas, semicolons, forward slashes, and the " and "
    substring.

    @param fvalues: The sequence with the fvalues to be parsed.
    @return: A frozenset with the fvalues.
    """
    if isinstance(fvalues, str):
        # Map every accepted delimiter to a space, then split on whitespace.
        normalized = fvalues
        for delimiter in (" and ", ",", ";", "/"):
            normalized = normalized.replace(delimiter, " ")
        fvalues = re.sub(r"\s+", " ", normalized.strip()).split()
    return frozenset(fvalues)
def compute_q10_correction(q10, T1, T2):
    """Compute the Q10 temperature-correction factor.

    In HH-style models the rates of activation and inactivation increase
    with temperature; time constants are adjusted by the factor
    Q10**((T2 - T1) / 10), where T1 <= T2. T1 and T2 must share the same
    unit (Celsius or Kelvin) and need not be exactly 10 degrees apart.

    Parameters
    ----------
    q10 : float
        The Q10 temperature coefficient (rate increase per 10-degree rise).
    T1 : float
        Temperature at which the first rate is recorded.
    T2 : float
        Temperature at which the second rate is recorded.

    Returns
    -------
    float
        Correction factor due to temperature (1.0 when T1 == T2).

    References
    ----------
    .. [1] D. Sterratt, B. Graham, A. Gillies, D. Willshaw,
           "Principles of Computational Modelling in Neuroscience",
           Cambridge University Press, 2011.
    """
    # T1 == T2 is deliberately allowed: it yields a correction factor of 1.
    if T1 > T2:
        raise ValueError("T2 must be greater than or equal to T1")
    return q10 ** ((T2 - T1) / 10)
def without(array, *values):
    """Creates a list with all occurrences of the passed values removed.

    Args:
        array (list): List to filter.
        values (mixed): Values to remove.

    Returns:
        list: Filtered list.

    Example:
        >>> without([1, 2, 3, 2, 4, 4], 2, 4)
        [1, 3]

    .. versionadded:: 1.0.0
    """
    return list(filter(lambda element: element not in values, array))
def get_total_mnsp_ramp_rate_violation(model):
    """Return the total MNSP ramp-rate violation (ramp-up plus ramp-down)."""
    up_violation = sum(var.value for var in model.V_CV_MNSP_RAMP_UP.values())
    down_violation = sum(var.value for var in model.V_CV_MNSP_RAMP_DOWN.values())
    return up_violation + down_violation
import math
def ecliptic_obliquity_radians(time):
    """Returns ecliptic obliquity radians at time."""
    degrees = 23.439 - 0.0000004 * time
    return math.radians(degrees)
from datetime import datetime
import functools
import time
def function_timer(func):
    """Timer decorator: add `@function_timer` before a `def` statement and
    the function's elapsed wall-clock time is printed after each call.

    Arguments:
        func {function} -- the function to wrap

    Returns:
        The wrapped function; it prints the time taken (seconds, minutes, or
        hours) and returns the original function's return value.
    """
    @functools.wraps(func)
    def wrapper_timer(*args, **kwargs):
        start_time = time.time()
        value = func(*args, **kwargs)
        elapsed_time = time.time() - start_time
        stop_date = datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
        # BUG FIX: the original condition `elapsed_time > 60 <= 3600` chained
        # to `elapsed_time > 60 and 60 <= 3600` (always true), so the hours
        # branch below was unreachable. Use an explicit range check instead.
        if 60 < elapsed_time <= 3600:
            print(
                f"The function {func.__name__} took: {round(elapsed_time/60, 3)} minutes at {stop_date}"
            )
        elif elapsed_time > 3600:
            print(
                f"The function {func.__name__} took: {round((elapsed_time/60)/60, 3)} hours at {stop_date}"
            )
        else:
            print(f"The function {func.__name__} took: {round(elapsed_time, 3)} seconds")
        return value
    return wrapper_timer
def sanitize_url(url: str) -> str:
    """
    Strip the protocol name (http, https, ftp) from the start of a URL so
    that URLs using different protocols compare equal.
    """
    # "https" is checked first so the trailing "s" is stripped with it.
    for scheme in ("https", "http", "ftp"):
        if url.startswith(scheme):
            url = url[len(scheme):]
    return url
def overlap_branches(targetbranch: dict, sourcebranch: dict) -> dict:
    """
    Recursively merge *sourcebranch* into *targetbranch*, mutating
    *targetbranch* in place and returning it.

    Examples:
        >>> overlap_branches(
        ...     {"a": 1, "b": {"de": "ep"}},
        ...     {"b": {"de": {"eper": 2}}}
        ... )
        {'a': 1, 'b': {'de': {'eper': 2}}}
        >>> overlap_branches({}, {"ne": {"st": "ed"}})
        {'ne': {'st': 'ed'}}
        >>> overlap_branches({"ne": {"st": "ed"}}, {})
        {'ne': {'st': 'ed'}}

    Args:
        targetbranch(dict):
            Root where the new branch should be put.
        sourcebranch(dict):
            New data to be put into the targetbranch.
    """
    if not isinstance(sourcebranch, dict):
        return sourcebranch
    for key, incoming in sourcebranch.items():
        existing = targetbranch.get(key)
        if key in targetbranch and isinstance(existing, dict):
            # Both sides have a dict here: descend and merge recursively.
            targetbranch[key] = overlap_branches(existing, incoming)
        else:
            targetbranch[key] = incoming
    return targetbranch
def normalize_string(value):
    """ Normalize a string value: decode bytes, strip whitespace. """
    if isinstance(value, bytes):
        value = value.decode()
    if not isinstance(value, str):
        raise ValueError("Cannot convert {} to string".format(value))
    return value.strip()
def mapAddress(name):
    """Given a register name, return the address of that register.

    Passes integers through unaffected. The address is looked up from the
    module-level RCPOD_REG_* constants.
    """
    # isinstance is the idiomatic type check (original used type(name) == type('')).
    if isinstance(name, str):
        return globals()['RCPOD_REG_' + name.upper()]
    return name
def list_index(ls, indices):
    """numpy-style creation of a new list from a list of elements and a
    list of indices.

    Parameters
    ----------
    ls: list
        List of elements
    indices: list
        List of indices

    Returns
    -------
    list
    """
    return list(map(ls.__getitem__, indices))
def clean_dict(dictionary: dict) -> dict:
    """Recursively removes `None` values from `dictionary` (in place).

    Args:
        dictionary (dict): subject dictionary

    Returns:
        dict: the same dictionary, without None values
    """
    # Iterate over a snapshot of keys since we delete while traversing.
    for key in list(dictionary):
        value = dictionary[key]
        if isinstance(value, dict):
            clean_dict(value)
        elif value is None:
            del dictionary[key]
    return dictionary
def tag_tranfsers(df):
    """Tag txns with description indicating tranfser payment.

    Returns a copy of df with 'transfers' written to the 'tag' column for
    matching rows; fee/interest descriptions are excluded.
    """
    df = df.copy()
    transfer_pattern = '|'.join([' ft', ' trf', 'xfer', 'transfer'])
    exclude_pattern = '|'.join(['fee', 'interest'])
    desc = df.transaction_description
    is_transfer = desc.str.contains(transfer_pattern) & ~desc.str.contains(exclude_pattern)
    df.loc[is_transfer, 'tag'] = 'transfers'
    return df
def have_same_SNP_order(dict_A, dict_B):
    """
    Check whether two dictionaries list their SNP keys in the same order,
    ignoring the special "ext" key.
    """
    keys_a = [key for key in dict_A.keys() if key != "ext"]
    keys_b = [key for key in dict_B.keys() if key != "ext"]
    return keys_a == keys_b
def remove_leading_character(string, character):
    """
    If "string" starts with "character", strip that leading prefix away.
    Only removes the first instance.

    Fix: strips len(character) characters, so multi-character prefixes are
    removed correctly (the original always dropped exactly one character).

    :param string:
    :param character: prefix to remove (may be more than one character)
    :return: String without the specified leading prefix
    """
    if string.startswith(character):
        return string[len(character):]
    return string
def _rfind(lst, item):
"""
Returns the index of the last occurance of <item> in <lst>. Returns -1 if
<item> is not in <l>.
ex: _rfind([1,2,1,2], 1) == 2
"""
try:
return (len(lst) - 1) - lst[::-1].index(item)
except ValueError:
return -1 | ab165a6795b0a495d24288d8e757c16ba9c968a4 | 703,790 |
def recognize_po_file(filename: str) -> bool:
    """ Recognize a gettext .po file by its extension. """
    return filename.endswith(".po")
import zipfile
import json
def _GetVersionFromCrx(crx_path):
"""Retrieves extension version from CRX archive.
Args:
crx_path: path to CRX archive to extract version from.
"""
with zipfile.ZipFile(crx_path, 'r') as crx_zip:
manifest_contents = crx_zip.read('manifest.json')
version = json.loads(manifest_contents)['version']
return version | e6b612f94b0fa4e62f5ecb8df297a6255294ec5f | 703,796 |
import hashlib
def safe_md5(open_file, block_size=2**20):
    """Computes an md5 sum without loading the whole file into memory.

    Reads the already-open file in block_size chunks and returns the md5
    hash object (call .hexdigest() on the result).
    """
    digest = hashlib.md5()
    while True:
        chunk = open_file.read(block_size)
        if not chunk:
            break
        digest.update(chunk)
    return digest
def get_licence(html):
    """
    Searches the HTML content for a mention of a CC licence; returns "CC"
    when found, "N/A" otherwise.
    """
    mentions_cc = "creative-commons" in html or "Creative Commons" in html
    return "CC" if mentions_cc else "N/A"
def sum_temp(s, n):
    """
    :param s: int, running total of temperature values
    :param n: int, latest reading to add
    :return: s + n
    """
    return s + n
def get_lsf_grid_name(fibre_number):
    """
    Return the appropriate LSF grid name ('a', 'b', 'c', or 'd') for a mean
    fiber number.

    :param fibre_number:
        The mean fiber number of observations (valid range 1-300).

    :returns:
        A one-character string naming the LSF grid, or None when
        fibre_number falls outside 1-300 (matches original behaviour).
    """
    if 1 <= fibre_number <= 50:
        return "d"
    if 50 < fibre_number <= 145:
        return "c"
    if 145 < fibre_number <= 245:
        return "b"
    if 245 < fibre_number <= 300:
        return "a"
    return None
import logging
import json
def rest_error_message(error, jid):
    """Returns exception error message as valid JSON string to caller

    :param error: Exception, error message
    :param jid: string, job ID
    :return: JSON string
    """
    logging.exception(error)
    payload = {'user_id': 'admin', 'result': {'error': str(error)}, '_id': jid}
    return json.dumps(payload)
def Dict(val):
    """
    Build a dict from key/value pairs.
    """
    result = dict(val)
    return result
import typing
def noop(val: typing.Any, *_args, **_kwargs) -> typing.Any:
    """Identity function; any extra arguments are accepted and ignored.

    >>> noop(1)
    1
    """
    return val
def dict_to_capabilities(caps_dict):
    """Convert a dictionary into a string with the capabilities syntax
    (comma-separated ``key:value`` pairs)."""
    return ','.join(f"{key}:{value}" for key, value in caps_dict.items())
def mean(vector):
    """
    Calculate the arithmetic mean of the given vector.

    Args:
    -----
    vector : list
        A non-empty list/array of numbers to be averaged.

    Returns:
    --------
    float
        The arithmetic mean of the given vector.
    """
    total, count = sum(vector), len(vector)
    return total / count
def update_attrs(orig, keys, override):
    """Alter and add the specified attributes to a particular repository
    rule invocation, used to make a rule reproducible.

    Args:
        orig: object whose actually-set attributes (explicit or implicit)
            are read via getattr
        keys: complete set of attributes defined on this rule
        override: dict of attributes to override or add

    Returns:
        dict of attributes with the keys from override inserted/updated
    """
    # `!= None` kept as in the original (Starlark-style comparison).
    result = {key: getattr(orig, key) for key in keys if getattr(orig, key) != None}
    result["name"] = orig.name
    result.update(override)
    return result
def normalization(data, dmin=0, dmax=1, save_centering=False):
    """
    Normalize data into the [dmin, dmax] interval, or rescale while keeping
    centering:  x` = (b - a) * (xi - min(x)) / (max(x) - min(x)) + a

    Args:
        data (np.ndarray): data to normalize
        dmin (float): left end of the target interval
        dmax (float): right end of the target interval
        save_centering (bool): if True -- keep data centering and just
            divide by the magnitude of the lowest value

    Returns:
        np.ndarray: normalized data
    """
    if dmin >= dmax:
        raise Exception("Left interval 'dmin' must be fewer than right interval 'dmax'")
    if save_centering:
        return data / abs(min(data))
    low, high = min(data), max(data)
    return (data - low) * (dmax - dmin) / (high - low) + dmin
def split_repo_and_dir(repo):
    """
    Split the input string
        org-name/repo-name/subdir-name/more/sub/dirs
    (where '/subdir-name/more/sub/dirs' is optional) into
        org-name/repo-name
    and
        subdir-name/more/sub/dirs
    The second part is the empty string if no subdir-name was given.
    """
    parts = repo.split('/')
    # Joining an empty tail yields '' so the two-part case needs no branch.
    return ['/'.join(parts[:2]), '/'.join(parts[2:])]
def calculateOnlineVariance(data):
    """
    Return the sample variance of the given list using Welford's online
    algorithm (single pass, numerically stable).

    :param data: A list of numbers to be measured (ie. the window)
    :returns: The variance of the data.
    """
    count = 0
    running_mean = 0
    m2 = 0  # sum of squared deviations from the running mean
    for value in data:
        count += 1
        delta = value - running_mean
        running_mean += delta / count
        m2 += delta * (value - running_mean)
    return m2 / (count - 1)
def is_struct(struct):
    """ Checks if the message is a data structure rather than an rpc
    request/response (by name suffix)."""
    return not struct.name.endswith(("Request", "Response"))
def zfill_to_collection_size(index: int, collection_size: int) -> str:
    """
    Zero-pad an index so the resulting strings sort correctly for the given
    collection size.

    Examples:
        for 10 items prepends up to 1 zero: 1 -> "01", 10 -> "10"
        for 100 items prepends up to 2 zeroes: 7 -> "007", 13 -> "013"
    """
    width = len(str(collection_size))
    return str(index).zfill(width)
def partition(predicate, values):
    """
    Split the values into two lists based on the predicate's return value
    (False/True). e.g.:

        >>> partition(lambda x: x > 3, range(5))
        ([0, 1, 2, 3], [4])
    """
    # buckets[False] collects rejects, buckets[True] collects matches;
    # the predicate's bool result indexes directly into the tuple.
    buckets = ([], [])
    for element in values:
        buckets[predicate(element)].append(element)
    return buckets
def cmd_issuer_hash(cert):
    """Returns hash of certificate issuer.
    """
    issuer = cert.get_issuer()
    return issuer.hash()
def map_bool(to_bool) -> bool:
    """Map a string to a boolean (case-insensitive).

    Parameters
    ----------
    to_bool: str
        Value to be converted to boolean ("true"/"false" in any casing).

    Returns
    -------
    bool
        Boolean value converted from the string.

    Example
    -------
    >>> map_bool("True")
    True
    """
    lookup = {"true": True, "false": False}
    try:
        return lookup[to_bool.lower()]
    except KeyError:
        raise KeyError("Boolean Value Expected got '{}'".format(to_bool))
def str_or_list_like(x):
    """Classify x as "str", "list_like", or "others" via duck-typing.

    Strings are detected by their `strip` method; anything indexable or
    iterable (list, tuple, set, dict, ...) counts as list-like.
    """
    if hasattr(x, "strip"):
        return "str"
    if hasattr(x, "__getitem__") or hasattr(x, "__iter__"):
        return "list_like"
    return "others"
def readlist(infile):
    """Read each row of the file as an element of a list (newlines kept)."""
    with open(infile, 'r') as handle:
        return list(handle)
def read_popularity(path):
    """
    :param path: a path of popularity file. A file contains '<id>,<rank>' rows.
    :return: a set of popularity object ids

    Malformed lines are skipped. Fixes vs. the original: the file handle is
    now closed via a with-statement, and lines whose id is not an integer
    (ValueError from int()) are skipped instead of crashing.
    """
    ids = set()
    with open(path) as handle:
        for line in handle:
            try:
                ident = int(line.split(",", maxsplit=1)[0])
            except (AttributeError, IndexError, ValueError):
                continue
            ids.add(ident)
    return ids
import string
def names_to_usernames(names):
    """
    Convert a list of full names to usernames: "John Doe" -> "john.doe".

    Each name is stripped before conversion, then split by spaces. Blank
    lines and lines starting with "#" are skipped. Raises if a name
    contains anything except letters and spaces, has fewer than two parts,
    or produces a duplicate username.
    """
    allowed_chars = set(string.ascii_letters + " ")
    usernames = set()
    for raw_name in names:
        name = raw_name.strip()
        # Skip blanks and comment lines.
        if not name or name.startswith("#"):
            continue
        if not set(name).issubset(allowed_chars):
            raise Exception("Invalid characters found: %s" % name)
        name_parts = name.lower().split()
        # A full name (given + family) is required.
        if len(name_parts) <= 1:
            raise Exception("Too few parts: %s" % name_parts)
        username = ".".join(name_parts)
        if username in usernames:
            raise Exception("Duplicate: %s" % username)
        usernames.add(username)
    return list(usernames)
def vessel_tip_coupling_data_to_str(data_list):
    """Render a list of vessel tip data elements as a printable string."""
    lines = []
    for tip in data_list:
        lines.append('VesselTipData(')
        lines.append('  p = Point(x={}, y={}, z={}),'.format(tip.p.x, tip.p.y, tip.p.z))
        lines.append('  vertex_id = {},'.format(tip.vertex_id))
        lines.append('  pressure = {},'.format(tip.pressure))
        lines.append('  concentration = {},'.format(tip.concentration))
        lines.append('  R2 = {},'.format(tip.R2))
        lines.append('  radius_first = {},'.format(tip.radius_first))
        lines.append('  radius_last = {},'.format(tip.radius_last))
        lines.append('  level = {}'.format(tip.level))
        lines.append('),')
    return '\n'.join(lines)
import re
def price_quantity_us_number(price):
    """Extract the numeric quantity of the price,
    assuming US formatting (dot for decimal, comma for thousands)."""
    # Drop every character that is not a digit or a decimal point.
    return re.sub('[^0-9.]', '', price.strip())
def create_header(multiobj_bool, constr_func):
    """ Creates the CSV header used to save data.

    Args:
        multiobj_bool (:obj:`bool`): True if a multiobjective function is used.
        constr_func (:obj:`list`): Constraint function names appended as columns.

    Returns:
        Header string.
    """
    base = "iter,f0val,fvirg,f0val2,fvirg2" if multiobj_bool else "iter,f0val,fvirg"
    return ','.join([base] + list(constr_func))
def colour_by_year(year, train_thresh, update1_thresh, update2_thresh, colours=None):
    """
    Return a colour depending on the year the data point was published.

    Parameters
    ----------
    year :
        publication year of data point
    train_thresh :
        Last year threshold to assign to training set
    update1_thresh :
        Last year threshold to assign to update1 set
    update2_thresh :
        Last year threshold to assign to update2 set
    colours :
        List of colours for training, update1, update2 and test set

    Returns
    -------
    Colour for the first matching threshold; None for year > 2020
    (preserved from the original).
    """
    if colours is None:
        colours = ["navy", "plum", "mediumaquamarine", "green"]
    # Test-set upper bound of 2020 mirrors the original's last branch.
    thresholds = (train_thresh, update1_thresh, update2_thresh, 2020)
    for threshold, colour in zip(thresholds, colours):
        if year <= threshold:
            return colour
    return None
def as_int(val):
    """
    Convert a string to an int, returning None when the conversion fails
    (e.g. empty or non-numeric string).
    """
    try:
        return int(val)
    except ValueError:
        return None
import pathlib
from typing import List
def _write_dataset_files(
root_path: pathlib.Path, namespace: str, datasets: List[str]
) -> str:
"""Write the repo content containing the datasets."""
repo_path = root_path / namespace
# Create all datasets
for ds_name in datasets:
ds_path = repo_path / ds_name / f'{ds_name}.py'
ds_path.parent.mkdir(parents=True) # Create the containing dir
ds_path.touch() # Create the file
# Additional noisy files should be ignored
(repo_path / '__init__.py').touch()
(repo_path / 'empty_dir').mkdir()
return str(repo_path) | 49baafff58a08802830208382180ce32d8aaf8c0 | 703,853 |
import json
def transform_group_roles_data(data, okta_org_id):
    """
    Transform user role data.

    :param data: JSON string returned by the Okta server
    :param okta_org_id: okta organization id
    :return: list of dictionaries containing role properties
    """
    roles = []
    for role in json.loads(data):
        roles.append({
            "label": role["label"],
            "type": role["type"],
            # Synthetic id namespaced by the Okta organization.
            "id": "{}-{}".format(okta_org_id, role["type"]),
        })
    return roles
def is_list(string):
    """
    Check whether a string contains a list in the form "[A, B]".

    :param string: string to evaluate
    :return: Boolean
    """
    # Must be non-empty, bracketed, and contain at least one comma.
    return bool(string) and string.startswith('[') and string.endswith(']') and ',' in string
import re
def _natural_key(x):
""" Splits a string into characters and digits. This helps in sorting file
names in a 'natural' way.
"""
return [int(c) if c.isdigit() else c.lower() for c in re.split("(\d+)", x)] | 1fab7dffb9765b20f77ab759e43a23325b4441f4 | 703,863 |
from typing import Tuple
import math
def _projected_velocities_from_cog(beta: float, cog_speed: float) -> Tuple[float, float]:
"""
Computes the projected velocities at the rear axle using the Bicycle kinematic model using COG data
:param beta: [rad] the angle from rear axle to COG at instantaneous center of rotation
:param cog_speed: [m/s] Magnitude of velocity vector at COG
:return: Tuple with longitudinal and lateral velocities [m/s] at the rear axle
"""
# This gives COG longitudinal, which is the same as rear axle
rear_axle_forward_velocity = math.cos(beta) * cog_speed # [m/s]
# Lateral velocity is zero, by model assumption
rear_axle_lateral_velocity = 0
return rear_axle_forward_velocity, rear_axle_lateral_velocity | defbfa58d1e67b67ff4a118ebff03e62f4c1042c | 703,865 |
def quality_scrub(df, target_cols=None):
    """
    Definition:
        Filters a dataframe where each target_col does not contain 'no_cough'

    Args:
        df: Required. A dataframe containing the target columns
        target_cols: Columns to filter on; defaults to
            ['quality_1', 'quality_2', 'quality_3'].

    Returns:
        Returns a filtered dataframe where each target_col does not contain 'no_cough'
    """
    # Use a None sentinel instead of a mutable default argument, which would
    # be shared across calls.
    if target_cols is None:
        target_cols = ['quality_1', 'quality_2', 'quality_3']
    for col in target_cols:
        df = df[df[col] != 'no_cough']
    return df
import csv
def get_author_book_publisher_data(filepath):
    """
    This function gets the data from the csv file

    :param filepath: path to a CSV file with a header row
    :return: list of dicts, one per data row, keyed by the header fields
    """
    with open(filepath) as csvfile:
        return list(csv.DictReader(csvfile))
from typing import List
def check_absence_of_skip_series(
        movement: int,
        past_movements: List[int],
        max_n_skips: int = 2,
        **kwargs
) -> bool:
    """
    Check that there are no long series of skips.

    :param movement:
        melodic interval (in scale degrees) for line continuation
    :param past_movements:
        list of past movements
    :param max_n_skips:
        maximum allowed number of skips in a row
    :return:
        indicator whether a continuation is in accordance with the rule
    """
    # A step (interval of at most one degree) can never extend a skip series.
    if abs(movement) <= 1:
        return True
    # Not enough history yet to form a forbidden series.
    if len(past_movements) < max_n_skips:
        return True
    recent = past_movements[-max_n_skips:]
    # Allowed only if at least one recent movement was a step.
    return any(abs(interval) <= 1 for interval in recent)
from unittest.mock import Mock
def mock_data_manager(components):
    """Return a mock data manager of a general model."""
    # Attribute kwargs on Mock set plain attributes on the instance.
    manager = Mock(components=components, fixed_components=[])
    return manager
def moeda(n=0):
    """
    -> Formats a number as Brazilian currency (R$).
    :param n: number to format
    :return: formatted currency string
    """
    return 'R$ {:.2f}'.format(n)
def get_parameter_name(argument):
    """Return the name of the parameter without the leading prefix."""
    prefix = argument[0]
    # Runtime parameters are prefixed with '$', tagged ones with '%'.
    if prefix != '$' and prefix != '%':
        raise AssertionError(u'Unexpectedly received an unprefixed parameter name, unable to '
                             u'determine whether it is a runtime or tagged parameter: {}'
                             .format(argument))
    return argument[1:]
import hashlib
def cmpHash(file1, file2):
    """Compare the MD5 hashes of two files.

    :param file1: path of the first file
    :param file2: path of the second file
    :return: True if both files have the same MD5 digest, False otherwise
    """
    def _md5(path):
        # Feed the hash in fixed-size chunks so large files are not read
        # into memory all at once.
        digest = hashlib.md5()
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(65536), b''):
                digest.update(chunk)
        return digest.hexdigest()

    return _md5(file1) == _md5(file2)
def bfmt(num, size=8):
    """ Returns the printable string version of a binary number <num> that's length <size> """
    limit = 2 ** size
    if num > limit:
        # Value exceeds the field: render the upper bits, truncated to <size>.
        high_bits = (num >> size) & (limit - 1)
        return format(high_bits, 'b').zfill(size)
    try:
        return format(num, 'b').zfill(size)
    except ValueError:
        # Non-integer input (e.g. a float) cannot be rendered in binary;
        # return it unchanged, matching the original contract.
        return num
def check_barcode_is_off(alignment, tags, log=None):
    """
    See if the barcode was recognised with soft clipping.
    if so, it returns True and can be counted in the optional log
    :param alignment: the read
    :param tags: alignment tags as dict
    :param log: optional log object; its ``misbar`` hook is called on a hit
    :return: True when the read carries a non-zero 'bm' tag, else False
    """
    # Reads without a read-group tag carry no barcode information.
    if 'RG' not in tags:
        return False
    if tags['bm'] == '0':
        return False
    if log:
        log.misbar(alignment)
    return True
def already_visited(string):
    """
    Helper method used to identify if a subroutine call or definition has
    already been visited by the script in another instance
    :param string: The call or definition of a subroutine/function
    :return: a boolean indicating if it has been visited already or not
    """
    after_paren = string.partition('(')[2]
    cleaned = after_paren.replace(' ', '').replace('(', '')
    # Visited routines are marked by a 'gr' prefix right after the paren.
    return cleaned[:2] == 'gr'
import pathlib
def get_stem_name(file_name: pathlib.Path | str | None) -> str:
"""Get the stem name from a file name.
Args:
file_name (pathlib.Path | str | None): File name or file path.
Returns:
str: Stem name.
"""
if file_name is None:
return ""
if isinstance(file_name, str):
file_name = pathlib.Path(file_name)
return file_name.stem | 01bab045f2c54aedf848922550ae241c9ddf8bce | 703,890 |
def getSingleIndexedParamValue(request, param_name, values=()):
    """Returns a value indexed by a query parameter in the HTTP request.

    Args:
      request: the Django HTTP request object
      param_name: name of the query parameter in the HTTP request
      values: list (or tuple) of ordered values; one of which is
        retrieved by the index value of the param_name argument in
        the HTTP request

    Returns:
      None if the query parameter was not present, was not an integer, or
      was an integer that is not a valid [0..len(values)-1] index into
      the values list.
      Otherwise, returns values[int(param_name value)]
    """
    raw = request.GET.get(param_name)
    if isinstance(raw, (tuple, list)):
        # keep only the first argument if multiple are present
        raw = raw[0]
    if raw is None:
        # parameter absent: no value to look up
        return None
    try:
        index = int(raw)
    except ValueError:
        # non-integer parameter value: treat as missing
        return None
    if 0 <= index < len(values):
        return values[index]
    # index out of range for the supplied values
    return None
from typing import Union
from typing import Dict
from typing import Optional
def get_config_float(
    current: Union[int, float], config: Dict[str, str], name: str
) -> Optional[float]:
    """
    Convenience function to get config values as float.

    :param current: current config value to use when one is not provided
    :param config: config to get values from
    :param name: name of config value to get
    :return: current config value when not provided, new value otherwise
    """
    raw = config.get(name)
    if raw is None:
        # Setting absent: fall back to the current value.
        return current
    if raw == "":
        # An explicit empty string clears the value.
        return None
    return float(raw)
def getRatingDistributionOfAMovie(ratingRDD, movieID):
    """ Get the rating distribution of a specific movie
    Args:
        ratingRDD: a RDD containing tuples of (UserID, MovieID, Rating)
        movieID: the ID of a specific movie
    Returns:
        [(rating score, number of this rating score)]
    """
    # Keep only ratings for the requested movie, then count per score.
    movie_ratings = ratingRDD.filter(lambda entry: entry[1] == movieID)
    score_pairs = movie_ratings.map(lambda entry: (entry[2], 1))
    return score_pairs.countByKey()
def lower_allbutfirst_letter(mystring):
    """Uppercase the first letter and lowercase all the others.

    :param mystring: string to transform
    :return: transformed string; an empty input is returned unchanged
    """
    if not mystring:
        # Guard: indexing mystring[0] would raise IndexError on "".
        return mystring
    return mystring[0].upper() + mystring[1:].lower()
def cmp_public_numbers(pn1, pn2):
    """
    Compare 2 sets of public numbers. These is a way to compare
    2 public RSA keys. If the sets are the same then the keys are the same.
    :param pn1: The set of values belonging to the 1st key
    :param pn2: The set of values belonging to the 2nd key
    :return: True is the sets are the same otherwise False.
    """
    # Keys match only when both the modulus (n) and exponent (e) agree.
    return pn1.n == pn2.n and pn1.e == pn2.e
def ranges(int_list):
    """
    Given a sorted list of integers function will return
    an array of strings that represent the ranges

    Only runs of two or more consecutive integers are reported (matching
    the original behaviour); isolated values are skipped.

    :param int_list: sorted list of integers
    :return: list of "begin->end" strings
    """
    result = []
    # None sentinels instead of 0: the original used `begin == 0` as the
    # "not started" flag, which broke for lists containing the value 0
    # (e.g. [0, 1, 2] reported "1->2" instead of "0->2").
    begin = None
    end = None
    for value in int_list:
        if begin is None:
            # First element starts the first candidate run.
            begin = end = value
        elif value - 1 == end:
            # Still consecutive: extend the current run.
            end = value
        else:
            # Sequence broken: emit the finished run if it spans >1 value.
            if begin != end:
                result.append("{0}->{1}".format(begin, end))
            begin = end = value
    # Emit the trailing run, if any.
    if begin is not None and begin != end:
        result.append("{0}->{1}".format(begin, end))
    return result
def flatten(array: list):
    """Converts a list of lists into a single list of x elements"""
    flat = []
    for row in array:
        flat.extend(row)
    return flat
def keyevent2tuple(event):
    """Convert QKeyEvent instance into a tuple"""
    # Collect the accessor methods, then invoke each one in order.
    accessors = (event.type, event.key, event.modifiers, event.text,
                 event.isAutoRepeat, event.count)
    return tuple(accessor() for accessor in accessors)
import math
def get_inv_unit(block_index, diff):
    """
    given a block index and a 0-indexed layer in that block, returns a unit index.
    """
    # First unit index of each bottleneck block.
    block_start = {1: 0, 2: 3, 3: 7, 4: 13}[block_index]
    # Three layers per unit; diff indexes layers within the block.
    unit_offset = math.floor(abs(diff - 1) / 3)
    return block_start + unit_offset
import math
def find_roots_quadratic(a: float, b: float, c: float) -> set:
    """Return a set containing the solutions to the equation ax^2 + bx + c = 0.

    Each solution is a float.

    You may ASSUME that:
        - a != 0
        - (b * b) - (4 * a * c) >= 0

    >>> find_roots_quadratic(1, -15, 56) == {8.0, 7.0}
    True
    >>> find_roots_quadratic(1, 8, 15) == {-3.0, -5.0}
    True
    """
    assert a != 0
    assert (b * b) - (4 * a * c) >= 0

    # Quadratic formula: x = (-b ± sqrt(b² - 4ac)) / 2a
    discriminant_root = math.sqrt(b * b - 4 * a * c)
    denominator = 2 * a
    return {(-b - discriminant_root) / denominator,
            (-b + discriminant_root) / denominator}
def edge_failure_sampling(failure_scenarios, edge_column):
    """Criteria for selecting failure samples

    Parameters
    ---------
    failure_scenarios - Pandas DataFrame of failure scenarios
    edge_column - String name of column to select failed edge ID's

    Returns
    -------
    edge_failure_samples - List of lists of failed edge sets
    """
    # Deduplicate the failed edge ids via a set before listing them.
    unique_edges = set(failure_scenarios[edge_column].values.tolist())
    return list(unique_edges)
import re
def count_arg_nums(method_signature):
    """
    Based on the method signature(jni format) to count the arguments number.
    :param method_signature: method signature(jni format)
    :return: arguments number
    """
    # Pull out the text between the parentheses of the signature.
    arg_signature = re.findall(re.compile(r'\((.*?)\)'), method_signature)[0]
    # Each object type (L...;), primitive, or primitive array counts as one.
    arg_pattern = re.compile(r'(L.*?;)|([BCDFISJZ])|(\[[BCDFISJZ])')
    return len(arg_pattern.findall(arg_signature))
def form_columns(form):
    """
    :param form: Taken from requests.form
    :return: columns: list of slugified column names
             labels: dict mapping string labels of special column types
                     (observed_date, latitude, longitude, location)
                     to names of columns
    """
    labels = {}
    columns = []
    for field_name, label in form.items():
        if not field_name.startswith('col_name_'):
            continue
        # key_type_observed_date
        column = field_name.replace("col_name_", "")
        columns.append(column)
        # e.g labels['observed_date'] = 'date'
        labels[label] = column
    return columns, labels
import base64
import six
def UrlSafeB64Decode(message):
    """wrapper of base64.urlsafe_b64decode.

    Pure-stdlib replacement for the six-based helper: on Python 3 the
    str/bytes normalization six provided is a one-liner.

    Args:
      message: string or binary to decode

    Returns:
      decoded data in string format.
    """
    if isinstance(message, str):
        # Mirror six.ensure_binary: encode text input as UTF-8 bytes.
        message = message.encode('utf-8')
    data = base64.urlsafe_b64decode(message)
    # Mirror six.ensure_str: decode the result back to text.
    return data.decode('utf-8')
def imei_parse_nibble(nibble):
    """Parse one nibble of an IMEI and return its ASCII representation."""
    # Digits 0-9 map straight to their ASCII characters.
    if nibble < 10:
        return chr(ord('0') + nibble)
    # Non-decimal nibbles map to special symbols; anything else is empty.
    specials = {0xa: '*', 0xb: '#', 0xc: 'C', 0xd: '.', 0xe: '!'}
    return specials.get(nibble, '')
def check_command_succeeded(reply):
    """
    Return true if command succeeded, print reason and return false if command
    rejected

    param reply: BinaryReply

    return: boolean
    """
    if reply.command_number != 255:  # 255 is the binary error response code.
        return True
    print("Danger! Command rejected. Error code: " + str(reply.data))
    return False
def format_interval(seconds):
    """ Format an integer number of seconds to a human readable string."""
    units = (
        (('week', 'weeks'), 604800),
        (('day', 'days'), 86400),
        (('hour', 'hours'), 3600),
        (('minute', 'minutes'), 60),
    )
    parts = []
    remaining = seconds
    for names, unit_seconds in units:
        count, remaining = divmod(remaining, unit_seconds)
        if count > 0:
            # Pick singular or plural form of the unit name.
            parts.append('%d %s' % (count, names[count > 1]))
    if remaining:
        # Leftover sub-minute time is shown with two decimals.
        parts.append("%.2f %s" % (remaining, ['second', 'seconds'][remaining != 1.0]))
    return ', '.join(parts)
def split_on_comma(tokens):
    """Split a list of tokens on commas, ie ``,`` DELIM tokens.

    Only "top-level" comma tokens are splitting points, not commas inside a
    function or other :class:`ContainerToken`.

    :param tokens:
        An iterable of :class:`~.token_data.Token` or
        :class:`~.token_data.ContainerToken`.
    :returns:
        A list of lists of tokens
    """
    # Start with one (possibly empty) group; commas open a new group.
    parts = [[]]
    for token in tokens:
        if token.type == 'DELIM' and token.value == ',':
            parts.append([])
        else:
            parts[-1].append(token)
    return parts
from datetime import datetime
def current_time() -> datetime:
    """Return timezone-aware current time as datetime."""
    now = datetime.now()
    # astimezone() attaches the local timezone to the naive timestamp.
    return now.astimezone()
def mult(A, B):
    """
    Function to multiply two values A and B, use as "mult(A, B)"
    """
    product = A * B
    return product
def get_link_href(result_object, link_relation):
    """
    Given a result_object (returned by a previous API call), return
    the link href for a link relation.

    'result_object' a JSON object returned by a previous API call. May not
    be None.
    'link_relation' the link relation for which href is required.

    Returns None if the link does not exist.
    """
    # Argument error checking.
    assert result_object is not None

    link = result_object['_links'].get(link_relation)
    # Missing (or falsy) link objects yield no href.
    return link.get('href') if link else None
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.