content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M)
---|---|---|
import os
def get_img_full_path(path):
""" Checks if file can be found by path specified in the input. Returns the same as input
if can find, otherwise joins current directory full path with path from input and returns it.
:param path: Relative of full path to the image.
:return: Relative of full path to the image (joined with path to current directory if needed).
"""
if os.path.isfile(path):
return path
else:
directory = os.path.dirname(__file__)
new_path = os.path.join(directory, path)
if os.path.isfile(new_path):
return new_path
else:
raise IOError("File not found: " + path) | d549cd09035ebd6213f1b31c1c2eee4e64dcdce8 | 3,654,200 |
import tensorflow as tf
def max_pool_2x2(input_):
    """ Perform max pool with a 2x2 kernel"""
return tf.nn.max_pool(input_, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') | b85ccfaafbdcf5d703dffab65e188ffab52ebce9 | 3,654,201 |
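A minimal usage sketch for max_pool_2x2 above, assuming TensorFlow 1.x graph mode (tf.placeholder / tf.Session):
import numpy as np
x = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
pooled = max_pool_2x2(x)  # halves the spatial dimensions
with tf.Session() as sess:
    out = sess.run(pooled, feed_dict={x: np.zeros((1, 28, 28, 1), dtype=np.float32)})
    print(out.shape)  # (1, 14, 14, 1)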
from tqdm import tqdm
def remove_numerals(df, remove_mixed_strings=True):
"""Removes rows from an ngram table with words that are numerals. This
does not include 4-digit numbers which are interpreted as years.
Arguments:
df {Pandas dataframe} -- A dataframe of with columns 'word', 'count'.
Keyword Arguments:
remove_mixed_strings {bool} -- Whether to remove rows with words that
are mixtures of numerals and letters. (default: {True})
"""
no_numerals_df = df.copy().reset_index()
for i, row in tqdm(no_numerals_df.iterrows(), desc="Removing numerals\n"):
word = row['word']
if remove_mixed_strings:
if any([c.isnumeric() for c in word]) and \
not is_year(word):
no_numerals_df.drop(i, axis=0, inplace=True)
else:
if word.isnumeric() and len(word) != 4:
no_numerals_df.drop(i, axis=0, inplace=True)
return no_numerals_df | 4c3d0468456e08b1a0579a8b73a21221cae17676 | 3,654,202 |
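A quick sketch of calling remove_numerals above on a toy table; is_year is external in the original project, so a stand-in with the assumed "4-digit means year" behaviour is defined here:
import pandas as pd
def is_year(word):  # hypothetical stand-in for the project's is_year helper
    return word.isnumeric() and len(word) == 4
df = pd.DataFrame({"word": ["apple", "1984", "42", "b2b"], "count": [10, 3, 7, 2]})
cleaned = remove_numerals(df)  # drops "42" and "b2b", keeps "apple" and "1984"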
def generate_json(args, df, num_changes, start_dt, finish_dt,
projects, projects_map,
not_found_proj, group=None, groups=[]):
"""
Returns json report from a dataframe for a specific project
"""
log.debug('Generating %s report for %s', args.report_format, group)
log.debug(projects)
if group:
# we want to report on the projects that are common to projects and df
projects_to_report = list(set(projects).intersection(df))
else:
projects_to_report = projects
frames = list()
for project in projects_to_report:
log.debug('%s df:\n%s', project, df[project])
frames.append(df[project])
# TODO wrap this in proper html or a template
if len(frames) <= 0:
return 'No projects in this group'
df_plot = generate_plot(args, df, frames, start_dt)
return df_plot.to_json(orient='table') | 1f90e73ad26f87cea0052f157ac3167f54f8b027 | 3,654,203 |
def b32_ntop(*args):
"""LDNS buffer."""
return _ldns.b32_ntop(*args) | b43bc9b1b112f7815ec3c280d055676e7255adcc | 3,654,204 |
import logging
import sys
import os
def get_logger(filename, logger_name=None):
"""set logging file and format
Args:
filename: str, full path of the logger file to write
logger_name: str, the logger name, e.g., 'master_logger', 'local_logger'
Return:
logger: python logger
"""
log_format = "%(asctime)s %(message)s"
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt="%m%d %I:%M:%S %p")
# different name is needed when creating multiple logger in one process
logger = logging.getLogger(logger_name)
fh = logging.FileHandler(os.path.join(filename))
fh.setFormatter(logging.Formatter(log_format))
logger.addHandler(fh)
return logger | e549cdd7198961662f28390e09480d05f5ad14b4 | 3,654,205 |
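Example use of get_logger above; messages go both to stdout (via basicConfig) and to the given file:
logger = get_logger("./train.log", logger_name="local_logger")
logger.info("training started")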
from typing import Callable
from typing import Dict
import torch
def infer_feature_extraction_pytorch(
model: PreTrainedModel, run_on_cuda: bool
) -> Callable[[Dict[str, torch.Tensor]], torch.Tensor]:
"""
Perform Pytorch inference for feature extraction task
:param model: Pytorch model (sentence-transformers)
    :param run_on_cuda: True if inference should be run on GPU
:return: a function to perform inference
"""
def infer(inputs: Dict[str, torch.Tensor]) -> torch.Tensor:
model_output = model(**inputs).detach() # noqa: F821
if run_on_cuda:
torch.cuda.synchronize()
return model_output
return infer | 18f935607c824c2122f68ea1ec9a7bcb50604ac8 | 3,654,206 |
def editRole(userSource, oldName, newName):
"""Renames a role in the specified user source.
When altering the Gateway System User Source, the Allow User Admin
setting must be enabled.
Args:
userSource (str): The user source in which the role is found.
Blank will use the default user source.
oldName (str): The role to edit. Role must not be blank and must
exist.
newName (str): The new name for the role. Must not be blank.
Returns:
UIResponse: An object with lists of warnings, errors, and info
about the success or failure of the edit.
"""
print(userSource, oldName, newName)
return UIResponse(Locale.ENGLISH) | f72796de14ff5f8c4314a5d31fe4fcf42cb883ec | 3,654,207 |
import numpy as np
from scipy.interpolate import BSpline, make_lsq_spline
from scipy.integrate import quad
def compute_bspline_dot_product_derivatives(basis_features, basis_dimension):
"""
Compute dot products of B-splines and their derivatives.
Input:
- basis_features: dict
Contain information on the basis for each state
- basis_dimension: dict
Give the number of basis functions for each state
Outputs:
    - dot_product_12: ndarray
        Array containing the dot products of the B-splines
        with their derivatives
    - dot_product_22: ndarray
        Array containing the dot products of the B-spline
        derivatives
"""
# Compute the dimension of the problem
dimension = np.sum([basis_dimension[elt] for elt in basis_dimension])
# Get the knots
t = basis_features['knots']
# FIXME: Consider small parameter to avoid vanishing of the last B-spline
# at 1
eps = 1e-16
dot_product_12 = np.zeros([dimension, dimension])
dot_product_22 = np.zeros([dimension, dimension])
i, j = 0, 0
# Loop over states
for state1 in basis_dimension:
# Get degree of the B-splines of state1
k1 = basis_features[state1]
# Add external knots depending on the degree
t1 = np.r_[(0,)*(k1+1), t, (1,)*(k1+1)]
for state2 in basis_dimension:
# Get degree of the B-splines of state2
k2 = basis_features[state2]
# Add external knots depending on the degree
t2 = np.r_[(0,)*(k2+1), t, (1,)*(k2+1)]
for m in range(basis_dimension[state1]):
# Define m-th B-spline of the state1 basis
spl_m = BSpline.basis_element(t1[m:m+k1+2])
                # Reproduce the same spline for differentiation because of
                # differentiation problems with BSpline.basis_element()
# FIXME: simplify if possible
# Construct knots by first finding the internal knots and then
# by adding the right numbers of external knots
t1m = t1[m:m+k1+2]
ind_min1 = np.max(np.argwhere(t1m == t1[m]))
ind_max1 = np.min(np.argwhere(t1m == t1[m+k1+1]))
t_m = np.r_[(t1m[ind_min1],)*k1,
t1m[ind_min1:ind_max1+1],
(t1m[ind_max1],)*k1]
x_m = np.linspace(t1m[0], t1m[-1]-eps, 50)
spl_m = make_lsq_spline(x_m, spl_m(x_m), t_m, k1)
# Compute derivative
spl_m_deriv = spl_m.derivative(nu=1)
for n in range(basis_dimension[state2]):
# Define n-th B-spline of the state2 basis
spl_n = BSpline.basis_element(t2[n:n+k2+2])
# FIXME: simplify if possible
# Construct knots by first finding the internal knots and
# then by adding the right numbers of external knots
t2n = t2[n:n+k2+2]
ind_min2 = np.max(np.argwhere(t2n == t2[n]))
ind_max2 = np.min(np.argwhere(t2n == t2[n+k2+1]))
t_n = np.r_[(t2n[ind_min2],)*k2,
t2n[ind_min2:ind_max2+1],
(t2n[ind_max2],)*k2]
x_n = np.linspace(t2n[0], t2n[-1]-eps, 50)
spl_n = make_lsq_spline(x_n, spl_n(x_n), t_n, k2)
# Compute derivative
spl_n_deriv = spl_n.derivative(nu=1)
max_t = max(t1[m], t2[n])
min_t = min(t1[m+k1+1], t2[n+k2+1])
# If intersection of supports then do computations
if max_t < min_t:
# Numerical integration
quad_int_12 = quad(lambda x:
spl_m(x) * spl_n_deriv(x),
max_t, min_t)
quad_int_22 = quad(lambda x:
spl_m_deriv(x) * spl_n_deriv(x),
max_t, min_t)
dot_product_12[i + m, j + n] += quad_int_12[0]
dot_product_22[i + m, j + n] += quad_int_22[0]
j += basis_dimension[state2]
j = 0
i += basis_dimension[state1]
return dot_product_12, dot_product_22 | a8ba13b23bb009eb81d57792abeced6fb8715b07 | 3,654,208 |
import os
def validate_table(config, table):
"""Run VALVE validation on a table.
:param config: valve config dictionary
:param table: path to table
:return: list of errors
"""
errors = []
table_name = os.path.splitext(os.path.basename(table))[0]
table_details = config["table_details"]
fields = config["table_fields"].get(table, {})
fields.update(config["table_fields"].get("*", {}))
rules = None
if "table_rules" in config:
rules = config["table_rules"].get(table, {})
rules.update(config.get("*", {}))
row_idx = 0
for row in table_details[table_name]["rows"]:
col_idx = 1
for field, value in row.items():
if not value:
value = ""
# Check for field type
if field in fields:
# Get the expected field type
# This will be validated based on the given datatypes
parsed_type = fields[field]["parsed"]
error_message = fields[field]["message"]
# all values in this field must match the type
messages = validate_condition(
config, parsed_type, table_name, field, row_idx, value, message=error_message
)
if messages:
field_id = fields[field]["field ID"]
for m in messages:
m.update({
"rule ID": "field:" + str(field_id),
"rule": fields[field]["column"],
"level": "ERROR",
})
errors.append(m)
# Check for rules
if rules and field in rules:
# Check if the value meets any of the conditions
for rule in rules[field]:
when_condition = rule["when_condition"]
# Run meets_condition without logging
# as the then-cond check is only run if the value matches the type
messages = validate_condition(
config, when_condition, table_name, field, row_idx, value
)
if not messages:
# The "when" value meets the condition - validate the "then" value
then_column = rule["column"]
# Retrieve the "then" value to check if it meets the "then condition"
then_value = row[then_column]
if not then_value:
then_value = ""
messages = validate_condition(
config,
rule["then_condition"],
table_name,
then_column,
row_idx,
then_value,
message=rule["message"],
)
if messages:
for m in messages:
if rule["message"]:
msg = m["message"]
else:
msg = (
f"because '{value}' is '{parsed_to_str(config, when_condition)}', "
+ m["message"]
)
m.update(
{
"rule ID": "rule:" + str(rule["rule ID"]),
"rule": then_column,
"level": rule["level"],
"message": msg,
}
)
errors.append(m)
col_idx += 1
row_idx += 1
return errors | 3b8a1517219d4b8b0db5042200f1c62700cabe94 | 3,654,209 |
import numpy as np
def beam_hardening_correction(mat, q, n, opt=True):
"""
Correct the grayscale values of a normalized image using a non-linear
function.
Parameters
----------
mat : array_like
Normalized projection image or sinogram image.
q : float
Positive number. Recommended range [0.005, 50].
    n : float
        Positive number. Must be larger than or equal to 2.
opt : bool
True: Curve towards 0.0.
False: Curve towards 1.0.
Returns
-------
array_like
Corrected image.
"""
if np.max(mat) >= 2.0:
raise ValueError("!!! Input image must be normalized, i.e. gray-scales "
"are in the range of [0.0, 1.0]) !!!")
if n < 2.0:
raise ValueError("!!! n must be larger than or equal to 2 !!!")
return np.asarray([non_linear_function(x, q, n, opt) for x in mat]) | d113ff33c2d688f460cf2bac62cdd8a26b890ce5 | 3,654,210 |
import pandas as pd
def cargar_recursos_vectores_transpuestos():
    """
    Load the information needed to compute the transposed vectors.
    """
    # Build the dataframe
filename = 'csv/' + conf.data['env']['path'] + '/vectores_transpuestos.csv'
recursos_v_transpuestos = pd.read_csv(filename)
    # Rename the indices to the names of the transposed vectors
    # A MultiIndex is created to avoid adding an extra column to the transposed vectors
    # it is defined by tuples = [('Reg.1','H'), ('Reg.2', 'F'),('Reg.3', 'G'),('Reg.4', 'H'),...,('Reg.10', 'A')]
tuples = [('Reg.' + str(i), vt) for i, vt in zip(recursos_v_transpuestos.pop('num_de_region'), recursos_v_transpuestos.pop('chr_vector_t'))]
recursos_v_transpuestos.index = pd.MultiIndex.from_tuples(tuples)
return recursos_v_transpuestos | f982fd22bbdfd2d7d389787bf7f923feb9abf66f | 3,654,211 |
def read_pdb(file_name, exclude=('SOL',), ignh=False, modelidx=1):
"""
Parse a PDB file to create a molecule.
Parameters
----------
    file_name: str
The file to read.
exclude: collections.abc.Container[str]
Atoms that have one of these residue names will not be included.
ignh: bool
Whether hydrogen atoms should be ignored.
    modelidx: int
If the PDB file contains multiple models, which one to select.
Returns
-------
list[vermouth.molecule.Molecule]
The parsed molecules. Will only contain edges if the PDB file has
CONECT records. Either way, the molecules might be disconnected. Entries
separated by TER, ENDMDL, and END records will result in separate
molecules.
"""
parser = PDBParser(exclude, ignh, modelidx)
with open(str(file_name)) as file_handle:
mols = list(parser.parse(file_handle))
LOGGER.info('Read {} molecules from PDB file {}', len(mols), file_name)
return mols | d7412b96adef5505676a80e5cdf3fe5e63a3b096 | 3,654,212 |
def sao_isomorficas(texto1: str, texto2: str) -> bool:
"""
>>> sao_isomorficas('egg', 'add')
True
>>> sao_isomorficas('foo', 'bar')
False
>>> sao_isomorficas('eggs', 'add')
False
"""
    # O(n) algorithm in time and memory
letras_encontradas = {}
if len(texto1) != len(texto2):
return False
for caractere_1, caractere_2 in zip(texto1, texto2):
try:
letra = letras_encontradas[caractere_1]
except KeyError:
letras_encontradas[caractere_1] = caractere_2
else:
            if letra != caractere_2:
return False
return True | a1f2c00a50b69cb18c32a299d50cbd3a35dcbe5e | 3,654,213 |
from inspect import getargspec
def _is_no_args(fn):
"""Check if function has no arguments.
"""
return getargspec(fn).args == [] | 29cb096323c69dd067bf4759a557734443a82ed5 | 3,654,214 |
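A small check of _is_no_args above with one zero-argument and one one-argument function:
def ping():
    return "pong"
def greet(name):
    return "hi " + name
assert _is_no_args(ping) is True
assert _is_no_args(greet) is False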
def failure(parsed_args):
"""
:param :py:class:`argparse.Namespace` parsed_args:
:return: Nowcast system message type
:rtype: str
"""
logger.critical(
f"{parsed_args.model_config} {parsed_args.run_type} FVCOM VH-FR run for "
f'{parsed_args.run_date.format("YYYY-MM-DD")} '
f"on {parsed_args.host_name} failed"
)
msg_type = f"failure {parsed_args.model_config} {parsed_args.run_type}"
return msg_type | e6744bfd61458497b5a70d7d28712385a8488a98 | 3,654,215 |
import numpy as np
def good_AP_finder(time,voltage):
"""
This function takes the following input:
time - vector where each element is a time in seconds
voltage - vector where each element is a voltage at a different time
    We are assuming that the two vectors are in correspondence (meaning
that at a given index, the time in one corresponds to the voltage in
the other). The vectors must be the same size or the code
won't run
This function returns the following output:
APTimes - all the times where a spike (action potential) was detected
"""
APTimes = []
#Let's make sure the input looks at least reasonable
if (len(voltage) != len(time)):
print "Can't run - the vectors aren't the same length!"
return APTimes
    ##Your Code Here!
    threshold = 0.5 * np.max(voltage)
    times_of_APs = time[voltage > threshold]
    if times_of_APs.size == 0:
        return APTimes
    # keep the first threshold crossing and any crossing more than 1.5 ms after the previous one
    keep = np.insert(np.diff(times_of_APs) > 0.0015, 0, True)
    APTimes = times_of_APs[keep]
    return APTimes
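A synthetic sanity check for good_AP_finder above: a noisy 10 kHz trace with two injected spikes, which should come back as the two detected times:
time = np.arange(0, 1, 0.0001)                  # 10 kHz sampling, 1 s
voltage = np.random.normal(0, 0.05, time.size)
voltage[2500] = 1.0                             # spike at t = 0.25 s
voltage[7500] = 1.0                             # spike at t = 0.75 s
spike_times = good_AP_finder(time, voltage)     # -> [0.25, 0.75]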
from typing import Dict
from typing import Any
from tqdm import tqdm
import pandas as pd
def parse(excel_sheets: Dict[Any, pd.DataFrame],
dictionary: Dict[str, Any],
verbose: bool = False) -> pd.DataFrame:
"""Parse sheets of an excel file according to instructions in `dictionary`.
"""
redux_dict = recursive_traverse(dictionary)
column_tuples = redux_dict.keys()
tuple_lengths = [len(tuple) for tuple in column_tuples]
if len(set(tuple_lengths)) > 1:
raise ValueError("Depth of provided JSON file is inconsistent. All "
"entries must be located at the same depth.")
multi_index = pd.MultiIndex.from_tuples(tuples=column_tuples)
data_frame = pd.DataFrame(columns=multi_index)
if verbose:
sheets = tqdm(
excel_sheets.items(),
desc="Looping through sheets",
ncols=100
)
else:
sheets = excel_sheets.items()
for sheet_name, sheet in sheets:
new_row = {}
for column, instr in redux_dict.items():
try:
raw = sheet.iloc[instr["row"], instr["col"]].values
except AttributeError:
raw = sheet.iloc[instr["row"], instr["col"]]
except ValueError:
raw = None
try:
func = map_with_dict(instr["choices"])
except KeyError:
func = FUNC_DICT[instr["func"]]
try:
new_row[column] = func(raw)
except:
new_row[column] = None
data_frame = data_frame.append(new_row, ignore_index=True)
return data_frame | 88028fef19eda993680e89c58954c04a215a2fdd | 3,654,217 |
def build_LAMP(prob,T,shrink,untied):
"""
Builds a LAMP network to infer x from prob.y_ = matmul(prob.A,x) + AWGN
return a list of layer info (name,xhat_,newvars)
name : description, e.g. 'LISTA T=1'
xhat_ : that which approximates x_ at some point in the algorithm
newvars : a tuple of layer-specific trainable variables
"""
eta,theta_init = shrinkage.get_shrinkage_function(shrink)
print('theta_init='+repr(theta_init))
layers=[]
A = prob.A
M,N = A.shape
B = A.T / (1.01 * la.norm(A,2)**2)
B_ = tf.Variable(B,dtype=tf.float32,name='B_0')
By_ = tf.matmul( B_ , prob.y_ )
layers.append( ('Linear',By_,None) )
if getattr(prob,'iid',True) == False:
# set up individual parameters for every coordinate
theta_init = theta_init*np.ones( (N,1),dtype=np.float32 )
theta_ = tf.Variable(theta_init,dtype=tf.float32,name='theta_0')
OneOverM = tf.constant(float(1)/M,dtype=tf.float32)
NOverM = tf.constant(float(N)/M,dtype=tf.float32)
rvar_ = tf.reduce_sum(tf.square(prob.y_),0) * OneOverM
(xhat_,dxdr_) = eta( By_,rvar_ , theta_ )
layers.append( ('LAMP-{0} T=1'.format(shrink),xhat_,(theta_,) ) )
vt_ = prob.y_
for t in range(1,T):
if len(dxdr_.get_shape())==2:
dxdr_ = tf.reduce_mean(dxdr_,axis=0)
bt_ = dxdr_ * NOverM
vt_ = prob.y_ - tf.matmul( prob.A_ , xhat_ ) + bt_ * vt_
rvar_ = tf.reduce_sum(tf.square(vt_),0) * OneOverM
theta_ = tf.Variable(theta_init,name='theta_'+str(t))
if untied:
B_ = tf.Variable(B,dtype=tf.float32,name='B_'+str(t))
rhat_ = xhat_ + tf.matmul(B_,vt_)
layers.append( ('LAMP-{0} linear T={1}'.format(shrink,t+1),rhat_ ,(B_,) ) )
else:
rhat_ = xhat_ + tf.matmul(B_,vt_)
(xhat_,dxdr_) = eta( rhat_ ,rvar_ , theta_ )
layers.append( ('LAMP-{0} non-linear T={1}'.format(shrink,t+1),xhat_,(theta_,) ) )
return layers | 392050992846aeb1a16e70fe6e43c386e11915e5 | 3,654,218 |
def coeffVar(X, precision=3):
"""
Coefficient of variation of the given data (population)
Argument:
X: data points, a list of int, do not mix negative and positive numbers
precision (optional): digits precision after the comma, default=3
Returns:
float, the cv (measure of dispersion) of the input sample
or raise StatsError('mean is zero') if the mean = 0
"""
try:
return round(stdDev(X, precision) / mean(X, precision), precision)
except ZeroDivisionError:
raise StatsError('mean is zero') | e92505e79c4d10a5d56ec35cd2b543872f6be59c | 3,654,219 |
def tostring(node):
"""
Generates a string representation of the tree, in a format determined by the user.
@ In, node, InputNode or InputTree, item to turn into a string
@ Out, tostring, string, full tree in string form
"""
if isinstance(node,InputNode) or isinstance(node,InputTree):
return node.printXML()
else:
raise NotImplementedError('TreeStructure.tostring received "'+str(node)+'" but was expecting InputNode or InputTree.') | 8e6cab92b898bd99b5c738b26fc9f8d79aef0750 | 3,654,220 |
from typing import Dict
import random
def pick_char_from_dict(char: str, dictionary: Dict[str, str]) -> str:
"""
Picks a random format for the givin letter in the dictionary
"""
return random.choice(dictionary[char]) | c593166ef7cb8c960b8c4be8fa0f8a20ec616f00 | 3,654,221 |
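Example call of pick_char_from_dict above, where each entry maps a character to a string of alternative glyphs:
leet = {"a": "a@4", "e": "e3", "s": "s5$"}
print(pick_char_from_dict("a", leet))  # one of 'a', '@', '4'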
from typing import List
def bmeow_to_bilou(tags: List[str]) -> List[str]:
"""Convert BMEOW tags to the BILOU format.
Args:
tags: The BMEOW tags we are converting
Raises:
ValueError: If there were errors in the BMEOW formatting of the input.
Returns:
Tags that produce the same spans in the BILOU format.
"""
return convert_tags(tags, parse_spans_bmeow_with_errors, write_bilou_tags) | 0081b7691a743fe3e28118cbb571708809fbd485 | 3,654,222 |
def site_sold_per_category(items):
"""For every category, a (site, count) pair with the number of items sold by the
site in that category.
"""
return [(site,
[(cat, total_sold(cat_items)) for cat, cat_items in
categories])
for site, categories in
category_items_per_site(items).iteritems()] | 7b224f3e0a786aef497fad99359e525896eb8441 | 3,654,223 |
from typing import List
import ctypes
def swig_py_object_2_list_int(object, size : int) -> List[int]:
"""
    Converts SwigPyObject to List[int]
"""
y = (ctypes.c_float * size).from_address(int(object))
new_object = []
for i in range(size):
new_object += [int(y[i])]
return new_object | 064a9a1e43884a9f989bec0b31d6d19705764b64 | 3,654,224 |
def ParseAttributesFromData(attributes_data, expected_param_names):
"""Parses a list of ResourceParameterAttributeConfig from yaml data.
Args:
attributes_data: dict, the attributes data defined in
command_lib/resources.yaml file.
expected_param_names: [str], the names of the API parameters that the API
method accepts. Example, ['projectsId', 'instancesId'].
Returns:
[ResourceParameterAttributeConfig].
Raises:
InvalidResourceArgumentLists: if the attributes defined in the yaml file
don't match the expected fields in the API method.
"""
raw_attributes = [
ResourceParameterAttributeConfig.FromData(a) for a in attributes_data
]
registered_param_names = [a.parameter_name for a in raw_attributes]
final_attributes = []
# TODO(b/78851830): improve the time complexity here.
for expected_name in expected_param_names:
if raw_attributes and expected_name == raw_attributes[0].parameter_name:
# Attribute matches expected, add it and continue checking.
final_attributes.append(raw_attributes.pop(0))
elif expected_name in IGNORED_FIELDS:
# Attribute doesn't match but is being ignored. Add an auto-generated
# attribute as a substitute.
# Currently, it would only be the project config.
attribute_name = IGNORED_FIELDS[expected_name]
ignored_attribute = DEFAULT_RESOURCE_ATTRIBUTE_CONFIGS.get(attribute_name)
# Manually add the parameter name, e.g. project, projectId or projectsId.
ignored_attribute.parameter_name = expected_name
final_attributes.append(ignored_attribute)
else:
# It doesn't match (or there are no more registered params) and the
# field is not being ignored, error.
raise InvalidResourceArgumentLists(expected_param_names,
registered_param_names)
if raw_attributes:
# All expected fields were processed but there are still registered
# attribute params remaining, they must be extra.
raise InvalidResourceArgumentLists(expected_param_names,
registered_param_names)
return final_attributes | 73cfc67dddd4d1385bebd0297bd84233c9546dd4 | 3,654,225 |
from typing import Tuple
from typing import Union
async def reactionFromRaw(payload: RawReactionActionEvent) -> Tuple[Message, Union[User, Member], emojis.BasedEmoji]:
"""Retrieve complete Reaction and user info from a RawReactionActionEvent payload.
:param RawReactionActionEvent payload: Payload describing the reaction action
:return: The message whose reactions changed, the user who completed the action, and the emoji that changed.
:rtype: Tuple[Message, Union[User, Member], BasedEmoji]
"""
emoji = None
user = None
message = None
if payload.member is None:
# Get the channel containing the reacted message
if payload.guild_id is None:
channel = botState.client.get_channel(payload.channel_id)
else:
guild = botState.client.get_guild(payload.guild_id)
if guild is None:
return None, None, None
channel = guild.get_channel(payload.channel_id)
# Individual handling for each channel type for efficiency
if isinstance(channel, DMChannel):
if channel.recipient.id == payload.user_id:
user = channel.recipient
else:
user = channel.me
elif isinstance(channel, GroupChannel):
# Group channels should be small and far between, so iteration is fine here.
for currentUser in channel.recipients:
if currentUser.id == payload.user_id:
user = currentUser
if user is None:
user = channel.me
# Guild text channels
elif isinstance(channel, TextChannel):
user = channel.guild.get_member(payload.user_id)
else:
return None, None, None
# Fetch the reacted message (api call)
message = await channel.fetch_message(payload.message_id)
# If a reacting member was given, the guild can be inferred from the member.
else:
user = payload.member
message = await payload.member.guild.get_channel(payload.channel_id).fetch_message(payload.message_id)
if message is None:
return None, None, None
# Convert reacted emoji to BasedEmoji
try:
emoji = emojis.BasedEmoji.fromPartial(payload.emoji, rejectInvalid=True)
except exceptions.UnrecognisedCustomEmoji:
return None, None, None
return message, user, emoji | 36ae16e2b1ffb3df1d5c68ae903b95556446138f | 3,654,226 |
from numpy import array, asarray, empty
from scipy.sparse import dia_matrix
def poisson2d(N,dtype='d',format=None):
"""
Return a sparse matrix for the 2d poisson problem
with standard 5-point finite difference stencil on a
square N-by-N grid.
"""
if N == 1:
diags = asarray( [[4]],dtype=dtype)
return dia_matrix((diags,[0]), shape=(1,1)).asformat(format)
offsets = array([0,-N,N,-1,1])
diags = empty((5,N**2),dtype=dtype)
diags[0] = 4 #main diagonal
diags[1:] = -1 #all offdiagonals
diags[3,N-1::N] = 0 #first lower diagonal
diags[4,N::N] = 0 #first upper diagonal
return dia_matrix((diags,offsets),shape=(N**2,N**2)).asformat(format) | 089088f468e84dce865bbb26707714617e16f3f6 | 3,654,227 |
import random
def get_factory():
"""随机获取一个工厂类"""
return random.choice([BasicCourseFactory, ProjectCourseFactory])() | c71401a2092618701966e5214f85c67a6520b1c9 | 3,654,228 |
def delay_class_factory(motor_class):
"""
Create a subclass of DelayBase that controls a motor of class motor_class.
Used in delay_instace_factory (DelayMotor), may be useful for one-line
declarations inside ophyd Devices.
"""
try:
cls = delay_classes[motor_class]
except KeyError:
cls = type(
'Delay' + motor_class.__name__,
(DelayBase,),
{'motor': Cpt(motor_class, '')}
)
delay_classes[motor_class] = cls
return cls | 264d68f7d3db164c5c133e68f943b789db52fc8b | 3,654,229 |
import os
def check_and_makedir(folder_name):
""" Does a directory exist? if not create it. """
if not os.path.isdir(folder_name):
os.mkdir(folder_name)
return False
else:
return True | 2f2632fc245c04add6a680fa755932d3a082168b | 3,654,230 |
import os
import fnmatch
def _get_all_files_in_directory(dir_path, excluded_glob_patterns):
"""Recursively collects all files in directory and
subdirectories of specified path.
Args:
dir_path: str. Path to the folder to be linted.
excluded_glob_patterns: set(str). Set of all glob patterns
to be excluded.
Returns:
a list of files in directory and subdirectories without excluded files.
"""
files_in_directory = []
for _dir, _, files in os.walk(dir_path):
for file_name in files:
filepath = os.path.relpath(
os.path.join(_dir, file_name), os.getcwd())
if not any([fnmatch.fnmatch(filepath, gp) for gp in
excluded_glob_patterns]):
files_in_directory.append(filepath)
return files_in_directory | 42a7f1220fd54b08b83dc9d89beef0c63c9d5cd0 | 3,654,231 |
def lonlat2px_gt(img, lon, lat, lon_min, lat_min, lon_max, lat_max):
"""
Converts a pair of lon and lat to its corresponding pixel value in an
geotiff image file.
Parameters
----------
img : Image File, e.g. PNG, TIFF
Input image file
lon : float
Longitude
lat : float
Latitude
lon_min, lat_min : float
lower left coordinate of geotiff
lon_max, lat_max : float
upper right coordinate of geotiff
Returns
-------
Row : float
corresponding pixel value
Col : float
corresponding pixel value
"""
w, h = img.size
londiff = lon_max - lon_min
latdiff = lat_max - lat_min
mw = w / londiff
mh = h / latdiff
row = (-lat + lat_max) * mh
col = (lon - lon_min) * mw
return row, col | 39c1aeb63d38fdac383c510913f50f177d274a04 | 3,654,232 |
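A hedged usage sketch for lonlat2px_gt above, assuming a Pillow image standing in for a world-extent raster (lon_min/lat_min lower-left, lon_max/lat_max upper-right):
from PIL import Image
img = Image.new("L", (3600, 1800))  # stand-in for a real GeoTIFF raster
row, col = lonlat2px_gt(img, lon=13.4, lat=52.5,
                        lon_min=-180, lat_min=-90, lon_max=180, lat_max=90)
print(row, col)  # approximately 375.0, 1934.0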
import torch
import numpy as np
def patchwise_contrastive_metric(image_sequence: torch.Tensor,
kpt_sequence: torch.Tensor,
method: str = 'norm',
time_window: int = 3,
patch_size: tuple = (7, 7),
alpha: float = 0.1):
""" Contrasts pixel patches around key-points.
Positive examples are drawn from the same key-point at time-steps in the given time-window.
Negative examples are drawn from other key-points at any time-step
or the same key-point outside of the time-window.
:param image_sequence: Tensor of sequential images in (N, T, C, H, W)
:param kpt_sequence: Tensor of key-point coordinates in (N, T, K, D)
:param method: Method to use:
'mean': Compares the mean patch differences
'norm': Compares the image norm of the patch differences
'vssil': Uses the pixelwise-contrastive feature representations
'tfeat': Uses tfeat encodings to compare the image patches
:param time_window: Window size of positive examples around current the current time-step
E.g. time_window=3 uses t-1 and t+1 as positives for t
At t=0 and t=T, the window size is reduced.
    :param patch_size: Size of the patch to extract from the input, around the key-point
If these would extend the image borders, they are moved to within the borders.
TODO: Fix with padding instead ?
:param alpha: Allowance for pos / neg similarity
"""
N, T, C, H, W = image_sequence.shape
assert kpt_sequence.shape[0] == N, "images and kpts dont share batch size dim"
assert kpt_sequence.shape[1] == T, "images and kpts dont share time dim"
_, _, K, D = kpt_sequence.shape
# To reduce the computational effort, the extracted patches are saved and re-used by demand
patch_sequence = torch.empty(size=(N, T, K, C, patch_size[0], patch_size[1]))
evaluated_kpts = []
L = torch.empty(size=(N, T, K)).to(kpt_sequence.device)
# Iterate over time-steps
for t in range(T):
# Iterate over key-points
for k in range(K):
#
# ANCHOR
#
if (t, k) in evaluated_kpts:
anchor_patch = patch_sequence[:, t, k, ...].float()
else:
x_min, x_max, y_min, y_max = get_box_within_image_border(kpt_sequence, patch_size, H, W, t, k)
anchor_patch = image_sequence[:, t, :, x_min: x_max + 1, y_min: y_max + 1].float()
patch_sequence[:, t, k, ...] = anchor_patch
evaluated_kpts.append((t, k))
#
# POSITIVES
#
L_pos = torch.tensor([0]).to(kpt_sequence.device)
t_range = np.arange(max(0, t - int(time_window/2)), min(T - 1, t + int(time_window/2)) + 1)
# t_range = np.arange(0, T)
for t_p in t_range:
if t_p == t:
continue
if (t_p, k) in evaluated_kpts:
positive_patch = patch_sequence[:, t_p, k, ...].float()
else:
x_min, x_max, y_min, y_max = get_box_within_image_border(kpt_sequence, patch_size, H, W, t_p, k)
positive_patch = image_sequence[:, t_p, :, x_min: x_max + 1, y_min: y_max + 1].float()
patch_sequence[:, t_p, k, ...] = positive_patch
evaluated_kpts.append((t_p, k))
L_pos = L_pos + torch.norm(positive_patch - anchor_patch, p=2)
L_pos = L_pos + torch.norm(kpt_sequence[:, t, k, :] - kpt_sequence[:, t_p, k, :], p=2)
L_pos = (L_pos / (len(t_range) - 1)) if len(t_range) > 2 else L_pos
#
# NEGATIVES
#
L_neg = torch.tensor([0]).to(kpt_sequence.device)
# for t_n in range(0, T):
for t_n in t_range:
for k_n in range(0, K):
if (t_n in t_range or t_n == t) and k_n == k:
continue
else:
if (t_n, k_n) in evaluated_kpts:
negative_patch = patch_sequence[:, t_n, k_n].float()
else:
x_min, x_max, y_min, y_max = get_box_within_image_border(kpt_sequence, patch_size, H, W,
t_n, k_n)
negative_patch = image_sequence[:, t_n, :, x_min:x_max + 1, y_min:y_max + 1].float()
patch_sequence[:, t_n, k_n, ...] = negative_patch
evaluated_kpts.append((t_n, k_n))
L_neg = L_neg + torch.norm(negative_patch - anchor_patch, p=2)
L_neg = L_neg + torch.norm(kpt_sequence[:, t, k, :] - kpt_sequence[:, t_n, k_n, :], p=2)
L_neg = L_neg / (T*(K - 1) + T - len(t_range) + 1)
print(f't: {t} k: {k} = ', max(L_pos - L_neg + alpha, torch.tensor([0.0])).mean().item())
L[:, t, k] = max(L_pos - L_neg + alpha, torch.tensor([0.0]))
return torch.mean(L, dim=[0, 2]) | 8591d9359773a9b0445974da3926b3cade64d830 | 3,654,233 |
import numpy as np
import scipy.stats
def array_wishart_rvs(df, scale, **kwargs):
""" Wrapper around scipy.stats.wishart to always return a np.array """
if np.size(scale) == 1:
return np.array([[
scipy.stats.wishart(df=df, scale=scale, **kwargs).rvs()
]])
else:
return scipy.stats.wishart(df=df, scale=scale, **kwargs).rvs() | d14b26d8f1b05de1ac961499d96c604028fca379 | 3,654,234 |
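Example draws from array_wishart_rvs above: a scalar scale still comes back as a (1, 1) array, while a matrix scale gives a (d, d) draw:
draw_1d = array_wishart_rvs(df=3, scale=2.0)        # shape (1, 1)
draw_2d = array_wishart_rvs(df=5, scale=np.eye(3))  # shape (3, 3)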
def get_mpl_colors():
"""
==================
Colormap reference
==================
Reference for colormaps included with Matplotlib.
This reference example shows all colormaps included with Matplotlib. Note that
any colormap listed here can be reversed by appending "_r" (e.g., "pink_r").
These colormaps are divided into the following categories:
Sequential:
These colormaps are approximately monochromatic colormaps varying smoothly
between two color tones---usually from low saturation (e.g. white) to high
saturation (e.g. a bright blue). Sequential colormaps are ideal for
representing most scientific data since they show a clear progression from
low-to-high values.
Diverging:
These colormaps have a median value (usually light in color) and vary
smoothly to two different color tones at high and low values. Diverging
colormaps are ideal when your data has a median value that is significant
(e.g. 0, such that positive and negative values are represented by
different colors of the colormap).
Qualitative:
These colormaps vary rapidly in color. Qualitative colormaps are useful for
choosing a set of discrete colors. For example::
color_list = plt.cm.Set3(np.linspace(0, 1, 12))
gives a list of RGB colors that are good for plotting a series of lines on
a dark background.
Miscellaneous:
Colormaps that don't fit into the categories above.
"""
# Have colormaps separated into categories:
# http://matplotlib.org/examples/color/colormaps_reference.html
return [('Perceptually Uniform Sequential', [
'viridis', 'plasma', 'inferno', 'magma']),
('Sequential', [
'Greys', 'Purples', 'Blues', 'Greens', 'Oranges', 'Reds',
'YlOrBr', 'YlOrRd', 'OrRd', 'PuRd', 'RdPu', 'BuPu',
'GnBu', 'PuBu', 'YlGnBu', 'PuBuGn', 'BuGn', 'YlGn']),
('Sequential (2)', [
'binary', 'gist_yarg', 'gist_gray', 'gray', 'bone', 'pink',
'spring', 'summer', 'autumn', 'winter', 'cool', 'Wistia',
'hot', 'afmhot', 'gist_heat', 'copper']),
('Diverging', [
'PiYG', 'PRGn', 'BrBG', 'PuOr', 'RdGy', 'RdBu',
'RdYlBu', 'RdYlGn', 'Spectral', 'coolwarm', 'bwr', 'seismic']),
('Qualitative', [
'Pastel1', 'Pastel2', 'Paired', 'Accent',
'Dark2', 'Set1', 'Set2', 'Set3',
'tab10', 'tab20', 'tab20b', 'tab20c']),
('Miscellaneous', [
'flag', 'prism', 'ocean', 'gist_earth', 'terrain', 'gist_stern',
'gnuplot', 'gnuplot2', 'CMRmap', 'cubehelix', 'brg', 'hsv',
'gist_rainbow', 'rainbow', 'jet', 'nipy_spectral', 'gist_ncar'])] | 5926f878b59f3f41282968c67020f611ad928f28 | 3,654,235 |
async def async_setup_entry(hass, entry):
"""Set up Jenkins from a config entry."""
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, "sensor")
)
return True | c46912d11630c36effc07eed3273e42325c9b2b8 | 3,654,236 |
import math
def signal_to_dataset(raw, fsamp, intvs, labels):
"""Segmentize raw data into list of epochs.
returns dataset and label_array : a list of data, each block is 1
second, with fixed size. width is number of channels in certain standard
order.
Args:
raw: EEG signals. Shape: (n_channel, n_sample).
fsamp(int): sampling rate, i.e., window size of resulting epoch. Unit: Hz
        intvs: list of [start, end] intervals. Unit: second
        labels: list of labels. Must have the same length as intvs.
Returns: tuple (dataset, labels):
- dataset: list of data; (n_epochs, n_channels, n_sample_per_epoch)
- labels: list of labels
"""
ds, lbl = [], []
for i, inv in enumerate(intvs):
tstart, tend = inv
chopped_sig = chop_signal(
[ch[math.ceil(tstart*fsamp):math.floor(tend*fsamp)] for ch in raw],
fsamp)
ds.extend(chopped_sig)
lbl.extend([labels[i]] * len(chopped_sig))
return ds, lbl | 340bbb91bd6a36d1d3a20d0689e25c29e5b879c5 | 3,654,237 |
def project_dynamic_property_graph(graph, v_prop, e_prop, v_prop_type, e_prop_type):
"""Create project graph operation for nx graph.
Args:
graph (:class:`nx.Graph`): A nx graph.
v_prop (str): The node attribute key to project.
e_prop (str): The edge attribute key to project.
v_prop_type (str): Type of the node attribute.
e_prop_type (str): Type of the edge attribute.
Returns:
Operation to project a dynamic property graph. Results in a simple graph.
"""
check_argument(graph.graph_type == types_pb2.DYNAMIC_PROPERTY)
config = {
types_pb2.GRAPH_NAME: utils.s_to_attr(graph.key),
types_pb2.GRAPH_TYPE: utils.graph_type_to_attr(types_pb2.DYNAMIC_PROJECTED),
types_pb2.V_PROP_KEY: utils.s_to_attr(v_prop),
types_pb2.E_PROP_KEY: utils.s_to_attr(e_prop),
types_pb2.V_DATA_TYPE: utils.s_to_attr(utils.data_type_to_cpp(v_prop_type)),
types_pb2.E_DATA_TYPE: utils.s_to_attr(utils.data_type_to_cpp(e_prop_type)),
}
op = Operation(
graph._session_id,
types_pb2.PROJECT_GRAPH,
config=config,
output_types=types_pb2.GRAPH,
)
return op | 6e54180a4ef257c50a02104cc3a4cbbae107d233 | 3,654,238 |
def eqfm_(a, b):
"""Helper for comparing floats AND style names."""
n1, v1 = a
n2, v2 = b
if type(v1) is not float:
return eq_(a, b)
eqf_(v1, v2)
eq_(n1, n2) | 1ee53203baa6c8772a4baf240f68bb5898a5d516 | 3,654,239 |
def flatten_comment(seq):
"""Flatten a sequence of comment tokens to a human-readable string."""
# "[CommentToken(value='# Extra settings placed in ``[app:main]`` section in generated production.ini.\\n'), CommentToken(value='# Example:\\n'), CommentToken(value='#\\n'), CommentToken(value='# extra_ini_settings: |\\n'), CommentToken(value='# mail.host = mymailserver.internal\\n'), CommentToken(value='# websauna.superusers =\\n'), CommentToken(value='# [email protected]\\n'), CommentToken(value='#\\n')]
if not seq:
return ""
result = []
for item in seq:
if not item:
continue
if isinstance(item, CommentToken):
# Mangle away # comment start from the line
s = item.value
s = s.strip(" ")
s = s.lstrip("#")
s = s.rstrip("\n")
if s.startswith(" "):
s = s[1:]
result.append(s)
if result:
raw_comment = "\n".join(result)
else:
return ""
section_header = raw_comment.rfind("---")
if section_header >= 0:
raw_comment = raw_comment[section_header + 3:]
return raw_comment | 56104eb6e0109b6c677964cd1873244ff05f27fc | 3,654,240 |
def get_community(community_id):
"""
Verify that a community with a given id exists.
:param community_id: id of test community
:return: Community instance
:return: 404 error if doesn't exist
"""
try:
return Community.objects.get(pk=community_id)
except Community.DoesNotExist:
return | 33d16db86c53b7dd68dec8fe80639b560e41f457 | 3,654,241 |
import csv
from pprint import pprint
def load_labeled_info(csv4megan_excell, audio_dataset, ignore_files=None):
"""Read labeled info from spreat sheet
and remove samples with no audio file, also files given in ignore_files
"""
if ignore_files is None:
ignore_files = set()
with open(csv4megan_excell) as csvfile:
reader = csv.DictReader(csvfile)
reader = list(reader)
reader_strip = []
for row in reader:
row = {r: row[r].strip() for r in row}
reader_strip.append(row)
reader = reader_strip.copy()
missing_audio_files = []
for row in reader:
if audio_dataset.get(row['File Name'], None) is None:
missing_audio_files.append(row['File Name'])
missing_audio_files = set(missing_audio_files)
print((f'{len(missing_audio_files)} files are missing' +
' corresponding to excell entries'))
megan_data_sheet = []
for row in reader:
if row['File Name'] not in ignore_files:
if row['File Name'] not in missing_audio_files:
megan_data_sheet.append(row)
deleted_files = set()
deleted_files.update(ignore_files)
deleted_files.update(missing_audio_files)
pprint((f'-> {len(deleted_files)} number of samples are DELETED due to ' +
'ignore_files and missing_audio_files'))
return megan_data_sheet, list(deleted_files) | f196b02c8667ebe5e8d2d89a79be78c6eb838afe | 3,654,242 |
def de_dupe_list(input):
"""de-dupe a list, preserving order.
"""
sam_fh = []
for x in input:
if x not in sam_fh:
sam_fh.append(x)
return sam_fh | bbf1936f21c19195369e41b635bf0f99704b3210 | 3,654,243 |
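Example of de_dupe_list above, which keeps the first occurrence of each item:
print(de_dupe_list([3, 1, 3, 2, 1]))  # [3, 1, 2]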
import urllib2
def donwload_l10ns():
"""Download all l10ns in zip archive."""
url = API_PREFIX + 'download/' + FILENAME + KEY_SUFFIX
l10ns_file = urllib2.urlopen(url)
with open('all.zip','wb') as f:
f.write(l10ns_file.read())
return True | 26770dfc8f32947c1a32a287f811e95ffe314822 | 3,654,244 |
import numpy as np
def _constant_velocity_heading_from_kinematics(kinematics_data: KinematicsData,
sec_from_now: float,
sampled_at: int) -> np.ndarray:
"""
Computes a constant velocity baseline for given kinematics data, time window
and frequency.
:param kinematics_data: KinematicsData for agent.
:param sec_from_now: How many future seconds to use.
:param sampled_at: Number of predictions to make per second.
"""
x, y, vx, vy, _, _, _, _, _, _ = kinematics_data
preds = []
time_step = 1.0 / sampled_at
for time in np.arange(time_step, sec_from_now + time_step, time_step):
preds.append((x + time * vx, y + time * vy))
return np.array(preds) | 2b6781ceb9e012486d3063b8f3cff29164ff8743 | 3,654,245 |
def arg_int(name, default=None):
""" Fetch a query argument, as an integer. """
try:
v = request.args.get(name)
return int(v)
except (ValueError, TypeError):
return default | 110088655bc81363e552f31d9bbd8f4fa45abd1b | 3,654,246 |
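A hypothetical Flask view using arg_int above; the Flask app object and the request import are assumptions, since the snippet itself only relies on a global `request`:
from flask import Flask, request
app = Flask(__name__)
@app.route("/items")
def items():
    page = arg_int("page", default=1)  # /items?page=3 -> 3, /items?page=abc -> 1
    return f"page {page}"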
import os
def db(app, request):
"""Session-wide test database."""
if os.path.exists(os.path.join(INSTANCE_FOLDER_PATH, 'test.sqlite')):
os.unlink(os.path.join(INSTANCE_FOLDER_PATH, 'test.sqlite'))
def teardown():
_db.drop_all()
os.unlink(os.path.join(INSTANCE_FOLDER_PATH, 'test.sqlite'))
_db.app = app
apply_migrations(app)
request.addfinalizer(teardown)
return _db | c14b929a9fac6978a7dcb5b0815297598c8a94e1 | 3,654,247 |
def adapter_rest(request, api_module_rest, api_client_rest):
"""Pass."""
return {
"adapter": request.param,
"api_module": api_module_rest,
"api_client": api_client_rest,
} | 8b96313cb190f6f8a97a853e24a5fcfade291d76 | 3,654,248 |
import numpy
import os
import shutil
def extract(lon, lat, dep, prop=['rho', 'vp', 'vs'], **kwargs):
"""
Simple CVM-S extraction
lon, lat, dep: Coordinate arrays
prop: 'rho', 'vp', or 'vs'
nproc: Optional, number of processes
Returns: (rho, vp, vs) material arrays
"""
lon = numpy.asarray(lon, 'f')
lat = numpy.asarray(lat, 'f')
dep = numpy.asarray(dep, 'f')
shape = dep.shape
nsample = dep.size
cwd = os.getcwd()
if os.path.exists('cvms-tmp'):
shutil.rmtree('cvms-tmp')
os.mkdir('cvms-tmp')
os.chdir('cvms-tmp')
cfg = configure(**kwargs)
lon.tofile(cfg['file_lon'])
lat.tofile(cfg['file_lat'])
dep.tofile(cfg['file_dep'])
del(lon, lat, dep)
run(nsample=nsample, **kwargs)
out = []
if type(prop) not in [list, tuple]:
prop = [prop]
for v in prop:
f = cfg['file_' + v.lower()]
out += [numpy.fromfile(f, 'f').reshape(shape)]
os.chdir(cwd)
shutil.rmtree('cvms-tmp')
return out | 1503faebece8accb380ad47c0e7108a3313a2080 | 3,654,249 |
def remove_quotes(string):
"""Function to remove quotation marks surrounding a string"""
string = string.strip()
while len(string) >= 3 and string.startswith('\'') and string.endswith('\''):
string = string[1:-1]
string = quick_clean(string)
string = quick_clean(string)
return string | c6585c054abaef7248d30c1814fb13b6b9d01852 | 3,654,250 |
import pandas as pd
def compute_list_featuretypes(
data,
list_featuretypes,
fourier_n_largest_frequencies,
wavelet_depth,
mother_wavelet,
):
"""
This function lets the user choose which combination of features they
want to have computed.
list_featuretypes:
"Basic" - min, max, mean, kurt ,skew, std, sum.
"FourierComplete" - all frequencies amplitudes and phases.
"FourierNLargest" - n largest frequencies and their values.
"WaveletComplete" - all approximation and details coefficients at each depth.
"WaveletBasic" - takes "Basic" (min, max, etc) at each depth.
Args:
data (pd.DataFrame()) : one column from which to make features.
list_featuretypes (list) : list of feature types to be computed.
fourier_n_largest_frequencies (int) : amount of fourier features.
wavelet_depth (int) : level of depth up to which the wavelet is computed.
mother_wavelet (str) : type of wavelet used for the analysis.
Returns:
features (pd.DataFrame()) : row of features.
"""
if type(list_featuretypes) != list:
raise AttributeError("'list_featuretypes' must be a list.")
allowed_components = ["Basic", "FourierNLargest", "WaveletComplete", "WaveletBasic", "FourierComplete"]
for argument in list_featuretypes:
if argument not in allowed_components:
raise ValueError(f"argument must be one of {allowed_components}")
features_basic = pd.DataFrame()
features_fourier = pd.DataFrame()
features_wavelet = pd.DataFrame()
features_wavelet_basic = pd.DataFrame()
features_fft2 = pd.DataFrame()
if "Basic" in list_featuretypes:
features_basic = compute_basic(data)
if "FourierNLargest" in list_featuretypes:
features_fourier = compute_fourier_n_largest(data, fourier_n_largest_frequencies)
if "FourierComplete" in list_featuretypes:
features_fft2 = compute_fourier_complete(data)
if "WaveletComplete" in list_featuretypes:
features_wavelet = compute_wavelet_complete(data, wavelet_depth, mother_wavelet)
if "WaveletBasic" in list_featuretypes:
features_wavelet_basic = compute_wavelet_basic(
data, wavelet_depth, mother_wavelet
)
features = pd.concat(
[features_basic, features_fourier, features_fft2, features_wavelet, features_wavelet_basic],
axis=1,
)
return features | f1c8fea04a01f6b7a3932434e27aba7ea2e17948 | 3,654,251 |
def select(locator):
"""
Returns an :class:`Expression` for finding selects matching the given locator.
The query will match selects that meet at least one of the following criteria:
* the element ``id`` exactly matches the locator
* the element ``name`` exactly matches the locator
* the element ``id`` exactly matches the ``for`` attribute of a corresponding ``label`` element
whose text matches the locator
* the element is nested within a ``label`` element whose text matches the locator
Args:
locator (str): A string that identifies the desired selects.
Returns:
Expression: An :class:`Expression` object matching the desired selects.
"""
field_expr = x.descendant("select")
return _locate_field(field_expr, locator) | a3cd093a62d6c926fd9f782cdec35eadc34eba67 | 3,654,252 |
def send_image(filename):
"""Route to uploaded-by-client images
Returns
-------
file
Image file on the server (see Flask documentation)
"""
return send_from_directory(app.config['UPLOAD_FOLDER'], filename) | 68b99ca59d6d4b443a77560d3eb1913422407764 | 3,654,253 |
def swissPairings():
"""Returns a list of pairs of players for the next round of a match.
Assuming that there are an even number of players registered, each player
appears exactly once in the pairings. Each player is paired with another
player with an equal or nearly-equal win record, that is, a player adjacent
to him or her in the standings.
Returns:
A list of tuples, each of which contains (id1, name1, id2, name2)
id1: the first player's unique id
name1: the first player's name
id2: the second player's unique id
name2: the second player's name
"""
with get_cursor() as cursor:
cursor.execute("SELECT * FROM pairings")
pairings = cursor.fetchall()
return pairings | f83a8a108f2d926c948999014f0dbb79a3b1c428 | 3,654,254 |
import torch
import numpy as np
def split(data, batch):
"""
PyG util code to create graph batches
"""
node_slice = torch.cumsum(torch.from_numpy(np.bincount(batch)), 0)
node_slice = torch.cat([torch.tensor([0]), node_slice])
row, _ = data.edge_index
edge_slice = torch.cumsum(torch.from_numpy(np.bincount(batch[row])), 0)
edge_slice = torch.cat([torch.tensor([0]), edge_slice])
# Edge indices should start at zero for every graph.
data.edge_index -= node_slice[batch[row]].unsqueeze(0)
data.__num_nodes__ = torch.bincount(batch).tolist()
slices = {'edge_index': edge_slice}
if data.x is not None:
slices['x'] = node_slice
if data.edge_attr is not None:
slices['edge_attr'] = edge_slice
if data.y is not None:
if data.y.size(0) == batch.size(0):
slices['y'] = node_slice
else:
slices['y'] = torch.arange(0, batch[-1] + 2, dtype=torch.long)
return data, slices | 69af8b969d7f0da28a1f7fda951f64974c238da0 | 3,654,255 |
def _get_shadowprice_data(scenario_id):
"""Gets data necessary for plotting shadow price
:param str/int scenario_id: scenario id
:return: (*tuple*) -- interconnect as a str, bus data as a data frame, lmp data
as a data frame, branch data as a data frame and congestion data as a data
frame
"""
s = Scenario(scenario_id)
interconnect = s.info["interconnect"]
interconnect = " ".join(interconnect.split("_"))
s_grid = s.state.get_grid()
# Get bus and add location data
bus_map = project_bus(s_grid.bus)
# get branch and add location data
branch_map = project_branch(s_grid.branch)
# get congestion
congu = s.state.get_congu()
congl = s.state.get_congl()
cong_abs = pd.DataFrame(
np.maximum(congu.to_numpy(), congl.to_numpy()),
columns=congu.columns,
index=congu.index,
)
return interconnect, bus_map, s.state.get_lmp(), branch_map, cong_abs | 57488b7ff6984cc292dce3bf76d18d0b2585b7ff | 3,654,256 |
import json
def get_city_reviews(city):
"""
Given a city name, return the data for all reviews.
Returns a pandas DataFrame.
"""
with open(f"{DATA_DIR}/{city}/review.json", "r") as f:
review_list = []
for line in f:
review = json.loads(line)
review_list.append(review)
# convert to pandas DataFrame
reviews = to_pandas([city], {city: review_list})
# optimize memory usage
reviews = optimize(reviews, {'city': 'category'})
return reviews | e0723ab90dafc53059677928fb553cf197abecc1 | 3,654,257 |
def extract_rows_from_table(dataset, col_names, fill_null=False):
""" Extract rows from DB table.
:param dataset:
:param col_names:
:return:
"""
trans_dataset = transpose_list(dataset)
rows = []
if type(col_names).__name__ == 'str':
col_names = [col_names]
for col_name in col_names:
if col_name in dataset[0]:
idx = dataset[0].index(col_name)
rows.append(trans_dataset[idx])
else:
if fill_null:
null_list = [''] * (len(trans_dataset[0])-1)
null_list = [col_name] + null_list
rows.append(null_list)
else:
pass
if len(col_names) == 1:
return rows[0]
else:
return transpose_list(rows) | 91371215f38a88b93d08c467303ccbd45f57b369 | 3,654,258 |
from rdkit import Chem
def CalculateHydrogenNumber(mol):
"""
#################################################################
Calculation of Number of Hydrogen in a molecule
---->nhyd
Usage:
result=CalculateHydrogenNumber(mol)
Input: mol is a molecule object.
Output: result is a numeric value.
#################################################################
"""
i = 0
Hmol = Chem.AddHs(mol)
for atom in Hmol.GetAtoms():
if atom.GetAtomicNum() == 1:
i = i + 1
return i | 0b9fbad14c8e9f46beab5208ab0f929fef1ab263 | 3,654,259 |
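Example with RDKit: ethanol has six hydrogens once they are made explicit:
mol = Chem.MolFromSmiles("CCO")
print(CalculateHydrogenNumber(mol))  # 6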
def check_update ():
"""Return the following values:
(False, errmsg) - online version could not be determined
(True, None) - user has newest version
(True, (version, url string)) - update available
(True, (version, None)) - current version is newer than online version
"""
version, value = get_online_version()
if version is None:
# value is an error message
return False, value
if version == CurrentVersion:
# user has newest version
return True, None
if is_newer_version(version):
# value is an URL linking to the update package
return True, (version, value)
# user is running a local or development version
return True, (version, None) | 8bba3e7fbe11ce6c242f965450628dc94b6c2c0b | 3,654,260 |
import torch
def count_regularization_baos_for_both(z, count_tokens, count_pieces, mask=None):
"""
Compute regularization loss, based on a given rationale sequence
Use Yujia's formulation
Inputs:
z -- torch variable, "binary" rationale, (batch_size, sequence_length)
percentage -- the percentage of words to keep
Outputs:
a loss value that contains two parts:
continuity_loss -- \sum_{i} | z_{i-1} - z_{i} |
sparsity_loss -- |mean(z_{i}) - percent|
"""
# (batch_size,)
if mask is not None:
mask_z = z * mask
seq_lengths = torch.sum(mask, dim=1)
else:
mask_z = z
seq_lengths = torch.sum(z - z + 1.0, dim=1)
mask_z_ = torch.cat([mask_z[:, 1:], mask_z[:, -1:]], dim=-1)
continuity_ratio = torch.sum(torch.abs(mask_z - mask_z_), dim=-1) / seq_lengths #(batch_size,)
percentage = count_pieces * 2 / seq_lengths
# continuity_loss = F.threshold(continuity_ratio - percentage, 0, 0, False)
continuity_loss = torch.abs(continuity_ratio - percentage)
sparsity_ratio = torch.sum(mask_z, dim=-1) / seq_lengths #(batch_size,)
percentage = count_tokens / seq_lengths #(batch_size,)
# sparsity_loss = F.threshold(sparsity_ratio - percentage, 0, 0, False)
sparsity_loss = torch.abs(sparsity_ratio - percentage)
return continuity_loss, sparsity_loss | 7925c8621866a20f0c6130cd925afffe144e1c7c | 3,654,261 |
def unsqueeze_samples(x, n):
"""
"""
bn, d = x.shape
x = x.reshape(bn//n, n, d)
return x | 0c7b95e97df07aea72e9c87996782081763664cf | 3,654,262 |
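Example of unsqueeze_samples above, regrouping a flat (batch*n, d) array into (batch, n, d); a NumPy array is used here, but any object with .shape and .reshape (e.g. a torch tensor) works the same way:
import numpy as np
x = np.arange(12).reshape(6, 2)          # bn = 6, d = 2
print(unsqueeze_samples(x, n=3).shape)   # (2, 3, 2)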
import numpy as np
def f_snr(seq):
"""compute signal to noise rate of a seq
Args:
seq: input array_like sequence
paras: paras array, in this case should be "axis"
"""
seq = np.array(seq, dtype=np.float64)
result = np.mean(seq)/float(np.std(seq))
if np.isinf(result):
print "marker"
result = 0
return result | b018b5e4c249cfafcc3ce8b485c917bfcdd19ce2 | 3,654,263 |
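Example of f_snr above on a constant signal with small Gaussian noise, which should give a ratio of roughly 50:
seq = 5.0 + np.random.normal(0, 0.1, 1000)
print(f_snr(seq))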
def _lorentzian_pink_beam(p, x):
"""
@author Saransh Singh, Lawrence Livermore National Lab
@date 03/22/2021 SS 1.0 original
@details the lorentzian component of the pink beam peak profile
obtained by convolution of gaussian with normalized back to back
exponentials. more details can be found in
Von Dreele et. al., J. Appl. Cryst. (2021). 54, 3–6
p has the following parameters
p = [A,x0,alpha0,alpha1,beta0,beta1,fwhm_l]
"""
A,x0,alpha,beta,fwhm_l = p
del_tth = x - x0
p = -alpha*del_tth + 1j*0.5*alpha*fwhm_l
q = -beta*del_tth + 1j*0.5*beta*fwhm_l
y = np.zeros(x.shape)
f1 = exp1exp(p)
f2 = exp1exp(q)
y = -(alpha*beta)/(np.pi*(alpha+beta))*(f1+f2).imag
mask = np.isnan(y)
y[mask] = 0.
y *= A
return y | 7de93743da63ab816133e771075a8e8f0386ad35 | 3,654,264 |
def get_q_HPU_ave(Q_HPU):
"""1時間平均のヒートポンプユニットの平均暖房出力 (7)
Args:
Q_HPU(ndarray): 1時間当たりのヒートポンプユニットの暖房出力 (MJ/h)
Returns:
ndarray: 1時間平均のヒートポンプユニットの平均暖房出力 (7)
"""
return Q_HPU * 10 ** 6 / 3600 | fdf339d7f8524f69409711d4daefd1e2aaccbc76 | 3,654,265 |
def particles(t1cat):
"""Return a list of the particles in a T1 catalog DataFrame.
Use it to find the individual particles involved in a group of events."""
return particles_fromlist(t1cat.particles.tolist()) | 38f9a077b7bab55b76a19f467f596ddb28e40c60 | 3,654,266 |
def interp_coeff_lambda3(i2,dx2,nx):
"""
NOTE:
input and output index from 0 to nx-1 !!!
"""
i2=i2+1 # TODO, waiting for script to be updated
# Find index of other cells
i1 = i2 - 1
i3 = i2 + 1
i4 = i2 + 2
# Find normalised distance to other cells
dx1 = dx2 + 1.0
dx3 = 1.0 - dx2
dx4 = 2.0 - dx2
# lambda 3 kernel
ax1 = 1.0 / 6.0 * (1.0 - dx1) * (2.0 - dx1) * (3.0 - dx1)
ax2 = 1.0 / 2.0 * (1 - dx2 ** 2) * (2 - dx2)
ax3 = 1.0 / 2.0 * (1 - dx3 ** 2) * (2 - dx3)
ax4 = 1.0 / 6.0 * (1.0 - dx4) * (2.0 - dx4) * (3.0 - dx4)
if i2==nx-1:
i1 ,i2 ,i3 ,i4 = 1 ,1 ,1 ,1
ax1,ax2,ax3,ax4 = 0.,0.,0.,0.
elif i2 == 1:
i1 ,i2 ,i3 ,i4 = 1 ,1 ,1 ,1
ax1,ax2,ax3,ax4 = 0.,0.,0.,0.
elif i2 < 1:
# Should not happen
i1 ,i2 ,i3 ,i4 = 1 ,1 ,1 ,1
ax1,ax2,ax3,ax4 = 0.,0.,0.,0.
elif (i2 > nx - 1):
# Should not happen
i1 ,i2 ,i3 ,i4 = 1 ,1 ,1 ,1
ax1,ax2,ax3,ax4 = 0.,0.,0.,0.
elif i1 <= 0 or i2 <= 0:
# Might happen if on grid
i1 ,i2 ,i3 ,i4 = 1 ,1 ,1 ,1
ax1,ax2,ax3,ax4 = 0.,0.,0.,0.
elif i4 > nx or i3 > nx:
# Might happen if on grid
i1 ,i2 ,i3 ,i4 = 1 ,1 ,1 ,1
ax1,ax2,ax3,ax4 = 0.,0.,0.,0.
return ax1,ax2,ax3,ax4,i1-1,i2-1,i3-1,i4-1 | 560125921588e6302da8ab16e2d7394169fdcbea | 3,654,267 |
def prime_list(num):
"""
This function returns a list of prime numbers less than natural number entered.
:param num: natural number
:return result: List of primes less than natural number entered
"""
prime_table = [True for _ in range(num+1)]
i = 2
while i ** 2 <= num:
if prime_table[i]:
j = i + i
while j <= num:
prime_table[j] = False
j += i
i += 1
result = [i for i in range(num) if prime_table[i] and i >= 2]
return result | c8e05aae2a59c229cfafb997469dd8ccacdda0fc | 3,654,268 |
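Example call of prime_list above:
print(prime_list(20))  # [2, 3, 5, 7, 11, 13, 17, 19]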
import time
def check_deadline_exceeded_and_store_partial_minimized_testcase(
deadline, testcase_id, job_type, input_directory, file_list,
file_to_run_data, main_file_path):
"""Store the partially minimized test and check the deadline."""
testcase = data_handler.get_testcase_by_id(testcase_id)
store_minimized_testcase(testcase, input_directory, file_list,
file_to_run_data, main_file_path)
deadline_exceeded = time.time() > deadline
if deadline_exceeded:
attempts = testcase.get_metadata(
'minimization_deadline_exceeded_attempts', default=0)
if attempts >= MAX_DEADLINE_EXCEEDED_ATTEMPTS:
_skip_minimization(testcase,
'Exceeded minimization deadline too many times.')
else:
testcase.set_metadata('minimization_deadline_exceeded_attempts',
attempts + 1)
tasks.add_task('minimize', testcase_id, job_type)
return deadline_exceeded | 443c09a8b5bcd8141f721b8ea90348879bc3b8c5 | 3,654,269 |
import ipaddress
def _item_to_python_repr(item, definitions):
"""Converts the given Capirca item into a typed Python object."""
# Capirca comments are just appended to item strings
s = item.split("#")[0].strip()
# A reference to another network
if s in definitions.networks:
return s
# IPv4 address / network
try:
return ipaddress.IPv4Address(s)
except ValueError:
pass
try:
return ipaddress.IPv4Network(s, strict=False)
except ValueError:
pass
# IPv6 address / network
try:
return ipaddress.IPv6Address(s)
except ValueError:
pass
try:
return ipaddress.IPv6Network(s, strict=False)
except ValueError:
pass
raise ValueError("Unknown how to convert {s}".format(s=s)) | 9881e304e923eb2cea8223224273f4c9ef81696b | 3,654,270 |
import numpy
import numpoly
def floor_divide(x1, x2, out=None, where=True, **kwargs):
"""
Return the largest integer smaller or equal to the division of the inputs.
It is equivalent to the Python ``//`` operator and pairs with the
Python ``%`` (`remainder`), function so that ``a = a % b + b * (a // b)``
up to roundoff.
Args:
x1 (numpoly.ndpoly):
Numerator.
x2 (numpoly.ndpoly):
Denominator. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which becomes the shape of the
output).
out (Optional[numpy.ndarray]):
A location into which the result is stored. If provided, it must
have a shape that the inputs broadcast to. If not provided or
`None`, a freshly-allocated array is returned. A tuple (possible
only as a keyword argument) must have length equal to the number of
outputs.
where (Optional[numpy.ndarray]):
This condition is broadcast over the input. At locations where the
condition is True, the `out` array will be set to the ufunc result.
Elsewhere, the `out` array will retain its original value.
Note that if an uninitialized `out` array is created via the default
``out=None``, locations within it where the condition is False will
remain uninitialized.
kwargs:
Keyword args passed to numpy.ufunc.
Returns:
(numpoly.ndpoly):
This is a scalar if both `x1` and `x2` are scalars.
Examples:
>>> xyz = [1, 2, 4]*numpoly.symbols("x y z")
>>> numpoly.floor_divide(xyz, 2.)
polynomial([0.0, y, 2.0*z])
>>> numpoly.floor_divide(xyz, [1, 2, 4])
polynomial([x, y, z])
>>> numpoly.floor_divide([1, 2, 4], xyz)
Traceback (most recent call last):
...
ValueError: only constant polynomials can be converted to array.
"""
x1, x2 = numpoly.align_polynomials(x1, x2)
x2 = x2.tonumpy()
no_output = out is None
if no_output:
out = numpoly.ndpoly(
exponents=x1.exponents,
shape=x1.shape,
names=x1.indeterminants,
dtype=numpy.common_type(x1, numpy.array(1.)),
)
for key in x1.keys:
numpy.floor_divide(x1[key], x2, out=out[key], where=where, **kwargs)
if no_output:
out = numpoly.clean_attributes(out)
return out | 9269d088c0893b9b6b4c3b27e8dc83c4493ac2c9 | 3,654,271 |
from typing import Callable
import click
def node_args_argument(command: Callable[..., None]) -> Callable[..., None]:
"""
Decorate a function to allow choosing arguments to run on a node.
"""
function = click.argument(
'node_args',
type=str,
nargs=-1,
required=True,
)(command) # type: Callable[..., None]
return function | 89365a41b7665cf291f5c15852db81e89aeef9a7 | 3,654,272 |
import functools
import unittest
def _tag_error(func):
"""Decorates a unittest test function to add failure information to the TestCase."""
@functools.wraps(func)
def decorator(self, *args, **kwargs):
"""Add failure information to `self` when `func` raises an exception."""
self.test_failed = False
try:
func(self, *args, **kwargs)
except unittest.SkipTest:
raise
except Exception: # pylint: disable=broad-except
self.test_failed = True
raise # re-raise the error with the original traceback.
return decorator | a2818c63647410abea3fde0b7f4fdae667b558bf | 3,654,273 |
import fnmatch
import sys
import traceback
import logging
import os
def create_drizzle_products(total_obj_list, custom_limits=None):
"""
Run astrodrizzle to produce products specified in the total_obj_list.
Parameters
----------
total_obj_list: list
List of TotalProduct objects, one object per instrument/detector combination is
a visit. The TotalProduct objects are comprised of FilterProduct and ExposureProduct
objects.
custom_limits : list, optional
4-element list containing the mosaic bounding rectangle X min and max and Y min and max values for
custom mosaics
RETURNS
-------
product_list: list
A list of output products
"""
# Get rules files
rules_files = {}
log.info("Processing with astrodrizzle version {}".format(drizzlepac.astrodrizzle.__version__))
# Generate list of all input exposure filenames that are to be processed
edp_names = []
for t in total_obj_list:
edp_names += [e.full_filename for e in t.edp_list]
# Define dataset-specific rules filenames for each input exposure
for imgname in edp_names:
rules_files[imgname] = proc_utils.get_rules_file(imgname)
print('Generated RULES_FILE names of: \n{}\n'.format(rules_files))
# Keep track of all the products created for the output manifest
product_list = []
# For each detector (as the total detection product are instrument- and detector-specific),
# create the drizzle-combined filtered image, the drizzled exposure (aka single) images,
# and finally the drizzle-combined total detection image.
for filt_obj in total_obj_list:
filt_obj.rules_file = rules_files[filt_obj.edp_list[0].full_filename]
log.info("~" * 118)
# Get the common WCS for all images which are part of a total detection product,
# where the total detection product is detector-dependent.
meta_wcs = filt_obj.generate_metawcs(custom_limits=custom_limits)
log.info("CREATE DRIZZLE-COMBINED FILTER IMAGE: {}\n".format(filt_obj.drizzle_filename))
filt_obj.wcs_drizzle_product(meta_wcs)
product_list.append(filt_obj.drizzle_filename)
product_list.append(filt_obj.trl_filename)
# Add individual single input images with updated WCS headers to manifest
for exposure_obj in filt_obj.edp_list:
product_list.append(exposure_obj.full_filename)
# Create Drizzled images for each input on SkyCell pixels
exposure_obj.wcs_drizzle_product(meta_wcs)
# Add drizzled FLC images to manifest
product_list.append(exposure_obj.drizzle_filename)
product_list.append(exposure_obj.trl_filename)
# Ensure that all drizzled products have headers that are to specification
try:
log.info("Updating these drizzle products for CAOM compatibility:")
fits_files = fnmatch.filter(product_list, "*dr?.fits")
for filename in fits_files:
log.info(" {}".format(filename))
proc_utils.refine_product_headers(filename, total_obj_list)
except Exception:
log.critical("Trouble updating drizzle products for CAOM.")
exc_type, exc_value, exc_tb = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_tb, file=sys.stdout)
logging.exception("message")
# Remove rules files copied to the current working directory
for rules_filename in list(rules_files.values()):
log.info("Removed rules file {}".format(rules_filename))
os.remove(rules_filename)
# Add primary header information to all objects
for filt_obj in total_obj_list:
filt_obj = poller_utils.add_primary_fits_header_as_attr(filt_obj)
# Return product list for creation of pipeline manifest file
return product_list | 838ad7e0f3590e3dab4f7383f33f9c5d5c55e6d7 | 3,654,274 |
from datetime import datetime, timedelta
def get_submission_praw(n, sub, n_num):
"""
Returns a list of results for submission in past:
1st list: current result from n hours ago until now
2nd list: prev result from 2n hours ago until n hours ago
"""
mid_interval = datetime.today() - timedelta(hours=n)
timestamp_mid = int(mid_interval.timestamp())
timestamp_start = int((mid_interval - timedelta(hours=n)).timestamp())
timestamp_end = int(datetime.today().timestamp())
recent = {}
prev = {}
subreddit = reddit.subreddit(sub)
all_results = []
for post in subreddit.new(limit=n_num):
all_results.append([post.title, post.link_flair_text, post.selftext, post.score, post.num_comments,
post.created_utc])
# start --> mid --> end
recent[sub] = [posts for posts in all_results if timestamp_mid <= posts[5] <= timestamp_end]
prev[sub] = [posts for posts in all_results if timestamp_start <= posts[5] < timestamp_mid]
return recent, prev | 692af49736fac07a2de51d1cd0c4abcfe7bb8ee3 | 3,654,275 |
import numpy as np
import scipy.linalg
def memory_kernel_logspace(dt, coeffs, dim_x, noDirac=False):
"""
Return the value of the estimated memory kernel
Parameters
----------
dt: Timestep
coeffs : Coefficients for diffusion and friction
dim_x: Dimension of visible variables
noDirac: Remove the dirac at time zero
Returns
-------
timespan : array-like, shape (n_samples, )
Array of time to evaluate memory kernel
kernel_evaluated : array-like, shape (n_samples, dim_x,dim_x)
Array of values of the kernel at time provided
"""
Avv = coeffs["A"][:dim_x, :dim_x]
Ahv = coeffs["A"][dim_x:, :dim_x]
Avh = coeffs["A"][:dim_x, dim_x:]
Ahh = coeffs["A"][dim_x:, dim_x:]
eigs = np.linalg.eigvals(Ahh)
Kernel = np.zeros((150, dim_x, dim_x))
final_time = 25 / np.min(np.abs(np.real(eigs)))
times = np.logspace(np.log10(dt), np.log10(final_time), num=150)
for n, t in enumerate(times):
Kernel[n, :, :] = -np.matmul(Avh, np.matmul(scipy.linalg.expm(-1 * t * Ahh), Ahv))
if not noDirac:
Kernel[0, :, :] = Kernel[0, :, :] + Avv
return times, Kernel | 21e6aed08bebd91f359efa216ab1331cf9ace310 | 3,654,276 |
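# Illustrative usage of memory_kernel_logspace above (not part of the original snippet).
# The 2x2 matrix below is a made-up coefficient set with dim_x = 1 visible variable,
# chosen only to show the expected `coeffs` layout and the output shapes.
_A = np.array([[1.0, 0.5],
               [-0.5, 2.0]])
_times, _kernel = memory_kernel_logspace(dt=0.01, coeffs={"A": _A}, dim_x=1)
assert _times.shape == (150,) and _kernel.shape == (150, 1, 1)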
import numpy as np
def _is_constant(x, atol=1e-7, positive=None):
"""
True if x is a constant array, within atol
"""
x = np.asarray(x)
return (np.max(np.abs(x - x[0])) < atol and
(np.all((x > 0) == positive) if positive is not None else True)) | 0b272dd843adbd4eaa4ebbe31efe6420de05a6dd | 3,654,277 |
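# Illustrative usage of _is_constant above (not part of the original snippet).
assert _is_constant([3.0, 3.0, 3.0])
assert not _is_constant([3.0, 3.0 + 1e-3, 3.0])        # variation exceeds atol
assert not _is_constant([-1.0, -1.0], positive=True)   # constant, but not positive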
import numpy as np
def estimate_M(X, estimator, B, ratio):
"""Estimating M with Block or incomplete U-statistics estimator
:param B: Block size
:param ratio: size of incomplete U-statistics estimator
"""
p = X.shape[1]
x_bw = util.meddistance(X, subsample = 1000)**2
kx = kernel.KGauss(x_bw)
if estimator == 'inc':
hsic_M = hsic.HSIC_Inc(kx, kx, ratio = ratio)
else: # 'block'
hsic_M = hsic.HSIC_Block(kx, kx, bsize = B)
M_true = np.zeros((p, p))
for i in range(p):
for j in range(i+1):
M_true[i, j] = np.mean(hsic_M.estimates(X[:, i, np.newaxis], X[:, j, np.newaxis]))
M_true[j, i] = M_true[i, j]
M = nearestPD(M_true) # positive definite approximation
return M_true, M | 656b83eac9e522b1feb20a4b5b56649b9553ecb0 | 3,654,278 |
def query_yes_no(question, default="yes"):
"""Queries user for confimration"""
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
console.print(question + escape(prompt))
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
console.print("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n") | 58e9bba831155ca9f4d4879a5e960949757b0562 | 3,654,279 |
import base64
import binascii
def decode(password, encoded, notice):
"""
:type password: str
:type encoded: str
"""
dec = []
try:
encoded_bytes = base64.urlsafe_b64decode(encoded.encode()).decode()
except binascii.Error:
notice("Invalid input '{}'".format(encoded))
return
for i in range(len(encoded_bytes)):
key_c = password[i % len(password)]
dec_c = chr((256 + ord(encoded_bytes[i]) - ord(key_c)) % 256)
dec.append(dec_c)
return "".join(dec) | 5cf82bfbbe7eee458914113f648dadbe7b15dee8 | 3,654,280 |
def read_file_unlabelled_data(file_name):
"""
    read_file_unlabelled_data reads unlabelled data from file_name.
    These files are to be in csv-format with one token per line (see the example project).
    returns text_vector:
    Ex:
    [['7_7', 'perhaps', 'there', 'is', 'a_a', 'better', 'way', '._.'], ['2_2', 'Why', 'are', 'you', 'doing', 'doing', 'it', '._.']]
"""
# Read file, to get text, grouped into sentences
text_vector = []
current_text = []
f = open(file_name)
for line in f:
word = line.strip()
if word != "":
if len(word) == 1:
word = word + "_" + word # to cover for a bug in scikit learn's tokenization
current_text.append(word)
else:
if len(current_text) != 0: # end of sentence
text_vector.append(current_text)
current_text = []
if len(current_text) != 0: # the last sentence
text_vector.append(current_text)
f.close()
return text_vector | f171ac6c4728aa67ef59b523acc0a006b3b4f16a | 3,654,281 |
from functools import reduce
def replace(data, replacements):
""" Allows to performs several string substitutions.
This function performs several string substitutions on the initial ``data`` string using a list
of 2-tuples (old, new) defining substitutions and returns the resulting string.
"""
return reduce(lambda a, kv: a.replace(*kv), replacements, data) | 37b2ad5b9b6d50d81a8c1bcded9890de3c840722 | 3,654,282 |
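# Illustrative usage of replace above (not part of the original snippet).
assert replace("hello world", [("hello", "hi"), ("world", "there")]) == "hi there"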
def fake_kafka() -> FakeKafka:
"""Fixture for fake kafka."""
return FakeKafka() | 35fdcf2030dda1cab2be1820549f67dc246cf88f | 3,654,283 |
from typing import Union
import operator
import pandas as pd
def rr20(prec: pd.Series) -> Union[float, int]:
"""Function for count of heavy precipitation (days where rr greater equal 20mm)
Args:
prec (list): value array of precipitation
Returns:
np.nan or number: the count of icing days
"""
assert isinstance(prec, pd.Series)
op = operator.ge
num = 20.0
return number_of(prec, num, op) | 4686eccac5be53b4a888d8bf0649c72e65d81bdb | 3,654,284 |
import numpy as np
def get_neg_label(cls_label: np.ndarray, num_neg: int) -> np.ndarray:
"""Generate random negative samples.
:param cls_label: Class labels including only positive samples.
:param num_neg: Number of negative samples.
:return: Label with original positive samples (marked by 1), negative
samples (marked by -1), and ignored samples (marked by 0)
"""
seq_len, num_scales = cls_label.shape
cls_label = cls_label.copy().reshape(-1)
cls_label[cls_label < 0] = 0 # reset negative samples
neg_idx, = np.where(cls_label == 0)
np.random.shuffle(neg_idx)
neg_idx = neg_idx[:num_neg]
cls_label[neg_idx] = -1
cls_label = np.reshape(cls_label, (seq_len, num_scales))
return cls_label | 3cd0ad5c1973eff969330f014c405f39092b733b | 3,654,285 |
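# Illustrative usage of get_neg_label above (not part of the original snippet);
# the negative positions are drawn at random, so only the counts are checked here.
_cls = np.zeros((6, 2))
_cls[2, 1] = 1  # a single positive sample
_mixed = get_neg_label(_cls, num_neg=3)
assert (_mixed == 1).sum() == 1 and (_mixed == -1).sum() == 3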
import numpy as np
def G12(x, a):
"""
Eqs 20, 24, 25 of Khangulyan et al (2014)
"""
alpha, a, beta, b = a
pi26 = np.pi ** 2 / 6.0
G = (pi26 + x) * np.exp(-x)
tmp = 1 + b * x ** beta
g = 1.0 / (a * x ** alpha / tmp + 1.0)
return G * g | 6b84d5f5978a9faf8c9d77a2b9351f73f5717f48 | 3,654,286 |
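# Illustrative usage of G12 above (not part of the original snippet).
# The parameter tuple (alpha, a, beta, b) below holds placeholder values only,
# not the fitted constants from Khangulyan et al. (2014).
_x = np.logspace(-3, 1, 5)
_vals = G12(_x, (0.6, 0.4, 1.5, 0.5))
assert _vals.shape == _x.shape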
def binomial(n, k):
""" binomial coefficient """
if k < 0 or k > n:
return 0
if k == 0 or k == n:
return 1
num = 1
den = 1
for i in range(1, min(k, n - k) + 1): # take advantage of symmetry
num *= (n + 1 - i)
den *= i
c = num // den
return c | 78910202202f749f8e154b074a55f6a5ddf91f64 | 3,654,287 |
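# Illustrative usage of binomial above (not part of the original snippet).
assert binomial(5, 2) == 10
assert binomial(10, 0) == 1
assert binomial(4, 7) == 0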
DOT = '.'  # ellipsis marker used in the page range (as in Django admin)
def pagination(page):
"""
Generates the series of links to the pages in a paginated list.
"""
paginator = page.paginator
page_num = page.number
#pagination_required = (not cl.show_all or not cl.can_show_all) and cl.multi_page
if False: #not pagination_required:
page_range = []
else:
ON_EACH_SIDE = 3
ON_ENDS = 2
# If there are 10 or fewer pages, display links to every page.
# Otherwise, do some fancy
if paginator.num_pages <= 10:
page_range = range(1, paginator.num_pages + 1)
else:
# Insert "smart" pagination links, so that there are always ON_ENDS
# links at either end of the list of pages, and there are always
# ON_EACH_SIDE links at either end of the "current page" link.
page_range = []
if page_num > (ON_EACH_SIDE + ON_ENDS):
page_range.extend(range(1, ON_ENDS))
page_range.append(DOT)
page_range.extend(range(page_num - ON_EACH_SIDE, page_num + 1))
else:
page_range.extend(range(1, page_num + 1))
if page_num < (paginator.num_pages - ON_EACH_SIDE - ON_ENDS):
page_range.extend(range(page_num + 1, page_num + ON_EACH_SIDE + 1))
page_range.append(DOT)
page_range.extend(range(paginator.num_pages - ON_ENDS, paginator.num_pages + 1))
else:
page_range.extend(range(page_num + 1, paginator.num_pages + 1))
#need_show_all_link = cl.can_show_all and not cl.show_all and cl.multi_page
return {
'paginator': paginator,
'page_obj': page,
'page': page.number,
#'pagination_required': pagination_required,
#'show_all_url': need_show_all_link and cl.get_query_string({ALL_VAR: ''}),
'page_range': page_range,
#'ALL_VAR': ALL_VAR,
'1': 1,
'is_paginated': True,
} | 60d90adfbeceab9d159652b641e60da8fa995954 | 3,654,288 |
def bubbleSort(arr):
"""
    >>> bubbleSort([54, 23, 90, 11, 34, 12, 25])
[11, 12, 23, 25, 34, 54, 90]
"""
n = len(arr)
for i in range(n-1):
for j in range(0, n-i-1):
if arr[j] > arr[j+1]:
arr[j], arr[j+1] = arr[j+1], arr[j]
return arr | 28bc9d505ef44a4b403c0f91a971cccf74644c5a | 3,654,289 |
def generate_kronik_feats(fn):
"""Generates features from a Kronik output file"""
header = get_tsv_header(fn)
return generate_split_tsv_lines(fn, header) | 8b98f346ef5d833e0bfb876a7985c8bb3ced905c | 3,654,290 |
import sys
from time import time

import numpy as np
import pandas as pd
from sklearn.base import RegressorMixin
from sklearn.metrics import (explained_variance_score, mean_absolute_error,
                             mean_squared_error, r2_score)
def regressor_contrast(model1:RegressorMixin,
model2:RegressorMixin,
test_data:pd.DataFrame,
label_data:pd.Series,
threshold:int=10)->pd.DataFrame:
"""Compute 11 metrics to compare a Sckit-learn regression models
and make statistical test for residual normality"""
np.random.seed(33)
models_time = []
models_memory = []
models_predictions = []
models_acc = []
models_dagostino = []
models_dagostino_p = []
models_explained_variance = []
models_r2 = []
models_rmse = []
models_mae = []
models_shapiro = []
models_shapiro_p = []
for m in [model1, model2]:
t1 = time()
predictions = m.predict(test_data)
t2 = time()
models_time.append(t2 -t1)
models_predictions.append(predictions)
models_explained_variance.append(round(explained_variance_score(label_data,predictions),5))
models_r2.append(round(r2_score(label_data,predictions),5))
models_rmse.append(round(mean_squared_error(label_data,predictions, squared = False ),5))
models_mae.append(round(mean_absolute_error(label_data,predictions),5))
models_acc.append(round(percentaje_acc(label_data,predictions, threshold=threshold),5))
models_memory.append(sys.getsizeof(m)/1024)
shap_sta, shap_p, dagostino_sta, dagostino_p = _multiples_normality_test(predictions, label_data)
models_shapiro.append(round(shap_sta,5))
models_dagostino.append(round(dagostino_sta,5))
models_shapiro_p.append(shap_p)
models_dagostino_p.append(dagostino_p)
table = pd.DataFrame({
"Model": ["Model1", "Model2"],
"Exec time(seg)": models_time,
"Memory (Kb)": models_memory,
"R2":models_r2,
"MAE": models_mae,
"RMSE": models_rmse,
"Explained Variance": models_explained_variance,
"Residual Shapiro Test Stat": models_shapiro ,
"Residual Shapiro Test p-value": models_shapiro_p,
"Residual D’Agostino’s Statical": models_dagostino ,
"Residual D’Agostino’s p-value": models_dagostino_p,
"Ratio errors in Threshold": models_acc
})
return table | 18c55de497009555a30ffd9a3a2b5c5a0f1b53ee | 3,654,291 |
from fastapi import Depends, HTTPException, status
from sqlalchemy.orm import Session
def delete_product(uuid: str, db: Session = Depends(auth)):
"""Delete a registered product."""
if product := repo.get_product_by_uuid(db=db, uuid=uuid):
if product.taken:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Cannot delete products already taken.",
)
repo.delete_product(db=db, product=product)
return {
"deleted": True,
"product": product,
}
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="No product found for the code specified.",
) | 97aa45eec0ae98a58984f8ca97d584b5a715cba6 | 3,654,292 |
import functools
def CreateMnemonicsC(mnemonicsIds):
""" Create the opcodes arrays for C header files. """
opsEnum = "typedef enum {\n\tI_UNDEFINED = 0, "
pos = 0
l2 = sorted(mnemonicsIds.keys())
for i in l2:
s = "I_%s = %d" % (i.replace(" ", "_").replace(",", ""), mnemonicsIds[i])
if i != l2[-1]:
s += ","
pos += len(s)
if pos >= 70:
s += "\n\t"
pos = 0
elif i != l2[-1]:
s += " "
opsEnum += s
opsEnum += "\n} _InstructionType;"
    # Mnemonics are sorted by insertion order. (Pseudo mnemonics depend on this!)
    # NOTE: EXTRA BACKSLASHES FOR RE.SUB !!!
s = "const unsigned char _MNEMONICS[] =\n\"\\\\x09\" \"UNDEFINED\\\\0\" "
l = list(zip(mnemonicsIds.keys(), mnemonicsIds.values()))
l = sorted(l, key=functools.cmp_to_key(lambda x, y: x[1] - y[1]))
for i in l:
s += "\"\\\\x%02x\" \"%s\\\\0\" " % (len(i[0]), i[0])
if len(s) - s.rfind("\n") >= 76:
s += "\\\\\n"
s = s[:-1] + ";" # Ignore last space.
# Return enum & mnemonics.
return (opsEnum, s) | a20a01fbefc1175c24144753264edc938258cdca | 3,654,293 |
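# Illustrative usage of CreateMnemonicsC above (not part of the original snippet);
# the mnemonic-to-id mapping is a made-up two-entry example.
_ops_enum, _mnemonics = CreateMnemonicsC({"MOV": 1, "ADD": 2})
assert _ops_enum.startswith("typedef enum {") and "I_MOV = 1" in _ops_enum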
import math
import numpy as np
from scipy.ndimage import binary_fill_holes, distance_transform_edt
from skimage.measure import find_contours
def create_windows(c_main, origin, J=None, I=None, depth=None, width=None):
"""
Create windows based on contour and windowing parameters. The first
window (at arc length = 0) is placed at the spline origin.
Note: to define the windows, this function uses pseudo-radial and
pseudo-angular coordinates. The pseudo-radial coordinate is based
on the distance transform of the rasterized version of the continuous
spline that defines the contour of the cell. The pseudo-angular coordinate
for layer j is based on the distance transform of the discrete contour of
layer j. So there is a bit of an inconsistency between continuous and
discrete contours.
Parameters
----------
c_main: 2d array
A rasterized version of the contour, as obtained
by spline_to_param_image.
origin: tuple
(y, x) coordinates of the origin of the curve.
J: int
Number of window layers.
I: list of int
Vector of dimension J specifying the number of windows per layer.
depth: int
Desired depth of the windows.
width: int
Desired width of the windows.
Returns
-------
w: 3d list
w[i][j][0] and w[i][j][1] are 1d arrays representing
lists of x,y indices of pixels belonging to window in i'th layer
in j'th window
J: int
number of layers (calculated if not provided as input)
I: list of int
number of windows per layer (calculated if not provided as input)
"""
origin = [origin[1], origin[0]]
# Compute the distance transform of the main contour
D_main = distance_transform_edt(-1 == c_main)
# Compute the mask corresponding to the main contour
mask_main = binary_fill_holes(
-1 < c_main
) # Maybe not necessary? Can't we just use the segmented mask here?
# Divide the radial coordinate into J layers with specified depth
Dmax = np.amax(D_main * mask_main)
if J is None:
J = int(math.ceil(Dmax / depth))
b = np.linspace(
0, Dmax, J + 1
) # Boundaries of the layers in terms of distances to the main contour
if I is None:
compute_num_win = True
I = []
else:
compute_num_win = False
w = []
for j in range(J):
w.append([])
# The mask containing the interior of the cell starting from
# the j-th layer
mask = (b[j] <= D_main) * mask_main
# Extract the contour of the mask
# We must fix certain frames where multiple contours are returned.
# So we choose the longest contour. Some pixels may be lost in the process,
# i.e., the windows may not cover the entire cell.
clist = find_contours(mask, 0, fully_connected="high")
cvec = np.asarray(
            clist[np.argmax([cel.shape[0] for cel in clist])], dtype=int
)
# An alternative fix using OpenCV's findContours routine---doesn't solve the problem
# contours, hierarchy = cv.findContours(np.asarray(mask, dtype=np.uint8), cv.RETR_LIST, cv.CHAIN_APPROX_NONE)
# cvec = np.asarray(contours[np.argmax([cel.shape[0] for cel in contours])], dtype=np.int)
# cvec = cvec.reshape((cvec.shape[0], cvec.shape[2]))
# cvec = cvec[::-1, [1,0]] # Sort boundary pixels in clockwise direction and switch (x, y) coordinates
# Lvec = compute_discrete_arc_length(cvec)
# c = create_arc_length_image(mask.shape, cvec, Lvec)
# plt.figure()
# plt.imshow(c, 'gray', vmin=-Lvec[-1], vmax=Lvec[-1])
# plt.plot(origin[1], origin[0], 'or')
# # plt.show()
# Adjust the origin of the contour:
# on the discrete contour cvec, find the closest point to the origin,
# then apply a circular shift to cvec to make this point the first one.
n0 = np.argmin(np.linalg.norm(cvec - origin, axis=1))
cvec = np.roll(cvec, -n0, axis=0)
# Compute the discrete arc length along the contour
Lvec = compute_discrete_arc_length(cvec)
# Create an image of the contour where the intensity is the arc length
arc = create_arc_length_image(mask.shape, cvec, Lvec)
# Compute the feature transform of this image:
# for each pixel position, we get the coordinates of the closest pixel on the contour
F = distance_transform_edt(
-1 == arc, return_distances=False, return_indices=True
)
# Fill array with arc lengths of closest points on the contour
# L = np.zeros(c.shape)
# for u in range(c.shape[0]):
# for v in range(c.shape[1]):
# L[u, v] = c[F[0, u, v], F[1, u, v]]
# gridx, gridy = np.meshgrid(range(c.shape[1]), range(c.shape[0]))
# L = c[F[0,:,:][gridy, gridx], F[1,:,:][gridy, gridx]]
L = arc[F[0, :, :], F[1, :, :]]
# Create sampling windows for the j-th layer
if compute_num_win:
I.append(int(math.ceil(Lvec[-1] / width)))
w_borders = np.linspace(0, Lvec[-1], I[j] + 1)
for i in range(I[j]):
# w[-1].append(np.where(mask & (s1[i] <= L) & (L < s1[i+1]) & (b[0] <= D) & (D < b[1])))
w[-1].append(
np.where(
mask
& (w_borders[i] <= L)
& (L < w_borders[i + 1])
& (b[j] <= D_main)
& (D_main < b[j + 1])
)
)
# plt.figure()
# plt.imshow(w[j][i])
# plt.show()
# # Compute positions on the contour that will be used for the displacement estimation
# if j == 0:
# t = define_contour_positions(Lvec, I[0], cvec, c_main)
return w, J, I | c5e3989b8f8f0f558cdc057b6f3bb9901c4363cf | 3,654,294 |
from bs4 import BeautifulSoup
def extractsms(htmlsms) :
"""
extractsms -- extract SMS messages from BeautifulSoup tree of Google Voice SMS HTML.
Output is a list of dictionaries, one per message.
"""
msgitems = [] # accum message items here
# Extract all conversations by searching for a DIV with an ID at top level.
    tree = BeautifulSoup(htmlsms, "html.parser")  # parse HTML into tree (bs4 API)
conversations = tree.findAll("div",attrs={"id" : True},recursive=False)
for conversation in conversations :
# For each conversation, extract each row, which is one SMS message.
rows = conversation.findAll(attrs={"class" : "gc-message-sms-row"})
for row in rows : # for all rows
# For each row, which is one message, extract all the fields.
msgitem = {"id" : conversation["id"]} # tag this message with conversation ID
spans = row.findAll("span",attrs={"class" : True}, recursive=False)
for span in spans : # for all spans in row
                cl = " ".join(span["class"]).replace('gc-message-sms-', '')  # bs4 returns the class attribute as a list
msgitem[cl] = (" ".join(span.findAll(text=True))).strip() # put text in dict
msgitems.append(msgitem) # add msg dictionary to list
return msgitems | e31a66ae5ee56faf4eab131044c395fcd8de3a2a | 3,654,295 |
def load_ch_wubi_dict(dict_path=e2p.E2P_CH_WUBI_PATH):
"""Load Chinese to Wubi Dictionary.
Parameters
---------
dict_path : str
the absolute path to chinese2wubi dictionary.
In default, it's E2P_CH_WUBI_PATH.
Returns
-------
dict : Dictionary
a mapping between Chinese to Wubi Code
"""
return load_dict(dict_path) | e9297968b5dc4d1811659084e03ef0b2156c8a00 | 3,654,296 |
from tensorflow import Tensor
from tensorflow.keras.layers import Add
def middle_flow(middle_inputs: Tensor) -> Tensor:
"""
Middle flow
Implements the second of the three broad parts of the model
:param middle_inputs: middle_inputs: Tensor output generate by the Entry Flow,
having shape [*, new_rows, new_cols, 728]
:return: Output tensor of shape [*, new_rows, new_cols, 728]
"""
# Block 4 - Conv B (Green)
middle_outputs = middle_inputs
for _ in range(8):
res = middle_outputs
for _ in range(3):
middle_outputs = separable_convolutional_unit(middle_outputs, 728)
middle_outputs = Add()([res, middle_outputs])
return middle_outputs | 80fedffbb6da2f3e0b99a931d66d593bf627bdbe | 3,654,297 |
import numpy as np
def feature_extraction(sample_index, labels, baf, lrr, rawcopy_pred, data_shape, margin=10000, pad_val=-2):
"""
Extract features at sample index
:param sample_index: sample index
:param labels: break point labels
:param baf: b-allele frequency values
:param lrr: log r ratio values
:param rawcopy_pred: rawcop predictions
:param data_shape: shape of the data
:param margin: margin to use
:param pad_val: padding value for windows appearing on start or end of data sequence
:return:
"""
window_size = margin * 4
if sample_index < margin * 2:
running_idx = margin * 2 - sample_index
running_idx2 = margin * 2 + sample_index
if running_idx2 >= len(baf):
running_idx2 = len(baf) - 1
ix = range(sample_index, sample_index + margin)
baf_ix = range(0, running_idx2)
baf_ = baf[baf_ix]
baf = np.pad(baf_, (running_idx, 0), 'constant', constant_values=pad_val)
lrr_ = lrr[baf_ix]
lrr = np.pad(lrr_, (running_idx, 0), 'constant', constant_values=pad_val)
elif sample_index + margin * 2 > data_shape[0]:
running_idx = sample_index - margin * 2
ix = range(sample_index - margin, data_shape[0])
baf_ix = range(running_idx, data_shape[0])
baf_ = baf[baf_ix]
baf = np.pad(baf_, (0, running_idx), 'constant', constant_values=pad_val)
lrr_ = lrr[baf_ix]
lrr = np.pad(lrr_, (0, running_idx), 'constant', constant_values=pad_val)
else:
ix = range(sample_index - margin, sample_index + margin)
baf_ix = range(sample_index - margin * 2, sample_index + margin * 2)
baf = baf[baf_ix]
lrr = lrr[baf_ix]
label = []
for l in labels[baf_ix]:
if label == []:
label.append(l)
elif l != label[-1]:
label.append(l)
rc_pred = []
for l in rawcopy_pred[baf_ix]:
if rc_pred == []:
rc_pred.append(l)
elif l != label[-1]:
rc_pred.append(l)
assert baf.shape[0] == window_size
assert lrr.shape[0] == window_size
feat = np.vstack((baf, lrr))
return feat, rc_pred, label, ix | 2b70229d3e4021d4a0cce9bf7dce2222956e299d | 3,654,298 |
def get_filename(file_fullpath):
"""
    Returns the base filename, without the directory path or the file extension.
    :param file_fullpath: full path to the file
    :return: the filename without path or extension
"""
filename = file_fullpath.split("/")[-1].split(".")[0]
return filename | 903cb26c89d1d18c9ebafe1a468c7fa66c51f119 | 3,654,299 |
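# Illustrative usage of get_filename above (not part of the original snippet);
# note that everything after the first dot is dropped.
assert get_filename("/data/images/photo_001.png") == "photo_001"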