content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M)
---|---|---|
def connect_registry_client():
"""
    Connect the module client for the Registry implementation we're using and return the client object.
"""
client = adapters.RegistryClient()
client.connect(environment.service_connection_string)
return client | f2e2bccb4cfacd86af36e3924463541d9e3dcdcd | 3,654,159 |
def get_group_average_score(gid=None, name=None):
"""
Get the average score of teams in a group.
Args:
gid: The group id
name: The group name
Returns:
        The average score of the group's teams (0 if the group has no scores)
"""
group_scores = get_group_scores(gid=gid, name=name)
total_score = sum([entry['score'] for entry in group_scores])
return int(total_score / len(group_scores)) if len(group_scores) > 0 else 0 | cdea61e388b47f399fbc8e228e313d6199164b2f | 3,654,160 |
import numpy as np
import cdd
def solve_with_cdd_for_II(A, verbose=False):
"""This method finds II's minmax strategy for zero-sum game A"""
m = A.shape[0] # number of rows
n = A.shape[1] # number of columns
A = np.column_stack([[0]*m,-A,[1]*m])
I = np.eye(n)
nn = np.column_stack([[0]*n,I,[0]*n])
# non-negativity constraints
n1 = [-1] * n
n1.insert(0,1)
    n1.append(0) # n1 = [1,-1,-1,...,-1,0]
    n2 = [1] * n
    n2.insert(0,-1)
    n2.append(0) # n2 = [-1,1,1,...,1,0]
d = np.vstack([A,nn,n1,n2])
mat = cdd.Matrix(d.tolist(), number_type='fraction')
mat.obj_type = cdd.LPObjType.MIN
d = [0] * (n+1)
d.append(1) # [0,0,...0,1]
mat.obj_func = d
lp = cdd.LinProg(mat)
lp.solve()
    assert lp.status == cdd.LPStatusType.OPTIMAL
# lp.primal_solution uses fractions, and has value as last entry, so that
# is dropped
p = [float(val) for val in lp.primal_solution[:-1]]
u = float(lp.obj_value)
if verbose:
print("------ Solved with cdd -------------")
print("Optimal strategy:", p)
print("Optimal payoff:", -u)
print("------------------------------------")
return p, -u | 87ac90691fcbbe2f89bf9090c31f86c165c007ed | 3,654,161 |
def build_none() -> KeySetNone:
"""Returns NONE."""
return KeySetNone() | 8ba38204cd763597c66d51466f5d2ffa5c9a19bf | 3,654,162 |
import csv
import numpy
def load_csv(file, shape=None, normalize=False):
"""
Load CSV file.
:param file: CSV file.
:type file: file like object
    :param shape: data array is reshaped to this shape.
:type shape: tuple of int
:return: numpy array
"""
value_list = []
for row in csv.reader(file):
        value_list.append(list(map(float, row)))
if shape is None:
return numpy.array(value_list)
else:
return numpy.array(value_list).reshape(shape) | 07f3b61bbdb6c9937f3cc4b0ae98fdfb7d8de48a | 3,654,163 |
from typing import Tuple
import numpy as np
def flip_around_axis(
coords: np.ndarray,
axis: Tuple[float, float, float] = (0.2, 0.2, 0.2)
) -> np.ndarray:
"""Flips coordinates randomly w.r.t. each axis with its associated probability."""
for col in range(3):
if np.random.binomial(1, axis[col]):
coords[:, col] = np.negative(coords[:, col])
return coords | 914834a8492998b4e1e0b93e5e9677ec9af2d736 | 3,654,164 |
import math
import subprocess as sp
def get_tc(name):
"""Determine the amount of tile columns to use."""
args = ["ffprobe", "-hide_banner", "-select_streams", "v", "-show_streams", name]
proc = sp.run(args, text=True, stdout=sp.PIPE, stderr=sp.DEVNULL)
lines = proc.stdout.splitlines()
d = {}
for ln in lines[1:-1]:
key, value = ln.strip().split("=")
d[key] = value
width = d["width"]
return math.floor(math.log2(math.ceil(float(width) / 64.0))) | ee917cd8cebfe7dc4ae718d883c657cf23bff1cf | 3,654,165 |
import matplotlib.colors
def cmap_hex_color(cmap, i):
"""
Convert a Colormap to hex color.
Parameters
----------
cmap : matplotlib.colors.ListedColormap
Represents the Colormap.
i : int
List color index.
Returns
-------
String
Represents corresponding hex string.
"""
return matplotlib.colors.rgb2hex(cmap(i)) | 9ac7753cde9470e3dd9fbd4a66373b25126635ca | 3,654,167 |
def train_folds(X, y, fold_count, batch_size, get_model_func):
""" K-Fold Cross-Validation for Keras Models
Inspired by PavelOstyakov
https://github.com/PavelOstyakov/toxic/blob/master/toxic/train_utils.py
"""
fold_size = len(X[0]) // fold_count
models = []
for fold_id in range(0, fold_count):
print('===== FOLD {} ====='.format(fold_id+1))
model = get_model_func()
model.compile()
RocAuc = RocAucEvaluation()
RocAuc.set_model(model)
model.fit(
X, y, validation_split=max(1/fold_count, 0.15),
batch_size=batch_size, epochs=20, shuffle=True,
add_callbacks=[RocAuc], verbose=1
)
models.append(model)
return models | 51a38243925c76ac6179a90be46be31fcb685054 | 3,654,168 |
async def cancel(command: HALCommandType, script: str):
"""Cancels the execution of a script."""
try:
await command.actor.helpers.scripts.cancel(script)
except Exception as err:
command.warning(text=f"Error found while trying to cancel {script}.")
return command.fail(error=err)
return command.finish(f"Script {script} has been scheduled for cancellation.") | 438297845f5ba4ffc49b95a798adf61294371694 | 3,654,170 |
def get_nodes_by_betweenness_centrality(query_id, node_number):
"""Get a list of nodes with the top betweenness-centrality.
---
tags:
- query
parameters:
- name: query_id
in: path
description: The database query identifier
required: true
type: integer
- name: node_number
in: path
description: The number of top between-nodes to return
required: true
type: integer
"""
graph = manager.cu_get_graph_from_query_id_or_404(query_id)
if node_number > graph.number_of_nodes():
node_number = graph.number_of_nodes()
bw_dict = nx.betweenness_centrality(graph)
return jsonify([
node.md5
for node, score in sorted(bw_dict.items(), key=itemgetter(1), reverse=True)[:node_number]
]) | 44d97e443c6bef4d7048496674a2382a6c4f2ade | 3,654,171 |
import sympy
def preprocess(function):
"""
Converts a given function from type str to a Sympy object.
Keyword arguments:
function -- a string type representation of the user's math function
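    Example (illustrative):
        >>> preprocess('x^2 + 1')
        x**2 + 1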
"""
expr = function
while True:
if '^' in expr:
expr = expr[:expr.index('^')] + '**' + expr[expr.index('^')+1:]
else:
break
expr = sympy.sympify(expr)
return expr | 001bd04d27db2afa4debbe776e5fe3cf1af1476d | 3,654,172 |
import re
def tot_changes(changes: str) -> int:
"""Add deletions and insertions."""
insertions_pat = re.compile(r"(\d+) insertion")
deletions_pat = re.compile(r"(\d+) deletion")
insertions = insertions_pat.search(changes)
insertions = int(insertions.group(1)) if insertions else 0
deletions = deletions_pat.search(changes)
deletions = int(deletions.group(1)) if deletions else 0
return insertions + deletions | 74742baf63db51b5c59b332f0104008500f330b9 | 3,654,173 |
def update_from_mcd(full_table, update_table):
# type: (pd.DataFrame, pd.DataFrame) -> pd.DataFrame
"""
Update the full table (aka the PDG extended-style table) with the
up-to-date information from the PDG .mcd file.
Example
-------
>>> new_table = update_from_mcd('mass_width_2008.fwf', 'mass_width_2021.mcd') # doctest: +SKIP
"""
full_table = full_table.copy()
full_table.update(update_table)
update_table_neg = update_table.copy()
update_table_neg.index = -update_table_neg.index
full_table.update(update_table_neg)
return full_table | c60daea5719445fb696ef21bbc0f233fea4e48cd | 3,654,174 |
import socket
from urllib import parse as urlparse
def resolve_hostname(host):
"""Get IP address of hostname or URL."""
try:
parsed = urlparse.urlparse(host)
except AttributeError as err:
error = "Hostname `%s`is unparseable. Error: %s" % (host, err)
LOG.exception(error)
raise errors.SatoriInvalidNetloc(error)
# Domain names are in netloc, IP addresses fall into path
hostname = parsed.netloc or parsed.path
# socket.gaierror is not trapped here
address = socket.gethostbyname(hostname)
return address | 1792d943e490661b4dd42608f2b025096810688f | 3,654,175 |
import pandas as pd
import numpy as np
from .data_utils import keep_common_genes
from .data_utils import df_normalization
def DeconRNASeq_main(rna_df, sig_df, patient_IDs='ALL', args={}):
"""
This function does the following:
- parses the dictionary 'args' for the arguments to pass on to the DeconRNASeq method.
- eliminates genes from rna_df and sig_df that are not present in both data sets
- Runs DeconRNASeq() for each patient specified in patient_IDs' argument
- Combines the resulting frequencies into a pandas dataframe (num_celltypes x num_patients)
Inputs:
- rna_df: pandas df of rna gene expression data.
Rows are genes (indexed by 'Hugo_Symbol') and columns are patients
- sig_df: pandas df of Signature gene expression values for given cell types.
Rows are genes (indexed by 'Hugo_Symbol') and columns are cell types
- patient_IDs: list of patient IDs to run DeconRNASeq for.
Alternatively, can use the string 'ALL' to run for all patients
- args: dictionary containing any of the following:
- check_sig: boolean, whether or not to check the condition number of the signature matrix
- scaling: string, must be either 'None', 'zscore', or 'minmax'. Determines how to scale the signature matrix and mixture data before solving for optimal x
- scaling_axis: 0 or 1. Whether to scale mixture data and signature matrix by normalizing each column (celltype/patient) separately (scaling_axis=0) or each row (gene) separately (scaling_axis=1).
- formulation: see DeconRNASeq()
- reg_constant: see DeconRNASeq()
- print_result: see DeconRNASeq()
Outputs:
- cell_freqs: pandas df. Contains cell type frequencies for each patient in 'patient_IDs' list.
Rows are indexed by cell type, columns are patient IDs
"""
# Read in optional arguments, or set them as default
# Assert values are of the right data type when passed to DeconRNASeq() function.
# formulation must be 'qp', 'ridge', or 'lasso'
if 'formulation' in args.keys():
formulation = args['formulation']
if formulation not in ['qp','ridge','lasso']:
raise ValueError("Formulation ({!r}) must be set to 'qp', 'ridge', or 'lasso'".format(formulation))
else:
formulation = 'qp'
# reg_constant must be a double
if 'reg_constant' in args.keys():
reg_constant = args['reg_constant']
else:
reg_constant = 1.0
if 'check_sig' in args.keys():
check_sig = args['check_sig']
if not isinstance(check_sig, bool):
raise ValueError("check_sig ({!r}) must be a boolean variable".format(check_sig))
else:
check_sig = False
if 'scaling' in args.keys():
scaling = args['scaling']
if scaling not in ['None', 'none', 'zscore', 'minmax', 'r-zscore']:
raise ValueError("scaling ({!r}) must be set to 'none', 'zscore' or 'minmax'".format(scaling))
else:
scaling = 'minmax'
if 'scaling_axis' in args.keys():
scaling_axis = args['scaling_axis']
if scaling_axis not in [0, 1]:
raise ValueError("scaling_axis ({!r}) must be 0 or 1".format(scaling_axis))
else:
scaling_axis = 0
if 'print_results' in args.keys():
print_results = args['print_results']
if not isinstance(print_results, bool):
raise ValueError("print_results ({!r}) must be a boolean variable".format(print_results))
else:
print_results = False
# eliminate genes not present in both rna and sig dfs, and ensure they are in the same order:
rna_df, sig_df = keep_common_genes(rna_df, sig_df)
# Scale Data:
if scaling in ['zscore', 'minmax', 'r-zscore']:
# R implementation uses zscore scaling.
sig_df = df_normalization(sig_df, scaling=scaling, axis=scaling_axis)
rna_df = df_normalization(rna_df, scaling=scaling, axis=scaling_axis)
# Convert signature to numpy array
Sig = np.array(sig_df)
# Check the condition number of the signature matrix:
if check_sig:
print("Condition number of signature matrix =", np.linalg.cond(Sig))
# Select a patient / list of patients to solve for their cell type frequencies:
# Patient_ID must be 'ALL' or an array of specific patient IDs.
if patient_IDs == 'ALL':
patient_list = rna_df.columns
elif not isinstance(patient_IDs, type([])):
raise ValueError("patient_IDs should be either 'ALL', or an array of IDs (not a single ID)")
else:
patient_list = patient_IDs
# For each patient, run DeconRNASeq to get cell type frequencies, and save results to pandas df:
print("Running DeconRNASeq...")
cell_freqs_df = pd.DataFrame()
cell_freqs_df['Patient_ID'] = sig_df.columns
cell_freqs_df = cell_freqs_df.set_index(['Patient_ID'])
for patient in patient_list:
if patient in rna_df.columns:
Mix = np.array(rna_df[patient])
cell_freqs_df[patient] = DeconRNASeq(Sig, Mix, formulation=formulation, reg_constant=reg_constant, print_results=print_results, label=patient)
else:
raise ValueError("patient_ID ({!r}) not present in rna dataframe".format(patient))
cell_freqs_df = cell_freqs_df.transpose()
return cell_freqs_df | 44bf01b0d53110610d3219e3002cca0ab35720b5 | 3,654,176 |
import re
from typing import OrderedDict
def parse_c_interface(c_interface_file):
"""
@brief Parses a c-interface file and generates a dictionary of function names to parameter lists.
Exported functions are expected to be preceded by 'DLL_EXPORT'. Python keywords should not be used as variable
names for the function names in the cpp-interface file. If a Python wrapper function shall return the output buffer,
the corresponding parameter has to be preceded by the _OUT_BUFFER_KEYWORD in the C++ file. In this case, we assume
the parameter is a numpy array. The shape and the dtype will be taken from the first input parameter.
"""
_OUT_BUFFER_KEYWORD = "OUT"
with open(c_interface_file, "r") as f:
# read file and remove comments
content = "\n".join([c.split("//")[0] for c in re.sub("/\*.*?\*/", "", f.read(), flags=re.DOTALL).split("\n")])
function_signatures = [x for x in re.findall("DLL_EXPORT.+?\)", content, flags=re.DOTALL)]
function_dict = OrderedDict()
for sig in function_signatures:
params_regex = re.compile("\(.*?\)", flags=re.DOTALL)
# find function name
wo_params = re.sub(params_regex, "", sig)
tokens = re.split("\s", wo_params)
name = tokens[-1]
function_dict[name] = dict()
# find return type and initialize dict
function_dict[name] = {"restype": " ".join(tokens[1:-1]), "params": [], "out_buffers": []}
# find parameters, remove template specifiers, and split at commas
param_fields = re.sub("<.*?>", "", re.search(params_regex, sig).group(0)[1:-1]).split(",")
out_buffer_indices = [i for i, s in enumerate(param_fields)
if _OUT_BUFFER_KEYWORD in [x.strip() for x in s.split(" ")]]
name_position = -1 # last position in C++ should contain the name of the variable
try:
all_parameters = [re.search("[A-Za-z0-9_]+", x[name_position].strip()).group(0)
for x in (re.split("\s", s) for s in param_fields)]
for i, p in enumerate(all_parameters):
if i in out_buffer_indices:
function_dict[name]["out_buffers"].append(p)
else:
function_dict[name]["params"].append(p)
except AttributeError:
pass
return function_dict | 06a4edb40e12343cda688da82c9042d1342e6429 | 3,654,177 |
import numpy as np
def con_minimize(fun, bounds, constr=(), x0=None, args=(),
callback=None, options={}, workers=None):
"""Constrained minimization of `fun` using Genetic Algorithm.
    This function is a wrapper over modestga.minimize().
The constraints are defined as a tuple of functions
(`fcon1(x, *args)`, `fcon2(x, *args)`, `...`).
The algorithm searches for a solution minimizing
`fun(x, *args)` and satisfying the conditions
(`fcon1(x, *args) >= 0`, `fcon2(x, *args) >= 0`, `...`).
`callback` arguments: `x`, `fx`, `ng`, `*args`.
`fx` is the function value at the generation `ng`.
Returns an optimization result object with the following attributes:
- x - numpy 1D array, optimized parameters,
- message - str, exit message,
- ng - int, number of generations,
- fx - float, final function value.
:param fun: function to be minimized
:param bounds: tuple, parameter bounds
:param constr: tuple, functions defining constraints
:param x0: numpy 1D array, initial parameters
:param args: tuple, positional arguments to be passed to `fun` and to `fcon`
:param callback: function, called after every generation
:param options: dict, GA options
:param workers: int, number of processes to use (will use all CPUs if None)
:return: OptRes, optimization result
"""
# Wrap cost function with constraints
def fun_soft_con(x, *augmented_args):
# Unpack constraints and arguments
fcore = augmented_args[0] # Function to be minimized
fcons = augmented_args[1] # Constraints
user_args = augmented_args[2:] # Arguments
# Evaluate core function
ycore = fcore(x, *user_args)
# Initialize penalty
penalty = 0.
# Update penalty
# (the more negative fcon() is, the higher penalty)
for f in fcons:
ycon = np.max([f(x, *user_args) * -1., 0.])
pscale = ycore / (ycon + 1e-6)
penalty += ycon * pscale
return ycore + penalty
# Run minimization
augmented_args = (fun, constr, *args)
res = minimize(
fun=fun_soft_con,
bounds=bounds,
x0=x0,
args=augmented_args,
callback=callback,
options=options,
workers=workers)
    # Extend result with constraint violation info
res.constr = [fcon(res.x, *args) for fcon in constr]
return res | 46a7400953e54dfb9b2364832e6029a508acc9de | 3,654,178 |
from collections import defaultdict
def unique_v2(lst):
"""
Returns a list of all unique elements in the input list "lst."
    This algorithm runs in O(n), as it only passes through the list "lst" twice
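    Example (illustrative):
        >>> unique_v2([1, 2, 2, 3])
        [1, 3]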
"""
dd = defaultdict(int) # avoids blank dictionary problem (KeyError when accessing nonexistent entries)
unique_list = []
for val in lst:
dd[val] += 1
for val in lst:
if dd[val] == 1:
unique_list.append(val)
return unique_list | d7c5706908d569b3ee93ba1bebbd09bc6f335ad2 | 3,654,179 |
import ipaddress
def is_ip_network(network, strict=False):
"""Returns True/False if a string is a valid network."""
network = str(network)
try:
ipaddress.ip_network(network, strict)
return True
except ValueError:
return False | 84206586412b76816fa845a75fc6c121bfdf0989 | 3,654,180 |
def assign_point_of_contact(point_of_contact):
"""
Assign a user to be the point of contact in emails/letters
:param point_of_contact: A string containing the user_guid if point of contact has been set for a request
:return: A User object to be designated as the point of contact for a request
"""
if point_of_contact:
return Users.query.filter(Users.guid == point_of_contact).one_or_none()
else:
return current_user | 99f2e7d036c4f7cf71be2bd6b82a313f26b3af41 | 3,654,181 |
def response_with_pagination(guests, previous, nex, count):
"""
Make a http response for GuestList get requests.
:param count: Pagination Total
:param nex: Next page Url if it exists
:param previous: Previous page Url if it exists
:param guests: Guest
:return: Http Json response
"""
return make_response(jsonify({
'status': 'success',
'previous': previous,
'next': nex,
'count': count,
'guests': guests
})), 200 | 00373c866b6cc8384a88e62b63fcaa5950ccc1c1 | 3,654,182 |
def put_object(request, old_pid):
"""MNStorage.update(session, pid, object, newPid, sysmeta) โ Identifier."""
if django.conf.settings.REQUIRE_WHITELIST_FOR_UPDATE:
d1_gmn.app.auth.assert_create_update_delete_permission(request)
d1_gmn.app.util.coerce_put_post(request)
d1_gmn.app.views.assert_db.post_has_mime_parts(
request, (("field", "newPid"), ("file", "object"), ("file", "sysmeta"))
)
d1_gmn.app.views.assert_db.is_valid_pid_to_be_updated(old_pid)
sysmeta_pyxb = d1_gmn.app.sysmeta.deserialize(request.FILES["sysmeta"])
new_pid = request.POST["newPid"]
d1_gmn.app.views.assert_sysmeta.matches_url_pid(sysmeta_pyxb, new_pid)
d1_gmn.app.views.assert_sysmeta.obsoletes_matches_pid_if_specified(
sysmeta_pyxb, old_pid
)
sysmeta_pyxb.obsoletes = old_pid
sid = d1_common.xml.get_opt_val(sysmeta_pyxb, "seriesId")
d1_gmn.app.views.assert_sysmeta.is_valid_sid_for_chain(old_pid, sid)
d1_gmn.app.views.create.create_sciobj(request, sysmeta_pyxb)
# The create event for the new object is added in create_sciobj(). The update
# event on the old object is added here.
d1_gmn.app.event_log.log_update_event(
old_pid,
request,
timestamp=d1_common.date_time.normalize_datetime_to_utc(
sysmeta_pyxb.dateUploaded
),
)
d1_gmn.app.sysmeta.update_modified_timestamp(old_pid)
return new_pid | 192ed2a7efc35baf28605de9db594319370f294d | 3,654,183 |
def _match_gelu_pattern(gf, entry_node):
""" Return the nodes that form the subgraph of a GELU layer
"""
try:
if not len(entry_node.outputs) == 3:
return None
pow_1, add_2, mul_3 = [gf[x] for x in entry_node.outputs]
if not (pow_1.op == 'Pow' and add_2.op == 'Add' and mul_3.op == 'Mul'):
return None
const_4 = gf[pow_1.inputs[1]]
if not (const_4.op == 'Const' and int(round(const_4.value.val)) == 3):
return None
mul_5 = gf[pow_1.outputs[0]]
const_6 = gf[mul_5.inputs[0]]
if not (const_6.op == 'Const' and \
abs(const_6.value.val - 0.0447) < 1e-3):
return None
if not (gf[add_2.inputs[0]] == entry_node and \
gf[add_2.inputs[1]] == mul_5):
return None
mul_7 = gf[add_2.outputs[0]]
const_8 = gf[mul_7.inputs[0]]
if not abs(const_8.value.val - np.sqrt(2 / np.pi)) < 1e-3:
return None
tanh_9 = gf[mul_7.outputs[0]]
add_10 = gf[tanh_9.outputs[0]]
const_11 = gf[add_10.inputs[0]]
if not (tanh_9.op == 'Tanh' and add_10.op == 'Add' and \
const_11.op == 'Const' and int(round(const_11.value.val)) == 1):
return None
mul_12 = gf[add_10.outputs[0]]
const_13 = gf[mul_12.inputs[0]]
if not (mul_12.op == 'Mul' and const_13.op == 'Const' and \
abs(const_13.value.val - 0.5) < 1e-3):
return None
if not (gf[mul_3.inputs[0]] == entry_node and \
gf[mul_3.inputs[1]] == mul_12):
return None
gelu_nodes = [pow_1, add_2, mul_3, const_4, mul_5, const_6, mul_7,
const_8, tanh_9, add_10, const_11, mul_12, const_13]
return gelu_nodes
except:
return None | 6e08578a9cb9bea96c939a4fbee31003d6c575d4 | 3,654,184 |
def assign_obs_error(param, truth_mag, band, run):
"""
Assign errors to Object catalog quantities
Returns
-------
obs_err : float or np.array
The error values in units defined in get_astrometric_error(), get_photometric_error
err_type : str
Type of observational error
"""
if param in ['ra_offset', 'dec_offset', 'Ixx_sqrt', 'Iyy_sqrt', 'x', 'y_obs',]:
obs_err = get_astrometric_error(truth_mag, band=band)
err_type = 'astrometric'
elif param in ['Ixy', 'IxxPSF', 'IxyPSF', 'IyyPSF',]:
# \delta(x^2) = \delta(x) \times 2x
obs_err = 2.0*param_val*get_astrometric_error(truth_mag, band=band)
err_type = 'astrometric'
elif 'Flux' in param: # flux columns
obs_err = get_photometric_error(truth_mag, band=band, run=run)
err_type = 'photometric'
elif param == 'extendedness':
obs_err = np.zeros_like(param_val)
err_type = 'N/A'
else:
raise NotImplementedError
return obs_err, err_type | 9a90b80755941ac19cbf023f7ee63f4650518242 | 3,654,185 |
from typing import List
from typing import Tuple
import tokenize
def dir_frequency(dirname: str, amount=50) -> List[Tuple[str, int]]:
"""Pipeline of word_frequency from a directory of raw input file."""
md_list = md.collect_md_text(dirname)
return compute_frequency(tokenize(normalize(" ".join(md_list))), amount) | 3daddb1930e80235887b51ed5918e9d7cb1fff71 | 3,654,186 |
def test_solver1(N, version='scalar'):
"""
Very simple test case.
Store the solution at every N time level.
"""
def I(x): return sin(2*x*pi/L)
def f(x,t): return 0
solutions = []
# Need time_level_counter as global variable since
# it is assigned in the action function (that makes
# a variable local to that block otherwise).
# The manager class below provides a cleaner solution.
global time_level_counter
time_level_counter = 0
def action(u, t, x):
global time_level_counter
if time_level_counter % N == 0:
solutions.append(u.copy())
time_level_counter += 1
n = 100; tstop = 6; L = 10
dt, x, cpu = solver(I, f, 1.0, lambda t: 0, lambda t: 0,
L, n, 0, tstop,
user_action=action, version=version)
    print('CPU time:', cpu)
    print('Max value in final u:', arrmax(solutions[-1]))
import requests
def get_auth_data():
"""
Create auth data.
Returns:
return: access token and token expiring time.
"""
payload = {
'client_id': CLIENT_ID,
'client_secret': CLIENT_SECRET,
'grant_type': 'client_credentials',
}
api_url = '{0}/oauth/access_token'.format(API_BASE_URL)
response = requests.post(url=api_url, data=payload)
response.raise_for_status()
auth_data = response.json()
return auth_data['access_token'], auth_data['expires_in'] | ecd921c1ef3639c388111ec5952c887867076d99 | 3,654,188 |
def datatable(module, tag):
"""Mapping for DataTable."""
if tag == "DataTable":
return module, tag | 1eaa06771ecdd99dfa102ec249b23db3999b6fd7 | 3,654,189 |
def remove_prepending(seq):
"""
Method to remove prepending ASs from AS path.
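    Example (illustrative):
        >>> remove_prepending([1, 1, 2, 3, 3])
        ([1, 2, 3], False)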
"""
last_add = None
new_seq = []
for x in seq:
if last_add != x:
last_add = x
new_seq.append(x)
is_loopy = False
if len(set(seq)) != len(new_seq):
is_loopy = True
# raise Exception('Routing Loop: {}'.format(seq))
return new_seq, is_loopy | 78bb1554678af0998e15ecf9ed8f4e379ac2e2ad | 3,654,190 |
def github_handle_error(e):
"""
Handles an error from the Github API
an error example: Error in API call [401] - Unauthorized
{"message": "Bad credentials", "documentation_url": "https://docs.github.com/rest"}
The error might contain error_code, error_reason and error_message
The error_reason and error_message might be the same but usually, the error_reason adds more information that
the error_message doesn't provide
examples:
error_code = 401
error_message = 'Bad credentials'
error_reason = 'Unauthorized'
    :param e: the exception object
:return: error_code and error_message
"""
try:
error_code = ""
error_message = str(e)
if e.__class__ is DemistoException and e.res is not None:
error_res = e.res
if isinstance(error_res, dict):
error_code = str(error_res.get("status"))
error_message = str(error_res.get("detail"))
else:
error_code = e.res.status_code
if not e.res.ok:
if e.res.json():
error_message = error_res.json().get("message", "")
if not error_message:
error_message = error_res.json().get("detail", "")
error_reason = error_res.reason
if error_reason and error_reason != error_message:
error_message += f' {error_reason}'
return error_code, error_message
except Exception as e:
error_code = ""
error_message = str(e)
return error_code, error_message | 1b3d7ef6756c02d7bf1b8db506dbf926dd3e6abd | 3,654,191 |
from netaddr import IPAddress
def netmask_to_bits(net_mask):
""" Convert netmask to bits
Args:
net_mask ('str'): Net mask IP address
ex.) net_mask = '255.255.255.255'
Raise:
None
Returns:
Net mask bits
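        Example (illustrative, assuming the ``netaddr`` package):
            netmask_to_bits('255.255.255.0')  ->  24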
"""
return IPAddress(net_mask).netmask_bits() | 7ecc069e14242ebffd840b989331a431f6c2ecbc | 3,654,192 |
def register_corrector(cls=None, *, name=None):
"""A decorator for registering corrector classes."""
def _register(cls):
if name is None:
local_name = cls.__name__
else:
local_name = name
if local_name in _CORRECTORS:
raise ValueError(f'Already registered model with name: {local_name}')
_CORRECTORS[local_name] = cls
return cls
if cls is None:
return _register
else:
return _register(cls) | 90795496caff7958af52bbe1518582a2a2ceea73 | 3,654,193 |
import numpy
def _sample_perc_from_list(lst, perc=100, algorithm="cum_rand", random_state=None):
"""
Sample randomly a certain percentage of items from the given
list. The original order of the items is kept.
:param lst: list, shape = (n,), input items
:param perc: scalar, percentage to sample
:param algorithm: string, which algorithm should be used
"random": Decide for each item to be chosen or not. This
algorithm runs in linear time O(n), but
the percentages might not match exactly.
"cum_rand": O(n log(n) + perc)
:return: list
"""
if perc >= 100:
return lst
if perc <= 0:
return []
# Store old random state and set random state
rs_old = numpy.random.get_state()
numpy.random.seed(random_state)
if algorithm == "random":
lst_sub = [it for it in lst if numpy.random.uniform(high=100) <= perc]
elif algorithm == "cum_rand":
n = len(lst)
n_perc = numpy.round(n * perc / 100.0)
rank_its = numpy.argsort(numpy.random.uniform(size=n))
lst_sub = []
for idx, it in enumerate(lst):
if rank_its[idx] < n_perc:
lst_sub.append(it)
if len(lst_sub) > n_perc:
break
else:
raise ValueError("Invalid sampling algorithm: %s." % algorithm)
    # Restore old random state
numpy.random.set_state(rs_old)
return lst_sub | 4ec000e9bd8f5e10550040e49018e2a045659397 | 3,654,194 |
def irods_setacls(path, acl_list, verbose=False):
"""
This function will add the ACLs listed in 'acl_list'
to the collection or data object at 'path'.
'acl_list' is a list where each element itself is
a list consisting of the username in name#zone format,
and the access level ('read', 'write', 'own', or 'null').
Access type 'null' removes all ACLs for that user/group.
Note. On an error return, some of the ACLs might have
been applied. The function does not "roll back" on error.
Returns 0 on success, non-zero on error.
"""
if not path or not acl_list:
return 1
for acl in acl_list:
(rc, output) = shell_command(['ichmod', acl[1], acl[0], path])
if rc:
if verbose:
print("Error running 'ichmod %s %s %s': rc = %d:"
% (acl[1], acl[0], path, rc))
                print(output[1])
return rc
return 0 | 5727d6ff96e2d693323d5d88ed81eafbd4de0435 | 3,654,195 |
import datetime
def add_years(date_to_change, years):
"""
Return a date that's `years` years after the date (or datetime)
object `date_to_change`. Return the same calendar date (month and day) in the
destination year, if it exists, otherwise use the following day
(thus changing February 29 to March 1).
Args:
date_to_change (date): The date that we're adding years to.
        years (int): The number of years to add.
    Returns:
        [date]: The provided date plus `years` years.
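    Example (illustrative):
        >>> add_years(datetime.date(2020, 2, 29), 1)
        datetime.date(2021, 3, 1)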
"""
try:
return date_to_change.replace(year=date_to_change.year + years)
except ValueError:
return date_to_change + (
datetime.date(date_to_change.year + years, 1, 1)
- datetime.date(date_to_change.year, 1, 1)
) | e9b71d190f7629a3edc0902d0582005a26a33956 | 3,654,196 |
def concurrency_update_done(client, function_name, qualifier):
"""wait fn for ProvisionedConcurrencyConfig 'Status'"""
def _concurrency_update_done():
status = client.get_provisioned_concurrency_config(
FunctionName=function_name, Qualifier=qualifier
)["Status"]
if status == "FAILED":
raise ShortCircuitWaitException(f"Concurrency update failed: {status=}")
else:
return status == "READY"
return _concurrency_update_done | 4d168e4e9648c3a3d8cb149aad1e835362bd271a | 3,654,197 |
def googleapis_email(url, params):
"""Loads user data from googleapis service, only email so far as it's
described in http://sites.google.com/site/oauthgoog/Home/emaildisplayscope
Parameters must be passed in queryset and Authorization header as described
on Google OAuth documentation at:
http://groups.google.com/group/oauth/browse_thread/thread/d15add9beb418ebc
and: http://code.google.com/apis/accounts/docs/OAuth2.html#CallingAnAPI
"""
request = Request(url + '?' + params, headers={'Authorization': params})
try:
return simplejson.loads(dsa_urlopen(request).read())['data']
except (ValueError, KeyError, IOError):
return None | c6123e367f093a512ac17797da487e733503dc11 | 3,654,198 |
import tensorflow as tf
def max_pool_2x2(input_):
""" Perform max pool with 2x2 kelner"""
return tf.nn.max_pool(input_, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') | b85ccfaafbdcf5d703dffab65e188ffab52ebce9 | 3,654,201 |
from tqdm import tqdm
def remove_numerals(df, remove_mixed_strings=True):
"""Removes rows from an ngram table with words that are numerals. This
does not include 4-digit numbers which are interpreted as years.
Arguments:
df {Pandas dataframe} -- A dataframe of with columns 'word', 'count'.
Keyword Arguments:
remove_mixed_strings {bool} -- Whether to remove rows with words that
are mixtures of numerals and letters. (default: {True})
"""
no_numerals_df = df.copy().reset_index()
for i, row in tqdm(no_numerals_df.iterrows(), desc="Removing numerals\n"):
word = row['word']
if remove_mixed_strings:
if any([c.isnumeric() for c in word]) and \
not is_year(word):
no_numerals_df.drop(i, axis=0, inplace=True)
else:
if word.isnumeric() and len(word) != 4:
no_numerals_df.drop(i, axis=0, inplace=True)
return no_numerals_df | 4c3d0468456e08b1a0579a8b73a21221cae17676 | 3,654,202 |
def b32_ntop(*args):
"""LDNS buffer."""
return _ldns.b32_ntop(*args) | b43bc9b1b112f7815ec3c280d055676e7255adcc | 3,654,204 |
from typing import Callable
from typing import Dict
import torch
def infer_feature_extraction_pytorch(
model: PreTrainedModel, run_on_cuda: bool
) -> Callable[[Dict[str, torch.Tensor]], torch.Tensor]:
"""
Perform Pytorch inference for feature extraction task
:param model: Pytorch model (sentence-transformers)
:param run_on_cuda: True if should be ran on GPU
:return: a function to perform inference
"""
def infer(inputs: Dict[str, torch.Tensor]) -> torch.Tensor:
model_output = model(**inputs).detach() # noqa: F821
if run_on_cuda:
torch.cuda.synchronize()
return model_output
return infer | 18f935607c824c2122f68ea1ec9a7bcb50604ac8 | 3,654,206 |
def editRole(userSource, oldName, newName):
"""Renames a role in the specified user source.
When altering the Gateway System User Source, the Allow User Admin
setting must be enabled.
Args:
userSource (str): The user source in which the role is found.
Blank will use the default user source.
oldName (str): The role to edit. Role must not be blank and must
exist.
newName (str): The new name for the role. Must not be blank.
Returns:
UIResponse: An object with lists of warnings, errors, and info
about the success or failure of the edit.
"""
print(userSource, oldName, newName)
return UIResponse(Locale.ENGLISH) | f72796de14ff5f8c4314a5d31fe4fcf42cb883ec | 3,654,207 |
import numpy as np
def beam_hardening_correction(mat, q, n, opt=True):
"""
Correct the grayscale values of a normalized image using a non-linear
function.
Parameters
----------
mat : array_like
Normalized projection image or sinogram image.
q : float
Positive number. Recommended range [0.005, 50].
n : float
        Positive number. Must be greater than or equal to 2.
opt : bool
True: Curve towards 0.0.
False: Curve towards 1.0.
Returns
-------
array_like
Corrected image.
"""
if np.max(mat) >= 2.0:
raise ValueError("!!! Input image must be normalized, i.e. gray-scales "
"are in the range of [0.0, 1.0]) !!!")
if n < 2.0:
raise ValueError("!!! n must be larger than or equal to 2 !!!")
return np.asarray([non_linear_function(x, q, n, opt) for x in mat]) | d113ff33c2d688f460cf2bac62cdd8a26b890ce5 | 3,654,210 |
import pandas as pd
def cargar_recursos_vectores_transpuestos():
    """
    Load the information needed to compute the transposed vectors.
    """
    # Build the DataFrame
    filename = 'csv/' + conf.data['env']['path'] + '/vectores_transpuestos.csv'
    recursos_v_transpuestos = pd.read_csv(filename)
    # Rename the index entries to the names of the transposed vectors.
    # A MultiIndex is used to avoid adding an extra column to the transposed vectors;
    # it is defined by tuples = [('Reg.1','H'), ('Reg.2','F'), ('Reg.3','G'), ('Reg.4','H'), ..., ('Reg.10','A')]
tuples = [('Reg.' + str(i), vt) for i, vt in zip(recursos_v_transpuestos.pop('num_de_region'), recursos_v_transpuestos.pop('chr_vector_t'))]
recursos_v_transpuestos.index = pd.MultiIndex.from_tuples(tuples)
return recursos_v_transpuestos | f982fd22bbdfd2d7d389787bf7f923feb9abf66f | 3,654,211 |
def read_pdb(file_name, exclude=('SOL',), ignh=False, modelidx=1):
"""
Parse a PDB file to create a molecule.
Parameters
----------
    file_name: str
The file to read.
exclude: collections.abc.Container[str]
Atoms that have one of these residue names will not be included.
ignh: bool
Whether hydrogen atoms should be ignored.
    modelidx: int
If the PDB file contains multiple models, which one to select.
Returns
-------
list[vermouth.molecule.Molecule]
The parsed molecules. Will only contain edges if the PDB file has
CONECT records. Either way, the molecules might be disconnected. Entries
separated by TER, ENDMDL, and END records will result in separate
molecules.
"""
parser = PDBParser(exclude, ignh, modelidx)
with open(str(file_name)) as file_handle:
mols = list(parser.parse(file_handle))
LOGGER.info('Read {} molecules from PDB file {}', len(mols), file_name)
return mols | d7412b96adef5505676a80e5cdf3fe5e63a3b096 | 3,654,212 |
def sao_isomorficas(texto1: str, texto2: str) -> bool:
"""
>>> sao_isomorficas('egg', 'add')
True
>>> sao_isomorficas('foo', 'bar')
False
>>> sao_isomorficas('eggs', 'add')
False
"""
    # O(n) algorithm in time and memory
letras_encontradas = {}
if len(texto1) != len(texto2):
return False
for caractere_1, caractere_2 in zip(texto1, texto2):
try:
letra = letras_encontradas[caractere_1]
except KeyError:
letras_encontradas[caractere_1] = caractere_2
else:
            if letra != caractere_2:
return False
return True | a1f2c00a50b69cb18c32a299d50cbd3a35dcbe5e | 3,654,213 |
from inspect import getargspec
def _is_no_args(fn):
"""Check if function has no arguments.
"""
return getargspec(fn).args == [] | 29cb096323c69dd067bf4759a557734443a82ed5 | 3,654,214 |
def failure(parsed_args):
"""
:param :py:class:`argparse.Namespace` parsed_args:
:return: Nowcast system message type
:rtype: str
"""
logger.critical(
f"{parsed_args.model_config} {parsed_args.run_type} FVCOM VH-FR run for "
f'{parsed_args.run_date.format("YYYY-MM-DD")} '
f"on {parsed_args.host_name} failed"
)
msg_type = f"failure {parsed_args.model_config} {parsed_args.run_type}"
return msg_type | e6744bfd61458497b5a70d7d28712385a8488a98 | 3,654,215 |
import numpy as np
def good_AP_finder(time,voltage):
"""
This function takes the following input:
time - vector where each element is a time in seconds
voltage - vector where each element is a voltage at a different time
We are assuming that the two vectors are in correspondance (meaning
that at a given index, the time in one corresponds to the voltage in
the other). The vectors must be the same size or the code
won't run
This function returns the following output:
APTimes - all the times where a spike (action potential) was detected
"""
APTimes = []
#Let's make sure the input looks at least reasonable
if (len(voltage) != len(time)):
print "Can't run - the vectors aren't the same length!"
return APTimes
##Your Code Here!
    threshold = 0.5 * np.max(voltage)
    times_of_APs = time[voltage > threshold]
    APTimes = times_of_APs[np.diff(times_of_APs) > 0.0015]
return APTimes | c13897b7bf5335cae20f65db853e7a214ec570c5 | 3,654,216 |
from typing import Dict
from typing import Any
import pandas as pd
from tqdm import tqdm
def parse(excel_sheets: Dict[Any, pd.DataFrame],
dictionary: Dict[str, Any],
verbose: bool = False) -> pd.DataFrame:
"""Parse sheets of an excel file according to instructions in `dictionary`.
"""
redux_dict = recursive_traverse(dictionary)
column_tuples = redux_dict.keys()
tuple_lengths = [len(tuple) for tuple in column_tuples]
if len(set(tuple_lengths)) > 1:
raise ValueError("Depth of provided JSON file is inconsistent. All "
"entries must be located at the same depth.")
multi_index = pd.MultiIndex.from_tuples(tuples=column_tuples)
data_frame = pd.DataFrame(columns=multi_index)
if verbose:
sheets = tqdm(
excel_sheets.items(),
desc="Looping through sheets",
ncols=100
)
else:
sheets = excel_sheets.items()
for sheet_name, sheet in sheets:
new_row = {}
for column, instr in redux_dict.items():
try:
raw = sheet.iloc[instr["row"], instr["col"]].values
except AttributeError:
raw = sheet.iloc[instr["row"], instr["col"]]
except ValueError:
raw = None
try:
func = map_with_dict(instr["choices"])
except KeyError:
func = FUNC_DICT[instr["func"]]
try:
new_row[column] = func(raw)
except:
new_row[column] = None
data_frame = data_frame.append(new_row, ignore_index=True)
return data_frame | 88028fef19eda993680e89c58954c04a215a2fdd | 3,654,217 |
def build_LAMP(prob,T,shrink,untied):
"""
Builds a LAMP network to infer x from prob.y_ = matmul(prob.A,x) + AWGN
return a list of layer info (name,xhat_,newvars)
name : description, e.g. 'LISTA T=1'
xhat_ : that which approximates x_ at some point in the algorithm
newvars : a tuple of layer-specific trainable variables
"""
eta,theta_init = shrinkage.get_shrinkage_function(shrink)
print('theta_init='+repr(theta_init))
layers=[]
A = prob.A
M,N = A.shape
B = A.T / (1.01 * la.norm(A,2)**2)
B_ = tf.Variable(B,dtype=tf.float32,name='B_0')
By_ = tf.matmul( B_ , prob.y_ )
layers.append( ('Linear',By_,None) )
if getattr(prob,'iid',True) == False:
# set up individual parameters for every coordinate
theta_init = theta_init*np.ones( (N,1),dtype=np.float32 )
theta_ = tf.Variable(theta_init,dtype=tf.float32,name='theta_0')
OneOverM = tf.constant(float(1)/M,dtype=tf.float32)
NOverM = tf.constant(float(N)/M,dtype=tf.float32)
rvar_ = tf.reduce_sum(tf.square(prob.y_),0) * OneOverM
(xhat_,dxdr_) = eta( By_,rvar_ , theta_ )
layers.append( ('LAMP-{0} T=1'.format(shrink),xhat_,(theta_,) ) )
vt_ = prob.y_
for t in range(1,T):
if len(dxdr_.get_shape())==2:
dxdr_ = tf.reduce_mean(dxdr_,axis=0)
bt_ = dxdr_ * NOverM
vt_ = prob.y_ - tf.matmul( prob.A_ , xhat_ ) + bt_ * vt_
rvar_ = tf.reduce_sum(tf.square(vt_),0) * OneOverM
theta_ = tf.Variable(theta_init,name='theta_'+str(t))
if untied:
B_ = tf.Variable(B,dtype=tf.float32,name='B_'+str(t))
rhat_ = xhat_ + tf.matmul(B_,vt_)
layers.append( ('LAMP-{0} linear T={1}'.format(shrink,t+1),rhat_ ,(B_,) ) )
else:
rhat_ = xhat_ + tf.matmul(B_,vt_)
(xhat_,dxdr_) = eta( rhat_ ,rvar_ , theta_ )
layers.append( ('LAMP-{0} non-linear T={1}'.format(shrink,t+1),xhat_,(theta_,) ) )
return layers | 392050992846aeb1a16e70fe6e43c386e11915e5 | 3,654,218 |
def coeffVar(X, precision=3):
"""
Coefficient of variation of the given data (population)
Argument:
X: data points, a list of int, do not mix negative and positive numbers
precision (optional): digits precision after the comma, default=3
Returns:
float, the cv (measure of dispersion) of the input sample
or raise StatsError('mean is zero') if the mean = 0
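    Example (illustrative, assuming stdDev computes the population standard deviation):
        coeffVar([2, 4, 6, 8]) -> 0.447 (std ~2.236 divided by mean 5.0)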
"""
try:
return round(stdDev(X, precision) / mean(X, precision), precision)
except ZeroDivisionError:
raise StatsError('mean is zero') | e92505e79c4d10a5d56ec35cd2b543872f6be59c | 3,654,219 |
def tostring(node):
"""
Generates a string representation of the tree, in a format determined by the user.
@ In, node, InputNode or InputTree, item to turn into a string
@ Out, tostring, string, full tree in string form
"""
if isinstance(node,InputNode) or isinstance(node,InputTree):
return node.printXML()
else:
raise NotImplementedError('TreeStructure.tostring received "'+str(node)+'" but was expecting InputNode or InputTree.') | 8e6cab92b898bd99b5c738b26fc9f8d79aef0750 | 3,654,220 |
from typing import Dict
import random
def pick_char_from_dict(char: str, dictionary: Dict[str, str]) -> str:
"""
    Picks a random format for the given letter in the dictionary
"""
return random.choice(dictionary[char]) | c593166ef7cb8c960b8c4be8fa0f8a20ec616f00 | 3,654,221 |
from typing import List
def bmeow_to_bilou(tags: List[str]) -> List[str]:
"""Convert BMEOW tags to the BILOU format.
Args:
tags: The BMEOW tags we are converting
Raises:
ValueError: If there were errors in the BMEOW formatting of the input.
Returns:
Tags that produce the same spans in the BILOU format.
"""
return convert_tags(tags, parse_spans_bmeow_with_errors, write_bilou_tags) | 0081b7691a743fe3e28118cbb571708809fbd485 | 3,654,222 |
def site_sold_per_category(items):
"""For every category, a (site, count) pair with the number of items sold by the
site in that category.
"""
return [(site,
[(cat, total_sold(cat_items)) for cat, cat_items in
categories])
for site, categories in
            category_items_per_site(items).items()]
from typing import List
import ctypes
def swig_py_object_2_list_int(object, size : int) -> List[int]:
"""
    Converts SwigPyObject to List[int]
"""
y = (ctypes.c_float * size).from_address(int(object))
new_object = []
for i in range(size):
new_object += [int(y[i])]
return new_object | 064a9a1e43884a9f989bec0b31d6d19705764b64 | 3,654,224 |
from typing import Tuple
from typing import Union
async def reactionFromRaw(payload: RawReactionActionEvent) -> Tuple[Message, Union[User, Member], emojis.BasedEmoji]:
"""Retrieve complete Reaction and user info from a RawReactionActionEvent payload.
:param RawReactionActionEvent payload: Payload describing the reaction action
:return: The message whose reactions changed, the user who completed the action, and the emoji that changed.
:rtype: Tuple[Message, Union[User, Member], BasedEmoji]
"""
emoji = None
user = None
message = None
if payload.member is None:
# Get the channel containing the reacted message
if payload.guild_id is None:
channel = botState.client.get_channel(payload.channel_id)
else:
guild = botState.client.get_guild(payload.guild_id)
if guild is None:
return None, None, None
channel = guild.get_channel(payload.channel_id)
# Individual handling for each channel type for efficiency
if isinstance(channel, DMChannel):
if channel.recipient.id == payload.user_id:
user = channel.recipient
else:
user = channel.me
elif isinstance(channel, GroupChannel):
# Group channels should be small and far between, so iteration is fine here.
for currentUser in channel.recipients:
if currentUser.id == payload.user_id:
user = currentUser
if user is None:
user = channel.me
# Guild text channels
elif isinstance(channel, TextChannel):
user = channel.guild.get_member(payload.user_id)
else:
return None, None, None
# Fetch the reacted message (api call)
message = await channel.fetch_message(payload.message_id)
# If a reacting member was given, the guild can be inferred from the member.
else:
user = payload.member
message = await payload.member.guild.get_channel(payload.channel_id).fetch_message(payload.message_id)
if message is None:
return None, None, None
# Convert reacted emoji to BasedEmoji
try:
emoji = emojis.BasedEmoji.fromPartial(payload.emoji, rejectInvalid=True)
except exceptions.UnrecognisedCustomEmoji:
return None, None, None
return message, user, emoji | 36ae16e2b1ffb3df1d5c68ae903b95556446138f | 3,654,226 |
from numpy import array, asarray, empty
from scipy.sparse import dia_matrix
def poisson2d(N,dtype='d',format=None):
"""
Return a sparse matrix for the 2d poisson problem
with standard 5-point finite difference stencil on a
square N-by-N grid.
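    Example (illustrative): ``poisson2d(2).toarray()`` yields the 4x4 matrix
    [[ 4, -1, -1,  0],
     [-1,  4,  0, -1],
     [-1,  0,  4, -1],
     [ 0, -1, -1,  4]].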
"""
if N == 1:
diags = asarray( [[4]],dtype=dtype)
return dia_matrix((diags,[0]), shape=(1,1)).asformat(format)
offsets = array([0,-N,N,-1,1])
diags = empty((5,N**2),dtype=dtype)
diags[0] = 4 #main diagonal
diags[1:] = -1 #all offdiagonals
diags[3,N-1::N] = 0 #first lower diagonal
diags[4,N::N] = 0 #first upper diagonal
return dia_matrix((diags,offsets),shape=(N**2,N**2)).asformat(format) | 089088f468e84dce865bbb26707714617e16f3f6 | 3,654,227 |
import random
def get_factory():
"""้ๆบ่ทๅไธไธชๅทฅๅ็ฑป"""
return random.choice([BasicCourseFactory, ProjectCourseFactory])() | c71401a2092618701966e5214f85c67a6520b1c9 | 3,654,228 |
def delay_class_factory(motor_class):
"""
Create a subclass of DelayBase that controls a motor of class motor_class.
    Used in delay_instance_factory (DelayMotor), may be useful for one-line
declarations inside ophyd Devices.
"""
try:
cls = delay_classes[motor_class]
except KeyError:
cls = type(
'Delay' + motor_class.__name__,
(DelayBase,),
{'motor': Cpt(motor_class, '')}
)
delay_classes[motor_class] = cls
return cls | 264d68f7d3db164c5c133e68f943b789db52fc8b | 3,654,229 |
def lonlat2px_gt(img, lon, lat, lon_min, lat_min, lon_max, lat_max):
"""
Converts a pair of lon and lat to its corresponding pixel value in an
geotiff image file.
Parameters
----------
img : Image File, e.g. PNG, TIFF
Input image file
lon : float
Longitude
lat : float
Latitude
lon_min, lat_min : float
lower left coordinate of geotiff
lon_max, lat_max : float
upper right coordinate of geotiff
Returns
-------
Row : float
corresponding pixel value
Col : float
corresponding pixel value
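    Examples
    --------
    Illustrative: for a 100x100 pixel image covering lon 0..10 and lat 0..10,
    ``lonlat2px_gt(img, 5, 5, 0, 0, 10, 10)`` returns ``(50.0, 50.0)``.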
"""
w, h = img.size
londiff = lon_max - lon_min
latdiff = lat_max - lat_min
mw = w / londiff
mh = h / latdiff
row = (-lat + lat_max) * mh
col = (lon - lon_min) * mw
return row, col | 39c1aeb63d38fdac383c510913f50f177d274a04 | 3,654,232 |
import numpy as np
import scipy.stats
def array_wishart_rvs(df, scale, **kwargs):
""" Wrapper around scipy.stats.wishart to always return a np.array """
if np.size(scale) == 1:
return np.array([[
scipy.stats.wishart(df=df, scale=scale, **kwargs).rvs()
]])
else:
return scipy.stats.wishart(df=df, scale=scale, **kwargs).rvs() | d14b26d8f1b05de1ac961499d96c604028fca379 | 3,654,234 |
async def async_setup_entry(hass, entry):
"""Set up Jenkins from a config entry."""
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, "sensor")
)
return True | c46912d11630c36effc07eed3273e42325c9b2b8 | 3,654,236 |
import math
def signal_to_dataset(raw, fsamp, intvs, labels):
"""Segmentize raw data into list of epochs.
returns dataset and label_array : a list of data, each block is 1
second, with fixed size. width is number of channels in certain standard
order.
Args:
raw: EEG signals. Shape: (n_channel, n_sample).
fsamp(int): sampling rate, i.e., window size of resulting epoch. Unit: Hz
intvs: list of [start, end]. Unit: second
labels: list of labels. Must be same len as INTVS
Returns: tuple (dataset, labels):
- dataset: list of data; (n_epochs, n_channels, n_sample_per_epoch)
- labels: list of labels
"""
ds, lbl = [], []
for i, inv in enumerate(intvs):
tstart, tend = inv
chopped_sig = chop_signal(
[ch[math.ceil(tstart*fsamp):math.floor(tend*fsamp)] for ch in raw],
fsamp)
ds.extend(chopped_sig)
lbl.extend([labels[i]] * len(chopped_sig))
return ds, lbl | 340bbb91bd6a36d1d3a20d0689e25c29e5b879c5 | 3,654,237 |
def project_dynamic_property_graph(graph, v_prop, e_prop, v_prop_type, e_prop_type):
"""Create project graph operation for nx graph.
Args:
graph (:class:`nx.Graph`): A nx graph.
v_prop (str): The node attribute key to project.
e_prop (str): The edge attribute key to project.
v_prop_type (str): Type of the node attribute.
e_prop_type (str): Type of the edge attribute.
Returns:
Operation to project a dynamic property graph. Results in a simple graph.
"""
check_argument(graph.graph_type == types_pb2.DYNAMIC_PROPERTY)
config = {
types_pb2.GRAPH_NAME: utils.s_to_attr(graph.key),
types_pb2.GRAPH_TYPE: utils.graph_type_to_attr(types_pb2.DYNAMIC_PROJECTED),
types_pb2.V_PROP_KEY: utils.s_to_attr(v_prop),
types_pb2.E_PROP_KEY: utils.s_to_attr(e_prop),
types_pb2.V_DATA_TYPE: utils.s_to_attr(utils.data_type_to_cpp(v_prop_type)),
types_pb2.E_DATA_TYPE: utils.s_to_attr(utils.data_type_to_cpp(e_prop_type)),
}
op = Operation(
graph._session_id,
types_pb2.PROJECT_GRAPH,
config=config,
output_types=types_pb2.GRAPH,
)
return op | 6e54180a4ef257c50a02104cc3a4cbbae107d233 | 3,654,238 |
def eqfm_(a, b):
"""Helper for comparing floats AND style names."""
n1, v1 = a
n2, v2 = b
if type(v1) is not float:
return eq_(a, b)
eqf_(v1, v2)
eq_(n1, n2) | 1ee53203baa6c8772a4baf240f68bb5898a5d516 | 3,654,239 |
def flatten_comment(seq):
"""Flatten a sequence of comment tokens to a human-readable string."""
# "[CommentToken(value='# Extra settings placed in ``[app:main]`` section in generated production.ini.\\n'), CommentToken(value='# Example:\\n'), CommentToken(value='#\\n'), CommentToken(value='# extra_ini_settings: |\\n'), CommentToken(value='# mail.host = mymailserver.internal\\n'), CommentToken(value='# websauna.superusers =\\n'), CommentToken(value='# [email protected]\\n'), CommentToken(value='#\\n')]
if not seq:
return ""
result = []
for item in seq:
if not item:
continue
if isinstance(item, CommentToken):
# Mangle away # comment start from the line
s = item.value
s = s.strip(" ")
s = s.lstrip("#")
s = s.rstrip("\n")
if s.startswith(" "):
s = s[1:]
result.append(s)
if result:
raw_comment = "\n".join(result)
else:
return ""
section_header = raw_comment.rfind("---")
if section_header >= 0:
raw_comment = raw_comment[section_header + 3:]
return raw_comment | 56104eb6e0109b6c677964cd1873244ff05f27fc | 3,654,240 |
def get_community(community_id):
"""
Verify that a community with a given id exists.
:param community_id: id of test community
:return: Community instance
:return: 404 error if doesn't exist
"""
try:
return Community.objects.get(pk=community_id)
except Community.DoesNotExist:
return | 33d16db86c53b7dd68dec8fe80639b560e41f457 | 3,654,241 |
import csv
from pprint import pprint
def load_labeled_info(csv4megan_excell, audio_dataset, ignore_files=None):
"""Read labeled info from spreat sheet
and remove samples with no audio file, also files given in ignore_files
"""
if ignore_files is None:
ignore_files = set()
with open(csv4megan_excell) as csvfile:
reader = csv.DictReader(csvfile)
reader = list(reader)
reader_strip = []
for row in reader:
row = {r: row[r].strip() for r in row}
reader_strip.append(row)
reader = reader_strip.copy()
missing_audio_files = []
for row in reader:
if audio_dataset.get(row['File Name'], None) is None:
missing_audio_files.append(row['File Name'])
missing_audio_files = set(missing_audio_files)
print((f'{len(missing_audio_files)} files are missing' +
           ' corresponding to excel entries'))
megan_data_sheet = []
for row in reader:
if row['File Name'] not in ignore_files:
if row['File Name'] not in missing_audio_files:
megan_data_sheet.append(row)
deleted_files = set()
deleted_files.update(ignore_files)
deleted_files.update(missing_audio_files)
pprint((f'-> {len(deleted_files)} number of samples are DELETED due to ' +
'ignore_files and missing_audio_files'))
return megan_data_sheet, list(deleted_files) | f196b02c8667ebe5e8d2d89a79be78c6eb838afe | 3,654,242 |
def de_dupe_list(input):
"""de-dupe a list, preserving order.
"""
sam_fh = []
for x in input:
if x not in sam_fh:
sam_fh.append(x)
return sam_fh | bbf1936f21c19195369e41b635bf0f99704b3210 | 3,654,243 |
def donwload_l10ns():
"""Download all l10ns in zip archive."""
url = API_PREFIX + 'download/' + FILENAME + KEY_SUFFIX
l10ns_file = urllib2.urlopen(url)
with open('all.zip','wb') as f:
f.write(l10ns_file.read())
return True | 26770dfc8f32947c1a32a287f811e95ffe314822 | 3,654,244 |
import numpy as np
def _constant_velocity_heading_from_kinematics(kinematics_data: KinematicsData,
sec_from_now: float,
sampled_at: int) -> np.ndarray:
"""
Computes a constant velocity baseline for given kinematics data, time window
and frequency.
:param kinematics_data: KinematicsData for agent.
:param sec_from_now: How many future seconds to use.
:param sampled_at: Number of predictions to make per second.
"""
x, y, vx, vy, _, _, _, _, _, _ = kinematics_data
preds = []
time_step = 1.0 / sampled_at
for time in np.arange(time_step, sec_from_now + time_step, time_step):
preds.append((x + time * vx, y + time * vy))
return np.array(preds) | 2b6781ceb9e012486d3063b8f3cff29164ff8743 | 3,654,245 |
def arg_int(name, default=None):
""" Fetch a query argument, as an integer. """
try:
v = request.args.get(name)
return int(v)
except (ValueError, TypeError):
return default | 110088655bc81363e552f31d9bbd8f4fa45abd1b | 3,654,246 |
def adapter_rest(request, api_module_rest, api_client_rest):
"""Pass."""
return {
"adapter": request.param,
"api_module": api_module_rest,
"api_client": api_client_rest,
} | 8b96313cb190f6f8a97a853e24a5fcfade291d76 | 3,654,248 |
def remove_quotes(string):
"""Function to remove quotation marks surrounding a string"""
string = string.strip()
while len(string) >= 3 and string.startswith('\'') and string.endswith('\''):
string = string[1:-1]
string = quick_clean(string)
string = quick_clean(string)
return string | c6585c054abaef7248d30c1814fb13b6b9d01852 | 3,654,250 |
import pandas as pd
def compute_list_featuretypes(
data,
list_featuretypes,
fourier_n_largest_frequencies,
wavelet_depth,
mother_wavelet,
):
"""
This function lets the user choose which combination of features they
want to have computed.
list_featuretypes:
"Basic" - min, max, mean, kurt ,skew, std, sum.
"FourierComplete" - all frequencies amplitudes and phases.
"FourierNLargest" - n largest frequencies and their values.
"WaveletComplete" - all approximation and details coefficients at each depth.
"WaveletBasic" - takes "Basic" (min, max, etc) at each depth.
Args:
data (pd.DataFrame()) : one column from which to make features.
list_featuretypes (list) : list of feature types to be computed.
fourier_n_largest_frequencies (int) : amount of fourier features.
wavelet_depth (int) : level of depth up to which the wavelet is computed.
mother_wavelet (str) : type of wavelet used for the analysis.
Returns:
features (pd.DataFrame()) : row of features.
"""
if type(list_featuretypes) != list:
raise AttributeError("'list_featuretypes' must be a list.")
allowed_components = ["Basic", "FourierNLargest", "WaveletComplete", "WaveletBasic", "FourierComplete"]
for argument in list_featuretypes:
if argument not in allowed_components:
raise ValueError(f"argument must be one of {allowed_components}")
features_basic = pd.DataFrame()
features_fourier = pd.DataFrame()
features_wavelet = pd.DataFrame()
features_wavelet_basic = pd.DataFrame()
features_fft2 = pd.DataFrame()
if "Basic" in list_featuretypes:
features_basic = compute_basic(data)
if "FourierNLargest" in list_featuretypes:
features_fourier = compute_fourier_n_largest(data, fourier_n_largest_frequencies)
if "FourierComplete" in list_featuretypes:
features_fft2 = compute_fourier_complete(data)
if "WaveletComplete" in list_featuretypes:
features_wavelet = compute_wavelet_complete(data, wavelet_depth, mother_wavelet)
if "WaveletBasic" in list_featuretypes:
features_wavelet_basic = compute_wavelet_basic(
data, wavelet_depth, mother_wavelet
)
features = pd.concat(
[features_basic, features_fourier, features_fft2, features_wavelet, features_wavelet_basic],
axis=1,
)
return features | f1c8fea04a01f6b7a3932434e27aba7ea2e17948 | 3,654,251 |
def select(locator):
"""
Returns an :class:`Expression` for finding selects matching the given locator.
The query will match selects that meet at least one of the following criteria:
* the element ``id`` exactly matches the locator
* the element ``name`` exactly matches the locator
* the element ``id`` exactly matches the ``for`` attribute of a corresponding ``label`` element
whose text matches the locator
* the element is nested within a ``label`` element whose text matches the locator
Args:
locator (str): A string that identifies the desired selects.
Returns:
Expression: An :class:`Expression` object matching the desired selects.
"""
field_expr = x.descendant("select")
return _locate_field(field_expr, locator) | a3cd093a62d6c926fd9f782cdec35eadc34eba67 | 3,654,252 |
from flask import send_from_directory
def send_image(filename):
"""Route to uploaded-by-client images
Returns
-------
file
Image file on the server (see Flask documentation)
"""
return send_from_directory(app.config['UPLOAD_FOLDER'], filename) | 68b99ca59d6d4b443a77560d3eb1913422407764 | 3,654,253 |
def swissPairings():
"""Returns a list of pairs of players for the next round of a match.
Assuming that there are an even number of players registered, each player
appears exactly once in the pairings. Each player is paired with another
player with an equal or nearly-equal win record, that is, a player adjacent
to him or her in the standings.
Returns:
A list of tuples, each of which contains (id1, name1, id2, name2)
id1: the first player's unique id
name1: the first player's name
id2: the second player's unique id
name2: the second player's name
"""
with get_cursor() as cursor:
cursor.execute("SELECT * FROM pairings")
pairings = cursor.fetchall()
return pairings | f83a8a108f2d926c948999014f0dbb79a3b1c428 | 3,654,254 |
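# Hedged usage sketch, not part of the original record: it assumes the "pairings" view
# exists, get_cursor() is defined elsewhere, and the tournament tables are populated.
for id1, name1, id2, name2 in swissPairings():
    print(f"{name1} (#{id1}) vs {name2} (#{id2})")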
import numpy as np
import torch
def split(data, batch):
"""
PyG util code to create graph batches
"""
node_slice = torch.cumsum(torch.from_numpy(np.bincount(batch)), 0)
node_slice = torch.cat([torch.tensor([0]), node_slice])
row, _ = data.edge_index
edge_slice = torch.cumsum(torch.from_numpy(np.bincount(batch[row])), 0)
edge_slice = torch.cat([torch.tensor([0]), edge_slice])
# Edge indices should start at zero for every graph.
data.edge_index -= node_slice[batch[row]].unsqueeze(0)
data.__num_nodes__ = torch.bincount(batch).tolist()
slices = {'edge_index': edge_slice}
if data.x is not None:
slices['x'] = node_slice
if data.edge_attr is not None:
slices['edge_attr'] = edge_slice
if data.y is not None:
if data.y.size(0) == batch.size(0):
slices['y'] = node_slice
else:
slices['y'] = torch.arange(0, batch[-1] + 2, dtype=torch.long)
return data, slices | 69af8b969d7f0da28a1f7fda951f64974c238da0 | 3,654,255 |
def _get_shadowprice_data(scenario_id):
"""Gets data necessary for plotting shadow price
:param str/int scenario_id: scenario id
:return: (*tuple*) -- interconnect as a str, bus data as a data frame, lmp data
as a data frame, branch data as a data frame and congestion data as a data
frame
"""
s = Scenario(scenario_id)
interconnect = s.info["interconnect"]
interconnect = " ".join(interconnect.split("_"))
s_grid = s.state.get_grid()
# Get bus and add location data
bus_map = project_bus(s_grid.bus)
# get branch and add location data
branch_map = project_branch(s_grid.branch)
# get congestion
congu = s.state.get_congu()
congl = s.state.get_congl()
cong_abs = pd.DataFrame(
np.maximum(congu.to_numpy(), congl.to_numpy()),
columns=congu.columns,
index=congu.index,
)
return interconnect, bus_map, s.state.get_lmp(), branch_map, cong_abs | 57488b7ff6984cc292dce3bf76d18d0b2585b7ff | 3,654,256 |
import json
def get_city_reviews(city):
"""
Given a city name, return the data for all reviews.
Returns a pandas DataFrame.
"""
with open(f"{DATA_DIR}/{city}/review.json", "r") as f:
review_list = []
for line in f:
review = json.loads(line)
review_list.append(review)
# convert to pandas DataFrame
reviews = to_pandas([city], {city: review_list})
# optimize memory usage
reviews = optimize(reviews, {'city': 'category'})
return reviews | e0723ab90dafc53059677928fb553cf197abecc1 | 3,654,257 |
def extract_rows_from_table(dataset, col_names, fill_null=False):
""" Extract rows from DB table.
:param dataset:
:param col_names:
:return:
"""
trans_dataset = transpose_list(dataset)
rows = []
    if isinstance(col_names, str):
col_names = [col_names]
for col_name in col_names:
if col_name in dataset[0]:
idx = dataset[0].index(col_name)
rows.append(trans_dataset[idx])
else:
if fill_null:
null_list = [''] * (len(trans_dataset[0])-1)
null_list = [col_name] + null_list
rows.append(null_list)
else:
pass
if len(col_names) == 1:
return rows[0]
else:
return transpose_list(rows) | 91371215f38a88b93d08c467303ccbd45f57b369 | 3,654,258 |
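# Hedged usage sketch, not part of the original record: transpose_list is assumed to be
# the module's row/column transpose helper, and the table below is invented.
table = [
    ["name", "age", "city"],
    ["ada", 36, "london"],
    ["alan", 41, "manchester"],
]
ages = extract_rows_from_table(table, "age")               # ["age", 36, 41]
subset = extract_rows_from_table(table, ["name", "age"])   # rows restricted to two columns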
from rdkit import Chem
def CalculateHydrogenNumber(mol):
"""
#################################################################
Calculation of Number of Hydrogen in a molecule
---->nhyd
Usage:
result=CalculateHydrogenNumber(mol)
Input: mol is a molecule object.
Output: result is a numeric value.
#################################################################
"""
i = 0
Hmol = Chem.AddHs(mol)
for atom in Hmol.GetAtoms():
if atom.GetAtomicNum() == 1:
i = i + 1
return i | 0b9fbad14c8e9f46beab5208ab0f929fef1ab263 | 3,654,259 |
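# Hedged usage sketch, not part of the original record: ethanol (C2H5OH) carries six
# hydrogens, so the descriptor should return 6 for this SMILES.
from rdkit import Chem

ethanol = Chem.MolFromSmiles("CCO")
print(CalculateHydrogenNumber(ethanol))  # 6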
def check_update ():
"""Return the following values:
(False, errmsg) - online version could not be determined
(True, None) - user has newest version
(True, (version, url string)) - update available
(True, (version, None)) - current version is newer than online version
"""
version, value = get_online_version()
if version is None:
# value is an error message
return False, value
if version == CurrentVersion:
# user has newest version
return True, None
if is_newer_version(version):
# value is an URL linking to the update package
return True, (version, value)
# user is running a local or development version
return True, (version, None) | 8bba3e7fbe11ce6c242f965450628dc94b6c2c0b | 3,654,260 |
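# Hedged usage sketch, not part of the original record: one way a caller could unpack the
# documented return values; get_online_version, is_newer_version and CurrentVersion are
# assumed to be defined alongside check_update.
ok, info = check_update()
if not ok:
    print("could not determine online version:", info)
elif info is None:
    print("already running the newest version")
else:
    version, url = info
    if url:
        print("update %s available at %s" % (version, url))
    else:
        print("running a local build newer than %s" % version)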
import torch
def count_regularization_baos_for_both(z, count_tokens, count_pieces, mask=None):
"""
Compute regularization loss, based on a given rationale sequence
Use Yujia's formulation
Inputs:
z -- torch variable, "binary" rationale, (batch_size, sequence_length)
percentage -- the percentage of words to keep
Outputs:
a loss value that contains two parts:
continuity_loss -- \sum_{i} | z_{i-1} - z_{i} |
sparsity_loss -- |mean(z_{i}) - percent|
"""
# (batch_size,)
if mask is not None:
mask_z = z * mask
seq_lengths = torch.sum(mask, dim=1)
else:
mask_z = z
seq_lengths = torch.sum(z - z + 1.0, dim=1)
mask_z_ = torch.cat([mask_z[:, 1:], mask_z[:, -1:]], dim=-1)
continuity_ratio = torch.sum(torch.abs(mask_z - mask_z_), dim=-1) / seq_lengths #(batch_size,)
percentage = count_pieces * 2 / seq_lengths
# continuity_loss = F.threshold(continuity_ratio - percentage, 0, 0, False)
continuity_loss = torch.abs(continuity_ratio - percentage)
sparsity_ratio = torch.sum(mask_z, dim=-1) / seq_lengths #(batch_size,)
percentage = count_tokens / seq_lengths #(batch_size,)
# sparsity_loss = F.threshold(sparsity_ratio - percentage, 0, 0, False)
sparsity_loss = torch.abs(sparsity_ratio - percentage)
return continuity_loss, sparsity_loss | 7925c8621866a20f0c6130cd925afffe144e1c7c | 3,654,261 |
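# Hedged usage sketch, not part of the original record: one padded rationale of length 6
# (last position masked out) that keeps 3 tokens in a single contiguous piece.
import torch

z = torch.tensor([[0., 1., 1., 1., 0., 0.]])
mask = torch.tensor([[1., 1., 1., 1., 1., 0.]])
continuity_loss, sparsity_loss = count_regularization_baos_for_both(
    z, count_tokens=3, count_pieces=1, mask=mask
)
print(continuity_loss.item(), sparsity_loss.item())  # sparsity term is 0 here (3/5 kept, 3/5 asked)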
def unsqueeze_samples(x, n):
"""
"""
bn, d = x.shape
x = x.reshape(bn//n, n, d)
return x | 0c7b95e97df07aea72e9c87996782081763664cf | 3,654,262 |
import numpy as np
def f_snr(seq):
"""compute signal to noise rate of a seq
Args:
seq: input array_like sequence
paras: paras array, in this case should be "axis"
"""
seq = np.array(seq, dtype=np.float64)
result = np.mean(seq)/float(np.std(seq))
if np.isinf(result):
print "marker"
result = 0
return result | b018b5e4c249cfafcc3ce8b485c917bfcdd19ce2 | 3,654,263 |
import numpy as np
def _lorentzian_pink_beam(p, x):
"""
@author Saransh Singh, Lawrence Livermore National Lab
@date 03/22/2021 SS 1.0 original
@details the lorentzian component of the pink beam peak profile
obtained by convolution of gaussian with normalized back to back
exponentials. more details can be found in
    Von Dreele et al., J. Appl. Cryst. (2021). 54, 3–6
    p has the following parameters
    p = [A, x0, alpha, beta, fwhm_l]
"""
A,x0,alpha,beta,fwhm_l = p
del_tth = x - x0
p = -alpha*del_tth + 1j*0.5*alpha*fwhm_l
q = -beta*del_tth + 1j*0.5*beta*fwhm_l
y = np.zeros(x.shape)
f1 = exp1exp(p)
f2 = exp1exp(q)
y = -(alpha*beta)/(np.pi*(alpha+beta))*(f1+f2).imag
mask = np.isnan(y)
y[mask] = 0.
y *= A
return y | 7de93743da63ab816133e771075a8e8f0386ad35 | 3,654,264 |
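# Hedged usage sketch, not part of the original record: exp1exp (the exponential-integral
# helper used above) is assumed to be available from the same module, and the peak
# parameters below are invented.
import numpy as np

two_theta = np.linspace(9.0, 11.0, 501)
params = [1.0, 10.0, 80.0, 60.0, 0.05]  # A, x0, alpha, beta, fwhm_l
profile = _lorentzian_pink_beam(params, two_theta)
print(profile.max())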
def get_q_HPU_ave(Q_HPU):
"""1ๆ้ๅนณๅใฎใใผใใใณใใฆใใใใฎๅนณๅๆๆฟๅบๅ (7)
Args:
Q_HPU(ndarray): 1ๆ้ๅฝใใใฎใใผใใใณใใฆใใใใฎๆๆฟๅบๅ (MJ/h)
Returns:
ndarray: 1ๆ้ๅนณๅใฎใใผใใใณใใฆใใใใฎๅนณๅๆๆฟๅบๅ (7)
"""
return Q_HPU * 10 ** 6 / 3600 | fdf339d7f8524f69409711d4daefd1e2aaccbc76 | 3,654,265 |
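# Hedged usage sketch, not part of the original record: 3.6 MJ delivered over one hour is
# 3.6e6 J / 3600 s = 1000 W, which is exactly what the conversion returns.
import numpy as np

print(get_q_HPU_ave(np.array([3.6, 7.2])))  # [1000. 2000.]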
def particles(t1cat):
"""Return a list of the particles in a T1 catalog DataFrame.
Use it to find the individual particles involved in a group of events."""
return particles_fromlist(t1cat.particles.tolist()) | 38f9a077b7bab55b76a19f467f596ddb28e40c60 | 3,654,266 |
def prime_list(num):
"""
This function returns a list of prime numbers less than natural number entered.
:param num: natural number
:return result: List of primes less than natural number entered
"""
prime_table = [True for _ in range(num+1)]
i = 2
while i ** 2 <= num:
if prime_table[i]:
j = i + i
while j <= num:
prime_table[j] = False
j += i
i += 1
result = [i for i in range(num) if prime_table[i] and i >= 2]
return result | c8e05aae2a59c229cfafb997469dd8ccacdda0fc | 3,654,268 |
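# Hedged usage sketch, not part of the original record: primes strictly below 20.
print(prime_list(20))  # [2, 3, 5, 7, 11, 13, 17, 19]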
import time
def check_deadline_exceeded_and_store_partial_minimized_testcase(
deadline, testcase_id, job_type, input_directory, file_list,
file_to_run_data, main_file_path):
"""Store the partially minimized test and check the deadline."""
testcase = data_handler.get_testcase_by_id(testcase_id)
store_minimized_testcase(testcase, input_directory, file_list,
file_to_run_data, main_file_path)
deadline_exceeded = time.time() > deadline
if deadline_exceeded:
attempts = testcase.get_metadata(
'minimization_deadline_exceeded_attempts', default=0)
if attempts >= MAX_DEADLINE_EXCEEDED_ATTEMPTS:
_skip_minimization(testcase,
'Exceeded minimization deadline too many times.')
else:
testcase.set_metadata('minimization_deadline_exceeded_attempts',
attempts + 1)
tasks.add_task('minimize', testcase_id, job_type)
return deadline_exceeded | 443c09a8b5bcd8141f721b8ea90348879bc3b8c5 | 3,654,269 |
import ipaddress
def _item_to_python_repr(item, definitions):
"""Converts the given Capirca item into a typed Python object."""
# Capirca comments are just appended to item strings
s = item.split("#")[0].strip()
# A reference to another network
if s in definitions.networks:
return s
# IPv4 address / network
try:
return ipaddress.IPv4Address(s)
except ValueError:
pass
try:
return ipaddress.IPv4Network(s, strict=False)
except ValueError:
pass
# IPv6 address / network
try:
return ipaddress.IPv6Address(s)
except ValueError:
pass
try:
return ipaddress.IPv6Network(s, strict=False)
except ValueError:
pass
raise ValueError("Unknown how to convert {s}".format(s=s)) | 9881e304e923eb2cea8223224273f4c9ef81696b | 3,654,270 |
import numpy
import numpoly
def floor_divide(x1, x2, out=None, where=True, **kwargs):
"""
Return the largest integer smaller or equal to the division of the inputs.
It is equivalent to the Python ``//`` operator and pairs with the
Python ``%`` (`remainder`), function so that ``a = a % b + b * (a // b)``
up to roundoff.
Args:
x1 (numpoly.ndpoly):
Numerator.
x2 (numpoly.ndpoly):
Denominator. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which becomes the shape of the
output).
out (Optional[numpy.ndarray]):
A location into which the result is stored. If provided, it must
have a shape that the inputs broadcast to. If not provided or
`None`, a freshly-allocated array is returned. A tuple (possible
only as a keyword argument) must have length equal to the number of
outputs.
where (Optional[numpy.ndarray]):
This condition is broadcast over the input. At locations where the
condition is True, the `out` array will be set to the ufunc result.
Elsewhere, the `out` array will retain its original value.
Note that if an uninitialized `out` array is created via the default
``out=None``, locations within it where the condition is False will
remain uninitialized.
kwargs:
Keyword args passed to numpy.ufunc.
Returns:
(numpoly.ndpoly):
This is a scalar if both `x1` and `x2` are scalars.
Examples:
>>> xyz = [1, 2, 4]*numpoly.symbols("x y z")
>>> numpoly.floor_divide(xyz, 2.)
polynomial([0.0, y, 2.0*z])
>>> numpoly.floor_divide(xyz, [1, 2, 4])
polynomial([x, y, z])
>>> numpoly.floor_divide([1, 2, 4], xyz)
Traceback (most recent call last):
...
ValueError: only constant polynomials can be converted to array.
"""
x1, x2 = numpoly.align_polynomials(x1, x2)
x2 = x2.tonumpy()
no_output = out is None
if no_output:
out = numpoly.ndpoly(
exponents=x1.exponents,
shape=x1.shape,
names=x1.indeterminants,
dtype=numpy.common_type(x1, numpy.array(1.)),
)
for key in x1.keys:
numpy.floor_divide(x1[key], x2, out=out[key], where=where, **kwargs)
if no_output:
out = numpoly.clean_attributes(out)
return out | 9269d088c0893b9b6b4c3b27e8dc83c4493ac2c9 | 3,654,271 |
from typing import Callable
import click
def node_args_argument(command: Callable[..., None]) -> Callable[..., None]:
"""
Decorate a function to allow choosing arguments to run on a node.
"""
function = click.argument(
'node_args',
type=str,
nargs=-1,
required=True,
)(command) # type: Callable[..., None]
return function | 89365a41b7665cf291f5c15852db81e89aeef9a7 | 3,654,272 |
import functools
import unittest
def _tag_error(func):
"""Decorates a unittest test function to add failure information to the TestCase."""
@functools.wraps(func)
def decorator(self, *args, **kwargs):
"""Add failure information to `self` when `func` raises an exception."""
self.test_failed = False
try:
func(self, *args, **kwargs)
except unittest.SkipTest:
raise
except Exception: # pylint: disable=broad-except
self.test_failed = True
raise # re-raise the error with the original traceback.
return decorator | a2818c63647410abea3fde0b7f4fdae667b558bf | 3,654,273 |
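# Hedged usage sketch, not part of the original record: a throwaway TestCase whose failing
# test is recorded by unittest while the decorator also flags test_failed on the instance.
import unittest

class _DemoCase(unittest.TestCase):
    @_tag_error
    def test_fails(self):
        self.assertEqual(1, 2)

result = unittest.TextTestRunner(verbosity=0).run(
    unittest.TestLoader().loadTestsFromTestCase(_DemoCase)
)
print(len(result.failures))  # 1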
from datetime import datetime, timedelta
def get_submission_praw(n, sub, n_num):
"""
Returns a list of results for submission in past:
1st list: current result from n hours ago until now
2nd list: prev result from 2n hours ago until n hours ago
"""
mid_interval = datetime.today() - timedelta(hours=n)
timestamp_mid = int(mid_interval.timestamp())
timestamp_start = int((mid_interval - timedelta(hours=n)).timestamp())
timestamp_end = int(datetime.today().timestamp())
recent = {}
prev = {}
subreddit = reddit.subreddit(sub)
all_results = []
for post in subreddit.new(limit=n_num):
all_results.append([post.title, post.link_flair_text, post.selftext, post.score, post.num_comments,
post.created_utc])
# start --> mid --> end
recent[sub] = [posts for posts in all_results if timestamp_mid <= posts[5] <= timestamp_end]
prev[sub] = [posts for posts in all_results if timestamp_start <= posts[5] < timestamp_mid]
return recent, prev | 692af49736fac07a2de51d1cd0c4abcfe7bb8ee3 | 3,654,275 |