content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M) |
---|---|---|
import re
def get_seconds_from_duration(time_str: str) -> int:
"""
This function will convert the TM1 time to seconds
:param time_str: P0DT00H01M43S
:return: int
"""
pattern = re.compile(r'\w(\d+)\w\w(\d+)\w(\d+)\w(\d+)\w')
matches = pattern.search(time_str)
d, h, m, s = matches.groups()
seconds = (int(d) * 86400) + (int(h) * 3600) + (int(m) * 60) + int(s)
return seconds | a8614c0ed6e41c7216ae461ef1fd57319a5995e1 | 3,649,280 |
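A quick sanity check of the parser above, using the TM1-style duration format shown in its docstring (days, hours, minutes, seconds):
seconds = get_seconds_from_duration("P0DT00H01M43S")
assert seconds == 1 * 60 + 43  # 0 days + 0 hours + 1 minute + 43 seconds = 103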
def get_protecteds(object: Object) -> Dictionary:
"""Gets the protected namespaces of an object."""
return object.__protecteds__ | 479f6ee0a9334107f67517fd6bb2ad55a915d0ac | 3,649,281 |
def pah2area(_position, angle, height, shape):
"""Calculates area from position, angle, height depending on shape."""
if shape == "PseudoVoigt":
fwhm = np.tan(angle) * height
area = (height * (fwhm * np.sqrt(np.pi / ln2))
/ (1 + np.sqrt(1 / (np.pi * ln2))))
return area
elif shape == "DoniachSunjic":
fwhm = np.tan(angle) * height
area = height / pure_ds(0, amplitude=1, center=0, fwhm=fwhm, asym=0.5)
return area
elif shape == "Voigt":
fwhm = np.tan(angle) * height
area = height / voigt(0, amplitude=1, center=0, fwhm=fwhm, fwhm_l=0.5)
return area
raise NotImplementedError | 79e239de4ee8b356152717f7a9a301f062cc6c71 | 3,649,282 |
def config(key, values, axis=None):
"""Class decorator to parameterize the Chainer configuration.
This is a specialized form of `parameterize` decorator to parameterize
the Chainer configuration. For all `time_*` functions and `setup` function
in the class, this decorator wraps the function to be called inside the
context where specified Chainer configuration set.
This decorator adds parameter axis with the name of the configuration
by default. You can change the axis name by passing axis parameter.
You cannot apply `parameterize` decorator to the class already decorated
by this decorator. If you want to use `parameterize` along with this
decorator, make `parameterize` the most inner (i.e., the closest to the
class declaration) decorator.
Example of usage is as follows:
>>> @config('autotune', [True, False])
... class ConvolutionBenchmark(object):
... def time_benchmark(self):
... ...
"""
axis = key if axis is None else axis
def _wrap_class(klass):
assert isinstance(klass, type)
if not hasattr(chainer.config, key):
print(
'''Notice: Configuration '{}' unknown to this version of '''
'''Chainer'''.format(key))
return _inject_config(klass, axis, key, values)
return _wrap_class | a2ab11ca245647c6a5267b2f1c62a55b9aa1b96b | 3,649,284 |
import numpy as np
def dot2states(dot):
"""Translate a dot-bracket string in a sequence of numerical states"""
dot = dot.replace(".", "0") # Unpaired
dot = dot.replace("(", "1") # Paired
dot = dot.replace(")", "1") # Paired
return np.array(list(dot), dtype=int) | 655b57749a39d04f62aae20ac16ffb1f31b0bc71 | 3,649,286 |
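A minimal illustration of the dot-bracket encoder above on a short hypothetical string:
states = dot2states("((..))")
# unpaired positions ('.') map to 0, paired positions ('(' or ')') map to 1
assert states.tolist() == [1, 1, 0, 0, 1, 1]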
import numpy as np
def mel_to_hz(mels, htk=False):
"""Convert mel bin numbers to frequencies
Examples
--------
>>> librosa.mel_to_hz(3)
200.
>>> librosa.mel_to_hz([1,2,3,4,5])
array([ 66.667, 133.333, 200. , 266.667, 333.333])
Parameters
----------
mels : np.ndarray [shape=(n,)], float
mel bins to convert
htk : bool
use HTK formula instead of Slaney
Returns
-------
frequencies : np.ndarray [shape=(n,)]
input mels in Hz
See Also
--------
hz_to_mel
"""
mels = np.asanyarray(mels)
if htk:
return 700.0 * (10.0**(mels / 2595.0) - 1.0)
# Fill in the linear scale
f_min = 0.0
f_sp = 200.0 / 3
freqs = f_min + f_sp * mels
# And now the nonlinear scale
min_log_hz = 1000.0 # beginning of log region (Hz)
min_log_mel = (min_log_hz - f_min) / f_sp # same (Mels)
logstep = np.log(6.4) / 27.0 # step size for log region
if mels.ndim:
# If we have vector data, vectorize
log_t = (mels >= min_log_mel)
freqs[log_t] = min_log_hz * np.exp(logstep * (mels[log_t] - min_log_mel))
elif mels >= min_log_mel:
# If we have scalar data, check directly
freqs = min_log_hz * np.exp(logstep * (mels - min_log_mel))
return freqs | 93e9d115d9ef0a58420c796737b96f4460f44ceb | 3,649,287 |
import numpy as np
def load_images(images):
"""
Decodes batch of image bytes and returns a 4-D numpy array.
"""
batch = []
for image in images:
img_np = readImage(image)
batch.append(img_np)
batch_images = np.concatenate(batch)
logger.info('batch_images.shape:%s'%(str(batch_images.shape)))
return batch_images | ae4f18488cdfa4980f849f2f7110f9963381e6e7 | 3,649,288 |
def stats_hook():
"""
decorator to register a stats hook.
:raises InvalidStatsHookTypeError: invalid stats hook type error.
:returns: stats hook class.
:rtype: type
"""
def decorator(cls):
"""
decorates the given class and registers an instance
of it into available stats hooks.
:param type cls: stats hook class.
:returns: stats hook class.
:rtype: type
"""
instance = cls()
stat_services.register_hook(instance)
return cls
return decorator | de386a9bff39c4060833f11100ec538b6d2b8d68 | 3,649,290 |
def safe_infer(node, context=None):
"""Return the inferred value for the given node.
Return None if inference failed or if there is some ambiguity (more than
one node has been inferred).
"""
try:
inferit = node.infer(context=context)
value = next(inferit)
except exceptions.InferenceError:
return None
try:
next(inferit)
return None # None if there is ambiguity on the inferred node
except exceptions.InferenceError:
return None # there is some kind of ambiguity
except StopIteration:
return value | 928c1d2e3c2813cc389085ea6bd3ccd50709effe | 3,649,291 |
import functools
def catch_exception(func):
"""
Returns:
object:
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
worker = kwargs['error_catcher']
try:
return func(*args, **kwargs)
except Exception as e:
print('stdout:', worker.stdout.read().decode("utf-8"))
print('stderr:', worker.stderr.read().decode("utf-8"))
raise
return wrapper | be579d9b6723e5025b7b70f38c83bcedc30196a5 | 3,649,292 |
def RandomCrop(parent, new_shape, name=""):
"""\
Crop an image layer at a random location with size ``[height, width]``.
:param parent: parent layer
:param new_shape: [height, width] size
:param name: name of the output layer
:return: CropRandom layer
"""
return _eddl.RandomCrop(parent, new_shape, name) | 6078cf9f6daf73876d3503a1e77523df079c41d1 | 3,649,293 |
import time
def get_linear_sys(eqns, params):
"""Gets the linear system corresponding to the symbolic equations
Note that this function only work for models where the left-hand side of
the equations all contain only linear terms with respect to the given model
parameters. For these linear cases, this function will return a matrix
:math:`\\mathbf{A}` and a vector :math:`\\mathbf{v}` such that the given
equations can be written as
.. math::
\\mathbf{A} \\mathbf{x} = \\mathbf{v}
with :math:`\\mathbf{x}` being the column vector of the values of the model
symbols. Normally the matrix will have more rows than columns for over-
determined fitting.
:param eqns: A sequence of ``Eqn`` objects for the equations of the
fitting.
:param params: A sequence of the ``ModelParam`` objects for the parameters
to be fitted.
:returns: The matrix :math:`\\mathbf{A}` and the vector
:math:`\\mathbf{v}`.
:rtype: tuple
:raises ValueError: if the system of equations are not linear.
"""
# We treat the equations one-by-one, write rows of the matrix and
# the vector one-by-one.
n_params = len(params)
n_eqns = len(eqns)
mat = np.zeros((n_eqns, n_params), dtype=np.float64)
vec = np.empty((n_eqns, ), dtype=np.float64)
# Extract the symbols for the parameters and collect the result into a
# dictionary for fast look-up of the location of each symbol.
symbs = {
param.symb: idx
for idx, param in enumerate(params)
}
print('\nForming the matrix and vectors for the linear model...')
start_time = time.process_time()
for idx, eqn in enumerate(eqns):
# First get the vector to the reference value of the equation.
vec[idx] = eqn.ref_val
# Get the symbolic expression.
expr = eqn.modelled_val.simplify().expand()
# Get its terms.
if isinstance(expr, Add):
terms = expr.args
else:
terms = [expr, ]
# Loop over the terms to get the coefficients ahead of the symbols.
for term in terms:
# Split the term into a symbol and a coefficient.
symb, coeff = _get_symb_w_coeff(term)
if symb is None:
# When we are treating a pure number term, we can move it to
# the left-hand side of the equation.
vec[idx] -= coeff
else:
# When we have a symbol, we need to locate it.
try:
col_idx = symbs[symb]
except KeyError:
raise ValueError(
'Unrecognised symbol {!r}'.format(symb)
)
else:
mat[idx, col_idx] += coeff
# Go on to the next term.
continue
# Go on to the next equation.
continue
print(
'Finished: {!s}sec.'.format(time.process_time() - start_time)
)
# Return the matrix and the vector.
return mat, vec | e3ef88c695bcbcd6e7ab1dfbe8ee45ad552e3be7 | 3,649,295 |
def slightly(membership: npt.ArrayLike) -> npt.ArrayLike:
"""
Applies the 'slightly' linguistic hedge to a membership function
(a composite of 'plus' and 'not very', renormalized and then intensified).
:param membership: Membership function to be modified.
>>> from fuzzy_expert.operators import slightly
>>> slightly([0, 0.25, 0.5, 0.75, 1])
array([0. , 0.16326531, 0.99696182, 1. , 0. ])
"""
plus_membership: npt.ArrayLike = np.power(membership, 1.25)
not_very_membership: npt.ArrayLike = 1 - np.power(membership, 2)
membership: npt.ArrayLike = np.where(
membership < not_very_membership, plus_membership, not_very_membership
)
membership: npt.ArrayLike = membership / np.max(membership)
return np.where(membership <= 0.5, membership ** 2, 1 - 2 * (1 - membership) ** 2) | eb0e71462c5e3959584970e9e9b84a3dff876d54 | 3,649,296 |
def int_max(int_a, int_b):
"""
max(a, b)
"""
if int_a > int_b:
return int_a
else:
return int_b | 5ae0df8ff7bdc5539d127fad4df03b6215d9380f | 3,649,297 |
def extract_depth_map(frame):
"""
Extract front-view lidar camera projection for ground-truth depth maps
"""
(range_images, camera_projections, range_image_top_pose) = frame_utils.parse_range_image_and_camera_projection(frame)
for c in frame.context.camera_calibrations:
if dataset_pb2.CameraName.Name.Name(c.name) == 'FRONT':
extrinsic = np.reshape(
np.array(c.extrinsic.transform, np.float32), [4, 4])
range_images_cartesian = convert_range_image_to_cartesian(frame,range_images,range_image_top_pose)
cam_projection = (np.array(camera_projections[1][0].data).reshape(64,2650,6))[np.newaxis,...]
depth = range_image_utils.build_camera_depth_image(range_images_cartesian[1][np.newaxis,...],extrinsic[np.newaxis,...],cam_projection ,[1280,1920],1)
p = np.where(depth[0]!= 0)
v = np.extract(depth[0]!=0,depth[0])
grid_w,grid_h = np.mgrid[0:1280,0:1920]
depth_map = griddata(p, v, (grid_w, grid_h), method='nearest')
return depth_map[0:1280:4,0:1920:4] | 2568d8563e256bde6c5df5c3bb34038b57993a1b | 3,649,298 |
from .functions import express
def cross(vect1, vect2):
"""
Returns cross product of two vectors.
Examples
========
>>> from sympy.vector import CoordSys3D
>>> from sympy.vector.vector import cross
>>> R = CoordSys3D('R')
>>> v1 = R.i + R.j + R.k
>>> v2 = R.x * R.i + R.y * R.j + R.z * R.k
>>> cross(v1, v2)
(-R.y + R.z)*R.i + (R.x - R.z)*R.j + (-R.x + R.y)*R.k
"""
if isinstance(vect1, Add):
return VectorAdd.fromiter(cross(i, vect2) for i in vect1.args)
if isinstance(vect2, Add):
return VectorAdd.fromiter(cross(vect1, i) for i in vect2.args)
if isinstance(vect1, BaseVector) and isinstance(vect2, BaseVector):
if vect1._sys == vect2._sys:
n1 = vect1.args[0]
n2 = vect2.args[0]
if n1 == n2:
return Vector.zero
n3 = ({0,1,2}.difference({n1, n2})).pop()
sign = 1 if ((n1 + 1) % 3 == n2) else -1
return sign*vect1._sys.base_vectors()[n3]
try:
v = express(vect1, vect2._sys)
except ValueError:
return Cross(vect1, vect2)
else:
return cross(v, vect2)
if isinstance(vect1, VectorZero) or isinstance(vect2, VectorZero):
return Vector.zero
if isinstance(vect1, VectorMul):
v1, m1 = next(iter(vect1.components.items()))
return m1*cross(v1, vect2)
if isinstance(vect2, VectorMul):
v2, m2 = next(iter(vect2.components.items()))
return m2*cross(vect1, v2)
return Cross(vect1, vect2) | 8857f53a3db4066b2be6cd0fc3443b89a9c97022 | 3,649,299 |
def get_cognates(wordlist, ref):
"""
Retrieve cognate sets from a wordlist.
"""
etd = wordlist.get_etymdict(ref=ref)
cognates = {}
if ref == "cogids":
for cogid, idxs_ in etd.items():
idxs, count = {}, 0
for idx, language in zip(idxs_, wordlist.cols):
if idx:
tks = wordlist[idx[0], "tokens"]
cogidx = wordlist[idx[0], ref].index(cogid)
idxs[language] = " ".join([
x.split("/")[1] if "/" in x else x for x in
tks.n[cogidx]])
count += 1
else:
idxs[language] = ""
if count >= 2:
cognates[cogid] = idxs
elif ref == "cogid":
for cogid, idxs_ in etd.items():
idxs, count = {}, 0
for idx, language in zip(idxs_, wordlist.cols):
if idx:
tks = wordlist[idx[0], "tokens"]
idxs[language] = " ".join([x.split("/")[1] if "/" in x
else x for x in tks])
count += 1
else:
idxs[language] = ""
if count >= 2:
cognates[cogid] = idxs
return cognates | bf64ecb8f2182dba06f0b28b384c0e66ba78d49e | 3,649,300 |
def get_actress_string(_movie, s):
"""Return the string of the actress names as per the naming convention specified
Takes in the html contents to filter out the actress names"""
a_list = get_actress_from_html(_movie, s)
actress_string = ''
# if javlibrary returns no actresses then we'll just say whatever we specified
if len(a_list) == 0:
return s['name-for-actress-if-blank']
for actress in a_list:
actress_string += actress + s['delimiter-between-multiple-actresses']
# strip the last delimiter, we don't want it
actress_string = actress_string[0:-1]
return actress_string | bf9bd07bfc6c3e5bac87c52f4c6cba113a607b2d | 3,649,301 |
def get_lessons_of_day(day):
"""
Returns the lessons as a list of strings for the given day webelement
:param day: day webelement
:return: list with the lesson texts for the day
"""
day_lessons = []
to_iterate = day.find_elements_by_class_name('event-content')
to_iterate.reverse()
for lesson in to_iterate:
text = lesson.text
day_lessons.append(text)
return day_lessons | 47b3ba18fd530ac8e724eb91e4b4d2886a008ac5 | 3,649,302 |
def ising_hamiltonian(n_qubits, g, h):
""" Construct the hamiltonian matrix of Ising model.
Args:
n_qubits: int, Number of qubits
g: float, Transverse magnetic field
h: float, Longitudinal magnetic field
"""
ham_matrix = 0
# Nearest-neighbor interaction
spin_coupling = jnp.kron(PauliBasis[3], PauliBasis[3])
for i in range(n_qubits - 1):
ham_matrix -= jnp.kron(jnp.kron(jnp.eye(2 ** i), spin_coupling),
jnp.eye(2 ** (n_qubits - 2 - i)))
ham_matrix -= jnp.kron(jnp.kron(PauliBasis[3], jnp.eye(2 ** (n_qubits - 2))),
PauliBasis[3]) # Periodic B.C
# Transverse magnetic field
for i in range(n_qubits):
ham_matrix -= g * jnp.kron(jnp.kron(jnp.eye(2 ** i), PauliBasis[1]),
jnp.eye(2 ** (n_qubits - 1 - i)))
# Longitudinal magnetic field
for i in range(n_qubits):
ham_matrix -= h * jnp.kron(jnp.kron(jnp.eye(2 ** i), PauliBasis[3]),
jnp.eye(2 ** (n_qubits - 1 - i)))
return ham_matrix | f485ac686001c0d9790276f19f5ba79b6de8db9c | 3,649,305 |
import shutil
def test_main(
mock_building_parser,
mock_return_logger,
config_dict,
db_connection,
monkeypatch,
test_dir,
):
"""Test main()"""
def mock_parser(*args, **kwargs):
parser = Namespace(
cache_dir=(test_dir / "test_outputs" / "test_outputs_uniprot"),
nodelete_cache=False,
config=None,
classes=None,
database="fake_database_path",
ec=True,
force=False,
families=None,
genbank_accessions=None,
genera=None,
get_pages=True,
kingdoms=None,
log=None,
nodelete=False,
output=None,
retries=10,
sequence=True,
seq_update=True,
subfamilies=True,
species=None,
strains=None,
streamline=None,
timeout=45,
uniprot_accessions=None,
uniprot_batch_size=150,
uniprot_data=None,
verbose=False,
pdb=True,
skip_uniprot_accessions=None,
use_uniprot_cache=None,
)
return parser
def mock_return_none(*args, **kwargs):
return
def mock_connect_existing_db(*args, **kwargs):
return db_connection, None, "cache_dir"
def mock_get_expansion_configuration(*args, **kwargs):
return config_dict, set(), set(), set(), dict(), set()
def mock_get_genbank_accessions(*args, **kwargs):
return {1: 1, 2: 2, 3: 3}
def mock_get_uniprot_data(*args, **kwargs):
return {1: {'ec': {1,2,3}, 'pdb': {1,2,3}}, 2: {'ec': {1,2,3}, 'pdb': {1,2,3}}, 3: {'ec': {1,2,3}, 'pdb': {1,2,3}}}, {1, 2, 3}
monkeypatch.setattr(uniprot_parser, "build_parser", mock_building_parser)
monkeypatch.setattr(ArgumentParser, "parse_args", mock_parser)
monkeypatch.setattr(saint_logger, "config_logger", mock_return_logger)
monkeypatch.setattr(get_uniprot_data, "connect_existing_db", mock_connect_existing_db)
monkeypatch.setattr("cazy_webscraper.expand.uniprot.get_uniprot_data.make_output_directory", mock_return_none)
monkeypatch.setattr(get_uniprot_data, "get_expansion_configuration", mock_get_expansion_configuration)
monkeypatch.setattr(sql_interface, "log_scrape_in_db", mock_return_none)
monkeypatch.setattr(get_selected_gbks, "get_genbank_accessions", mock_get_genbank_accessions)
monkeypatch.setattr(get_uniprot_data, "get_uniprot_accessions", mock_get_genbank_accessions)
monkeypatch.setattr(get_uniprot_data, "get_uniprot_data", mock_get_uniprot_data)
monkeypatch.setattr(get_uniprot_data, "add_uniprot_accessions", mock_return_none)
monkeypatch.setattr(get_uniprot_data, "add_ec_numbers", mock_return_none)
monkeypatch.setattr(get_uniprot_data, "add_pdb_accessions", mock_return_none)
monkeypatch.setattr(cazy_webscraper, "closing_message", mock_return_none)
output = test_dir / "test_outputs" / "test_outputs_uniprot"
output.mkdir(parents=True, exist_ok=True)
get_uniprot_data.main()
shutil.rmtree((test_dir / "test_outputs" / "test_outputs_uniprot"))
output.mkdir(parents=True, exist_ok=True) | 5155b914d1f0320abc7991b7cc86e30664e75b53 | 3,649,306 |
from inspect import signature
def _slots_from_params(func):
"""List out slot names based on the names of parameters of func
Usage: __slots__ = _slots_from_params(__init__)
"""
funcsig = signature(func)
slots = list(funcsig.parameters)
slots.remove('self')
return slots | fc55665a2bfa0ee27545734699f8527af5d57e6d | 3,649,307 |
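An illustrative use of the helper above inside a class body, following its docstring (the Point class here is a made-up example):
class Point:
    def __init__(self, x, y):
        self.x = x
        self.y = y
    # slots derived from the __init__ parameters, minus 'self'
    __slots__ = _slots_from_params(__init__)

assert Point.__slots__ == ['x', 'y']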
import time
import requests
import re
def gnd_to_wd_id(gnd_id):
"""
Searches for a Wikidata entry which contains
the provided GND ID. Outputs the Wikidata ID (if found).
---------
gnd_id : str
GND ID of entity.
Returns
-----------
str.
"""
url = 'https://query.wikidata.org/bigdata/namespace/wdq/sparql'
try:
time.sleep(2.0)
data = requests.get(url, params={'query': "SELECT ?Entity WHERE {?Entity wdt:P227 '%s'.}" % (gnd_id),
'format': 'json'}).json()
result = []
for item in data['results']['bindings']:
if result == []:
result.append(item['Entity']['value'])
wd_id = re.findall(r"Q\d*", str(result))
return wd_id[0]
except:
return "Exception" | acf482aa05eac7c5307529643f30b7ef6880c55b | 3,649,308 |
from typing import Optional
def get_instance_category(entry) -> Optional[str]:
"""Determines the instance category for which the entry was submitted.
If it does not match the config of any instance category, returns None.
"""
instance_categories = RidehailEnv.DIMACS_CONFIGS.ALL_CONFIGS
entry_config = entry["config"]
keys_to_check = list(entry_config.keys())
try:
keys_to_check.remove("nickname")
except:
return None
for category_name, category_config in instance_categories.items():
if all((entry_config[key] == category_config[key] for key in keys_to_check)):
return category_name
return None | ad1208f1bbc93b3579eb1e82f0752671d856f501 | 3,649,309 |
def extendheader(table, fields):
"""
Extend header row in the given table. E.g.::
>>> import petl as etl
>>> table1 = [['foo'],
... ['a', 1, True],
... ['b', 2, False]]
>>> table2 = etl.extendheader(table1, ['bar', 'baz'])
>>> table2
+-----+-----+-------+
| foo | bar | baz |
+=====+=====+=======+
| 'a' | 1 | True |
+-----+-----+-------+
| 'b' | 2 | False |
+-----+-----+-------+
See also :func:`petl.transform.headers.setheader`,
:func:`petl.transform.headers.pushheader`.
"""
return ExtendHeaderView(table, fields) | 352fc187e5778f415221b73c179cff496da9b8a5 | 3,649,310 |
def get_teamcount():
"""Get a count of teams."""
#FINISHED FOR SASO
teamlist = get_list_of_teams()
return len(teamlist) | 512dd11ff27600d91a8e6ee461b3f1e761604734 | 3,649,311 |
import numpy as np
from scipy.sparse import csr_matrix
def make_adjacencyW(I, D, sigma):
"""Create adjacency matrix with a Gaussian kernel.
Args:
I (numpy array): for each vertex the ids to its nnn linked vertices
+ first column of identity.
D (numpy array): for each data the l2 distances to its nnn linked vertices
+ first column of zeros.
sigma (float): Bandwith of the Gaussian kernel.
Returns:
csr_matrix: affinity matrix of the graph.
"""
V, k = I.shape
k = k - 1
indices = np.reshape(np.delete(I, 0, 1), (1, -1))
indptr = np.multiply(k, np.arange(V + 1))
def exp_ker(d):
return np.exp(-d / sigma ** 2)
exp_ker = np.vectorize(exp_ker)
res_D = exp_ker(D)
data = np.reshape(np.delete(res_D, 0, 1), (1, -1))
adj_matrix = csr_matrix((data[0], indices[0], indptr), shape=(V, V))
return adj_matrix | 4e310c5677d66b7fef66db5f96eda1c9bf8efdc7 | 3,649,312 |
def blackbody2d(wavelengths, temperature):
"""
Planck function evaluated for a vector of wavelengths in units of meters
and temperature in units of Kelvin
Parameters
----------
wavelengths : `~numpy.ndarray`
Wavelength array in units of meters
temperature : `~numpy.ndarray`
Temperature in units of Kelvin
Returns
-------
pl : `~numpy.ndarray`
Planck function evaluated at each wavelength
"""
return blackbody_lambda(wavelengths, temperature) | 168c9e951350e3c93ef36cf95ec3e7a335d06102 | 3,649,313 |
def bookShop():
"""
This program solves the following exercise: Book Shop
Link: https://cses.fi/problemset/task/1158
It returns the maximum number of pages that can be obtained by buying
books, given the price and page count of each available book and the
amount of money available.
"""
inputLine = input()
inputArray = inputLine.split()
inputArray = [int(x) for x in inputArray]
numBooks = inputArray[0]
totalPrice = inputArray[1]
inputLine = input()
inputArray = inputLine.split()
inputArray = [int(x) for x in inputArray]
prices = inputArray
inputLine = input()
inputArray = inputLine.split()
inputArray = [int(x) for x in inputArray]
pages = inputArray
bag = [[0 for y in range(totalPrice + 1)] for x in range(numBooks + 1)]
for i in range(1, len(bag)):
price = prices[i - 1]
page = pages[i - 1]
for j in range(1, len(bag[0])):
if j - price < 0:
bag[i][j] = bag[i - 1][j]
elif bag[i - 1][j - price] + page > bag[i - 1][j]:
bag[i][j] = bag[i - 1][j - price] + page
else:
bag[i][j] = bag[i - 1][j]
return bag[-1][-1] | 52f2b3ca84c7d6db529f51e2c05ad4767d4466c7 | 3,649,314 |
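Since the solver above reads its instance from stdin, here is a minimal sketch of the same 0/1-knapsack recurrence on an in-memory instance (the sample values below are illustrative):
def max_pages(prices, pages, budget):
    # dp[j] = maximum pages achievable with total cost at most j
    dp = [0] * (budget + 1)
    for price, page in zip(prices, pages):
        for j in range(budget, price - 1, -1):  # iterate backwards so each book is used at most once
            dp[j] = max(dp[j], dp[j - price] + page)
    return dp[budget]

assert max_pages([4, 8, 5, 3], [5, 12, 8, 1], budget=10) == 13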
import mpmath
def pdf(x, nu, sigma):
"""
PDF for the Rice distribution.
"""
if x <= 0:
return mpmath.mp.zero
with mpmath.extradps(5):
x = mpmath.mpf(x)
nu = mpmath.mpf(nu)
sigma = mpmath.mpf(sigma)
sigma2 = sigma**2
p = ((x / sigma2) * mpmath.exp(-(x**2 + nu**2)/(2*sigma2)) *
mpmath.besseli(0, x*nu/sigma2))
return p | b2d96bc19fb61e5aaf542b916d06c11a0e3dea46 | 3,649,315 |
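A small numerical check of the Rice PDF above: with nu = 0 it reduces to a Rayleigh density, so pdf(1, 0, 1) should equal exp(-1/2).
import mpmath
assert mpmath.almosteq(pdf(1, 0, 1), mpmath.exp(-0.5))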
import typing
def get_310_prob(score_prob_dct: dict) -> typing.Dict[str, float]:
"""get home win, draw, away win prob"""
prob = {}
result_dct = get_score_pairs(0)
type_dict = ['home_win', 'draw', 'away_win']
for i in type_dict:
prob[i] = get_one_prob(score_prob_dct, result_dct, i)
sum_value = float(sum(prob.values()))
if sum_value != 1:
avg_value = round((1 - sum_value) / 3, 2)
prob['home_win'] += avg_value
prob['draw'] += avg_value
prob['away_win'] += avg_value
return prob | c003e75796b6f3e67d6558f59492a9db065c6a51 | 3,649,316 |
def load_project_data(storage):
"""Load project data using provided open_func and project directory."""
# Load items and extractors from project
schemas = storage.open('items.json')
extractors = storage.open('extractors.json')
# Load spiders and templates
spider_loader = SpiderLoader(storage)
spiders = {}
for spider_name in spider_loader.spider_names:
spider = spider_loader[spider_name]
crawler = IblSpider(spider_name, spider, schemas, extractors,
Settings())
spiders[spider_name] = (crawler, spider)
return schemas, extractors, spiders | c0f8e2b339a21bf4f73dcba098e992e550005098 | 3,649,317 |
import json
def home():
"""
Route to display home page and form to receive text from user for speech synthesis.
"""
form = TextToSpeechForm()
# Instantiates a client
client = texttospeech.TextToSpeechClient()
# Get the language list
voices = client.list_voices()
voice_codes_list = list(dict.fromkeys([voice.language_codes[0] for voice in voices.voices]))
language_list = [(ind + 1, voice) for ind, voice in enumerate(voice_codes_list)]
if request.method == 'POST':
lang = dict(language_list).get(int(form.language_options.data))
gender = dict([(1, texttospeech.SsmlVoiceGender.MALE),
(2, texttospeech.SsmlVoiceGender.FEMALE)]).get(int(form.gender_options.data))
messages = json.dumps({'text': form.text_field.data,
'language': lang,
'gender': gender})
return redirect(url_for('.translate', messages=messages))
return render_template('main.html', form=form) | 626dc2cde1dc326034772acb8b87cb35621c3e3f | 3,649,318 |
def load_c6_file(filename, is_radar):
"""
Loads ice scattering LUTs from a file (based on Yang et al., JAS, 2013).
Parameters
----------
filename: str
The name of the file storing the Mie scattering parameters
is_radar: bool
If True, the first LUT column is treated as the frequency,
otherwise, wavelength.
Returns
-------
my_df: xarray.Dataset
The xarray Dataset storing the scattering data, including
descriptive metadata.
"""
if is_radar is True:
my_df = pd.read_csv(filename,
names=["frequency", "p_diam", "p_diam_eq_A", "p_diam_eq_V",
"V", "A", "beta_p", "scat_p", "alpha_p", "beta_p_cross"])
else:
my_df = pd.read_csv(filename,
names=["wavelength", "p_diam", "p_diam_eq_A", "p_diam_eq_V",
"V", "A", "beta_p", "scat_p", "alpha_p", "beta_p_cross"])
my_df["alpha_p"] = my_df["alpha_p"] * 1e-12
my_df["beta_p"] = my_df["beta_p"] * 1e-12
my_df["beta_p_cross"] = my_df["beta_p_cross"] * 1e-12
my_df["scat_p"] = my_df["scat_p"] * 1e-12
my_df["p_diam"] = 1e-6 * my_df["p_diam"]
my_df["p_diam_eq_A"] = 1e-6 * my_df["p_diam_eq_A"]
my_df["p_diam_eq_V"] = 1e-6 * my_df["p_diam_eq_V"]
my_df["A"] = my_df["A"] * 1e-12
my_df["V"] = my_df["V"] * 1e-18
my_df = my_df.to_xarray()
if is_radar is True:
my_df["frequency"].attrs["units"] = "GHz"
my_df["frequency"].attrs["long_name"] = "Pulse frequency"
my_df["frequency"].attrs["standard_name"] = "Frequency"
else:
my_df["wavelength"].attrs["units"] = "microns"
my_df["wavelength"].attrs["long_name"] = "Wavelength of beam"
my_df["wavelength"].attrs["standard_name"] = "Wavelength"
my_df["p_diam"].attrs["units"] = "meters"
my_df["p_diam"].attrs["long_name"] = "Maximum dimension of the particle"
my_df['p_diam'].attrs["standard_name"] = "Maximum dimension"
my_df["p_diam_eq_A"].attrs["units"] = "meters"
my_df["p_diam_eq_A"].attrs["long_name"] = "Diameter of equivalent projected area sphere"
my_df['p_diam_eq_A'].attrs["standard_name"] = "Diameter of equivalent A sphere"
my_df["p_diam_eq_V"].attrs["units"] = "meters"
my_df["p_diam_eq_V"].attrs["long_name"] = "Diameter of equivalent volume sphere"
my_df['p_diam_eq_V'].attrs["standard_name"] = "Diameter of equivalent V sphere"
my_df["A"].attrs["units"] = "meters^2"
my_df["A"].attrs["long_name"] = "Projected area of particle"
my_df['A'].attrs["standard_name"] = "Projected area"
my_df["V"].attrs["units"] = "meters^3"
my_df["V"].attrs["long_name"] = "Particle volume"
my_df['V'].attrs["standard_name"] = "Volume"
my_df["scat_p"].attrs["units"] = "microns^2"
my_df["scat_p"].attrs["long_name"] = "Scattering cross section"
my_df["scat_p"].attrs["standard_name"] = "Scat_cross_section"
my_df["beta_p"].attrs["units"] = "meters^2"
my_df["beta_p"].attrs["long_name"] = "Backscattering cross section"
my_df["beta_p"].attrs["standard_name"] = "Scat_cross_section_back"
my_df["alpha_p"].attrs["units"] = "meters^2"
my_df["alpha_p"].attrs["long_name"] = "Extinction cross section"
my_df["alpha_p"].attrs["standard_name"] = "Ext_cross_section"
my_df["beta_p_cross"].attrs["units"] = "meters^2"
my_df["beta_p_cross"].attrs["long_name"] = "Cross-polar backscattering cross section"
my_df["beta_p_cross"].attrs["standard_name"] = "Scat_cross_section_back_crosspol"
return my_df | c07256fde7ab5eac577caae2289a5d3d0dff583e | 3,649,319 |
import typing
import click
import copy
def gwrite(document: vp.Document, output: typing.TextIO, profile: str):
"""
Write gcode or other ascii files for the vpype pipeline.
The output format can be customized by the user heavily to an extent that you can also output most known
non-gcode ascii text files.
"""
gwrite_config = vp.CONFIG_MANAGER.config["gwrite"]
# If no profile was provided, try to use a default
if not profile:
# Try to get the default profile from the config
if "default_profile" in gwrite_config:
profile = gwrite_config["default_profile"]
else:
raise click.BadParameter(
"no gwrite profile provided on the commandline and no default gwrite "
+ "profile configured in the vpype configuration. This can be done using "
+ 'the "default_default" key in the "gwrite" section'
)
# Check that the profile is actually there, we can be sure that the `gwrite`
# part exists as there are several default profiles.
if profile not in gwrite_config:
profiles = [p for p in gwrite_config.keys() if p != "default_profile"]
raise click.BadParameter(
"gwrite profile "
+ profile
+ " not found in vpype configuration. Available gwrite profiles: "
+ ", ".join(profiles)
)
# Read the config for the profile from the main vpype
config = gwrite_config[profile]
document_start = config.get("document_start", None)
document_end = config.get("document_end", None)
layer_start = config.get("layer_start", None)
layer_end = config.get("layer_end", None)
layer_join = config.get("layer_join", None)
line_start = config.get("line_start", None)
line_end = config.get("line_end", None)
line_join = config.get("line_join", None)
segment_first = config.get("segment_first", None)
segment = config.get("segment", None)
segment_last = config.get("segment_last", None)
unit = config.get("unit", "mm")
offset_x = config.get("offset_x", 0.0)
offset_y = config.get("offset_y", 0.0)
scale_x = config.get("scale_x", 1.0)
scale_y = config.get("scale_y", 1.0)
# transform the document according to the desired parameters
orig_document = document
document = copy.deepcopy(document) # do NOT affect the pipeline's document
unit_scale = vp.convert_length(unit)
document.scale(scale_x / unit_scale, scale_y / unit_scale)
document.translate(offset_x, offset_y)
invert_x = config.get("invert_x", False)
invert_y = config.get("invert_y", False)
# transform the document according to inversion parameters
if invert_x or invert_y:
document = invert_axis(document, invert_x, invert_y)
# process file
filename = output.name
if document_start is not None:
output.write(document_start.format(filename=filename))
last_x = 0
last_y = 0
xx = 0
yy = 0
lastlayer_index = len(document.layers.values()) - 1
for layer_index, layer_id in enumerate(document.layers):
layer = document.layers[layer_id]
if layer_start is not None:
output.write(
layer_start.format(
x=last_x,
y=last_y,
ix=xx,
iy=yy,
index=layer_index,
index1=layer_index + 1,
layer_index=layer_index,
layer_index1=layer_index + 1,
layer_id=layer_id,
filename=filename,
)
)
lastlines_index = len(layer) - 1
for lines_index, line in enumerate(layer):
if line_start is not None:
output.write(
line_start.format(
x=last_x,
y=last_y,
ix=xx,
iy=yy,
index=lines_index,
index1=lines_index + 1,
lines_index=lines_index,
lines_index1=lines_index + 1,
layer_index=layer_index,
layer_index1=layer_index + 1,
layer_id=layer_id,
filename=filename,
)
)
segment_last_index = len(line) - 1
for segment_index, seg in enumerate(line):
x = seg.real
y = seg.imag
dx = x - last_x
dy = y - last_y
idx = int(round(x - xx))
idy = int(round(y - yy))
xx += idx
yy += idy
if segment_first is not None and segment_index == 0:
seg_write = segment_first
elif segment_last is not None and segment_index == segment_last_index:
seg_write = segment_last
else:
seg_write = segment
if seg_write is not None:
output.write(
seg_write.format(
x=x,
y=y,
dx=dx,
dy=dy,
_x=-x,
_y=-y,
_dx=-dx,
_dy=-dy,
ix=xx,
iy=yy,
idx=idx,
idy=idy,
index=segment_index,
index1=segment_index + 1,
segment_index=segment_index,
segment_index1=segment_index + 1,
lines_index=lines_index,
lines_index1=lines_index + 1,
layer_index=layer_index,
layer_index1=layer_index + 1,
layer_id=layer_id,
filename=filename,
)
)
last_x = x
last_y = y
if line_end is not None:
output.write(
line_end.format(
x=last_x,
y=last_y,
ix=xx,
iy=yy,
index=lines_index,
index1=lines_index + 1,
lines_index=lines_index,
lines_index1=lines_index + 1,
layer_index=layer_index,
layer_index1=layer_index + 1,
layer_id=layer_id,
filename=filename,
)
)
if line_join is not None and lines_index != lastlines_index:
output.write(
line_join.format(
x=last_x,
y=last_y,
ix=xx,
iy=yy,
index=lines_index,
index1=lines_index + 1,
lines_index=lines_index,
lines_index1=lines_index + 1,
layer_index=layer_index,
layer_index1=layer_index + 1,
layer_id=layer_id,
filename=filename,
)
)
if layer_end is not None:
output.write(
layer_end.format(
x=last_x,
y=last_y,
ix=xx,
iy=yy,
index=layer_index,
index1=layer_index + 1,
layer_index=layer_index,
layer_index1=layer_index + 1,
layer_id=layer_id,
filename=filename,
)
)
if layer_join is not None and layer_index != lastlayer_index:
output.write(
layer_join.format(
x=last_x,
y=last_y,
ix=xx,
iy=yy,
index=layer_index,
index1=layer_index + 1,
layer_index=layer_index,
layer_index1=layer_index + 1,
layer_id=layer_id,
filename=filename,
)
)
if document_end is not None:
output.write(document_end.format(filename=filename))
output.flush()
output.close()
info = config.get("info", None)
if info:
print(info)
return orig_document | d5a28fa7542db297d97c6252021bc4f103dea05d | 3,649,320 |
def user_int(arg):
"""
Convert a :class:`~int` to a `USER` instruction.
:param arg: Int that represents instruction arguments.
:return: Fully-qualified `USER` instruction.
"""
return str(arg) | df3f72eac3de12b4c8cbb1ccee5305dc43837bc3 | 3,649,322 |
def timezoneAdjuster(context, dt):
"""Convinience: new datetime with given timezone."""
newtz = ITimezoneFactory(context)
return dt.astimezone(newtz) | ba75bad6b8edfbc3198aad0adc0b1250626b9ce7 | 3,649,323 |
def make_adder(n):
"""Return a function that takes one argument k and returns k + n.
>>> add_three = make_adder(3)
>>> add_three(4)
7
"""
def adder(k):
return k + n
return adder | 64808cb857f7bd17c8c81bfd749ed96efcc88a9f | 3,649,324 |
def IsWritable(Feature):
"""IsWritable(Feature) -> Writable
Parameters:
Feature: str
Return value:
Writable: ctypes.c_int"""
if _at_camera_handle is not None:
return _at_core_lib.AT_IsWritable(_at_camera_handle, Feature) != AT_FALSE
else:
raise AndorError('Andor library not initialized') | bfe4ff1b93595e8b8df11193e04e13b8bd6c39d9 | 3,649,325 |
import torch
from typing import Union
from typing import Tuple
def groupby_apply(
keys: torch.Tensor, values: torch.Tensor, bins: int = 95, reduction: str = "mean", return_histogram: bool = False
) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
"""
Groupby apply for torch tensors
Args:
keys: tensor of groups (``0`` to ``bins``)
values: values to aggregate - same size as keys
bins: total number of groups
reduction: either "mean" or "sum"
return_histogram: if to return histogram on top
Returns:
tensor of size ``bins`` with aggregated values and optionally with counts of values
"""
if reduction == "mean":
reduce = torch.mean
elif reduction == "sum":
reduce = torch.sum
else:
raise ValueError(f"Unknown reduction '{reduction}'")
uniques, counts = keys.unique(return_counts=True)
groups = torch.stack([reduce(item) for item in torch.split_with_sizes(values, tuple(counts))])
reduced = torch.zeros(bins, dtype=values.dtype, device=values.device).scatter(dim=0, index=uniques, src=groups)
if return_histogram:
hist = torch.zeros(bins, dtype=torch.long, device=values.device).scatter(dim=0, index=uniques, src=counts)
return reduced, hist
else:
return reduced | 711acc0cf2eb30e978f7f30686dbf67644d51fb0 | 3,649,326 |
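A toy illustration of the aggregation above; note that the split_with_sizes call implies the values must already be ordered by key, so the keys here are sorted:
import torch

keys = torch.tensor([0, 0, 1, 2, 2])
values = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0])
reduced = groupby_apply(keys, values, bins=4, reduction="mean")
# groups 0, 1 and 2 receive means 1.5, 3.0 and 4.5; the unused bin 3 stays at 0
assert torch.allclose(reduced, torch.tensor([1.5, 3.0, 4.5, 0.0]))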
def cube(x):
"""return x^3"""
return x*x*x | df9aa4330b7cfb1946b3c403935c864a2e7fae7a | 3,649,327 |
def get_candidados(vetor):
"""Retorna o dado dos candidatos"""
lista_retorno = []
for i in vetor:
lista_retorno.append(candidatos[int(i)])
return lista_retorno | 259b921db5d3840ea220b9690f4eca1b84c2d98d | 3,649,328 |
import copy
def get_fmin_tree(f_df, tree):
"""
"""
f = f_df[f_df['F4ratio']>=0].reset_index()
t = copy.deepcopy(tree)
i=0
for node in t.traverse():
if node.children:
l = node.children[0]
r = node.children[1]
lleaves = l.get_leaf_names()
rleaves = r.get_leaf_names()
node_fl = f[f['h2'].isin(lleaves)&f['h1'].isin(rleaves)]
node_fr = f[f['h2'].isin(rleaves)&f['h1'].isin(lleaves)]
for side, node_f, sister_f in [(0,node_fl, node_fr),(1,node_fr, node_fl)]:
if len(node_f) or len(sister_f):
sister_f0 = sister_f.rename(columns={'h1':'h2','h2':'h1'})
sister_f0['F4ratio'] = 0
sister_f0['Z'] = 0
nf = pd.concat([node_f, sister_f0])
#node_f.sort_values('|f|', ascending=False)
#only take h3 with maximum mean '|f|' on this branch
#h3 = node_f.groupby('h3').mean().sort_values('|f|', ascending=False).iloc[0].name
#node_f1 = node_f[node_f['h3']==h3]
child = node.get_children()[side]
#child.add_feature('rscore', summary(node_f1['|f|']))
#child.add_feature('h3', h3)
child.add_feature('branch_f', nf.groupby(['h2','h3']).min().reset_index())
return t | 78da92675edfa2d8b9fd75591d104fbdb6adb369 | 3,649,329 |
import numpy as np
def primes(n):
""" Returns a list of primes < n """
n = int(n)
sieve = [True] * n
for i in np.arange(3, n ** 0.5 + 1, 2, dtype=int):
if sieve[i]:
sieve[i*i::2*i]=[False]*((n-i*i-1)//(2*i)+1)
return [2] + [i for i in np.arange(3,n,2) if sieve[i]] | 91af8e025c688e3b09638c8f00ca67a358e7137d | 3,649,330 |
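A quick check of the sieve above (it returns 2 followed by the odd primes below n):
assert [int(p) for p in primes(20)] == [2, 3, 5, 7, 11, 13, 17, 19]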
def H_TP(Z, T, P):
"""
Enthalpy defined by temperature and pressure (reference state at 300 K and 1 bar)
Z - array of molar composition
T, P - temperature and pressure
Units are specified above
"""
H = RP.ABFLSHdll('TP', T, P*100, Z, 2).h - RP.ABFLSHdll('TP', 300, 100, Z, 2).h
return H | 36078f63478f7582e462f92d991a9941f07308c9 | 3,649,331 |
def test_fallback_round_with_input_n_not_int():
"""
Feature: JIT Fallback
Description: Test round() in graph mode with input x is not int.
Expectation: TypeError.
"""
@ms_function
def foo():
x = round(10.123, 1.0)
return x
with pytest.raises(TypeError) as ex:
foo()
assert "cannot be interpreted as an integer" in str(ex.value) | 3acef5aeaf1fdc40b66e0eede3f6df9d76bb5b9b | 3,649,333 |
def validate_netmask(s):
"""Validate that a dotted-quad ip address is a valid netmask.
>>> validate_netmask('0.0.0.0')
True
>>> validate_netmask('128.0.0.0')
True
>>> validate_netmask('255.0.0.0')
True
>>> validate_netmask('255.255.255.255')
True
>>> validate_netmask(BROADCAST)
True
>>> validate_netmask('128.0.0.1')
False
:param s: String to validate as a dotted-quad notation netmask.
:type s: str
:returns: ``True`` if a valid netmask, ``False`` otherwise.
:raises: TypeError
"""
if validate_ip(s):
mask = bin(ip2network(s))[2:]
# all left most bits must be 1, all right most must be 0
seen0 = False
for c in mask:
if '1' == c:
if seen0:
return False
else:
seen0 = True
return True
else:
return False | 9462f27dc53ad907c8b6ef99db7a09631ca7157b | 3,649,334 |
def translate():
"""
A handler for translating given english word which about digit to chinese
character
Return:
- `JSON`
.. code-block
# message = isError ? reason : "OK"
# output = isError ? '' : <TRANSLATION>
{
message: string,
output: string,
}
"""
ret = {'message': 'OK', 'output': ''}
try:
# Get request words from query string
words = request.args.get('words')
# Raise exception if there is no such query defined
if words is None:
raise MissingWordsQueryException()
# Define our lookup table
myMap = {
'zero': '零',
'one': '一',
'two': '二',
'three': '三',
'four': '四',
'five': '五',
'six': '六',
'seven': '七',
'eight': '八',
'nine': '九',
'ten': '十',
}
# Since there maybe more than one words, loop through those words
for word in words.split(' '):
# Translate word by look up values in our lookup table
output = myMap[word]
# Set word to be output if there are no records at first
# Otherwise, append to the output string
ret['output'] = output \
if (len(ret['output']) == 0) \
else '%s %s' % (ret['output'], output)
except MissingWordsQueryException:
# Setup error message
ret['message'] = 'Missing "words" query string'
ret['output'] = ''
except KeyError:
# Setup error message
ret['message'] = 'Translation error for word |%s|' % word
ret['output'] = ''
# Encode ret in JSON format so that it prints a json string on the web
return jsonify(**ret) | 5e40a724e6d183151f155ec3e951ec6098a92016 | 3,649,335 |
def create_matrix_sars_overlap_between_networks(networks_summary_df, networks_dict):
""" Creates matrix where element (i,j) quantifies the number of common SARS-Cov-2 partners in networks i and j
divided by the total number of SARS-Cov-2 partners in both networks
Args:
networks_summary_df: dataframe, output of make_summary
networks_dict: dictionary of networks
Returns:
A matrix where element (i,j) quantifies the number of common SARS-Cov-2 partners in networks i and j divided by the total number of SARS-Cov-2 partners in both networks
"""
N = len(networks_summary_df)
mat_sars = np.zeros((N,N))
for i in np.arange(N):
for j in np.arange(0,N,1):
# Select networks
paramstring_a = networks_summary_df.loc[networks_summary_df['index']==i].index[0]
paramstring_b = networks_summary_df.loc[networks_summary_df['index']==j].index[0]
network_a = networks_dict[paramstring_a]
network_b = networks_dict[paramstring_b]
# Compute intersection/union
df_a = oi.get_networkx_graph_as_dataframe_of_nodes(network_a)
sars_in_a = set(df_a[df_a['interact_sars_bool']==True].index)
df_b = oi.get_networkx_graph_as_dataframe_of_nodes(network_b)
sars_in_b = set(df_b[df_b['interact_sars_bool']==True].index)
difference = float(len(sars_in_a.difference(sars_in_b)))
mat_sars[i,j] = difference/float(len(sars_in_a))
return(mat_sars) | 4b1d7c36e0781e7875f99cb7ac366ca3063cb77f | 3,649,336 |
from typing import Tuple
def load_forcings_gauge_metadata(path: str) -> Tuple[float, float, float]:
"""
Loads gauge metadata from the header of a CAMELS-USE forcings file.
Parameters
----------
path: str
Path to the forcings file.
Returns
-------
tuple
(gauge latitude, gauge elevation, basin area [m²])
"""
with open(path, 'r') as file:
latitude = float(file.readline())
elevation = float(file.readline())
area = float(file.readline())
return latitude, elevation, area | c91c3bafb83709967d6dd480afd8e53ac9f94445 | 3,649,337 |
import numpy as np
from scipy import special as ss
def transition(measure, N, **measure_args):
""" A, B transition matrices for different measures
measure: the type of measure
legt - Legendre (translated)
legs - Legendre (scaled)
glagt - generalized Laguerre (translated)
lagt, tlagt - previous versions of (tilted) Laguerre with slightly different normalization
"""
# Laguerre (translated)
if measure == 'lagt':
b = measure_args.get('beta', 1.0)
A = np.eye(N) / 2 - np.tril(np.ones((N, N)))
B = b * np.ones((N, 1))
elif measure == 'tlagt':
# beta = 1 corresponds to no tilt
b = measure_args.get('beta', 1.0)
A = (1.-b)/2 * np.eye(N) - np.tril(np.ones((N, N)))
B = b * np.ones((N, 1))
# Generalized Laguerre
# alpha 0, beta small is most stable (limits to the 'lagt' measure)
# alpha 0, beta 1 has transition matrix A = [lower triangular 1]
elif measure == 'glagt':
alpha = measure_args.get('alpha', 0.0)
beta = measure_args.get('beta', 0.01)
A = -np.eye(N) * (1 + beta) / 2 - np.tril(np.ones((N, N)), -1)
B = ss.binom(alpha + np.arange(N), np.arange(N))[:, None]
L = np.exp(.5 * (ss.gammaln(np.arange(N)+alpha+1) - ss.gammaln(np.arange(N)+1)))
A = (1./L[:, None]) * A * L[None, :]
B = (1./L[:, None]) * B * np.exp(-.5 * ss.gammaln(1-alpha)) * beta**((1-alpha)/2)
# Legendre (translated)
elif measure == 'legt':
Q = np.arange(N, dtype=np.float64)
R = (2*Q + 1) ** .5
j, i = np.meshgrid(Q, Q)
A = R[:, None] * np.where(i < j, (-1.)**(i-j), 1) * R[None, :]
B = R[:, None]
A = -A
# LMU: equivalent to LegT up to normalization
elif measure == 'lmu':
Q = np.arange(N, dtype=np.float64)
R = (2*Q + 1)[:, None] # / theta
j, i = np.meshgrid(Q, Q)
A = np.where(i < j, -1, (-1.)**(i-j+1)) * R
B = (-1.)**Q[:, None] * R
# Legendre (scaled)
elif measure == 'legs':
q = np.arange(N, dtype=np.float64)
col, row = np.meshgrid(q, q)
r = 2 * q + 1
M = -(np.where(row >= col, r, 0) - np.diag(q))
T = np.sqrt(np.diag(2 * q + 1))
A = T @ M @ np.linalg.inv(T)
B = np.diag(T)[:, None]
B = B.copy() # Otherwise "UserWarning: given NumPY array is not writeable..." after torch.as_tensor(B)
else:
raise NotImplementedError
return A, B | 8a701ace9b0d73e8f27062084dbf78711b8b4185 | 3,649,338 |
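A minimal shape check for the transition matrices above (HiPPO-style state-space measures); 'legs' is the scaled-Legendre case:
A, B = transition('legs', 4)
assert A.shape == (4, 4) and B.shape == (4, 1)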
import numpy
import pandas
from scipy import interpolate
def interpolate_coord(df, xcol, ycol, step, distcol='d'):
"""
Interpolates x/y coordinates along a line at a fixed distance.
Parameters
----------
df : pandas.DataFrame
xcol, ycol : str
Labels of the columns in ``df`` containing the x- and y-coords,
respectively.
step : int
The spacing between the interpolated points.
distcol : string, optional (default = 'd')
Label of the column where the distance along the line is stored.
Returns
-------
pandas.DataFrame
"""
dist = _linear_distance(df, xcol, ycol)
d_ = numpy.arange(0, numpy.floor(dist.max()), step)
x_interp = interpolate.interp1d(dist, df[xcol])
y_interp = interpolate.interp1d(dist, df[ycol])
return pandas.DataFrame({'d': d_, 'x': x_interp(d_), 'y': y_interp(d_)}) | 6ac736a4f82ffd7c0b3b45027bf7ab17b5d7d71c | 3,649,339 |
def oven_cook_setting_to_str(cook_setting: OvenCookSetting, units: str) -> str:
"""Format OvenCookSetting values nicely."""
cook_mode = cook_setting.cook_mode
cook_state = cook_mode.oven_state
temperature = cook_setting.temperature
modifiers = []
if cook_mode.timed:
modifiers.append(STATE_OVEN_TIMED)
if cook_mode.delayed:
modifiers.append(STATE_OVEN_DELAY)
if cook_mode.probe:
modifiers.append(STATE_OVEN_PROBE)
if cook_mode.sabbath:
modifiers.append(STATE_OVEN_SABBATH)
temp_str = f" ({temperature}{units})" if temperature > 0 else ""
modifier_str = f" ({', '.join(modifiers)})" if modifiers else ""
display_state = oven_display_state_to_str(cook_state)
return f"{display_state}{temp_str}{modifier_str}" | 02d339b82c8dbfb34f4bb6cc968fee83496df04e | 3,649,340 |
def print_location(location: Location) -> str:
"""Render a helpful description of the location in the GraphQL Source document."""
return print_source_location(
location.source, get_location(location.source, location.start)
) | 2c6f0f5e475fdbb14060b55f5698a2548720cc01 | 3,649,341 |
def run_metrics(ground_truth, simulation, measurement_name,users=None,repos=None):
"""
Run all of the assigned metrics for a given measurement.
Inputs:
ground_truth - DataFrame of ground truth data
simulation - DataFrame of simulated data
measurement_name - Name of measurement corresponding to keys of measurement_params
users - list of user IDs for user-centric, node-level measurements
repos - list of repo IDs for repo-centric, node-level measurements
Outputs:
measurement_on_gt - Output of the measurement for the ground truth data
measurement_on_sim - Output of the measurement for the simulation data
metrics_output - Dictionary containing metric results for each metric assigned to the measurement
"""
p = measurement_params[measurement_name]
if p["node_type"] == "user":
nodes = users
else:
nodes = repos
if "filters" in p:
ground_truth = prefilter(ground_truth, p['filters'])
simulation = prefilter(simulation, p['filters'])
#for node-level measurements default to the most active node if a
#list of nodes is not provided
if p["scale"] == "node" and nodes is None:
nodes = ground_truth.groupby([p["node_type"],'event'])["time"].count().reset_index()
nodes = nodes.groupby(p["node_type"])["time"].median().sort_values(ascending=False).reset_index()
nodes = nodes.head(1)[p["node_type"]]
elif p["scale"] != "node":
nodes = ['']
metrics_output = {}
#for node level measurements iterate over nodes
for node in nodes:
if p["scale"] == "node":
metrics_output[node] = {}
#select data for individual node
filter = {p["node_type"]:[node]}
gt = prefilter(ground_truth, filter)
sim = prefilter(simulation, filter)
else:
gt = ground_truth.copy()
sim = simulation.copy()
measurement_function = p['measurement']
empty_df = False
if len(gt.index) > 0:
print("Measuring {} for ground truth data".format(measurement_function.__name__))
measurement_on_gt = measurement_function(gt)
else:
print("Ground truth data frame is empty for {} measurement".format(measurement_function.__name__))
empty_df = True
measurement_on_gt = []
if len(sim.index) > 0:
print("Measuring {} for simulation data".format(measurement_function.__name__))
measurement_on_sim = measurement_function(sim)
else:
print("Simulation data frame is empty for {} measurement".format(measurement_function.__name__))
empty_df = True
measurement_on_sim = []
metrics = p['metrics']
#iterate over the metrics assigned to the measurement
for m, metric_function in metrics.items():
print("Calculating {} for {}".format(metric_function.__name__, measurement_function.__name__))
if not empty_df:
metric = metric_function(measurement_on_gt, measurement_on_sim)
else:
metric = None
if p["scale"] == "node":
metrics_output[node][m] = metric
else:
metrics_output[m] = metric
return measurement_on_gt, measurement_on_sim, metrics_output | 733a04165853cb76f84bd15ff8f38585224ec039 | 3,649,342 |
def checkTrue(comment,res,update=True):
"""
This method is a pass-through for consistency and updating
@ In, comment, string, a comment printed out if it fails
@ In, res, bool, the tested value
@ In, update, bool, optional, if False then don't update results counter
@ Out, res, bool, True if test
"""
if update:
if res:
results["pass"] += 1
else:
print("checking bool",comment,'|',res,'is not True!')
results["fail"] += 1
return res | 4f8c0cf99921477e7187178d5576f2df03881417 | 3,649,343 |
def compute_votes(
candidates,
voters,
voter_id,
node_degree_normalization,
):
"""Comptue neighbor voting for a given set of candidates and voters
Arguments:
candidates {np.ndarray} -- genes x cells normalized expression for candidates
voters {np.ndarray} -- genes x cells normalized expression for voters
voter_id {np.ndarray} -- design_matrix for voters for cell type identities
node_degree_normalization {bool} -- Flag indicating whether to normalize votes by degree
Returns:
np.ndarray -- Votes for each candidate
"""
votes = np.dot(candidates, np.dot(voters, voter_id))
if node_degree_normalization:
node_degree = np.sum(voter_id, axis=0)
votes += node_degree
norm = np.dot(candidates, np.sum(voters, axis=1)) + voters.shape[1]
votes /= norm[:, None]
return votes | a04e4030b01d1188830a1ad2f55419d732afa432 | 3,649,344 |
import jax
import jax.numpy as jnp
def _precompute_cache(x, y, num_classes):
"""Cache quantities to speed-up the computation of L2-regularized least-sq."""
# Whiten
mean = jnp.mean(x, axis=0, keepdims=True)
std = jnp.std(x, axis=0, keepdims=True) + 1e-5
x = (x - mean) / std
# Add a constant feature for the bias, large so it's almost unregularized:
x = jnp.pad(x, ((0, 0), (0, 1)), constant_values=BIAS_CONSTANT)
# To one-hot representation rescaled into {-1, 1}
y = 2.0 * jax.nn.one_hot(y, num_classes) - 1.0
num_points, dim = x.shape
# Let N be the number of points, D the dimension and C the number of classes.
# We have x of shape (N, D) and y of shape (N, C).
# For least-squares, we can compute
#
# (A) when N >= D, (x^T x + l2 Id)^{-1} x^T y
# (B) when D > N, x^T (x x^T + l2 Id)^{-1} y
#
# We pre-compute the eigen-decomposition of either x^T x or x x^T which
# becomes q diag(eigs) q^T with q unitary matrix either (D, D) or (N, N)
# and eigs a vector (D,) or (N,).
#
# For any l2 > 0, we can compute (x^T x + l2 Id)^{-1} or (x x^T + l2 Id)^{-1}
# by simply computing q (diag(eigs) + l2 Id)^{-1} q^T.
# (SVD would be more natural here, but it proved slower, so we use eigh)
#
# Both cases (A) and (B) can be viewed as lhs (diag(eigs) + l2 Id)^{-1} rhs,
# where lhs/rhs are pre-computed left/right-hand sides to specify.
#
if num_points >= dim:
eigs, q = jnp.linalg.eigh(x.T @ x)
rhs = q.T @ (x.T @ y)
lhs = q
else:
eigs, q = jnp.linalg.eigh(x @ x.T)
rhs = q.T @ y
lhs = x.T @ q
cache = {'eigs': eigs, 'rhs': rhs, 'lhs': lhs, 'mean': mean, 'std': std}
return cache | b357620b7f2883182a33f040b2f7d82e0205bcaa | 3,649,346 |
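Building on the comments above, a sketch of how such a cache could be consumed to solve the regularized least-squares problem for a particular l2 value (this solving step is an assumed downstream usage, not part of the snippet):
def solve_from_cache(cache, l2):
    # applies (diag(eigs) + l2 * I)^{-1} between the precomputed left/right-hand sides
    scaling = 1.0 / (cache['eigs'] + l2)
    # resulting weights have shape (num_features + 1, num_classes); the last row is the bias
    return cache['lhs'] @ (scaling[:, None] * cache['rhs'])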
def show_user():
"""Return page showing details: walks, landmarks rated, scores."""
user = User.query.filter_by(user_id=session.get('user_id')).first()
ratings = user.ratings
# import pdb; pdb.set_trace()
walks = user.walks
# for walk in walks:
# origin = Landmark.query.filter(Landmark.landmark_id == walk.origin).first()
# origin = origin.landmark_name
# destination = Landmark.query.filter(Landmark.landmark_id == walk.destination).first()
# destination = destination.landmark_name
# metaWalks = {
# "walk_id": walk.walk_id,
# "metadata": {
# "origin": origin,
# "destination": destination,
# "datetime": walk.log_datetime,
# "duration": walk.duration,
# "distance": walk.distance
# }
# }
saved = UserSaved.query.filter_by(user_id=session.get('user_id')).all()
# import pdb; pdb.set_trace()
return render_template('profile.html',
user=user,
ratings=ratings,
walks=walks,
# metaWalks=metaWalks,
saved=saved) | ddcfed7ac98576cd6273bef5937f1ffbc4e3ecb9 | 3,649,347 |
def DesignCustomSineWave(family_list, how_many_gen, amp, per, shift_h, shift_v,
show=False, print_phase_mse=False, return_phases=False):
""" "Grid Search" Approach:
Create sine waves with unknown amp, per, shift_h and shift_v in combinatorial manner
and align families to minimize loss across the entire dataset - plot the best fit!
:param family_list: (list) -> list of families, each a list of cell cycle durations per generation
:param how_many_gen: (int) -> number of generations per family (2 or 3)
:param amp: (float) -> amplitude of the sine wave
:param per: (float) -> period of the sine wave
:param shift_h: (float) -> horizontal (phase) shift of the sine wave
:param shift_v: (float) -> vertical shift of the sine wave
:param show: (bool) -> whether to plot the best fit
:param print_phase_mse: (bool) -> whether to print the MSE for each candidate phase
:return: best_model_mse (float) -> sum of the lowest per-family MSE achievable by
optimally phasing each family against the given sine wave model
"""
# Specify how many generations do your families have:
if int(how_many_gen) != 3 and int(how_many_gen) != 2:
raise Exception("Warning, number of generations to consider is not specified: how_many_gen must be 2 or 3!")
# Prepare the sine wave specified by the function parameters:
repeats = int(72.0 / per)
if repeats <= 1:
repeats += 1
x_sine = np.linspace(0, repeats * per + 1, int(repeats * per * 5))
y_sine = sine_function(x=x_sine, amp=amp, per=per, shift_h=shift_h, shift_v=shift_v)
if show is True:
plt.plot(x_sine, y_sine, color="dodgerblue")
# Create the return variable - list of all best MSE that could be fitted for each family which will be summed:
mse_best_list = []
phase_best_list = []
for family in family_list:
mse_list = []
mse_family = 100
phase_family = 0
# Increments of 0.1 for chosen period:
for phase in np.linspace(0, per, int(per*10) + 1):
# Create x & y axes:
x_data = np.array([phase, phase + family[0]])
if how_many_gen == 3:
x_data = np.array([phase, phase + family[0], phase + family[0] + family[1]])
y_data_true = np.array(family)
y_data_sine = sine_function(x=x_data, amp=amp, per=per, shift_h=shift_h, shift_v=shift_v)
# Calculate mean squared error:
mse = (np.square(y_data_true - y_data_sine)).mean(axis=None)
mse_list.append(mse)
if print_phase_mse is True:
print ("Mean Square Error = {}; for Phase {} for Family {} for Sine Wave: {} * sin(2*pi/{}*x + {}) + {}"
.format(mse, phase, family, amp, per, shift_h, shift_v))
# Update the lowest mse & the phase when such number was reached:
if mse < mse_family:
mse_family = mse
phase_family = phase
if print_phase_mse is True:
print ("Lowest MSE reached: {} for Phase: {}".format(mse_family, phase_family))
# Plot the best result for the family:
x_best = np.array([phase_family, phase_family + family[0]])
if how_many_gen == 3:
x_best = np.array([phase_family, phase_family + family[0], phase_family + family[0] + family[1]])
y_best = np.array(family)
if show is True:
plt.scatter(x=x_best, y=y_best)
# Append the lowest MSE for this model:
mse_best_list.append(mse_family)
phase_best_list.append(phase_family)
sum = float(np.sum(mse_best_list))
best_model_mse = round(sum, 2)
# Annotate the plot:
if show is True:
plt.xticks(np.arange(0, repeats * per + 1, 6))
plt.xlabel("Oscillation Period / Time [hours]")
plt.ylabel("Cell Cycle Duration [hours]")
plt.title("Sine Wave Parameters: y(x) = {} * sin(2*pi/{}*x + {}) + {}\n"
"Sum of Lowest MSE per each family = {}"
.format(amp, per, shift_h, shift_v, best_model_mse))
plt.grid(axis="both")
plt.savefig("/Users/kristinaulicna/Documents/Rotation_2/Top_Solution_Sine_Wave_{}_gen_families.png"
.format(how_many_gen), bbox_inches="tight")
plt.show()
plt.close()
return best_model_mse | 5905a25b5be585303f5dbeaf42174a2a7be4879e | 3,649,348 |
def get_environment_names():
"""Return a list of defined environment names, with user preference first."""
envlist = [r[0] for r in _session.query(models.Environment.name).order_by(models.Environment.name).all()]
# move user preference to top of list
userenvname = _config.userconfig.get("environmentname", u"default")
envlist.remove(userenvname)
envlist.insert(0, userenvname)
return envlist | 0348738aa0bf3ea2df3783e2308b185e95292215 | 3,649,350 |
def plot(direction, speed, **kwargs):
"""Create a WindrosePlot, add bars and other standard things.
Args:
direction (pint.Quantity): wind direction from North.
speed (pint.Quantity): wind speeds with units attached.
**bins (pint.Quantity): wind speed bins to produce the histogram for.
**nsector (int): The number of directional centers to divide the wind
rose into. The first sector is centered on north.
**rmax (float): Hard codes the max radius value for the polar plot.
**cmap (colormap): Matplotlib colormap to use.
Returns:
WindrosePlot
"""
wp = WindrosePlot(**kwargs)
bins = kwargs.get("bins")
if bins is None:
bins = np.array([2, 5, 10, 20]) * units("mph")
nsector = kwargs.get("nsector", 8)
wp.barplot(direction, speed, bins, nsector, cmap=kwargs.get("cmap"))
wp.plot_calm()
wp.draw_arrows()
wp.draw_logo()
return wp | f5020da6946b0242bea826d166bb32b83547bc40 | 3,649,351 |
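# A hedged usage sketch for the wind-rose `plot` helper above; it assumes the same module also
# provides `WindrosePlot` and the pint `units` registry used inside the function, and that wind
# directions are given in degrees from North.
import numpy as np

directions = np.random.uniform(0, 360, 200) * units("degree")
speeds = np.random.uniform(0, 30, 200) * units("mph")
wp = plot(directions, speeds, nsector=16)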
def convert_floor(node, **kwargs):
"""Map MXNet's floor operator attributes to onnx's Floor operator
and return the created node.
"""
return create_basic_op_node('Floor', node, kwargs) | 476ff140cde55db2d489b745a08a7257576e3209 | 3,649,352 |
async def get_qrcode_login_info():
"""获取二维码登录信息"""
url = f"{BASE_URL}qrcode/auth_code"
return await post(url, reqtype="app") | 93f131cdfcf6cd7b18d126cd32c7836e10a67870 | 3,649,353 |
def get_global_free_state(self):
"""
Recurse get_global_free_state on all child parameters, and hstack them.
Return: Stacked np-array for all Param except for LocalParam
"""
    # check if the child has a 'get_global_free_state' method; if not, attach local methods
for p in self.sorted_params:
if isinstance(p, (param.Param,param.Parameterized)) and \
not hasattr(p, 'get_global_free_state'):
self.set_local_methods(p)
# Here, additional empty array allows hstacking of empty list
return np.hstack([p.get_global_free_state() for p in self.sorted_params
if isinstance(p, (param.Parameterized, param.Param))]
+ [np.empty(0, np_float_type)]) | 29c2a397261dddb92b718a57ba4ec3747d1ce661 | 3,649,354 |
def case_activity_update_type():
""" Case Activity Update Types: RESTful CRUD Controller """
return crud_controller() | 4722001b25857bd56d1470334551c8bbe085f18e | 3,649,355 |
def dftregistration(buf1ft,buf2ft,usfac=100):
"""
# function [output Greg] = dftregistration(buf1ft,buf2ft,usfac);
# Efficient subpixel image registration by crosscorrelation. This code
# gives the same precision as the FFT upsampled cross correlation in a
# small fraction of the computation time and with reduced memory
# requirements. It obtains an initial estimate of the
crosscorrelation peak
# by an FFT and then refines the shift estimation by upsampling the DFT
# only in a small neighborhood of that estimate by means of a
# matrix-multiply DFT. With this procedure all the image points
are used to
# compute the upsampled crosscorrelation.
# Manuel Guizar - Dec 13, 2007
# Portions of this code were taken from code written by Ann M. Kowalczyk
# and James R. Fienup.
# J.R. Fienup and A.M. Kowalczyk, "Phase retrieval for a complex-valued
# object by using a low-resolution image," J. Opt. Soc. Am. A 7, 450-458
# (1990).
# Citation for this algorithm:
# Manuel Guizar-Sicairos, Samuel T. Thurman, and James R. Fienup,
# "Efficient subpixel image registration algorithms," Opt. Lett. 33,
# 156-158 (2008).
# Inputs
# buf1ft Fourier transform of reference image,
# DC in (1,1) [DO NOT FFTSHIFT]
# buf2ft Fourier transform of image to register,
# DC in (1,1) [DO NOT FFTSHIFT]
# usfac Upsampling factor (integer). Images will be registered to
# within 1/usfac of a pixel. For example usfac = 20 means the
# images will be registered within 1/20 of a pixel.
(default = 1)
# Outputs
# output = [error,diffphase,net_row_shift,net_col_shift]
# error Translation invariant normalized RMS error between f and g
# diffphase Global phase difference between the two images (should be
# zero if images are non-negative).
# net_row_shift net_col_shift Pixel shifts between images
# Greg (Optional) Fourier transform of registered version of buf2ft,
# the global phase difference is compensated for.
"""
# Compute error for no pixel shift
if usfac == 0:
CCmax = np.sum(buf1ft*np.conj(buf2ft))
rfzero = np.sum(abs(buf1ft)**2)
rgzero = np.sum(abs(buf2ft)**2)
error = 1.0 - CCmax*np.conj(CCmax)/(rgzero*rfzero)
error = np.sqrt(np.abs(error))
diffphase = np.arctan2(np.imag(CCmax),np.real(CCmax))
return error, diffphase
# Whole-pixel shift - Compute crosscorrelation by an IFFT and locate the
# peak
elif usfac == 1:
ndim = np.shape(buf1ft)
m = ndim[0]
n = ndim[1]
CC = sf.ifft2(buf1ft*np.conj(buf2ft))
max1,loc1 = idxmax(CC)
rloc = loc1[0]
cloc = loc1[1]
CCmax=CC[rloc,cloc]
rfzero = np.sum(np.abs(buf1ft)**2)/(m*n)
rgzero = np.sum(np.abs(buf2ft)**2)/(m*n)
error = 1.0 - CCmax*np.conj(CCmax)/(rgzero*rfzero)
error = np.sqrt(np.abs(error))
diffphase=np.arctan2(np.imag(CCmax),np.real(CCmax))
md2 = np.fix(m/2)
nd2 = np.fix(n/2)
if rloc > md2:
row_shift = rloc - m
else:
row_shift = rloc
if cloc > nd2:
col_shift = cloc - n
else:
col_shift = cloc
ndim = np.shape(buf2ft)
nr = int(round(ndim[0]))
nc = int(round(ndim[1]))
Nr = sf.ifftshift(np.arange(-np.fix(1.*nr/2),np.ceil(1.*nr/2)))
Nc = sf.ifftshift(np.arange(-np.fix(1.*nc/2),np.ceil(1.*nc/2)))
Nc,Nr = np.meshgrid(Nc,Nr)
Greg = buf2ft*np.exp(1j*2*np.pi*(-1.*row_shift*Nr/nr-1.*col_shift*Nc/nc))
Greg = Greg*np.exp(1j*diffphase)
image_reg = sf.ifft2(Greg) * np.sqrt(nr*nc)
#return error,diffphase,row_shift,col_shift
return error,diffphase,row_shift,col_shift, image_reg
# Partial-pixel shift
else:
# First upsample by a factor of 2 to obtain initial estimate
# Embed Fourier data in a 2x larger array
ndim = np.shape(buf1ft)
m = int(round(ndim[0]))
n = int(round(ndim[1]))
mlarge=m*2
nlarge=n*2
CC=np.zeros([mlarge,nlarge],dtype=np.complex128)
CC[int(m-np.fix(m/2)):int(m+1+np.fix((m-1)/2)),int(n-np.fix(n/2)):int(n+1+np.fix((n-1)/2))] = (sf.fftshift(buf1ft)*np.conj(sf.fftshift(buf2ft)))[:,:]
# Compute crosscorrelation and locate the peak
CC = sf.ifft2(sf.ifftshift(CC)) # Calculate cross-correlation
max1,loc1 = idxmax(np.abs(CC))
rloc = int(round(loc1[0]))
cloc = int(round(loc1[1]))
CCmax = CC[rloc,cloc]
# Obtain shift in original pixel grid from the position of the
# crosscorrelation peak
ndim = np.shape(CC)
m = ndim[0]
n = ndim[1]
md2 = np.fix(m/2)
nd2 = np.fix(n/2)
if rloc > md2:
row_shift = rloc - m
else:
row_shift = rloc
if cloc > nd2:
col_shift = cloc - n
else:
col_shift = cloc
row_shift=row_shift/2
col_shift=col_shift/2
# If upsampling > 2, then refine estimate with matrix multiply DFT
if usfac > 2:
### DFT computation ###
# Initial shift estimate in upsampled grid
row_shift = 1.*np.round(row_shift*usfac)/usfac;
col_shift = 1.*np.round(col_shift*usfac)/usfac;
dftshift = np.fix(np.ceil(usfac*1.5)/2); ## Center of output array at dftshift+1
# Matrix multiply DFT around the current shift estimate
CC = np.conj(dftups(buf2ft*np.conj(buf1ft),np.ceil(usfac*1.5),np.ceil(usfac*1.5),usfac,\
dftshift-row_shift*usfac,dftshift-col_shift*usfac))/(md2*nd2*usfac**2)
# Locate maximum and map back to original pixel grid
max1,loc1 = idxmax(np.abs(CC))
rloc = int(round(loc1[0]))
cloc = int(round(loc1[1]))
CCmax = CC[rloc,cloc]
rg00 = dftups(buf1ft*np.conj(buf1ft),1,1,usfac)/(md2*nd2*usfac**2)
rf00 = dftups(buf2ft*np.conj(buf2ft),1,1,usfac)/(md2*nd2*usfac**2)
rloc = rloc - dftshift
cloc = cloc - dftshift
row_shift = 1.*row_shift + 1.*rloc/usfac
col_shift = 1.*col_shift + 1.*cloc/usfac
# If upsampling = 2, no additional pixel shift refinement
else:
rg00 = np.sum(buf1ft*np.conj(buf1ft))/m/n;
rf00 = np.sum(buf2ft*np.conj(buf2ft))/m/n;
error = 1.0 - CCmax*np.conj(CCmax)/(rg00*rf00);
error = np.sqrt(np.abs(error));
diffphase = np.arctan2(np.imag(CCmax),np.real(CCmax));
# If its only one row or column the shift along that dimension has no
# effect. We set to zero.
if md2 == 1:
row_shift = 0
if nd2 == 1:
col_shift = 0;
# Compute registered version of buf2ft
if usfac > 0:
ndim = np.shape(buf2ft)
nr = ndim[0]
nc = ndim[1]
Nr = sf.ifftshift(np.arange(-np.fix(1.*nr/2),np.ceil(1.*nr/2)))
Nc = sf.ifftshift(np.arange(-np.fix(1.*nc/2),np.ceil(1.*nc/2)))
Nc,Nr = np.meshgrid(Nc,Nr)
Greg = buf2ft*np.exp(1j*2*np.pi*(-1.*row_shift*Nr/nr-1.*col_shift*Nc/nc))
Greg = Greg*np.exp(1j*diffphase)
        elif usfac == 0:
            # Original MATLAB branch guarded by nargout; unreachable here since usfac == 0 already returned above.
            Greg = np.dot(buf2ft, np.exp(1j * diffphase))
#plt.figure(3)
image_reg = sf.ifft2(Greg) * np.sqrt(nr*nc)
#imgplot = plt.imshow(np.abs(image_reg))
#a_ini = np.zeros((100,100))
#a_ini[40:59,40:59] = 1.
#a = a_ini * np.exp(1j*15.)
#plt.figure(6)
#imgplot = plt.imshow(np.abs(a))
#plt.figure(3)
#imgplot = plt.imshow(np.abs(a)-np.abs(image_reg))
#plt.colorbar()
# return error,diffphase,row_shift,col_shift,Greg
return error,diffphase,row_shift,col_shift, image_reg | 1cd8cd37efebea29da1086b998e98a334697e2d4 | 3,649,356 |
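# A small usage sketch for dftregistration above: register an integer-shifted copy of an image
# against the original. It assumes `sf` is scipy.fftpack (as used inside the function) and that
# the companion helpers `idxmax` and `dftups` from the same module are available.
import numpy as np
import scipy.fftpack as sf

ref = np.zeros((64, 64))
ref[20:30, 25:35] = 1.0
shifted = np.roll(np.roll(ref, 3, axis=0), -5, axis=1)
error, diffphase, row_shift, col_shift, registered = dftregistration(
    sf.fft2(ref), sf.fft2(shifted), usfac=10)
# row_shift / col_shift recover the applied (3, -5) roll up to the sign convention of the routine.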
def parametrize_simulations(args):
"""Parametrize simulations"""
if args.type == INSTANCE_COUNTS:
return instance_count_sims(args)
if args.type == FEATURE_COUNTS:
return feature_count_sims(args)
if args.type == NOISE_LEVELS:
return noise_level_sims(args)
if args.type == SHUFFLING_COUNTS:
return shuffling_count_sims(args)
raise NotImplementedError("Unknown simulation type") | ad592e64cbf7fa8ae79ccb753a1fd87db85e1f11 | 3,649,357 |
from typing import Union
def connect(base_url: Union[str, URL], database_id: int = DJ_DATABASE_ID) -> Connection:
"""
Create a connection to the database.
"""
if not isinstance(base_url, URL):
base_url = URL(base_url)
return Connection(base_url, database_id) | aac724326f0f6e487caf8d614265c8028bae4e79 | 3,649,358 |
def micore_tf_deps():
"""Dependencies for Tensorflow builds.
Returns:
list of dependencies which must be used by each cc_library
which refers to Tensorflow. Enables the library to compile both for
Android and for Linux. Use this macro instead of directly
declaring dependencies on Tensorflow.
"""
return micore_if(
android = [
# Link to library which does not contain any ops.
"@org_tensorflow//tensorflow/core:portable_tensorflow_lib_lite",
"@gemmlowp//:eight_bit_int_gemm",
"@fft2d//:fft2d",
],
ios = [
"@org_tensorflow//tensorflow/core:portable_tensorflow_lib",
"@gemmlowp//:eight_bit_int_gemm",
"@fft2d//:fft2d",
],
default = [
# Standard references for Tensorflow when building for Linux. We use
# an indirection via the alias targets below, to facilitate whitelisting
# these deps in the mobile license presubmit checks.
"@local_config_tf//:libtensorflow_framework",
"@local_config_tf//:tf_header_lib",
],
) | b4c8786df978a536f1adf1384209f0a0b663c100 | 3,649,359 |
def de_bruijn(k, n):
"""
de Bruijn sequence for alphabet k
and subsequences of length n.
"""
try:
# let's see if k can be cast to an integer;
# if so, make our alphabet a list
_ = int(k)
alphabet = list(map(str, range(k)))
except (ValueError, TypeError):
alphabet = k
k = len(k)
a = [0] * k * n
sequence = []
def db(t, p):
if t > n:
if n % p == 0:
sequence.extend(a[1:p + 1])
else:
a[t] = a[t - p]
db(t + 1, p)
for j in range(a[t - p] + 1, k):
a[t] = j
db(t + 1, t)
db(1, 1)
return "".join(alphabet[i] for i in sequence) | 7e39d51bccbbb42bdda0594fa8c7077d4f2af1a1 | 3,649,360 |
def append_artist(songs, artist):
"""
When the songs gathered from the description just contains the
titles of the songs usually means it's an artist's album.
If an artist was provided appends the song title to the artist
using a hyphen (artist - song)
:param list songs: List of song titles (only song title)
:param str artist: Artist to search for with the song names
:return list: song titles along with the artist
"""
songs_complete = []
for song in songs:
song_complete = f'{artist} - {song}'
songs_complete.append(song_complete)
return songs_complete | b3fbda311849f68ab01c2069f44ea0f694365270 | 3,649,361 |
def pose2pandas(pose: pyrosetta.Pose, scorefxn: pyrosetta.ScoreFunction) -> pd.DataFrame:
"""
Return a pandas dataframe from the scores of the pose
:param pose:
:return:
"""
pose.energies().clear_energies()
    scorefxn.weights() # necessary?
emopts = pyrosetta.rosetta.core.scoring.methods.EnergyMethodOptions(scorefxn.energy_method_options())
emopts.hbond_options().decompose_bb_hb_into_pair_energies(True)
scorefxn.set_energy_method_options(emopts)
scorefxn(pose)
scores = pd.DataFrame(pose.energies().residue_total_energies_array())
pi = pose.pdb_info()
scores['residue'] = scores.index.to_series() \
.apply(lambda r: pose.residue(r + 1) \
.name1() + pi.pose2pdb(r + 1)
)
return scores | 71c3342f86138f28411302da271ca0fb252727d2 | 3,649,362 |
def rantest(seed,N=100):
"""get some random numbers"""
buff = np.zeros(N,dtype=np.double)
ct_buff = buff.ctypes.data_as(ct.POINTER(ct.c_double))
sim.rantest(seed,N,ct_buff)
return buff | 4764968f8b0c46ab58b0b2710f4a0764a417f51c | 3,649,363 |
def tf_efficientnet_b0_ap(pretrained=False, **kwargs):
""" EfficientNet-B0 AdvProp. Tensorflow compatible variant """
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet(
'tf_efficientnet_b0_ap', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
return model | 4cbd2d791301c4001f9bd07da0357550ea93f585 | 3,649,364 |
import torch
def pi_del(
shape,
y_tgt_star,
pad_symbol=0,
plh_symbol=0,
bos_symbol=0,
eos_symbol=0,
Kmax=100,
device="cpu",
):
"""Operations and states to edit a partially deleted version of y_star back to y_star."""
# shape = B x N x M
# y_tgt_star : B x M
shape = list(shape)
shape[-1] = y_tgt_star.size(-1)
shape = tuple(shape)
del_tgt = torch.ones(shape, dtype=torch.long, device=device)
plh_tgt = -torch.ones(
(shape[0], shape[1], shape[2] - 1), dtype=torch.long, device=device
)
cmb_tgt = -torch.ones(shape[0], shape[2], shape[1], dtype=torch.long, device=device)
y_plh = torch.full(
(shape[0], shape[1], shape[2]), pad_symbol, dtype=torch.long, device=device
)
y_cmb = torch.full(shape, pad_symbol, dtype=torch.long, device=device)
y_tok = torch.full_like(y_tgt_star, pad_symbol, dtype=torch.long, device=device)
y_star_n = y_tgt_star.view(shape[0], 1, shape[-1]).expand(shape)
# tok_mask = torch.zeros_like(y_star_n, dtype=bool, device=device)
mask = (
((torch.rand(y_star_n.shape, device=device) > 0.2) & (y_star_n.ne(pad_symbol)))
| (y_star_n == bos_symbol)
| (y_star_n == eos_symbol)
)
tok_mask = mask.any(1)
sorted_ = mask.long().sort(-1, descending=True)
sorted_mask = sorted_[0].bool()
y_plh[sorted_mask] = y_star_n[mask]
y_cmb[y_star_n.ne(pad_symbol)] = plh_symbol
y_cmb[mask] = y_star_n[mask]
y_tok[y_tgt_star.ne(pad_symbol)] = plh_symbol
y_tok[tok_mask] = y_tgt_star[tok_mask]
idx = sorted_[1]
plh_tgt = idx[:, :, 1:] - idx[:, :, :-1] - 1
plh_tgt[~sorted_mask[:, :, 1:]] = 0
plh_tgt = plh_tgt.clamp(0, Kmax - 1)
cmb_tgt = mask.long()
plh_mask = y_plh.ne(pad_symbol)[:, :, 1:]
del_mask = torch.zeros(shape, dtype=bool, device=device)
cmb_mask = y_tgt_star.ne(pad_symbol).view(shape[0], 1, shape[-1]).expand_as(y_cmb)
return {
"del_tgt": del_tgt,
"plh_tgt": plh_tgt,
"cmb_tgt": cmb_tgt,
"tok_tgt": y_tgt_star,
"del_mask": del_mask,
"plh_mask": plh_mask,
"cmb_mask": cmb_mask,
"tok_mask": tok_mask,
"y_plh": y_plh,
"y_cmb": y_cmb,
"y_tok": y_tok,
} | edbd9c40de5b8d5639bfa382d90071e7405aa062 | 3,649,365 |
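# A quick shape-level sanity sketch for pi_del above, using a toy batch; the symbol ids
# (pad=0, plh=1, bos=2, eos=3) are assumptions for illustration only.
import torch

y_tgt_star = torch.tensor([[2, 7, 8, 9, 3, 0]])  # one bos ... eos sentence with trailing padding
out = pi_del((1, 2, 6), y_tgt_star, pad_symbol=0, plh_symbol=1,
             bos_symbol=2, eos_symbol=3, Kmax=10)
print(out["y_plh"].shape, out["plh_tgt"].shape)  # torch.Size([1, 2, 6]) torch.Size([1, 2, 5])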
from typing import Dict
from typing import Any
import logging
def create_service_account(project_id: str, service_account_name: str,
role_name: str, file_name: str) -> Dict[str, Any]:
"""Create a new service account.
Args:
project_id: GCP project id.
service_account_name: The service account name.
role_name: The role to be assigned to the service account.
file_name: The file where service account key will be stored.
Returns:
service_account: The newly created service account.
Raises:
ValueError: If the service_account_name is empty.
ValueError: If the file_name is empty.
"""
if not service_account_name:
raise ValueError('Service account name cannot be empty.')
if not file_name:
raise ValueError('The file name cannot be empty.')
service_account_details = get_service_account(project_id,
service_account_name)
if service_account_details:
return service_account_details
logging.info('Creating "%s" service account in "%s" project',
service_account_name, project_id)
request = _get_service_account_client().create(
name='projects/' + project_id,
body={
'accountId': service_account_name,
'serviceAccount': {
'displayName': service_account_name.upper()
},
})
service_account_details = utils.execute_request(request)
set_service_account_role(project_id, service_account_name, role_name)
create_service_account_key(project_id, service_account_name, file_name)
return service_account_details | 48bcd081cef0eb76be5412febb76e05266a12968 | 3,649,366 |
def test_ode_FE():
"""Test that a linear u(t)=a*t+b is exactly reproduced."""
def exact_solution(t):
return a*t + b
def f(u, t): # ODE
return a + (u - exact_solution(t))**m
a = 4
b = -1
m = 6
dt = 0.5
T = 20.0
u, t = ode_FE(f, exact_solution(0), dt, T)
diff = abs(exact_solution(t) - u).max()
tol = 1E-15 # Tolerance for float comparison
success = diff < tol
assert success | 992aaf22b89a235b7ab5505225a8f3ff4f34ae10 | 3,649,367 |
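# The test above relies on an `ode_FE` forward-Euler solver that is not part of this snippet;
# a minimal implementation consistent with its call signature ode_FE(f, U0, dt, T) -> (u, t)
# might look like this (an assumed sketch, exact on linear solutions as the test requires):
import numpy as np

def ode_FE(f, U0, dt, T):
    """Forward Euler for u' = f(u, t), u(0) = U0, on [0, T] with time step dt."""
    n_steps = int(round(T / dt))
    t = np.linspace(0, n_steps * dt, n_steps + 1)
    u = np.zeros(n_steps + 1)
    u[0] = U0
    for k in range(n_steps):
        u[k + 1] = u[k] + dt * f(u[k], t[k])
    return u, t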
def file_finder():
"""
    Allows the user to select a file via a tkinter file dialog.
    :return: path_name, the path of the selected file
    :rtype: str
"""
root = Tk()
root.title("File Finder")
root.geometry("500x400")
root.attributes("-topmost", True)
root.withdraw()
path_name = filedialog.askopenfilename()
root.withdraw()
return path_name | 2da5df55010e8edbd5975fe527ad4780ce6e69e9 | 3,649,368 |
def htm_search_cone(IndexFile_data,Long,Lat,Radius,Ind=None,Son_index=np.arange(2,6),PolesLong_index=np.arange(6,11,2),PolesLat_index=np.arange(7,12,2)):
#print('I am running htm_search_cone')
"""Description: Search for all HTM leafs intersecting a small circles
Input :-Either a table of HTM data or an open HDF5 object in which the HTM data is stored
-Longitude (radians) to search
-Latitutde (radians) to search
-Radius of the small circle
Output : a vector of indexes of the winner(s):the "adress" in the indexfile of the smallest leaf(s) intercepting the cone
By : Maayane Soumagnac (original Matlab function by Eran Ofek) Feb 2018
"""
if Ind is None:
Sons=np.arange(8)
else:
Sons=Ind.astype(int)
ID=[]
Nsons=len(Sons)
    PolesLong=np.zeros((3,Nsons)) # 3 rows, Nsons columns: each column holds the longitudes of the poles of one son mesh
    PolesLat=np.zeros((3, Nsons)) # 3 rows, Nsons columns
for i in range(Nsons):#OPTIMIZE
        PolesLong[:,i]=IndexFile_data[PolesLong_index[:],Sons[i]] # array where each column is the 3 pole longitudes of a son mesh HERE: THIS? OR INVERSE?
        PolesLat[:,i]=IndexFile_data[PolesLat_index[:],Sons[i]] # array where each column is the 3 pole latitudes of a son mesh HERE: THIS? OR INVERSE?
Flag=celestial.cone_in_polysphere(PolesLong,PolesLat,Long,Lat,Radius) #check if the cone intercept any of the sons meshes
for i in range(Nsons): #OPTIMIZABLE?
        if Flag[i]==1: # i.e. if the cone overlaps the son with index i
            if np.isnan(IndexFile_data[Son_index[:],Sons[i]]).all()==True: # NaNs in the index file at the son's index mean this is a leaf: the data is here and you cannot go further down the tree
ID.append(Sons[i])
else:
Ind = IndexFile_data[Son_index[:], Sons[i]] - 1.
#RECURION IS HERE
ID.extend(htm_search_cone(IndexFile_data,Long,Lat,Radius,Ind=Ind))
return ID | 2b202f943264cc979c4271c41fca2386a3b2b14f | 3,649,369 |
def get_missing_columns(missing_data):
"""
    Returns the names of the columns that contain missing data, as a list.
:param
missing_data : return of missing_data(df)
:return
list: list containing columns with missing data
"""
missing_data = missing_data[missing_data['percent'] > 0]
missing_columns = missing_data.index.tolist()
return missing_columns | 80feccec6148a417b89fb84f4c412d9ea4d0dd37 | 3,649,372 |
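# get_missing_columns above consumes the output of a `missing_data(df)` helper that is not
# shown; one compatible implementation (assumed sketch) indexed by column name:
import pandas as pd

def missing_data(df: pd.DataFrame) -> pd.DataFrame:
    """Per-column count and percentage of missing values."""
    total = df.isnull().sum()
    percent = 100.0 * total / len(df)
    return pd.DataFrame({"total": total, "percent": percent})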
def read(request):
"""Render the page for a group."""
pubid = request.matchdict["pubid"]
slug = request.matchdict.get("slug")
group = models.Group.get_by_pubid(pubid)
if group is None:
raise exc.HTTPNotFound()
if slug is None or slug != group.slug:
url = request.route_url('group_read',
pubid=group.pubid,
slug=group.slug)
return exc.HTTPMovedPermanently(url)
if not request.authenticated_userid:
return _login_to_join(request, group)
else:
if group in request.authenticated_user.groups:
return _read_group(request, group)
else:
return _join(request, group) | 8376c94a4ffe6569cd7531d18427fbfd57629031 | 3,649,373 |
import time
import copy
from tqdm import tqdm
import torch
from typing import OrderedDict
def train_model(
model,
device,
train_data_loader,
valid_data_loader,
criterion, optimizer, scheduler, num_epochs=5):
"""
training
Parameters
--------------
model : DogClassificationModel
Network model to be trained.
device : device
cuda or cpu
train_data_loader : dataloader
dataloader for training
valid_data_loader : dataloader
dataloader for validation
criterion :
Loss function.
optimizer :
Optimizer.
scheduler :
Learning rate scheduler.
num_epochs : int
The number of epochs.
Returns
--------------
model : DogClassificationModel
Trained model.
"""
since = time.time()
model = model.to(device)
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
bar = tqdm(total = len(train_data_loader))
bar.set_description("Epoch: {}/{}".format(epoch+1, num_epochs))
"""
Training Phase
"""
model.train()
running_loss = 0.0
running_corrects = 0
for j, (inputs, labels) in enumerate(train_data_loader):
optimizer.zero_grad()
tmp_loss_item = 0.0
# training
with torch.set_grad_enabled(True):
outputs = model(inputs.to(device))
torch.cuda.empty_cache()
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels.to(device))
# backward + optimize only if in training phase
loss.backward()
optimizer.step()
tmp_loss_item = loss.item()
# statistics
running_loss += tmp_loss_item * inputs.size(0)
running_corrects += torch.sum(preds.to('cpu') == labels.data)
# progress bar
bar.update(1)
tmp_loss = float(running_loss / (j+1)) / 32 # 32: mini-batch size
tmp_acc = float(running_corrects // (j+1)) / 32
bar.set_postfix(OrderedDict(loss=tmp_loss, acc=tmp_acc))
# update learning rate scheduler
scheduler.step()
dataset_size = len(train_data_loader.dataset)
epoch_loss = running_loss / dataset_size
epoch_acc = running_corrects.double() / dataset_size
"""
Validation Phase
"""
model.eval() # Set model to validation mode
val_running_loss = 0.0
val_running_corrects = 0
# Iterate over data.
for inputs, labels in valid_data_loader:
val_inputs = inputs.to(device)
val_labels = labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.no_grad():
val_outputs = model(val_inputs)
_, preds = torch.max(val_outputs, 1)
loss = criterion(val_outputs, val_labels)
# statistics
val_running_loss += loss.item() * val_inputs.size(0)
val_running_corrects += torch.sum(preds == val_labels.data)
dataset_size = len(valid_data_loader.dataset)
val_epoch_loss = val_running_loss / dataset_size
val_epoch_acc = val_running_corrects.double() / dataset_size
print('VALIDATION Loss: {:.4f} Acc: {:.4f}'.format(val_epoch_loss, val_epoch_acc))
print("Elapsed time: {} [sec]".format(time.time() - since))
# deep copy the model
if val_epoch_acc > best_acc:
best_acc = val_epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model | c12929a8aec02031f8d293f5347beaeb5dc6d759 | 3,649,374 |
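# A hedged usage sketch for train_model above with a tiny synthetic stand-in for the original
# dog-image dataset and model (the real DogClassificationModel and data pipeline are not shown):
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
images, labels = torch.randn(64, 3, 64, 64), torch.randint(0, 5, (64,))
train_loader = DataLoader(TensorDataset(images, labels), batch_size=32, shuffle=True)
valid_loader = DataLoader(TensorDataset(images, labels), batch_size=32)
model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 64 * 64, 5))  # stand-in classifier
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=1e-3, momentum=0.9)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)
best_model = train_model(model, device, train_loader, valid_loader,
                         criterion, optimizer, scheduler, num_epochs=1)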
from src.dialogue_system.agent.agent_with_goal_3 import AgentWithGoal as AgentWithGoal3
import json
import time
import pickle
def run(parameter):
"""
The entry function of this code.
Args:
parameter: the super-parameter
"""
print(json.dumps(parameter, indent=2))
time.sleep(2)
slot_set = pickle.load(file=open(parameter["slot_set"], "rb"))
action_set = pickle.load(file=open(parameter["action_set"], "rb"))
disease_symptom = pickle.load(file=open(parameter["disease_symptom"], "rb"))
steward = RunningSteward(parameter=parameter,checkpoint_path=parameter["checkpoint_path"])
print('action_set', action_set)
warm_start = parameter.get("warm_start")
warm_start_epoch_number = parameter.get("warm_start_epoch_number")
train_mode = parameter.get("train_mode")
agent_id = parameter.get("agent_id")
simulate_epoch_number = parameter.get("simulate_epoch_number")
# Warm start.
if warm_start == True and train_mode == True:
print("warm starting...")
agent = AgentRule(action_set=action_set,slot_set=slot_set,disease_symptom=disease_symptom,parameter=parameter)
steward.dialogue_manager.set_agent(agent=agent)
steward.warm_start(epoch_number=warm_start_epoch_number)
# exit()
if agent_id.lower() == 'agentdqn':
agent = AgentDQN(action_set=action_set,slot_set=slot_set,disease_symptom=disease_symptom,parameter=parameter)
elif agent_id.lower() == 'agentrandom':
agent = AgentRandom(action_set=action_set,slot_set=slot_set,disease_symptom=disease_symptom,parameter=parameter)
elif agent_id.lower() == 'agentrule':
agent = AgentRule(action_set=action_set,slot_set=slot_set,disease_symptom=disease_symptom,parameter=parameter)
elif agent_id.lower() == 'agenthrl':
agent = AgentHRL(action_set=action_set, slot_set=slot_set, disease_symptom=disease_symptom, parameter=parameter)
elif agent_id.lower() == 'agentwithgoaljoint':
agent = AgentWithGoalJoint(action_set=action_set, slot_set=slot_set, disease_symptom=disease_symptom, parameter=parameter)
elif agent_id.lower() == 'agentwithgoal':
agent = AgentWithGoal(action_set=action_set, slot_set=slot_set, disease_symptom=disease_symptom, parameter=parameter)
elif agent_id.lower() == 'agentwithgoal2':
agent = AgentWithGoal2(action_set=action_set, slot_set=slot_set, disease_symptom=disease_symptom, parameter=parameter)
elif agent_id.lower() == 'agentwithgoal3':
agent = AgentWithGoal3(action_set=action_set, slot_set=slot_set, disease_symptom=disease_symptom,
parameter=parameter)
else:
raise ValueError('Agent id should be one of [AgentRule, AgentDQN, AgentRandom, AgentHRL, AgentWithGoal, AgentWithGoal2, AgentWithGoalJoint].')
steward.dialogue_manager.set_agent(agent=agent)
if train_mode is True: # Train
steward.simulate(epoch_number=simulate_epoch_number, train_mode=train_mode)
else: # test
for index in range(simulate_epoch_number):
res = steward.evaluate_model(dataset='test', index=index)
return res | 75e9fd2204e2e3e0aa7101e9656c23617fb56099 | 3,649,375 |
from typing import Iterable
def convert(
value: str,
conversion_recipes: Iterable[ConversionRecipe[ConvertResultType]]) -> ConvertResultType:
"""
Given a string value and a series of conversion recipes, attempt to convert the value using the
recipes.
If none of the recipes declare themselves as applicable, then raise
:py:class:`NoApplicableConversionRecipeError`. If none of the recipes that declare themselves
as eligible run successfully, then raise :py:class:`NoSuccessfulConversionRecipeError`.
Parameters
----------
value : str
The string value we are attempting to convert.
conversion_recipes : Iterable[ConversionRecipe[ConvertResultType]]
A series of conversion recipes.
Returns
-------
The converted value.
"""
none_applied = True
for conversion_recipe in conversion_recipes:
if conversion_recipe.applicable(value):
none_applied = False
try:
return conversion_recipe.load(value)
except Exception:
pass
if none_applied:
raise NoApplicableConversionRecipeError(
f"Could not find applicable gonversion recipe for {value}")
raise NoSuccessfulConversionRecipeError(
f"All applicable conversion recipes failed to run successfully for {value}.") | a6f1162f4069a3846636ad95cecde0b3ba3601a8 | 3,649,376 |
def get_function_name(fcn):
"""Returns the fully-qualified function name for the given function.
Args:
fcn: a function
Returns:
the fully-qualified function name string, such as
"eta.core.utils.function_name"
"""
return fcn.__module__ + "." + fcn.__name__ | ae186415225bd5420de7f7b3aef98480d30d59f8 | 3,649,377 |
def clean_cases(text):
"""
Makes text all lowercase.
:param text: the text to be converted to all lowercase.
:type: str
:return: lowercase text
:type: str
"""
return text.lower() | 9b0c931336dbf762e5e3a18d103706ddf1e7c14f | 3,649,378 |
def root():
"""Refreshes data in database"""
db.drop_all()
db.create_all()
# Get data from api, make objects with it, and add to db
for row in df.index:
db_comment = Comment(user=df.User[row],text=df.Text[row]) # rating = df.Rating[row]
db.session.add(db_comment)
db.session.commit()
return 'Data stored' | 0731926301cd981cc9278964cb313a35bcfb4f43 | 3,649,379 |
def bond_value_to_description(value):
"""bond_value_to_description(value) -> string
Convert from a bond type string into its text description,
separated by "|"s. The result are compatible with
OEGetFPBontType and are in canonical order.
"""
return _get_type_description("bond", _btype_flags, value) | 2eeb333334740ed9cdad28b43742c7a2274885bc | 3,649,381 |
def read_set_from_file(filename):
"""
Extract a de-duped collection (set) of text from a file.
Expected file format is one item per line.
"""
collection = set()
with open(filename, 'r', encoding='utf-8') as file_:
for line in file_:
collection.add(line.rstrip())
return collection | ba71ed4fb6e85cf5156d35a85245058bb3711f9b | 3,649,382 |
def filter_0_alleles(allele_df, allele_num=2):
"""Drop alleles that do not appear in any of the strains.
"""
drop_cols = []
for col in allele_df.columns:
if allele_df[col].sum()<allele_num:
drop_cols.append(col)
allele_df.drop(drop_cols, inplace=True, axis=1)
return allele_df | 9b76152d6e6fc200c2d80d4721122d3958642286 | 3,649,383 |
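# Small example for filter_0_alleles above: columns summing to less than `allele_num` are dropped.
import pandas as pd

alleles = pd.DataFrame({"a1": [1, 1, 0], "a2": [0, 1, 0], "a3": [0, 0, 0]})
print(filter_0_alleles(alleles).columns.tolist())  # ['a1'] with the default allele_num=2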
def gradient_of_rmse(y_hat, y, Xn):
"""
Returns the gradient of the Root Mean Square error with respect to the
parameters of the linear model that generated the prediction `y_hat'.
Hence, y_hat should have been generated by a linear process of the form
Xn.T.dot(theta)
Args:
y_hat (np.array of shape N,): The predictions of the linear model
        y (np.array of shape N,): The "ground-truth" values.
        Xn (np.array): The feature matrix used by the linear model to produce y_hat.
    Returns:
        The gradient of the mean squared error between y_hat and y with respect
        to the model parameters.
"""
N = y.shape[0]
assert N > 0, ('At least one sample is required in order to compute the '
'RMSE loss')
losses = y - y_hat
gradient = - 2 * Xn.T.dot(losses) / N
return gradient | 73a46197f90cf1b9c0a90a8ce2d2eae006c6d002 | 3,649,384 |
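# A quick consistency sketch for gradient_of_rmse above, assuming Xn is the (N, d) design matrix
# so that predictions come from y_hat = Xn.dot(theta); the returned vector then equals the
# analytic mean-squared-error gradient -2/N * Xn.T @ (y - y_hat).
import numpy as np

rng = np.random.default_rng(0)
Xn = rng.normal(size=(50, 3))
theta = np.array([1.0, -2.0, 0.5])
y = Xn @ np.array([0.9, -1.8, 0.7]) + 0.01 * rng.normal(size=50)
y_hat = Xn @ theta
grad = gradient_of_rmse(y_hat, y, Xn)
print(np.allclose(grad, -2 * Xn.T @ (y - y_hat) / len(y)))  # True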
def is_commit_in_public_upstream(revision: str, public_upstream_branch: str, source_dir: str):
"""
Determine if the public upstream branch includes the specified commit.
:param revision: Git commit hash or reference
:param public_upstream_branch: Git branch of the public upstream source
:param source_dir: Path to the local Git repository
"""
cmd = ["git", "merge-base", "--is-ancestor", "--", revision, "public_upstream/" + public_upstream_branch]
# The command exits with status 0 if true, or with status 1 if not. Errors are signaled by a non-zero status that is not 1.
# https://git-scm.com/docs/git-merge-base#Documentation/git-merge-base.txt---is-ancestor
rc, out, err = exectools.cmd_gather(cmd)
if rc == 0:
return True
if rc == 1:
return False
raise IOError(
f"Couldn't determine if the commit {revision} is in the public upstream source repo. `git merge-base` exited with {rc}, stdout={out}, stderr={err}") | 6f7259f8e3a1893a7fbd41914df37e42fed73c7b | 3,649,385 |
def align_down(x: int, align: int) -> int:
"""
Align integer down.
:return:
``y`` such that ``y % align == 0`` and ``y <= x`` and ``(x - y) < align``
"""
return x - (x % align) | 8144309badf601999f4c291ee3af5cfbd18397ea | 3,649,386 |
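# Quick worked examples for align_down above:
assert align_down(13, 4) == 12   # 13 rounded down to a multiple of 4
assert align_down(16, 4) == 16   # already aligned
assert align_down(7, 8) == 0     # rounds down to zero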
def null():
"""return an empty bit buffer"""
return bits() | 53b7e87648c33ab1072651ee6ef6bfb3fe92da8d | 3,649,388 |
def find_coherent_patch(correlations, window=11):
"""Looks through 3d stack of correlation layers and finds strongest correlation patch
Also accepts a 2D array of the pre-compute means of the 3D stack.
Uses a window of size (window x window), finds the largest average patch
Args:
correlations (ndarray): 3D array of correlations:
correlations = read_stack('path/to/correlations', '.cc')
window (int): size of the patch to consider
Returns:
tuple[int, int]: the row, column of center of the max patch
Example:
>>> corrs = np.arange(25).reshape((5, 5))
>>> print(find_coherent_patch(corrs, window=3))
(3, 3)
>>> corrs = np.stack((corrs, corrs), axis=0)
>>> print(find_coherent_patch(corrs, window=3))
(3, 3)
"""
if correlations.ndim == 2:
mean_stack = correlations
elif correlations.ndim == 3:
mean_stack = np.mean(correlations, axis=0)
else:
raise ValueError("correlations must be a 2D mean array, or 3D correlations")
conv = uniform_filter(mean_stack, size=window, mode='constant')
max_idx = conv.argmax()
return np.unravel_index(max_idx, mean_stack.shape) | 27bc06ec5e73d854c094f909fdf507fad38168f3 | 3,649,389 |
def test():
"""HI :)"""
return 'Hi!' | 626b2ffcfc3f60dcd5456efa2d61a3ed18d428d8 | 3,649,390 |
def get_vdfdx(stuff_for_time_loop, vdfdx_implementation="exponential"):
"""
This function enables VlaPy to choose the implementation of the vdfdx stepper
to use in the lower level sections of the simulation
:param stuff_for_time_loop: (dictionary) contains the derived parameters for the simulation
    :param vdfdx_implementation: (string) the chosen v df/dx implementation for this simulation
:return:
"""
if vdfdx_implementation == "exponential":
vdfdx = get_vdfdx_exponential(
kx=stuff_for_time_loop["kx"], v=stuff_for_time_loop["v"]
)
elif vdfdx_implementation == "sl":
vdfdx = get_vdfdx_sl(x=stuff_for_time_loop["x"], v=stuff_for_time_loop["v"])
else:
raise NotImplementedError(
"v df/dx: <"
+ vdfdx_implementation
+ "> has not yet been implemented in NumPy/SciPy"
)
return vdfdx | 538548a2d2d39b83ad74ac052c3c1a51895357d2 | 3,649,392 |
def worker(args):
"""
1. Create the envelope request object
2. Send the envelope
"""
envelope_args = args["envelope_args"]
# 1. Create the envelope request object
envelope_definition = make_envelope(envelope_args)
# 2. call Envelopes::create API method
# Exceptions will be caught by the calling function
api_client = ApiClient()
api_client.host = args["base_path"]
api_client.set_default_header("Authorization", "Bearer " + args["ds_access_token"])
envelope_api = EnvelopesApi(api_client)
results = envelope_api.create_envelope(args["account_id"], envelope_definition=envelope_definition)
envelope_id = results.envelope_id
# app.logger.info(f"Envelope was created. EnvelopeId {envelope_id}")
return {"envelope_id": envelope_id} | 79891142b34da8d9d5aacc4d55ab7f65198a4116 | 3,649,393 |
import glob
import csv
def write_colocated_data_time_avg(coloc_data, fname):
"""
Writes the time averaged data of gates colocated with two radars
Parameters
----------
coloc_data : dict
dictionary containing the colocated data parameters
fname : str
file name where to store the data
Returns
-------
fname : str
the name of the file where data has written
"""
filelist = glob.glob(fname)
if not filelist:
with open(fname, 'w', newline='') as csvfile:
csvfile.write('# Colocated radar gates data file\n')
csvfile.write('# Comment lines are preceded by "#"\n')
csvfile.write('#\n')
fieldnames = [
'rad1_time', 'rad1_ray_ind', 'rad1_rng_ind', 'rad1_ele',
'rad1_azi', 'rad1_rng', 'rad1_dBZavg', 'rad1_PhiDPavg',
'rad1_Flagavg', 'rad2_time', 'rad2_ray_ind', 'rad2_rng_ind',
'rad2_ele', 'rad2_azi', 'rad2_rng', 'rad2_dBZavg',
'rad2_PhiDPavg', 'rad2_Flagavg']
writer = csv.DictWriter(csvfile, fieldnames)
writer.writeheader()
for i, rad1_time in enumerate(coloc_data['rad1_time']):
writer.writerow({
'rad1_time': rad1_time.strftime('%Y%m%d%H%M%S'),
'rad1_ray_ind': coloc_data['rad1_ray_ind'][i],
'rad1_rng_ind': coloc_data['rad1_rng_ind'][i],
'rad1_ele': coloc_data['rad1_ele'][i],
'rad1_azi': coloc_data['rad1_azi'][i],
'rad1_rng': coloc_data['rad1_rng'][i],
'rad1_dBZavg': coloc_data['rad1_dBZavg'][i],
'rad1_PhiDPavg': coloc_data['rad1_PhiDPavg'][i],
'rad1_Flagavg': coloc_data['rad1_Flagavg'][i],
'rad2_time': (
coloc_data['rad2_time'][i].strftime('%Y%m%d%H%M%S')),
'rad2_ray_ind': coloc_data['rad2_ray_ind'][i],
'rad2_rng_ind': coloc_data['rad2_rng_ind'][i],
'rad2_ele': coloc_data['rad2_ele'][i],
'rad2_azi': coloc_data['rad2_azi'][i],
'rad2_rng': coloc_data['rad2_rng'][i],
'rad2_dBZavg': coloc_data['rad2_dBZavg'][i],
'rad2_PhiDPavg': coloc_data['rad2_PhiDPavg'][i],
'rad2_Flagavg': coloc_data['rad2_Flagavg'][i]})
csvfile.close()
else:
with open(fname, 'a', newline='') as csvfile:
fieldnames = [
'rad1_time', 'rad1_ray_ind', 'rad1_rng_ind', 'rad1_ele',
'rad1_azi', 'rad1_rng', 'rad1_dBZavg', 'rad1_PhiDPavg',
'rad1_Flagavg', 'rad2_time', 'rad2_ray_ind', 'rad2_rng_ind',
'rad2_ele', 'rad2_azi', 'rad2_rng', 'rad2_dBZavg',
'rad2_PhiDPavg', 'rad2_Flagavg']
writer = csv.DictWriter(csvfile, fieldnames)
for i, rad1_time in enumerate(coloc_data['rad1_time']):
writer.writerow({
'rad1_time': rad1_time.strftime('%Y%m%d%H%M%S'),
'rad1_ray_ind': coloc_data['rad1_ray_ind'][i],
'rad1_rng_ind': coloc_data['rad1_rng_ind'][i],
'rad1_ele': coloc_data['rad1_ele'][i],
'rad1_azi': coloc_data['rad1_azi'][i],
'rad1_rng': coloc_data['rad1_rng'][i],
'rad1_dBZavg': coloc_data['rad1_dBZavg'][i],
'rad1_PhiDPavg': coloc_data['rad1_PhiDPavg'][i],
'rad1_Flagavg': coloc_data['rad1_Flagavg'][i],
'rad2_time': (
coloc_data['rad2_time'][i].strftime('%Y%m%d%H%M%S')),
'rad2_ray_ind': coloc_data['rad2_ray_ind'][i],
'rad2_rng_ind': coloc_data['rad2_rng_ind'][i],
'rad2_ele': coloc_data['rad2_ele'][i],
'rad2_azi': coloc_data['rad2_azi'][i],
'rad2_rng': coloc_data['rad2_rng'][i],
'rad2_dBZavg': coloc_data['rad2_dBZavg'][i],
'rad2_PhiDPavg': coloc_data['rad2_PhiDPavg'][i],
'rad2_Flagavg': coloc_data['rad2_Flagavg'][i]})
csvfile.close()
return fname | 2e786c6df8a617f187a7b50467111785342310c5 | 3,649,394 |