content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M) |
---|---|---|
def unique():
"""Return unique identification number."""
global uniqueLock
global counter
with uniqueLock:
counter = counter + 1
return counter | 12ac0e8f9ec5d4f8d6a41066f2325ef57d593d26 | 3,652,800 |
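# A minimal setup-and-usage sketch (not part of the original row): unique() relies on
# module-level `uniqueLock` and `counter` objects that are assumed to exist; the names
# below are taken from the function body.
import threading

uniqueLock = threading.Lock()  # serializes concurrent callers
counter = 0                    # shared counter, incremented under the lock

ids = []
threads = [threading.Thread(target=lambda: ids.append(unique())) for _ in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()
assert sorted(ids) == [1, 2, 3, 4]  # every caller received a distinct id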
def pointCoordsDP2LP(dpX, dpY, dptZero, lPix = 1.0):
"""Convert device coordinates into logical coordinates
dpX - x device coordinate
dpY - y device coordinate
dptZero - device coordinates of logical 0,0 point
lPix - zoom value, number of logical points inside one device point (aka pixel)
return point in logical coordinates
"""
return point.fromXY(xDP2LP(dpX, dptZero, lPix), yDP2LP(dpY, dptZero, lPix)) | 2494b5d95756aab33434969fe2b02917a4529ef9 | 3,652,801 |
def geocode_input(api_key, input, geolocator):
"""
    Use parallel processing to geocode a user-inputted address
    Parameters:
        api_key (string): Google API key
        input (string): user-inputted address
        geolocator: Google Maps API object that generates the geocode of an address
    Returns:
        string: the faulty address if geocoding failed, otherwise None
        string: formatted address of the inputted address
        float[]: (lat, lng) coordinates of the address
        string: the original inputted address
"""
#lessThanOneInt = True
#time.sleep(1)
#print(input)
faultyAddress = None
coords = None
address = None
#print('1')
# for every line of input, generate location object
placeid = database.fetch_placeid(input)
if len(placeid) == 0:
try:
location = geolocator.geocode(input + " NC") # IMPORTANT: NC must be changed for usage in different states.
coords = (location[0]['geometry']['location']['lat'], location[0]['geometry']['location']['lng'])
address = location[0]["formatted_address"]
database.insert_data(input, location[0]['place_id'], coords[0], coords[1], address)
except:
faultyAddress = str(input)
#print(faultyAddress)
else:
out_data = database.fetch_output_data(placeid[0][0])
address = out_data[0][2]
coords = [float(out_data[0][0]), float(out_data[0][1])]
# output data
return (faultyAddress, address, coords, input) | b7c31ccc1364364a704602438e263b107de9046c | 3,652,802 |
def satContact(sat_R, gs_R):
"""
Determines if satellite is within sight of a Ground Station
Parameters
----------
sat_R : numpy matrix [3, 1]
        - Input radius vector in Inertial System ([[X], [Y], [Z]])
gs_R : numpy matrix [3, 1]
        - Input radius vector in Inertial System ([[X], [Y], [Z]])
Returns
-------
inContact : int
        - 1 or 0 if sat is in sight or out of sight, respectively
See Also
--------
    Sun_Contact_Times : Determine if an orbit vector is illuminated
    Geo_Contact_Times : Determine if an orbit vector is within a
        geometric boundary
References
----------
[1] D. Vallado, `Fundamentals of Astrodynamics and Applications`. 4th ed.,
Microcosm Press, 2013.
- Modified Alg. 35, pg. 308
"""
# Simplifying equations
mag_sat = np.linalg.norm(sat_R)
mag_gs = np.linalg.norm(gs_R)
dot_ss = np.dot(np.transpose(sat_R), gs_R)
# Find minimum parametric value
Tmin = (((mag_sat ** 2) - dot_ss) /
((mag_sat ** 2) + (mag_gs ** 2) - 2 * (dot_ss)))
    if Tmin < 0 or Tmin > 1:
        InContact = 1  # Satellite can see GS
    else:
        cTmin = (((1 - Tmin) * (mag_sat ** 2) +
                  (dot_ss * Tmin)) / (6378.137 ** 2))
        # cTmin >= 1 means the point of closest approach lies at or above
        # Earth's surface (R_E = 6378.137 km), so the line of sight is clear
        if cTmin >= 1:
            InContact = 1  # Satellite can see GS
        else:
            InContact = 0  # Satellite can't see GS
    return InContact | 6fb6d5fc9121ddb0627f276a13446891f1da7542 | 3,652,803 |
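# A minimal usage sketch with illustrative numbers (not from the source): a satellite
# roughly 700 km directly above an equatorial ground station should be in sight.
import numpy as np

sat_R = np.array([[7078.0], [0.0], [0.0]])   # satellite position vector, km
gs_R = np.array([[6378.137], [0.0], [0.0]])  # ground station position vector, km
print(satContact(sat_R, gs_R))               # 1 -> satellite can see the ground station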
def determine_visible_field_names(hard_coded_keys, filter_string,
ref_genome):
"""Determine which fields to show, combining hard-coded keys and
the keys in the filter string.
"""
fields_from_filter_string = extract_filter_keys(filter_string, ref_genome)
return list(set(hard_coded_keys) | set(fields_from_filter_string)) | 2d885e7caa183916691def8abf685a6560f55309 | 3,652,804 |
def get_data_day(data: pd.DataFrame):
"""Get weekday/weekend designation value from data.
:param pandas.DataFrame data: the data to get day of week from.
:return: (*numpy.array*) -- indicates weekend or weekday for every day.
"""
return np.array(data["If Weekend"]) | 3e4654cf3ad3c2f0e213563e0dac3b21c7fb847c | 3,652,805 |
import sys
def bisection(f, a, b, power, iter_guess="yes"):
"""Given f(x) in [`a`,`b`] find x within tolerance, `tol`.
Root-finding method: f(x) = 0.
Parameters
----------
f : expression
Input function.
a : float
Left-hand bound of interval.
b : float
Right-hand bound of interval.
power : float
        Signed exponent of the tolerance; the method iterates until the error drops below 10**power.
iter_guess : string or integer
Optional argument that is string by default. If integer, iterate for that integer.
Returns
-------
P : list
Aggregate collection of evaluated points, `p`.
ERROR : list
        Propagation of `error` through method.
I : list
Running collection of iterations through method.
Raises
------
bad_iter : string
If input for desired iterations was assigned not an integer.
opposite_signs : string
If initial guesses did not evaluate to have opposite signs.
must_be_expression : string
If input `f` was of array, list, tuple, etcetera...
Warns
-----
solution_found : string
Inform user that solution was indeed found.
solution_not_found : string
If initial guess or tolerance were badly defined.
Notes
-----
Relying on the Intermediate Value Theorem, this is a bracketed, root-finding method. Generates a sequence {p_n}^{inf}_{n=1} to approximate a zero of f(x), `p` and converges by O(1 / (2**N)).
Examples
--------
If f(x) = x**3 + 4*x**2 = 10
=> f(x) = x**3 + 4*x**2 - 10 = 0
"""
a, b, tol = float(a), float(b), float(10**power)
# calculate if expression
if isinstance(f,(FunctionType, sp.Expr)):
# check if f(a) and f(b) are opposite signs
if f(a)*f(b) < 0:
P, ERROR, I = [], [], [] # initialize lists
if iter_guess == "yes":
# if left unassigned, guess
N = max_iterations(a, b, power, 'bisection')
elif isinstance(iter_guess, int):
# if defined as integer, use
N = iter_guess
# else, break for bad assignment
else: sys.exit("ERROR! " + bad_iter)
i, error = 0, tol*10 # initialize
# exit by whichever condition is TRUE first
while error >= tol and i <= N:
x = (b - a)/2
p = a + x # new value, p
P.append(p)
if f(a)*f(p) > 0: a = p # adjust next bounds
else: b = p
error = abs(x) # error of new value, p
ERROR.append(error); I.append(i)
i += 1 # iterate to i + 1
if i < N: print('Congratulations! ', solution_found)
else: print('Warning! ', solution_not_found)
# abort if f(a) is not opposite f(b)
else: sys.exit('ERROR! ' + opposite_signs)
# abort if not expression
else: sys.exit('ERROR! ' + must_be_expression)
return P, ERROR, I | eaaa1a28201fceaae39ced5edeb9e819a0c76ae1 | 3,652,806 |
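# A usage sketch for the docstring's example, assuming the module-level helpers the
# function references (max_iterations, bad_iter, solution_found, ..., FunctionType and
# sympy as sp) are defined elsewhere in the module:
f = lambda x: x**3 + 4*x**2 - 10            # f(1) < 0 < f(2), so [1, 2] brackets a root
P, ERROR, I = bisection(f, 1, 2, -6, iter_guess=25)
print(P[-1])                                # last iterate, close to the root ~1.36523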
def make_pretty(image, white_level=50):
"""Rescale and clip an astronomical image to make features more obvious.
This rescaling massively improves the sensitivity of alignment by
removing background and decreases the impact of hot pixels and cosmic
rays by introducing a white clipping level that should be set so that
most of a star's psf is clipped.
    Arguments:
    image -- the input image as a 2-D numpy array.
    white_level -- the clipping level as a multiple of the median-subtracted
    image's mean. For most images, 50 is good enough.
"""
pretty = (image - np.median(image)).clip(0)
pretty /= np.mean(pretty)
pretty = pretty.clip(0, white_level)
return pretty | c6d95a76db8aee7a8e2ca2bbc881094577e547ca | 3,652,807 |
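# A small illustrative check: a flat background with one bright pixel. After rescaling,
# the background sits at zero and the bright "star" is clipped at the white level.
import numpy as np

img = np.full((64, 64), 100.0)
img[32, 32] = 10_000.0
pretty = make_pretty(img, white_level=50)
print(pretty.min(), pretty.max())  # 0.0 50.0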
import requests
import json
import click
def get_examples_version(idaes_version: str):
"""Given the specified 'idaes-pse' repository release version,
identify the matching 'examples-pse' repository release version.
Args:
idaes_version: IDAES version, e.g. "1.5.0" or "1.5.0.dev0+e1bbb[...]"
Returns:
Examples version, or if there is no match, return None.
"""
# Fetch the idaes:examples version mapping from Github
compat_file = 'idaes-compatibility.json'
url = f"{GITHUB_API}/repos/{REPO_ORG}/{REPO_NAME}/contents/{compat_file}"
headers = {'Accept': 'application/vnd.github.v3.raw'}
_log.debug(f'About to call requests.get({url}, {headers})')
res = requests.get(url, headers=headers)
if not res.ok:
_log.debug(f'Problem getting mapping file: {res.json()}')
raise DownloadError(res.json())
try:
compat_mapping = json.loads(res.text)['mapping']
except KeyError:
# return the latest version instead
_log.warning('Ill-formed compatibility mapping file for examples repository:')
_log.debug(f'compat_mapping: {res.text}')
_log.info('Defaulting to latest released version of examples.')
return None
idaes_version_num = idaes_version
version_numbers = idaes_version.split('.')
if len(version_numbers) > 3:
idaes_version_num = '.'.join(version_numbers[:3])
click.echo(f"Warning: non-release version of IDAES detected. "
f"Using IDAES {idaes_version_num} as reference; "
f"examples version compatibility is not guaranteed.")
try:
examples_version = compat_mapping[idaes_version_num]
except KeyError:
# return the latest version instead, as above
_log.warning('IDAES version not found in compatibility mapping file. \
Defaulting to latest released version of examples.')
return None
    _log.debug(f'get_examples_version({idaes_version}): {examples_version}')
return examples_version | 6f9ee4d6cf9e9c542065d77ae6b7dcc41848247c | 3,652,808 |
def hash(data: bytes) -> bytes:
"""
Compute the hash of the input data using the default algorithm
Args:
data(bytes): the data to hash
Returns:
the hash of the input data
"""
return _blake2b_digest(data) | 62dec8f0e05b668dd486deb87bd3cc64a0cd5d08 | 3,652,809 |
import torch
def compute_cd_small_batch(gt, output,batch_size=50):
"""
compute cd in case n_pcd is large
"""
n_pcd = gt.shape[0]
dist = []
for i in range(0, n_pcd, batch_size):
last_idx = min(i+batch_size,n_pcd)
dist1, dist2 , _, _ = distChamfer(gt[i:last_idx], output[i:last_idx])
cd_loss = dist1.mean(1) + dist2.mean(1)
dist.append(cd_loss)
dist_tensor = torch.cat(dist)
cd_ls = (dist_tensor*10000).cpu().numpy().tolist()
return cd_ls | b7e1b22ab63624afd154a3228314a954304a3941 | 3,652,810 |
def find_sub_supra(axon, stimulus, eqdiff, sub_value=0, sup_value=0.1e-3):
"""
    'find_sub_supra' computes boundary values for the bisection method (used to identify the threshold)
Parameters
----------
axon (AxonModel): axon model
stimulus (StimulusModel): stimulus model
eqdiff (function): function that defines the ODE system
sub_value (float): initial guess of sub-threshold value (default is 0)
sup_value (float): initial guess of supra-threshold value (default is 0.1e-3)
Returns
-------
sub_value (float): sub-threshold value
sup_value (float): supra-threshold value
"""
# Identification of bound values
flag = 1
print('\n------------------------------------------------------')
print('Identifying sub and supra threshold values...')
print('------------------------------------------------------')
ts = timer()
while flag:
# update stimulus
stimulus.magnitude = -sup_value
stimulus.update_stimulus(axon)
# callback to save solution at each iteration of the integration
def solout(t, y):
time.append(t)
sol.append(y.copy())
# initialize solution variable
time = []
sol = []
# define integrator
r = ode(eqdiff).set_integrator('dopri5')
# set initial conditions
r.set_initial_value(axon.icond, 0).set_f_params(axon.Ga, axon.Gm, axon.Cm, stimulus.voltage_ext, axon.d, axon.l, axon.Vr)
# store solution at each iteration step
r.set_solout(solout)
# integrate
r.integrate(stimulus.tend)
# get complete solution
x = np.array(sol)
# get number of nodes with voltage > 80 mV
N80 = (np.max(x[:, 0:axon.node_num], axis=0) > 80e-3).sum()
if N80 > 3:
flag = 0
else:
sub_value = 1*sup_value
sup_value = 2 * sup_value
te = timer()
print('...done. (sub, sup) = ({},{})'.format(sub_value, sup_value))
print('\n elapsed time: {:3f} ms'.format(te - ts))
return sub_value, sup_value | 6efe62ac2d00d946422b1e0f915714cb9bd4dc50 | 3,652,811 |
def constantly(x):
"""constantly: returns the function const(x)"""
@wraps(const)
def wrapper(*args, **kwargs):
return x
return wrapper | 7fdc78248f6279b96a2d45edaa2f76abe7d60d54 | 3,652,812 |
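# A usage sketch, assuming the module-level `const` referenced by @wraps (and the
# functools.wraps import) are available: constantly(x) builds a function that ignores
# its arguments and always returns x.
always_five = constantly(5)
print(always_five())             # 5
print(always_five(1, 2, k=3))    # 5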
def ToBaseBand(xc, f_offset, fs):
"""
Parametros:
xc: Señal a mandar a banda base
f_offset: Frecuencia que esta corrido
fs: Frecuencia de muestreo
"""
if PLOT:
PlotSpectrum(xc, "xc", "xc_offset_spectrum.pdf", fs)
    # Bring it back to baseband by multiplying by a complex exponential with phase f_offset / fs
x_baseband = xc * np.exp((-1.0j * 2.0 * np.pi * f_offset/fs) * np.arange(len(xc)))
if PLOT:
PlotSpectrum(x_baseband, "x baseband", "x_baseband_spectrum.pdf", fs)
return x_baseband | 0389c3a25b3268b04be8c47cebaf1bbb6b863235 | 3,652,813 |
def hvp(
f: DynamicJaxFunction,
x: TracerOrArray,
v: TracerOrArray,
) -> TracerOrArray:
"""Hessian-vector product function"""
return jax.grad(lambda y: jnp.vdot(jax.grad(f)(y), v))(x) | 585ca7a5c749b6d393ae04e1e89f21f87c6f0269 | 3,652,814 |
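# A quick check, assuming the row's jax/jnp imports and type aliases are in scope:
# for f(x) = 0.5 * <x, x> the Hessian is the identity, so hvp(f, x, v) returns v.
import jax
import jax.numpy as jnp

f = lambda x: 0.5 * jnp.vdot(x, x)
x = jnp.array([1.0, 2.0, 3.0])
v = jnp.array([0.1, 0.2, 0.3])
print(hvp(f, x, v))  # ~[0.1 0.2 0.3]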
def concat_all_gather(tensor):
"""
Performs all_gather operation on the provided tensors.
*** Warning ***: torch.distributed.all_gather has no gradient.
"""
return hvd.allgather(tensor.contiguous()) | 97b2a3e43cf36adda6c517264f3307deb4d98ed6 | 3,652,815 |
from typing import List
import os
def find_files(
path: str,
skip_folders: tuple,
skip_files: tuple,
extensions: tuple = (".py",),
) -> List[str]:
"""Find recursively all files in path.
Parameters
----------
path : str
Path to a folder to find files in.
    skip_folders : tuple
        Skip any file whose path contains one of these folder names.
    skip_files : tuple
        Skip these specific file paths.
extensions : tuple, optional
Extensions to filter by. Default is (".py", )
Returns
-------
list
Sorted list of found files.
"""
found_files = []
for root, _dirs, files in os.walk(path, topdown=False):
for filename in files:
fpath = os.path.join(root, filename)
if any(folder in fpath for folder in skip_folders):
continue
if fpath in skip_files:
continue
if filename.endswith(extensions):
found_files.append(fpath)
return list(sorted(found_files)) | c3513c68cb246052f4ad677d1dc0116d253eae1a | 3,652,816 |
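# A usage sketch with illustrative paths: collect every .py file under the current
# directory, skipping virtualenv/cache folders and one specific file.
python_files = find_files(
    path=".",
    skip_folders=(".venv", "__pycache__"),
    skip_files=("./setup.py",),
)
print(python_files[:5])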
def get_min_area_rect(points):
"""
    Get the minimum-area bounding rectangle of a point set.
    :param points: contour points, an n*1*2 ndarray
    :return: the four corner points of the minimum-area rectangle, a 4*1*2 ndarray
    """
    rect = cv2.minAreaRect(points)  # minimum-area bounding rectangle
    box = cv2.boxPoints(rect)  # get the rectangle's four corner points
    box = np.int0(box)
    box = box[:, np.newaxis, :]  # reshape from 4*2 to 4*1*2
return box | 59b801e77d03d3f81227c645a55b2c56f2ce5959 | 3,652,817 |
def vector_to_cyclic_matrix(vec):
"""vec is the first column of the cyclic matrix"""
n = len(vec)
if vec.is_sparse():
matrix_dict = dict((((x+y)%n, y), True) for x in vec.dict() for y in xrange(n))
return matrix(GF(2), n, n, matrix_dict)
vec_list = vec.list()
matrix_lists = [vec_list[-i:] + vec_list[:-i] for i in xrange(n)]
return matrix(GF(2), n, n, matrix_lists) | 79fdb28f1b254de4700e1e163b95b4bdbf579294 | 3,652,818 |
def cfn_resource_helper():
""" A helper method for the custom cloudformation resource """
    # Custom logic goes here. This might include side effects or
    # producing a return value used elsewhere in your code.
logger.info("cfn_resource_helper logic")
return True | 865216f77f09681e36e8b8409a8673c8dbcdffa0 | 3,652,819 |
def get_ts_code_and_list_date(engine):
"""查询ts_code"""
return pd.read_sql('select ts_code,list_date from stock_basic', engine) | 4bd31cbadfdb92a70983d53c74426b0727ad4d0b | 3,652,820 |
def nested_cv_ridge(
X, y, test_index, n_bins=4, n_folds=3,
alphas = 10**np.linspace(-20, 20, 81),
npcs=[10, 20, 40, 80, 160, 320, None],
train_index=None,
):
"""
Predict the scores of the testing subjects based on data from the training subjects using ridge regression. Hyperparameters are chosen based on a nested cross-validation. The inner-loop of the nested cross-validation is a stratified k-fold cross-validation.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
y : ndarray of shape (n_samples, )
    test_index : ndarray of shape (n_test_samples, )
Indices for the samples that are used for testing.
n_bins : int
Training data are divided into `n_bins` bins for stratified k-fold cross-validation.
n_folds : int
Number of folds for stratified k-fold cross-validation.
alphas : {list, ndarray of shape (n_alphas, )}
Choices of the regularization parameter for ridge regression.
npcs : list
Choices of the number of PCs used in the prediction model in increasing order. Each element in the list should be an integer or `None`. `None` means all PCs are used.
    train_index : {None, ndarray of shape (n_training_samples, )}
Indices for the samples that are used for training. If it is `None`, then all the samples except for the test samples are used.
Returns
-------
yhat : ndarray of shape (n_test_samples, )
Predicted scores for the test samples.
alpha : float
The chosen element of `alphas` based on nested cross-validation.
npc : {int, None}
The chosen element of `npcs` based on nested cross-validation.
cost : float
The cost based on the chosen hyperparameters, which is the minimum cost for training data among all hyperparameter choices.
"""
if train_index is None:
train_index = np.setdiff1d(np.arange(X.shape[0], dtype=int), test_index)
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
bin_limits = np.histogram(y_train, n_bins)[1]
bins = np.digitize(y_train, bin_limits[:-1])
cv = StratifiedKFold(n_splits=n_folds)
costs = []
for train, test in cv.split(X_train, bins):
yhat = grid_ridge(X_train[train], X_train[test], y_train[train], alphas, npcs)
cost = ((y_train[test][:, np.newaxis, np.newaxis] - yhat)**2).sum(axis=0)
costs.append(cost)
costs = np.sum(costs, axis=0)
a, b = np.unravel_index(costs.argmin(), costs.shape)
alpha = alphas[a]
npc = npcs[b]
yhat = ridge(X_train, X_test, y_train, alpha, npc)
return yhat, alpha, npc, costs[a, b] | 47d5d8821b796031298a194aaf1781dc4df68a2f | 3,652,821 |
def absolute_time(time_delta, meta):
"""Convert a MET into human readable date and time.
Parameters
----------
time_delta : `~astropy.time.TimeDelta`
time in seconds after the MET reference
meta : dict
dictionary with the keywords ``MJDREFI`` and ``MJDREFF``
Returns
-------
time : `~astropy.time.Time`
absolute time with ``format='ISOT'`` and ``scale='UTC'``
"""
time = time_ref_from_dict(meta) + time_delta
return Time(time.utc.isot) | dd6c02be87840022e88769d3d70e67ce50f24d64 | 3,652,822 |
from controllers.main import main
from controllers.user import user
def create_app(object_name, env="prod"):
"""
Arguments:
object_name: the python path of the config object,
e.g. webapp.settings.ProdConfig
env: The name of the current environment, e.g. prod or dev
"""
app = Flask(__name__)
app.config.from_object(object_name)
app.config['ENV'] = env
# init the cache
cache.init_app(app)
# init SQLAlchemy
db.init_app(app)
login_manager.init_app(app)
# register our blueprints
app.register_blueprint(main)
app.register_blueprint(user)
return app | a2760a759f3afebf8e09c498398712fb26d44de8 | 3,652,823 |
import datetime
def yyyydoy_to_date(yyyydoy):
"""
Convert a string in the form of either 'yyyydoy' or 'yyyy.doy' to a
datetime.date object, where yyyy is the 4 character year number and doy
is the 3 character day of year
:param yyyydoy: string with date in the form 'yyyy.doy' or 'yyyydoy'
:return: datetime.date object
:rtype: datetime.date
"""
try:
if '.' in yyyydoy:
if len(yyyydoy) != 8:
raise ValueError('Invalid string: must be yyyydoy or yyyy.doy')
yyyy, doy = yyyydoy.split('.')
else:
if len(yyyydoy) != 7:
raise ValueError('Invalid string: must be yyyydoy or yyyy.doy')
yyyy = yyyydoy[0:4]
doy = yyyydoy[4:7]
return datetime.date(int(yyyy), 1, 1) + datetime.timedelta(int(doy) - 1)
except ValueError:
raise ValueError('Invalid string: must be yyyydoy or yyyy.doy') | b289419c14321afc37ea05501307e36203191fec | 3,652,824 |
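# A quick usage check: day 32 of 2021 is February 1st, and both accepted spellings agree.
print(yyyydoy_to_date("2021032"))   # 2021-02-01
print(yyyydoy_to_date("2021.032"))  # 2021-02-01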
# Optional here is pyparsing's Optional (the grammar below is a pyparsing expression);
# upkey, var, var_val and select_functions come from elsewhere in the module.
from pyparsing import Forward, Group, Keyword, OneOrMore, Optional, Suppress, delimitedList, oneOf
def create_selection():
""" Create a selection expression """
operation = Forward()
nested = Group(Suppress("(") + operation + Suppress(")")).setResultsName("nested")
select_expr = Forward()
functions = select_functions(select_expr)
maybe_nested = functions | nested | Group(var_val)
operation <<= maybe_nested + OneOrMore(oneOf("+ - * /") + maybe_nested)
select_expr <<= operation | maybe_nested
alias = Group(Suppress(upkey("as")) + var).setResultsName("alias")
full_select = Group(
Group(select_expr).setResultsName("selection") + Optional(alias)
)
return Group(
Keyword("*") | upkey("count(*)") | delimitedList(full_select)
).setResultsName("attrs") | 38a3eaef51d0559e796ce7b6bef6127a771a395d | 3,652,825 |
def move_nodes(source_scene, dest_scene):
"""
Moves scene nodes from the source scene to the destination scene.
:type source_scene: fbx.FbxScene
:type dest_scene: fbx.FbxScene
"""
source_scene_root = source_scene.GetRootNode() # type: fbx.FbxNode
dest_scene_root = dest_scene.GetRootNode() # type: fbx.FbxNode
for node in get_children(source_scene_root):
dest_scene_root.AddChild(node)
    # Although the original nodes are now attached to the destination scene's root node, they are still connected to the old one,
    # so those connections must be removed. Since there could be lots of children, it's better to disconnect the root node from the children.
source_scene_root.DisconnectAllSrcObject()
# Because the Scene Object also has connections to other types of FBX objects, they need to be moved too.
# (I'm guessing) Also since there could be only a single mesh in the FBX, the scene has connections to that too.
for index in range(source_scene.GetSrcObjectCount()):
fbx_obj = source_scene.GetSrcObject(index) # type: fbx.FbxObject
# Don't want to move the root node, the global settings or the Animation Evaluator (at this point)
# The equality check is split as the root node is an instance of fbx.FbxNode type but other objects such as fbx.FbxGlobalSettings
# are subclasses of the fbx.FbxNode type but NOT instances. A little weird but this works!
# The == equality check could be used as fallback for isinstance() if necessary
if isinstance(fbx_obj, type(source_scene_root)):
continue
elif issubclass(type(fbx_obj), (fbx.FbxGlobalSettings, fbx.FbxAnimEvaluator, fbx.FbxAnimStack, fbx.FbxAnimLayer)):
continue
else:
fbx_obj.ConnectDstObject(dest_scene)
# Now the scene can be disconnected as everything has been moved! (DO NOT FORGET THIS STEP)
return source_scene.DisconnectAllSrcObject() | 26a413736ab5fee46182f05247fe989d66358f19 | 3,652,826 |
def extract_values(*args):
"""
Wrapper around `extract_value`; iteratively applies that method to all items
in a list. If only one item was passed in, then we return that one item's
value; if multiple items were passed in, we return a list of the corresponding
item values.
"""
processed = [extract_value(arg) for arg in args]
if len(processed) == 1:
return processed[0]
return processed | 2906ca3aa42bfb47b231fd23b2a69a816399c255 | 3,652,827 |
def predefined_split(dataset):
"""Uses ``dataset`` for validiation in :class:`.NeuralNet`.
Examples
--------
>>> valid_ds = skorch.dataset.Dataset(X, y)
>>> net = NeuralNet(..., train_split=predefined_split(valid_ds))
Parameters
----------
dataset: torch Dataset
        Validation dataset
"""
return partial(_make_split, valid_ds=dataset) | 4f4f775e41b07efba3425bc2243d9766b41f5bc1 | 3,652,828 |
import os
import re
def writeBremDecay( # Might want a config later
lhe,
mAp,
eps,
zlims,
seed,
outdir,
outname,
nevents=10_000
):
""" Break A'->ee LHEs into brem and decay files and reformat/rescale """
# Create outdir if needed
if not os.path.exists(outdir): os.makedirs(outdir)
# Outfile names
bremfile = f'{outdir}/{outname}_brem.lhe'
decayfile = f'{outdir}/{outname}_decay.lhe'
decay_vs = f'{outdir}/{outname}_decay.dat'
print( f'Reformatting:\n{lhe}\nInto:\n{bremfile}\n{decayfile}')
    print( f'And vertices to:\n{decay_vs}')
# Creation XYZ
Sym = Symbol('q')
x_rv = Uniform(Sym, -10 , 10 )
y_rv = Uniform(Sym, -40 , 40 )
#z_rv = Uniform(Sym, -0.175, 0.175)
Xs = sample( x_rv, numsamples=nevents, seed=np.random.seed( seed ) )
Ys = sample( y_rv, numsamples=nevents, seed=np.random.seed( seed ) )
#Zs = sample( z_rv, numsamples=nevents, seed=np.random.seed( seed ) )
    # Detector limits (the pheno paper uses [270, 3200]; 4000 is a hard upper limit)
zmin = zlims[0]
zmax = zlims[1]
# Decay time
t = Symbol('t')
decay_width = phys_form.gamma_ap_tot(mAp, eps)
tau = phys_form.tau(mAp, eps)
decay_rv = Exponential(t, 1/tau)
decay_t = sample(
decay_rv,
numsamples=nevents,
seed=np.random.seed( seed )
)
# Will store information here
nevents_used = 0
# Open original and output files
with open(lhe, 'r') as ogfile, \
open(bremfile, 'w') as bremf, \
open(decayfile, 'w') as decayf, \
open(decay_vs, 'w') as decayvs:
# Write lims to .dat (plus extra 0s to maintain array shape)
decayvs.write( f'{zmin} {zmax} 0 0\n' )
##################################################
        # Edit header (technically everything until </init>)
        # Many of these conditions shouldn't need to be checked in the events section
##################################################
scaling_mass = False
for line in ogfile:
# ebeams
if re.search(r'ebeam',line):
line = phys_form.rescaleLine(line)
# Masses
if line[:10] == 'BLOCK MASS':
scaling_mass = True # Indicate following lines should be scaled
continue
if line[0] == '#':
scaling_mass = False
if scaling_mass:
line = phys_form.rescaleLine(line, tokens=[1])
# Decay Width
if re.match(r'DECAY +622', line):
line = phys_form.replaceNums(line, [1], [decay_width])
# Break from header/init
if line == '</init>\n':
bremf.write(line)
decayf.write(line)
break
bremf.write(line)
decayf.write(line)
##################################################
# Edit events
##################################################
event_num = 0
event_line = 0
current_line = 0
for line in ogfile: # Picks up where last loop leaves off
current_line += 1
# Scale relevant lines
if line == '<event>\n':
event_num += 1
event_line = 0
event_brem_lines = []
event_decay_lines = ['<event>\n']
if event_num % 1000 == 0:
print( 'Reformatting event: {}'.format(event_num) )
else: event_line += 1
if 1 < event_line < 9:
line = phys_form.rescaleLine(line, tokens=range(6,11))
# Event info line
if event_line ==1:
# Correct particle number
event_brem_lines.append( phys_form.replaceNums(line, [0], [5]) )
event_decay_lines.append(phys_form.replaceNums(line, [0], [2]) )
elif event_line < 7: # If first 5 write to bremfile
event_brem_lines.append(line)
if event_line == 6: # Take note of Ap info for projection
px,py,pz = [
float(v) for v in phys_form.numsInLine(line)[6:9]
]
Ap_3mom = np.array((px,py,pz))
elif event_line < 9: # decay electrons
# Null parents
event_decay_lines.append( phys_form.replaceNums(line, [2,3], [-1,-1]) )
# Skip mgrwt. add appropriate vertex, and end event
elif event_line == 16 :
# Prepare vertex samples
#x,y,z,t = next(Xs), next(Ys), next(Zs), next(decay_t)*(en/mAp)
x,y,z,t = next(Xs), next(Ys), 0, next(decay_t)
c_vertex = np.array( (x,y,z) )
d_vertex = c_vertex + Ap_3mom*phys_form.c_speed / mAp * t
# If not in allowed z, don't write event
if not (zmin < d_vertex[2] < zmax): continue
nevents_used += 1 # Else, count event as used
# If it is allowed, catch up the writing
for ln in event_brem_lines: bremf.write(ln)
for ln in event_decay_lines: decayf.write(ln)
                # Then add the vertices
bremf.write( '#vertex {} {} {}\n'.format(x,y,z) )
decayf.write( '#vertex {} {} {} {}\n'.format(*d_vertex,t) )
decayvs.write( '{} {} {} {}\n'.format(*d_vertex,t) )
# And end event
bremf.write(line)
decayf.write(line)
# End both
elif line == '</LesHouchesEvents>\n':
bremf.write(line)
decayf.write(line)
print(f'Using {nevents_used} events')
return bremfile, decayfile, nevents_used | c6ab2695ce8d4984acc9f1e50898089ab4f7aaf1 | 3,652,829 |
from typing import Union
def bgr_to_rgba(image: Tensor, alpha_val: Union[float, Tensor]) -> Tensor:
"""Convert an image from BGR to RGBA.
Args:
image (Tensor[B, 3, H, W]):
BGR Image to be converted to RGBA.
alpha_val (float, Tensor[B, 1, H, W]):
A float number or tensor for the alpha value.
Returns:
rgba (Tensor[B, 4, H, W]):
RGBA version of the image.
Notes:
Current functionality is NOT supported by Torchscript.
"""
if not isinstance(alpha_val, (float, Tensor)):
raise TypeError(f"`alpha_val` must be a `float` or `Tensor`. "
f"But got: {type(alpha_val)}.")
# Convert first to RGB, then add alpha channel
rgb = bgr_to_rgb(image)
rgba = rgb_to_rgba(rgb, alpha_val)
return rgba | 654cb3df7432d799b2a391bf5cfa19a15a26b1fa | 3,652,830 |
def d_matrix_1d(n, r, v):
"""Initializes the differentiation matrices on the interval.
Args:
n: The order of the polynomial.
r: The nodal points.
        v: The Vandermonde matrix.
Returns:
The gradient matrix D.
"""
vr = grad_vandermonde_1d(n, r)
return np.linalg.lstsq(v.T, vr.T, rcond=None)[0].T | a8d1df34726ea1ac6ef7b49209c45374cb2bed04 | 3,652,831 |
import functools
def compile_replace(pattern, repl, flags=0):
"""Construct a method that can be used as a replace method for sub, subn, etc."""
call = None
if pattern is not None and isinstance(pattern, RE_TYPE):
if isinstance(repl, (compat.string_type, compat.binary_type)):
repl = ReplaceTemplate(pattern, repl, bool(flags & FORMAT))
call = Replace(
functools.partial(_apply_replace_backrefs, repl=repl), repl.use_format, repl.pattern_hash
)
elif isinstance(repl, Replace):
if flags:
raise ValueError("Cannot process flags argument with a compiled pattern!")
if repl.pattern_hash != hash(pattern):
raise ValueError("Pattern hash doesn't match hash in compiled replace!")
call = repl
elif isinstance(repl, ReplaceTemplate):
if flags:
raise ValueError("Cannot process flags argument with a ReplaceTemplate!")
call = Replace(
functools.partial(_apply_replace_backrefs, repl=repl), repl.use_format, repl.pattern_hash
)
else:
raise TypeError("Not a valid type!")
else:
raise TypeError("Pattern must be a compiled regular expression!")
return call | eb753edeb9c212a28968eaf9c070aeeec8678d49 | 3,652,832 |
import six
def python_2_unicode_compatible(klass):
"""
From Django
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if six.PY2: # pragma: no cover
if '__str__' not in klass.__dict__:
raise ValueError("@python_2_unicode_compatible cannot be applied "
"to %s because it doesn't define __str__()." %
klass.__name__)
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass | 18c290d649e0299c72f85209c4db6a7a4b716300 | 3,652,833 |
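# A typical usage sketch: define only __str__ returning text and apply the decorator.
# Under Python 2 it moves __str__ to __unicode__ and makes __str__ return UTF-8 bytes;
# under Python 3 the class is returned unchanged.
@python_2_unicode_compatible
class Tag(object):
    def __init__(self, name):
        self.name = name

    def __str__(self):
        return u"tag:%s" % self.name

print(str(Tag("news")))  # tag:news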
import re
import logging
def ParseNewPingMsg(msg):
"""Attempt to parse the message for a ping (in the new format). Return the request and response strings
(json-ified dict) if parsing succeeded. Return None otherwise.
"""
parsed = re.match(kNewPingMsgRe, msg)
if not parsed:
return None
try:
return (parsed.group(1), parsed.group(2))
except IndexError as e:
logging.warning('RE matched "%s", but extracted wrong numbers of items: %r' % (msg, e))
return None | 6bca164892ea13b598af75d468580a7d4bd04d4c | 3,652,834 |
from faker import Faker
def parse_main_dict():
"""Parses dict to get the lists of
countries, cities, and fakers. Fakers allow generation of region specific fake data.
Also generates total number of agents
"""
Faker.seed(seed) # required to generate reproducible data
countries = main_dict.keys()
cities = [v['city'] for v in main_dict.values()]
fakers = [Faker(v['faker_abbrev']) for v in main_dict.values()]
total_agents = sum([v['number_of_agents'] for v in main_dict.values()])
return fakers, countries, cities, total_agents | 7cf9870c86c40bb2d1565479d6789d9cd7114024 | 3,652,835 |
import json
def format_payload(svalue):
"""formats mqtt payload"""
data = {"idx": IDX, "nvalue": 0, "svalue": svalue}
return json.dumps(data) | 1cbee0d5169acde802be176cc47a25c2db1c2f62 | 3,652,836 |
def load_auth_client():
"""Create an AuthClient for the portal
No credentials are used if the server is not production
Returns
-------
globus_sdk.ConfidentialAppAuthClient
Client used to perform GlobusAuth actions
"""
_prod = True
if _prod:
app = globus_sdk.ConfidentialAppAuthClient(GLOBUS_CLIENT,
GLOBUS_KEY)
else:
app = globus_sdk.ConfidentialAppAuthClient('', '')
return app | 8e16303fa80e775d94e669d96db24a9f7a63e0b6 | 3,652,837 |
def DCGAN_discriminator(img_dim, nb_patch, bn_mode, model_name="DCGAN_discriminator", use_mbd=True):
"""
Discriminator model of the DCGAN
args : img_dim (tuple of int) num_chan, height, width
pretr_weights_file (str) file holding pre trained weights
returns : model (keras NN) the Neural Net model
"""
list_input = [Input(shape=img_dim, name="disc_input_%s" % i) for i in range(nb_patch)]
if K.image_dim_ordering() == "th":
bn_axis = 1
else:
bn_axis = -1
nb_filters = 64
nb_conv = int(np.floor(np.log(img_dim[1]) / np.log(2)))
list_filters = [nb_filters * min(8, (2 ** i)) for i in range(nb_conv)]
# First conv
x_input = Input(shape=img_dim, name="discriminator_input")
# x = Convolution2D(list_filters[0], 3, 3, subsample=(2, 2), name="disc_conv2d_1", border_mode="same")(x_input)
# x = BatchNormalization(mode=bn_mode, axis=bn_axis)(x)
# x = LeakyReLU(0.2)(x)
x = MaxPooling2D(
pool_size=(2, 2), strides=(2, 2))(x_input)
x = Convolution2D(
list_filters[0]/8, 1, 1, activation='relu', init='glorot_uniform',
border_mode='same', name='disc_conv2d_1')(x)
x = BatchNormalization(mode=bn_mode, axis=bn_axis)(x)
e1 = Convolution2D(
list_filters[0]/2, 1, 1, activation='relu', init='glorot_uniform',
border_mode='same')(x)
e2 = Convolution2D(
list_filters[0]/2, 3, 3, activation='relu', init='glorot_uniform',
border_mode='same')(x)
x = merge(
[e1, e2], mode='concat', concat_axis=bn_axis)
# Next convs
for i, f in enumerate(list_filters[1:]):
name = "disc_conv2d_fire_%s" % (i + 2)
# x = Convolution2D(f, 3, 3, subsample=(2, 2), name=name, border_mode="same")(x)
# x = BatchNormalization(mode=bn_mode, axis=bn_axis)(x)
# x = LeakyReLU(0.2)(x)
x = MaxPooling2D(
pool_size=(2, 2), strides=(2, 2))(x)
x = Convolution2D(
f/8, 1, 1, activation='relu', init='glorot_uniform',
border_mode='same', name=name)(x)
x = BatchNormalization(mode=bn_mode, axis=bn_axis)(x)
e1 = Convolution2D(
f/2, 1, 1, activation='relu', init='glorot_uniform',
border_mode='same')(x)
e2 = Convolution2D(
f/2, 3, 3, activation='relu', init='glorot_uniform',
border_mode='same')(x)
x = merge(
[e1, e2], mode='concat', concat_axis=bn_axis)
x_flat = Flatten()(x)
x = Dense(2, activation='softmax', name="disc_dense")(x_flat)
PatchGAN = Model(input=[x_input], output=[x, x_flat], name="PatchGAN")
print("PatchGAN summary")
PatchGAN.summary()
x = [PatchGAN(patch)[0] for patch in list_input]
x_mbd = [PatchGAN(patch)[1] for patch in list_input]
if len(x) > 1:
x = merge(x, mode="concat", name="merge_feat")
else:
x = x[0]
if use_mbd:
if len(x_mbd) > 1:
x_mbd = merge(x_mbd, mode="concat", name="merge_feat_mbd")
else:
x_mbd = x_mbd[0]
num_kernels = 100
dim_per_kernel = 5
M = Dense(num_kernels * dim_per_kernel, bias=False, activation=None)
MBD = Lambda(minb_disc, output_shape=lambda_output)
x_mbd = M(x_mbd)
x_mbd = Reshape((num_kernels, dim_per_kernel))(x_mbd)
x_mbd = MBD(x_mbd)
x = merge([x, x_mbd], mode='concat')
x_out = Dense(2, activation="softmax", name="disc_output")(x)
discriminator_model = Model(input=list_input, output=[x_out], name=model_name)
return discriminator_model | 7aeabfffcc15a10c2eb2c81c795cbc4ff70a890b | 3,652,838 |
def common_stat_style():
"""
The common style for info statistics.
Should be used in a dash component className.
Returns:
(str): The style to be used in className.
"""
return "has-margin-right-10 has-margin-left-10 has-text-centered has-text-weight-bold" | 899381fc56e28ecd042e19507f6bc51ceeca3ef0 | 3,652,839 |
def TourType_LB_rule(M, t):
"""
Lower bound on tour type
:param M: Model
:param t: tour type
:return: Constraint rule
"""
return sum(M.TourType[i, t] for (i, s) in M.okTourType if s == t) >= M.tt_lb[t] | 0495e2d01c7d5d02e8bc85374ec1d05a8fdcbd91 | 3,652,840 |
import json
def build_auto_dicts(jsonfile):
"""Build auto dictionaries from json"""
dicts = {}
with open(jsonfile, "r") as jsondata:
data = json.load(jsondata)
for dicti in data:
partialstr = data[dicti]["partial"]
partial = bool(partialstr == "True")
dictlist = data[dicti]["list"]
autodict = AuDict(partial)
tag = get_tag(dicti)
autodict.set_base_tag(tag)
for dictdata in dictlist:
value = dictdata["value"]
applicants = dictdata["applicants"]
autodict.add_auto_value(value, applicants)
dicts[tag.tag] = autodict
return dicts | 50978acc9696647746e2065144fda8537d0c6dba | 3,652,841 |
def log_gammainv_pdf(x, a, b):
"""
log density of the inverse gamma distribution with shape a and scale b,
at point x, using Stirling's approximation for a > 100
"""
return a * np.log(b) - sp.gammaln(a) - (a + 1) * np.log(x) - b / x | 27bc239770e94cb68a27291abd01050f9780c4fb | 3,652,842 |
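# A sanity check added here (not from the source): assuming the row's `np` and `sp`
# are numpy and scipy.special, the formula matches scipy's inverse-gamma distribution
# with shape a and scale b.
import numpy as np
import scipy.special as sp
from scipy import stats

x, a, b = 2.5, 3.0, 4.0
print(log_gammainv_pdf(x, a, b))
print(stats.invgamma.logpdf(x, a, scale=b))  # should agree to numerical precision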
from pathlib import Path
def read_basin() -> gpd.GeoDataFrame:
"""Read the basin shapefile."""
basin = gpd.read_file(Path(ROOT, "HCDN_nhru_final_671.shp"))
basin = basin.to_crs("epsg:4326")
basin["hru_id"] = basin.hru_id.astype(str).str.zfill(8)
return basin.set_index("hru_id").geometry | 9d590d478b71bdd2a857ab8f0864144ac598cc58 | 3,652,843 |
from typing import Callable
from typing import Tuple
def cross_validate(estimator: BaseEstimator, X: np.ndarray, y: np.ndarray,
scoring: Callable[[np.ndarray, np.ndarray, ...], float], cv: int = 5) -> Tuple[float, float]:
"""
Evaluate metric by cross-validation for given estimator
Parameters
----------
estimator: BaseEstimator
Initialized estimator to use for fitting the data
X: ndarray of shape (n_samples, n_features)
Input data to fit
y: ndarray of shape (n_samples, )
Responses of input data to fit to
scoring: Callable[[np.ndarray, np.ndarray, ...], float]
Callable to use for evaluating the performance of the cross-validated model.
When called, the scoring function receives the true- and predicted values for each sample
and potentially additional arguments. The function returns the score for given input.
cv: int
Specify the number of folds.
Returns
-------
train_score: float
Average train score over folds
validation_score: float
Average validation score over folds
"""
X = X.flatten()
y = y.flatten()
kf_X = np.array_split(X, cv, axis=0)
kf_y = np.array_split(y, cv, axis=0)
# for param in range(k): # what is k?
X_wo_fold = np.concatenate(kf_X[1:])
y_wo_fold = np.concatenate(kf_y[1:])
train_scores = []
validation_score = []
for fold in range(cv):
cur_fold = kf_X[fold]
cur_fold_y = kf_y[fold]
if len(kf_y[fold + 1:]) == 0:
X_wo_fold = np.concatenate(kf_X[:-1])
y_wo_fold = np.concatenate(kf_y[:-1])
elif len(kf_X[:fold]) != 0:
X_wo_fold1, X_wo_fold2 = np.concatenate(
kf_X[:fold]), np.concatenate(kf_X[fold + 1:])
X_wo_fold = np.concatenate((X_wo_fold1, X_wo_fold2))
y_wo_fold1, y_wo_fold2 = np.concatenate(
kf_y[:fold]), np.concatenate(kf_y[fold + 1:])
y_wo_fold = np.concatenate((y_wo_fold1, y_wo_fold2))
h_i = estimator.fit(X_wo_fold, y_wo_fold)
y_pred_test = h_i.predict(cur_fold)
y_pred_train = h_i.predict(X_wo_fold)
cur_train_score = scoring(y_wo_fold, y_pred_train)
train_scores.append(cur_train_score)
cur_validation_score = scoring(cur_fold_y, y_pred_test)
validation_score.append(cur_validation_score)
return np.mean(train_scores), np.mean(validation_score) | c127b1cf68d011e76fdbf813673bf1d84a7520bb | 3,652,844 |
def unpack_request(environ, content_length=0):
"""
Unpacks a get or post request query string.
    :param environ: WSGI application environment.
:return: A dictionary with parameters.
"""
data = None
if environ["REQUEST_METHOD"] == "GET":
data = unpack_get(environ)
elif environ["REQUEST_METHOD"] == "POST":
data = unpack_post(environ, content_length)
logger.debug("read request data: %s", data)
return data | 02280666d6e4aee3ec1465cca17d7118a72b072b | 3,652,845 |
def GetMembership(name, release_track=None):
"""Gets a Membership resource from the GKE Hub API.
Args:
name: the full resource name of the membership to get, e.g.,
projects/foo/locations/global/memberships/name.
release_track: the release_track used in the gcloud command,
or None if it is not available.
Returns:
a Membership resource
Raises:
apitools.base.py.HttpError: if the request returns an HTTP error
"""
client = gkehub_api_util.GetApiClientForTrack(release_track)
return client.projects_locations_memberships.Get(
client.MESSAGES_MODULE.GkehubProjectsLocationsMembershipsGetRequest(
name=name)) | b2232faec0a2302ec554a8658cdf0a44f9374861 | 3,652,846 |
def receive_messages(queue, max_number, wait_time):
"""
Receive a batch of messages in a single request from an SQS queue.
Usage is shown in usage_demo at the end of this module.
:param queue: The queue from which to receive messages.
:param max_number: The maximum number of messages to receive. The actual number
of messages received might be less.
:param wait_time: The maximum time to wait (in seconds) before returning. When
this number is greater than zero, long polling is used. This
can result in reduced costs and fewer false empty responses.
:return: The list of Message objects received. These each contain the body
of the message and metadata and custom attributes.
"""
try:
messages = queue.receive_messages(
MessageAttributeNames=['All'],
MaxNumberOfMessages=max_number,
WaitTimeSeconds=wait_time
)
for msg in messages:
logger.info("Received message: %s: %s", msg.message_id, msg.body)
request = extract_request(msg.message_attributes)
recommendations = get_recommendations(request)
send_to_sns(request, recommendations)
except ClientError as error:
logger.exception("Couldn't receive messages from queue: %s", queue)
raise error
else:
return messages | dd422eb96ddb41513bcf248cf2dc3761a9b56191 | 3,652,847 |
def get_snmp_community(device, find_filter=None):
"""Retrieves snmp community settings for a given device
Args:
device (Device): This is the device object of an NX-API enabled device
using the Device class
        find_filter (str): optional arg to return only this specific community
Returns:
dictionary
"""
command = 'show snmp community'
data = device.show(command)
data_dict = xmltodict.parse(data[1])
c_dict = {}
try:
comm_table = data_dict['ins_api']['outputs']['output']['body'].get(
'TABLE_snmp_community')['ROW_snmp_community']
for each in comm_table:
community = {}
key = str(each['community_name'])
community['group'] = str(each['grouporaccess'])
community['acl'] = str(each['aclfilter'])
c_dict[key] = community
except (TypeError):
community = {}
        key = str(comm_table['community_name'])
community['group'] = str(comm_table['grouporaccess'])
community['acl'] = str(comm_table['aclfilter'])
c_dict[key] = community
except (KeyError, AttributeError):
return c_dict
if find_filter:
find = c_dict.get(find_filter, None)
if find_filter is None or find is None:
return {}
else:
return find | ae36269133fcc482c30bd29f58e44d3d1e10dcd1 | 3,652,848 |
def get_header_size(tif):
"""
Gets the header size of a GeoTIFF file in bytes.
The code used in this function and its helper function `_get_block_offset` were extracted from the following
source:
https://github.com/OSGeo/gdal/blob/master/swig/python/gdal-utils/osgeo_utils/samples/validate_cloud_optimized_geotiff.py
Copyright (c) 2017, Even Rouault
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
Parameters
----------
tif: str
A path to a GeoTIFF file of the currently processed NRB product.
Returns
-------
header_size: int
The size of all IFD headers of the GeoTIFF file in bytes.
"""
def _get_block_offset(band):
blockxsize, blockysize = band.GetBlockSize()
for y in range(int((band.YSize + blockysize - 1) / blockysize)):
for x in range(int((band.XSize + blockxsize - 1) / blockxsize)):
block_offset = band.GetMetadataItem('BLOCK_OFFSET_%d_%d' % (x, y), 'TIFF')
if block_offset:
return int(block_offset)
return 0
details = {}
ds = gdal.Open(tif)
main_band = ds.GetRasterBand(1)
ovr_count = main_band.GetOverviewCount()
block_offset = _get_block_offset(band=main_band)
details['data_offsets'] = {}
details['data_offsets']['main'] = block_offset
for i in range(ovr_count):
ovr_band = ds.GetRasterBand(1).GetOverview(i)
block_offset = _get_block_offset(band=ovr_band)
details['data_offsets']['overview_%d' % i] = block_offset
headers_size = min(details['data_offsets'][k] for k in details['data_offsets'])
if headers_size == 0:
headers_size = gdal.VSIStatL(tif).size
return headers_size | f7d41b9f6140e2d555c8de7e857612c692ebea16 | 3,652,849 |
def format_x_ticks_as_dates(plot):
"""Formats x ticks YYYY-MM-DD and removes the default 'Date' label.
Args:
plot: matplotlib.AxesSubplot object.
"""
plot.xaxis.set_major_formatter(mpl.dates.DateFormatter('%Y-%m-%d'))
plot.get_xaxis().get_label().set_visible(False)
return plot | 00838b40582c9205e3ba6f87192852af37a88e7a | 3,652,850 |
def operations():
"""Gets the base class for the operations class.
We have to use the configured base back-end's operations class for
this.
"""
return base_backend_instance().ops.__class__ | 845d50884e58491539fb9ebfcf0da62e5cad66d4 | 3,652,851 |
import mimetypes
def office_convert_get_page(request, repo_id, commit_id, path, filename):
"""Valid static file path inclueds:
- index.html for spreadsheets and index_html_xxx.png for images embedded in spreadsheets
- 77e168722458356507a1f373714aa9b575491f09.pdf
"""
if not HAS_OFFICE_CONVERTER:
raise Http404
if not _OFFICE_PAGE_PATTERN.match(filename):
return HttpResponseForbidden()
path = '/' + path
file_id = _office_convert_get_file_id(request, repo_id, commit_id, path)
if filename.endswith('.pdf'):
filename = "{0}.pdf".format(file_id)
if CLUSTER_MODE:
resp = cluster_get_office_converted_page(path, filename, file_id)
else:
resp = get_office_converted_page(request, filename, file_id)
if filename.endswith('.page'):
content_type = 'text/html'
else:
content_type = mimetypes.guess_type(filename)[0] or 'text/html'
resp['Content-Type'] = content_type
return resp | 48a3c5716b833e639a10c0366829185a1ce623aa | 3,652,852 |
def tensorize_data(
uvdata,
corr_inds,
ants_map,
polarization,
time,
data_scale_factor=1.0,
weights=None,
nsamples_in_weights=False,
dtype=np.float32,
):
"""Convert data in uvdata object to a tensor
Parameters
----------
uvdata: UVData object
UVData object containing data, flags, and nsamples to tensorize.
corr_inds: list
list of list of lists of 2-tuples. Hierarchy of lists is
chunk
group
baseline - (int 2-tuple)
ants_map: dict mapping integers to integers
map between each antenna number to a unique index between 0 and Nants_data
(typically the index of each antenna in ants_map)
polarization: str
pol-str of gain to extract.
time: float
time of data to convert to tensor.
data_scale_factor: float, optional
overall scaling factor to divide tensorized data by.
default is 1.0
weights: UVFlag object, optional
UVFlag weights object containing weights to use for data fitting.
default is None -> use nsamples * ~flags if nsamples_in_weights
or ~flags if not nsamples_in_weights
nsamples_in_weights: bool, optional
If True and weights is None, generate weights proportional to nsamples.
default is False.
dtype: numpy.dtype
data-type to store in tensor.
default is np.float32
Returns
-------
data_r: list of tf.Tensor objects
list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)
where ngrps, nbls are the dimensions of each sublist in corr_inds
and contain the real components of the baselines specified by these 2-tuples.
data_i: list of tf.Tensor objects
list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)
where ngrps, nbls are the dimensions of each sublist in corr_inds
and contain the imag components of the baselines specified by these 2-tuples.
wgts: tf.Tensor object
list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)
where ngrps, nbls are the dimensions of each sublist in corr_inds
and contain the weights of the baselines specified by these 2-tuples.
"""
ants_map_inv = {ants_map[i]: i for i in ants_map}
dshape = (uvdata.Nants_data, uvdata.Nants_data, uvdata.Nfreqs)
data_r = np.zeros(dshape, dtype=dtype)
data_i = np.zeros_like(data_r)
wgts = np.zeros_like(data_r)
wgtsum = 0.0
for chunk in corr_inds:
for fitgrp in chunk:
for (i, j) in fitgrp:
ap = ants_map_inv[i], ants_map_inv[j]
bl = ap + (polarization,)
dinds1, dinds2, pol_ind = uvdata._key2inds(bl)
if len(dinds1) > 0:
dinds = dinds1
conjugate = False
pol_ind = pol_ind[0]
else:
dinds = dinds2
conjugate = True
pol_ind = pol_ind[1]
dind = dinds[np.where(np.isclose(uvdata.time_array[dinds], time, rtol=0.0, atol=1e-7))[0][0]]
data = uvdata.data_array[dind, 0, :, pol_ind].squeeze()
iflags = ~uvdata.flag_array[dind, 0, :, pol_ind].squeeze()
nsamples = uvdata.nsample_array[dind, 0, :, pol_ind].squeeze()
data /= data_scale_factor
if conjugate:
data = np.conj(data)
data_r[i, j] = data.real.astype(dtype)
data_i[i, j] = data.imag.astype(dtype)
if weights is None:
wgts[i, j] = iflags
if nsamples_in_weights:
wgts[i, j] *= nsamples
else:
if ap in weights.get_antpairs():
dinds = weights.antpair2ind(*ap)
else:
dinds = weights.antpair2ind(*ap[::-1])
dind = dinds[np.where(np.isclose(weights.time_array[dinds], time, atol=1e-7, rtol=0.0))[0][0]]
polnum = np.where(
weights.polarization_array
== uvutils.polstr2num(polarization, x_orientation=weights.x_orientation)
)[0][0]
wgts[i, j] = weights.weights_array[dind, 0, :, polnum].astype(dtype) * iflags
if nsamples_in_weights:
wgts[i, j] *= nsamples
wgtsum += np.sum(wgts[i, j])
data_r = tf.convert_to_tensor(data_r, dtype=dtype)
data_i = tf.convert_to_tensor(data_i, dtype=dtype)
wgts = tf.convert_to_tensor(wgts / wgtsum, dtype=dtype)
nchunks = len(corr_inds)
data_r = [tf.gather_nd(data_r, corr_inds[cnum]) for cnum in range(nchunks)]
data_i = [tf.gather_nd(data_i, corr_inds[cnum]) for cnum in range(nchunks)]
wgts = [tf.gather_nd(wgts, corr_inds[cnum]) for cnum in range(nchunks)]
return data_r, data_i, wgts | 0a780bb022854c83341ed13c0a7ad0346bb43016 | 3,652,853 |
import torch
def _normalize_rows(t, softmax=False):
"""
Normalizes the rows of a tensor either using
a softmax or just plain division by row sums
Args:
t (:obj:`batch_like`)
Returns:
Normalized version of t where rows sum to 1
"""
if not softmax:
# EPSILON hack avoids occasional NaNs
row_sums = torch.sum(t, len(t.size())-1, keepdim=True) + EPSILON
#return torch.exp(torch.log(t)-torch.log(row_sums+EPSILON).expand_as(t))
return torch.div(t, row_sums.expand_as(t))
else:
s = nn.Softmax()
return s(t.view(-1, t.size(len(t.size())-1))).view(t.size()) | 3ffcedbaf279ead72414256290d2b88078aff468 | 3,652,854 |
def calculate_baselines(baselines: pd.DataFrame) -> dict:
"""
Read a file that contains multiple runs of the same pair. The format of the
file must be:
workload id, workload argument, run number, tFC, tVM
This function calculates the average over all runs of each unique pair of
workload id and workload argument.
"""
if type(baselines) is not pd.DataFrame:
raise TypeError("calculate_baselines: invalid object type passed.")
processed_baselines = {}
distinct_workloads = baselines[COLUMN_WORKLOAD].unique()
for workload in distinct_workloads:
# Filter for current workload
workload_baseline = baselines.loc[baselines[COLUMN_WORKLOAD] == workload]
# Get all the arguments
workload_arguments = workload_baseline[COLUMN_ARGUMENT].unique()
if workload not in processed_baselines:
processed_baselines[workload] = {}
for argument in workload_arguments:
workload_argument_baseline = workload_baseline.loc[
workload_baseline[COLUMN_ARGUMENT] == argument]
# Calculate the means of the timings for the workload-argument pair
tVM = round(workload_argument_baseline[COLUMN_TIMEVM].mean())
tFC = round(workload_argument_baseline[COLUMN_TIMEFC].mean())
processed_baselines[workload][argument] = [tFC, tVM]
return processed_baselines | 69cd0473fc21366e57d20ee39fceb704001aba1b | 3,652,855 |
def pick_ind(x, minmax):
""" Return indices between minmax[0] and minmax[1].
Args:
x : Input vector
minmax : Minimum and maximum values
Returns:
indices
"""
return (x >= minmax[0]) & (x <= minmax[1]) | 915a1003589b880d4edf5771a23518d2d4224094 | 3,652,856 |
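# A minimal usage sketch: boolean mask selecting the samples inside a closed value range.
import numpy as np

x = np.array([0.5, 1.5, 2.5, 3.5])
print(x[pick_ind(x, (1.0, 3.0))])  # [1.5 2.5]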
def read_files(file_prefix,start=0,end=100,nfmt=3,pixel_map=None):
"""
read files that have a numerical suffix
"""
images = []
format = '%' + str(nfmt) + '.' + str(nfmt) + 'd'
for j in range(start,end+1):
ext = format % j
file = file_prefix + '_' + ext + '.tif'
arr = read(file,pixel_map=pixel_map)
images.append(arr)
return images | 95d283f04b8ef6652da290396bb4649deedff665 | 3,652,857 |
def describing_function(
F, A, num_points=100, zero_check=True, try_method=True):
"""Numerical compute the describing function of a nonlinear function
The describing function of a nonlinearity is given by magnitude and phase
of the first harmonic of the function when evaluated along a sinusoidal
input :math:`A \\sin \\omega t`. This function returns the magnitude and
phase of the describing function at amplitude :math:`A`.
Parameters
----------
F : callable
The function F() should accept a scalar number as an argument and
return a scalar number. For compatibility with (static) nonlinear
input/output systems, the output can also return a 1D array with a
single element.
If the function is an object with a method `describing_function`
then this method will be used to computing the describing function
instead of a nonlinear computation. Some common nonlinearities
use the :class:`~control.DescribingFunctionNonlinearity` class,
which provides this functionality.
A : array_like
The amplitude(s) at which the describing function should be calculated.
zero_check : bool, optional
        If `True` (default), then when `A` is zero the function will be evaluated
        and checked to make sure it is zero. If not, a `ValueError` exception
        is raised. If zero_check is `False`, no check is made on the value of
        the function at zero.
try_method : bool, optional
If `True` (default), check the `F` argument to see if it is an object
with a `describing_function` method and use this to compute the
describing function. More information in the `describing_function`
method for the :class:`~control.DescribingFunctionNonlinearity` class.
Returns
-------
df : array of complex
The (complex) value of the describing function at the given amplitudes.
Raises
------
    ValueError
        If A[i] < 0, or if A[i] = 0 and the function F(0) is non-zero.
"""
# If there is an analytical solution, trying using that first
if try_method and hasattr(F, 'describing_function'):
try:
return np.vectorize(F.describing_function, otypes=[complex])(A)
except NotImplementedError:
# Drop through and do the numerical computation
pass
#
# The describing function of a nonlinear function F() can be computed by
# evaluating the nonlinearity over a sinusoid. The Fourier series for a
# static nonlinear function evaluated on a sinusoid can be written as
#
# F(A\sin\omega t) = \sum_{k=1}^\infty M_k(A) \sin(k\omega t + \phi_k(A))
#
# The describing function is given by the complex number
#
# N(A) = M_1(A) e^{j \phi_1(A)} / A
#
# To compute this, we compute F(A \sin\theta) for \theta between 0 and 2
# \pi, use the identities
#
# \sin(\theta + \phi) = \sin\theta \cos\phi + \cos\theta \sin\phi
# \int_0^{2\pi} \sin^2 \theta d\theta = \pi
# \int_0^{2\pi} \cos^2 \theta d\theta = \pi
#
# and then integrate the product against \sin\theta and \cos\theta to obtain
#
# \int_0^{2\pi} F(A\sin\theta) \sin\theta d\theta = M_1 \pi \cos\phi
# \int_0^{2\pi} F(A\sin\theta) \cos\theta d\theta = M_1 \pi \sin\phi
#
# From these we can compute M1 and \phi.
#
# Evaluate over a full range of angles (leave off endpoint a la DFT)
theta, dtheta = np.linspace(
0, 2*np.pi, num_points, endpoint=False, retstep=True)
sin_theta = np.sin(theta)
cos_theta = np.cos(theta)
# See if this is a static nonlinearity (assume not, just in case)
if not hasattr(F, '_isstatic') or not F._isstatic():
# Initialize any internal state by going through an initial cycle
for x in np.atleast_1d(A).min() * sin_theta:
F(x) # ignore the result
# Go through all of the amplitudes we were given
retdf = np.empty(np.shape(A), dtype=complex)
df = retdf # Access to the return array
df.shape = (-1, ) # as a 1D array
for i, a in enumerate(np.atleast_1d(A)):
# Make sure we got a valid argument
if a == 0:
# Check to make sure the function has zero output with zero input
if zero_check and np.squeeze(F(0.)) != 0:
raise ValueError("function must evaluate to zero at zero")
df[i] = 1.
continue
elif a < 0:
raise ValueError("cannot evaluate describing function for A < 0")
# Save the scaling factor to make the formulas simpler
scale = dtheta / np.pi / a
# Evaluate the function along a sinusoid
F_eval = np.array([F(x) for x in a*sin_theta]).squeeze()
        # Compute the projections onto sine and cosine
df_real = (F_eval @ sin_theta) * scale # = M_1 \cos\phi / a
df_imag = (F_eval @ cos_theta) * scale # = M_1 \sin\phi / a
df[i] = df_real + 1j * df_imag
# Return the values in the same shape as they were requested
return retdf | 4e9b779ba30f2588262e2ecff7a993d210533b59 | 3,652,858 |
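# Usage sketch (illustrative, not from the original source; assumes numpy is
# available as np): describing function of a unit saturation nonlinearity.
def _describing_function_example():
    saturation = lambda x: np.clip(x, -1.0, 1.0)
    amplitudes = [0.5, 2.0, 5.0]
    # For amplitudes below 1 the gain is ~1; above 1 it drops toward 0.
    return describing_function(saturation, amplitudes)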
from typing import List
def _read_point(asset: str, *args, **kwargs) -> List:
"""Read pixel value at a point from an asset"""
with COGReader(asset) as cog:
return cog.point(*args, **kwargs) | 246c98d55fd27465bc2c6f737cac342ccf9d52d8 | 3,652,859 |
def get_unquoted_text(token):
"""
:param token: Token
:return: String
"""
if isinstance(token, UnquotedText):
return token.value()
else:
raise exceptions.BugOrBroken(
"tried to get unquoted text from " + token) | 0fabfb504f725a84a75cada6e5d04a9aeda9a406 | 3,652,860 |
import numpy as np
import torch
import torchvision.transforms.functional as F  # provides F.to_tensor used below
def image2tensor(image: np.ndarray, range_norm: bool, half: bool) -> torch.Tensor:
"""Convert ``PIL.Image`` to Tensor.
Args:
image (np.ndarray): The image data read by ``PIL.Image``
range_norm (bool): Scale [0, 1] data to between [-1, 1]
        half (bool): Whether to cast the tensor from torch.float32 to torch.half.
Returns:
Normalized image data
Examples:
>>> image = cv2.imread("image.bmp", cv2.IMREAD_UNCHANGED).astype(np.float32) / 255.
>>> tensor_image = image2tensor(image, range_norm=False, half=False)
"""
tensor = F.to_tensor(image)
if range_norm:
tensor = tensor.mul_(2.0).sub_(1.0)
if half:
tensor = tensor.half()
return tensor | 86ab04d599ac9b1bfe2e90d0b719ea47dc8f7671 | 3,652,861 |
def panda_four_load_branch():
"""
    This function creates a simple six bus system with four radial low voltage nodes connected to \
    a medium voltage slack bus. At every low voltage node the same load is connected.
RETURN:
**net** - Returns the required four load system
EXAMPLE:
import pandapower.networks as pn
net_four_load = pn.panda_four_load_branch()
"""
pd_net = pp.create_empty_network()
busnr1 = pp.create_bus(pd_net, name="bus1", vn_kv=10.)
busnr2 = pp.create_bus(pd_net, name="bus2", vn_kv=.4)
busnr3 = pp.create_bus(pd_net, name="bus3", vn_kv=.4)
busnr4 = pp.create_bus(pd_net, name="bus4", vn_kv=.4)
busnr5 = pp.create_bus(pd_net, name="bus5", vn_kv=.4)
busnr6 = pp.create_bus(pd_net, name="bus6", vn_kv=.4)
pp.create_ext_grid(pd_net, busnr1)
pp.create_transformer(pd_net, busnr1, busnr2, std_type="0.25 MVA 10/0.4 kV")
pp.create_line(pd_net, busnr2, busnr3, name="line1", length_km=0.05,
std_type="NAYY 4x120 SE")
pp.create_line(pd_net, busnr3, busnr4, name="line2", length_km=0.05,
std_type="NAYY 4x120 SE")
pp.create_line(pd_net, busnr4, busnr5, name="line3", length_km=0.05,
std_type="NAYY 4x120 SE")
pp.create_line(pd_net, busnr5, busnr6, name="line4", length_km=0.05,
std_type="NAYY 4x120 SE")
pp.create_load(pd_net, busnr3, 30, 10)
pp.create_load(pd_net, busnr4, 30, 10)
pp.create_load(pd_net, busnr5, 30, 10)
pp.create_load(pd_net, busnr6, 30, 10)
return pd_net | dd5bc45a75943f0c078ab3bde9aa94b4bafc804f | 3,652,862 |
def word_flipper(our_string):
"""
Flip the individual words in a sentence
Args:
our_string(string): Strings to have individual words flip
Returns:
string: String with words flipped
"""
word_list = our_string.split(" ")
for idx in range(len(word_list)):
word_list[idx] = word_list[idx][::-1] # [index1:index2:step]
return " ".join(word_list) | fd484079407342925fc13583fb1fbee9ee472b14 | 3,652,863 |
import json
import base64
def load_json(ctx, param, value):
"""Decode and load json for click option."""
value = value[1:]
return json.loads(base64.standard_b64decode(value).decode()) | 99236d6fcde6c69a4bdadad4c6f3487d88fb7ce0 | 3,652,864 |
def hyperparam_search(model_config, train, test):
"""Perform hyperparameter search using Bayesian optimization on a given model and
dataset.
Args:
        model_config (dict): the model and the parameter ranges to search in. Format:
            {
                "name": str,
                "class": str,
                "model": sklearn.base.BaseEstimator,
                "params": dict
            }
train (pandas.DataFrame): training data
test (pandas.DataFrame): test data
"""
X_train = train.drop("label", axis=1)
y_train = train.label
X_test = test.drop("label", axis=1)
y_test = test.label
opt = BayesSearchCV(
model_config["model"],
model_config["params"],
n_jobs=4,
cv=5,
random_state=RANDOM_SEED,
)
opt.fit(X_train, y_train)
acc = opt.score(X_test, y_test)
print(f"{model_config['name']} results:")
print(f"Best validation accuracy: {opt.best_score_}")
print(f"Test set accuracy: {acc}")
print(f"Best parameters:")
for param, value in opt.best_params_.items():
print(f"- {param}: {value}")
return {
"name": model_config["name"],
"class": model_config["class"],
"model": opt.best_estimator_,
"params": opt.best_params_,
"score": acc,
} | 8f496a2c4494545ffdba2a5f63512ff45da4bb03 | 3,652,865 |
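# Usage sketch (illustrative; the config values are hypothetical and assume
# pandas, scikit-learn and scikit-optimize are installed, with RANDOM_SEED
# defined at module level as used above).
def _hyperparam_search_example():
    import pandas as pd
    from sklearn.linear_model import LogisticRegression
    df = pd.DataFrame({"x": list(range(40)), "label": [0, 1] * 20})
    config = {
        "name": "LogReg",
        "class": "logreg",
        "model": LogisticRegression(),
        "params": {"C": (1e-3, 1e3, "log-uniform")},
    }
    return hyperparam_search(config, df.iloc[:30], df.iloc[30:])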
def profile_tags(profile):
"""
Get the tags from a given security profile.
"""
# TODO: This is going to be a no-op now, so consider removing it.
return profile.id.split('_') | 3d3cda3d67e9574f31a7fea4aee714cca39af5db | 3,652,866 |
def _sawtooth_wave_samples(freq, rate, amp, num):
"""
Generates a set of audio samples taken at the given sampling rate
representing a sawtooth wave oscillating at the given frequency with
the given amplitude lasting for the given duration.
:param float freq The frequency of oscillation of the sawtooth wave
:param int rate The sampling rate
:param float amp The amplitude of the sawtooth wave
    :param int num The number of samples to generate.
:return List[float] The audio samples representing the signal as
described above.
"""
return [utils._sawtooth_sample(amp, freq, rate, i) for i in range(num)] | 4691fb94e1709c5dc1a1dcb8ed02795d0b3cfe40 | 3,652,867 |
from keras.models import Model
from keras.layers import Conv2D, SpatialDropout2D
from keras.layers import UpSampling2D, Reshape, concatenate
from keras.applications.resnet50 import ResNet50
def ResNet_UNet_Dropout(dim=512, num_classes=6, dropout=0.5, final_activation=True):
"""
    Returns a ResNet50 network with a U-Net
    like upsampling stage. Includes skip connections
    from previous ResNet50 layers.
    Uses a SpatialDropout2D on the final layer as introduced
    in https://arxiv.org/pdf/1411.4280.pdf, 2015.
    Input:
        dim - the size of the input image. Note that it should be
              a power of 2 so that downsampling and upsampling
              always match. i.e. 128 -> 64 -> 32 -> 64 -> 128
This is only needed for training.
num_classes - the number of classes in the whole problem. Used to
determine the dimension of output map. i.e. model.predict()
returns array that can be reshaped to (dim, dim,
num_classes).
Output:
model - an uncompiled keras model. Check output shape before use.
"""
# Import a headless ResNet50
resnet = ResNet50(input_shape = (None, None, 3), include_top=False)
# Attached U-net from second last layer - activation_49
res_out = resnet.layers[-2].output
# Standard U-Net upsampling 512 -> 256 -> 128 -> 64
# Upsampling 1 - 512
fs = 32
up1 = UpSampling2D(size=(2,2))(res_out)
up1_conv = Conv2D(fs, 2, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(up1)
prev_layer = resnet.get_layer("activation_40").output
merge1 = concatenate([prev_layer,up1_conv], axis = 3)
merge1_conv1 = Conv2D(fs, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(merge1)
merge1_conv2 = Conv2D(fs, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(merge1_conv1)
# Upsampling 2 - 256
fs = 32
up2 = UpSampling2D(size = (2,2))(merge1_conv2)
up2_conv = Conv2D(fs, 2, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(up2)
prev_layer = resnet.get_layer("activation_22").output
merge2 = concatenate([prev_layer,up2_conv], axis = 3)
merge2_conv1 = Conv2D(fs, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(merge2)
merge2_conv2 = Conv2D(fs, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(merge2_conv1)
# Upsampling 3 & 4 - 128
fs = 32
up3 = UpSampling2D(size = (2,2))(merge2_conv2)
up3_conv1 = Conv2D(fs, 2, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(up3)
up3_conv2 = Conv2D(fs, 2, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(up3_conv1)
up4 = UpSampling2D(size = (2,2))(up3_conv2)
up4_conv = Conv2D(fs, 2, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(up4)
prev_layer = resnet.get_layer("activation_1").output
merge3 = concatenate([prev_layer,up4_conv], axis = 3)
merge3_conv1 = Conv2D(fs, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(merge3)
merge3_conv2 = Conv2D(fs, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(merge3_conv1)
# Upsample 5 - 64
fs = 32
up5 = UpSampling2D(size=(2,2))(merge3_conv2)
up5_conv = Conv2D(fs, 2, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(up5)
merge5_conv1 = Conv2D(fs, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(up5_conv)
merge5_conv2 = Conv2D(fs, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(merge5_conv1)
# Drop Out
do = SpatialDropout2D(dropout)(merge5_conv2)
# Activation and reshape for training
if final_activation:
activation = Conv2D(num_classes, 1, activation="softmax")(do)
else:
activation = Conv2D(num_classes, 1, activation=None)(do)
output = Reshape((dim*dim, num_classes))(activation)
# Build model
model = Model(inputs=[resnet.input], outputs=[output])
return model | 6d99cbb9f5986a87e79653b03cc91ca652ca2d2d | 3,652,868 |
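# Usage sketch (illustrative; requires Keras with a TensorFlow backend and
# downloads ImageNet ResNet50 weights on first use).
def _resnet_unet_example():
    model = ResNet_UNet_Dropout(dim=512, num_classes=6, dropout=0.5)
    model.compile(optimizer="adam", loss="categorical_crossentropy")
    return model.output_shape  # expected: (None, 512 * 512, 6)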
import sqlite3
def _parse_accounts_ce(database, uid, result_path):
"""Parse accounts_ce.db.
Args:
database (SQLite3): target SQLite3 database.
uid (str): user id.
result_path (str): result path.
"""
    cursor = database.cursor()
    # The original module defines `query` elsewhere; this is the assumed query
    # against the `accounts` table of accounts_ce.db.
    query = 'SELECT name, type, password FROM accounts'
    try:
        cursor.execute(query)
except sqlite3.Error as exception:
logger.error('Accounts not found! {0!s}'.format(exception))
results = cursor.fetchall()
num_of_results = len(results)
data = {}
header = ('name', 'type', 'password')
data['title'] = 'accounts_ce'+f'_{uid}'
data['number_of_data_headers'] = len(header)
data['number_of_data'] = num_of_results
data['data_header'] = header
data_list = []
if num_of_results >0:
for row in results:
data_list.append((row[0], row[1], row[2]))
data['data'] = data_list
else:
logger.warning('NO Accounts found!')
return data | 05538c21342f854d8465a415c32f5e2ea4f3f14d | 3,652,869 |
from flask import current_app
def resolve_grant_endpoint(doi_grant_code):
"""Resolve the OpenAIRE grant."""
# jsonresolver will evaluate current_app on import if outside of function.
pid_value = '10.13039/{0}'.format(doi_grant_code)
try:
_, record = Resolver(pid_type='grant', object_type='rec',
getter=Record.get_record).resolve(pid_value)
return record
except Exception:
current_app.logger.error(
'Grant {0} does not exists.'.format(pid_value), exc_info=True)
raise | e3217aeda5e6dec935c3ccb96e1164be66083e4f | 3,652,870 |
from typing import Union
from pathlib import Path
from struct import unpack
def from_tiff(path: Union[Path, str]) -> OME:
"""Generate OME metadata object from OME-TIFF path.
This will use the first ImageDescription tag found in the TIFF header.
Parameters
----------
path : Union[Path, str]
Path to OME TIFF.
Returns
-------
ome: ome_types.model.ome.OME
ome_types.OME metadata object
Raises
------
ValueError
If the TIFF file has no OME metadata.
"""
with Path(path).open(mode="rb") as fh:
try:
offsetsize, offsetformat, tagnosize, tagnoformat, tagsize, codeformat = {
b"II*\0": (4, "<I", 2, "<H", 12, "<H"),
b"MM\0*": (4, ">I", 2, ">H", 12, ">H"),
b"II+\0": (8, "<Q", 8, "<Q", 20, "<H"),
b"MM\0+": (8, ">Q", 8, ">Q", 20, ">H"),
}[fh.read(4)]
except KeyError:
raise ValueError(f"{path!r} does not have a recognized TIFF header")
fh.read(4 if offsetsize == 8 else 0)
fh.seek(unpack(offsetformat, fh.read(offsetsize))[0])
for _ in range(unpack(tagnoformat, fh.read(tagnosize))[0]):
tagstruct = fh.read(tagsize)
if unpack(codeformat, tagstruct[:2])[0] == 270:
size = unpack(offsetformat, tagstruct[4 : 4 + offsetsize])[0]
if size <= offsetsize:
desc = tagstruct[4 + offsetsize : 4 + offsetsize + size]
break
fh.seek(unpack(offsetformat, tagstruct[-offsetsize:])[0])
desc = fh.read(size)
break
else:
raise ValueError(f"No OME metadata found in file: {path}")
if desc[-1] == 0:
desc = desc[:-1]
return from_xml(desc.decode("utf-8")) | 98ed750bba4b6aeaa791cc9041cf394e43fc50f9 | 3,652,871 |
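# Usage sketch (illustrative; the file path is hypothetical and must point to a
# real OME-TIFF on disk).
def _from_tiff_example(path="sample.ome.tif"):
    ome = from_tiff(path)
    return ome.images[0].pixels.size_x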
def increase_structure_depth(previous_architecture, added_block, problem_type):
"""Returns new structure given the old one and the added block.
Increases the depth of the neural network by adding `added_block`.
For the case of cnns, if the block is convolutional, it will add it before
the flattening operation. Otherwise, if it is a dense block, then it will
be added at the end.
For the dnn and rnn case, the added_block is always added at the end.
Args:
previous_architecture: the input architecture. An np.array holding
`blocks.BlockType` (i.e., holding integers).
added_block: a `blocks.BlockType` to add to previous_architecture.
problem_type: a `PhoenixSpec.ProblemType` enum.
Returns:
np.array of `blocks.BlockType` (integers).
"""
if added_block == blocks.BlockType.EMPTY_BLOCK:
return previous_architecture.copy()
output = previous_architecture.copy()
  # No problems for DNN or RNN
if problem_type != phoenix_spec_pb2.PhoenixSpec.CNN:
return np.append(output, added_block)
# TODO(b/172564129): Change this class (blocks) to a singleton
builder = blocks.Blocks()
# CNN case - convolution before fully connected.
if not builder[added_block].is_input_order_important:
return np.append(output, added_block)
# First block index in which order is not important
index_for_new_block = next(
index for index, block in enumerate(previous_architecture)
if not builder[block].is_input_order_important)
return np.insert(output, index_for_new_block, added_block) | 3735ca2c66a1a5856fb7fac69b6e02daf25868d2 | 3,652,872 |
def create_table_string(data, highlight=(True, False, False, False), table_class='wikitable', style=''):
"""
Takes a list and returns a wikitable.
@param data: The list that is converted to a wikitable.
@type data: List (Nested)
@param highlight: Tuple of rows and columns that should be highlighted.
(first row, last row, left column, right column)
@type highlight: Tuple
@param table_class: A string containing the class description.
See wikitable help.
@type table_class: String
@param style: A string containing the style description.
See wikitable help.
@type style: String
"""
last_row = len(data) - 1
last_cell = len(data[0]) - 1
table = '{{| class="{}" style="{}"\n'.format(table_class, style)
for key, row in enumerate(data):
if key == 0 and highlight[0] or key == last_row and highlight[1]:
row_string = '|-\n! ' + '\n! '.join(cell for cell in row)
else:
row_string = '|-'
cells = ''
for ckey, cell in enumerate(row):
if ckey == 0 and highlight[2]:
cells += '\n! ' + cell
elif ckey == last_cell and highlight[3]:
cells += '\n! ' + cell
else:
cells += '\n| ' + cell
row_string += cells
table += row_string + '\n'
table += '|}'
return table | f586fac681e1b4f06ad5e2a1cc451d9250fae929 | 3,652,873 |
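# Usage sketch (illustrative): a small wikitable with a highlighted header row.
def _create_table_string_example():
    data = [["Name", "Value"], ["alpha", "1"], ["beta", "2"]]
    return create_table_string(data, highlight=(True, False, False, False))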
from pathlib import Path
import os
def path_to_dnd(path: Path) -> str:
"""Converts a `Path` into an acceptable value for `tkinterdnd2.`"""
# tkinterdnd2 will only accept fs paths with forward slashes, even on Windows.
wants_sep = '/'
if os.path.sep == wants_sep:
return str(path)
else:
return wants_sep.join(str(path).split(os.path.sep)) | 61c4f88b944551f16f1baf127ddc3ccc5018267a | 3,652,874 |
def registry_dispatcher_document(self, code, collection):
"""
    This task receives a list of codes that should be queued for DOI registry
"""
return _registry_dispatcher_document(code, collection, skip_deposited=False) | 530b2d183e6e50dc475ac9ec258fc13bea76aa8d | 3,652,875 |
from typing import Collection
import requests
def get_reddit_oauth_scopes(
scopes: Collection[str] | None = None,
) -> dict[str, dict[str, str]]:
"""Get metadata on the OAUTH scopes offered by the Reddit API."""
# Set up the request for scopes
scopes_endpoint = "/api/v1/scopes"
scopes_endpoint_url = REDDIT_BASE_URL + scopes_endpoint
headers = {"User-Agent": USER_AGENT}
query_params = {}
if scopes:
query_params["scopes"] = scopes
# Make and process the request
response = requests.get(
scopes_endpoint_url,
params=query_params,
headers=headers,
timeout=REQUEST_TIMEOUT_S,
)
response.raise_for_status()
response_json: dict[str, dict[str, str]] = response.json()
return response_json | 0a55facfd07af259c1229aa30417b516b268602b | 3,652,876 |
def beta_reader(direc):
"""
Function to read in beta values for each tag
"""
path = direc
H_beta = np.loadtxt('%s/Beta Values/h_beta_final2.txt' % path)
Si_beta = np.loadtxt('%s/Beta Values/si_beta_final2.txt' % path)
He_emi_beta = np.loadtxt('%s/Beta Values/he_emi_beta_final2.txt' % path)
He_cyg_beta = np.loadtxt('%s/Beta Values/he_cyg_beta_final2.txt' % path)
He_abs_beta = np.loadtxt('%s/Beta Values/he_abs_beta_final2.txt' % path)
H_alp_beta = np.loadtxt('%s/Beta Values/h_alp_beta_final2.txt' % path)
Ca_beta = np.loadtxt('%s/Beta Values/ca_beta_final2.txt' % path)
iib_dp_beta = np.loadtxt('%s/Beta Values/iibdp_beta_final2.txt' % path)
Fe_beta = np.loadtxt('%s/Beta Values/fe_beta_final2.txt' % path)
S_beta = np.loadtxt('%s/Beta Values/s_beta_final2.txt' % path)
return H_beta,Si_beta,He_emi_beta,He_cyg_beta,He_abs_beta,H_alp_beta,Ca_beta,iib_dp_beta,Fe_beta,S_beta | ab8aef0acd6a9cd86301d5cc99e45511cf193a10 | 3,652,877 |
import os
def boto3_s3_upload(s3, dst, file):
"""Upload Item to s3.
    :param s3: -- boto3 S3 resource object.
    :param dst: -- str. Local directory containing the file.
    :param file: -- str. Name of the file to upload (also used as the S3 key).
    Return Type: str (the uploaded file name)
"""
s3.Object(settings.config_type['AWS_BUCKET'], file).put(Body=open(os.path.join(dst, file), 'rb'))
return file | 487d25cad72225ddde8ee91f8b12e1696c3163a0 | 3,652,878 |
def get_logging_format():
"""return the format string for the logger"""
formt = "[%(asctime)s] %(levelname)s:%(message)s"
return formt | 3380cdd34f1a44cf15b9c55d2c05d3ecb81116cb | 3,652,879 |
def plot_hydrogen_balance(results):
""" Plot the hydrogen balance over time """
n_axes = results["times"].shape[0]
fig = plt.figure(figsize=(6.0, 5.5))
fig.suptitle('Hydrogen production and utilization over the year', fontsize=fontsize+1, fontweight='normal', color='k')
axes = fig.subplots(n_axes)
for index, ax in enumerate(axes):
x1, y1 = results["times"][index, :] / 24, +results["H2_produced"][index, :]
x2, y2 = results["times"][index, :] / 24, -results["H2_utilized"][index, :]
for t in ax.xaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
for t in ax.yaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
ax.plot([0.0], [0.0], linestyle="", marker="", label="Period " + str(index + 1))
ax.plot(x1, y1, linewidth=0.75, linestyle='-', color='k', label="Produced")
ax.plot(x2, y2, linewidth=0.75, linestyle='-', color='r', label="Utilized")
ax.set_ylabel('Mass flow (kg/s)', fontsize=fontsize, color='k', labelpad=fontsize)
if index + 1 == n_axes:
ax.set_xlabel('Time (days)', fontsize=fontsize, color='k', labelpad=fontsize)
ax.legend(ncol=1, loc='lower right', fontsize=fontsize-1, edgecolor='k', framealpha=1.0)
dy = max(np.max(y1)-np.min(y2), 0.02)
ax.set_ylim([np.min(y2)-dy/5, np.max(y1)+dy/5])
fig.tight_layout()
return fig, axes | e352b1885b53ec9f5fc41f32f67afc5f86cae647 | 3,652,880 |
def ref_dw(fc, fmod):
"""Give the reference value for roughness by linear interpolation from the data
given in "Psychoacoustical roughness:implementation of an optimized model"
by Daniel and Weber in 1997
Parameters
----------
fc: integer
carrier frequency
fmod: integer
modulation frequency
Output
------
roughness reference values from the article by Daniel and Weber
"""
if fc == 125:
fm = np.array(
[
1.0355988,
10.355987,
11.132686,
13.851132,
18.511328,
20.064724,
24.724918,
31.32686,
41.423946,
49.967636,
57.34628,
64.33657,
72.10356,
90.74434,
79.4822,
86.084145,
91.909386,
100.45307,
]
)
R = np.array(
[
0.0,
0.04359673,
0.09468665,
0.16416894,
0.19482289,
0.27656674,
0.3113079,
0.34196186,
0.32356948,
0.26226157,
0.20299728,
0.15803815,
0.11512262,
0.0619891,
0.09264305,
0.07016349,
0.05177112,
0.03950954,
]
)
if fc == 250:
fm = np.array(
[
0.7373272,
3.9324117,
9.585254,
14.2549925,
16.71275,
19.907835,
22.611366,
23.594471,
29.493088,
30.47619,
37.112137,
41.29032,
47.926266,
50.13825,
51.121353,
53.08756,
54.07066,
56.774193,
58.248848,
62.427036,
61.68971,
69.308754,
68.57143,
71.27496,
73.73272,
73.97849,
75.207375,
79.139786,
79.139786,
84.792625,
90.19969,
97.81874,
104.70046,
112.31951,
120.92166,
129.76959,
]
)
R = np.array(
[
0.00432277,
0.00576369,
0.06340057,
0.16138329,
0.17435159,
0.26945245,
0.32132566,
0.3443804,
0.42651296,
0.44668588,
0.47694525,
0.4668588,
0.42651296,
0.46253604,
0.41210374,
0.4020173,
0.43948126,
0.37463978,
0.39193085,
0.3631124,
0.3429395,
0.3040346,
0.28242075,
0.27521613,
0.259366,
0.24207492,
0.24351585,
0.2204611,
0.20461094,
0.17146975,
0.14697406,
0.11815562,
0.09942363,
0.07636888,
0.05619597,
0.04322766,
]
)
if fc == 500:
fm = np.array(
[
7.6375403,
15.79288,
20.841423,
26.666666,
30.93851,
34.43366,
40.2589,
44.919094,
49.190937,
51.521034,
57.34628,
64.33657,
69.77346,
74.04531,
81.42395,
87.63754,
94.23948,
102.78317,
116.763756,
129.57928,
140.84143,
149.77347,
160.2589,
]
)
R = np.array(
[
0.04972752,
0.1253406,
0.23569483,
0.35013625,
0.46457765,
0.5258856,
0.619891,
0.67302454,
0.69346046,
0.69550407,
0.6873297,
0.67098093,
0.6321526,
0.57901907,
0.5074932,
0.4400545,
0.38487738,
0.3153951,
0.22752044,
0.16621253,
0.11920981,
0.08651226,
0.06811989,
]
)
if fc == 1000:
fm = np.array(
[
0.0,
3.884415,
9.7237625,
17.147604,
29.302307,
37.933605,
48.504757,
55.145306,
55.948395,
57.480103,
60.618927,
63.314735,
65.28852,
67.201035,
69.55657,
76.14433,
77.2943,
82.847725,
83.352325,
88.26008,
89.019806,
93.92756,
94.4309,
97.78904,
99.06719,
104.23258,
103.963005,
106.03293,
109.89504,
111.18953,
115.05101,
117.38172,
119.95311,
125.630646,
132.60141,
137.24963,
144.47617,
151.19432,
159.97737,
]
)
R = np.array(
[
0.0,
0.00211198,
0.03450088,
0.1382977,
0.40437,
0.60555416,
0.80238307,
0.89103884,
0.9516347,
0.90182984,
0.9753813,
0.92339617,
0.9969634,
0.92983717,
0.9882475,
0.9556905,
0.92104256,
0.89138556,
0.86107534,
0.83503467,
0.7960629,
0.7700222,
0.736826,
0.71946436,
0.6819286,
0.6529984,
0.6284707,
0.62555665,
0.5764418,
0.5764243,
0.52586645,
0.52727795,
0.48683867,
0.44491437,
0.40008652,
0.3726063,
0.3205599,
0.29016566,
0.24531329,
]
)
if fc == 2000:
fm = np.array(
[
0.0,
4.4051557,
7.5956764,
10.048887,
12.017292,
15.69636,
17.911657,
20.366364,
20.619616,
25.28251,
27.987852,
30.20053,
31.18548,
34.37525,
34.38161,
39.782192,
39.298134,
42.23989,
42.981316,
45.18539,
44.95683,
46.663754,
48.13538,
50.358532,
53.04068,
55.264206,
56.971127,
58.68778,
60.890354,
62.367218,
62.84529,
65.06246,
67.00842,
68.48715,
71.90736,
73.62214,
76.79096,
79.24305,
81.67831,
85.10337,
91.45038,
93.655945,
96.586105,
96.33435,
98.04801,
106.5901,
107.57281,
115.62524,
118.07209,
120.26419,
121.97673,
129.54285,
131.255,
134.91576,
135.15628,
136.87106,
144.92911,
159.83092,
]
)
R = np.array(
[
0.00271003,
0.00538277,
0.04194128,
0.06631085,
0.10694477,
0.1407891,
0.18955104,
0.21934068,
0.250504,
0.30331025,
0.35477808,
0.39405492,
0.41708192,
0.4509304,
0.47396567,
0.54031587,
0.55929023,
0.5809457,
0.60803974,
0.6161512,
0.674419,
0.65407926,
0.66761696,
0.74483424,
0.71229106,
0.7908634,
0.7705236,
0.7854143,
0.78810567,
0.8206137,
0.779959,
0.83549607,
0.79482895,
0.83411205,
0.8164678,
0.8245834,
0.78255093,
0.8028555,
0.76218426,
0.76215523,
0.7119658,
0.7254973,
0.7051472,
0.67940396,
0.6834545,
0.6088561,
0.62375295,
0.5478037,
0.549138,
0.5138889,
0.5138744,
0.4487694,
0.44739988,
0.41484842,
0.39994115,
0.40805677,
0.3524327,
0.27371538,
]
)
if fc == 4000:
fm = np.array(
[
3.1950846,
16.221199,
23.840246,
29.984638,
30.230415,
37.112137,
37.603687,
45.714287,
51.85868,
57.265743,
63.90169,
68.57143,
74.47005,
78.156685,
82.33487,
88.97082,
98.064514,
108.14132,
115.02304,
123.870964,
128.78648,
133.21045,
143.04147,
151.39784,
155.08449,
157.29646,
160.24577,
]
)
R = np.array(
[
0.00432277,
0.11383285,
0.23054755,
0.29538906,
0.31123918,
0.39337176,
0.41066283,
0.50864553,
0.5907781,
0.62680113,
0.6426513,
0.65273774,
0.64841497,
0.6440922,
0.6152738,
0.5720461,
0.5158501,
0.45677233,
0.41210374,
0.3631124,
0.34149855,
0.3184438,
0.2795389,
0.24495678,
0.24783862,
0.23919308,
0.24063401,
]
)
if fc == 8000:
fm = np.array(
[
4.6498036,
7.1022663,
8.569778,
16.16957,
23.037289,
24.018497,
25.735521,
27.451048,
30.885843,
33.578465,
34.319515,
38.48526,
40.206398,
42.654747,
45.355972,
50.995964,
52.953144,
55.896774,
56.631092,
60.54957,
61.772808,
63.238823,
66.18058,
68.86871,
70.58611,
72.78196,
74.744,
78.409225,
80.61181,
82.31723,
86.23272,
87.20532,
90.384995,
91.11295,
96.73499,
100.39909,
106.50631,
117.26071,
127.28154,
137.0596,
145.37276,
154.66376,
159.55597,
]
)
R = np.array(
[
0.0053807,
0.02704024,
0.0256728,
0.08251926,
0.14614701,
0.15562384,
0.17186953,
0.18269515,
0.21789658,
0.22329386,
0.24903294,
0.27338803,
0.30453888,
0.31129324,
0.3478559,
0.3952338,
0.39521724,
0.42364773,
0.42499653,
0.43986857,
0.4398582,
0.4330707,
0.4547261,
0.44386315,
0.46146387,
0.43976498,
0.4573636,
0.44107231,
0.4437637,
0.4180039,
0.42203578,
0.40034726,
0.39761028,
0.3759238,
0.35826093,
0.3379046,
0.30533242,
0.2686558,
0.23334044,
0.20480223,
0.18711658,
0.1667126,
0.16396113,
]
)
return np.interp(fmod, fm, R) | adf7a67c7b9d4448074f6ccd5fbf8e62c52b113d | 3,652,881 |
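# Usage sketch (illustrative; assumes numpy as np): reference roughness for a
# 1 kHz carrier modulated at 70 Hz, interpolated from the data above.
def _ref_dw_example():
    return ref_dw(1000, 70)  # close to the ~1 asper maximum for this carrier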
from typing import Optional
def points_2d_inside_image(
width: int,
height: int,
camera_model: str,
points_2d: np.ndarray,
points_3d: Optional[np.ndarray] = None,
) -> np.ndarray:
"""Returns the indices for an array of 2D image points that are inside the image canvas.
Args:
width: Pixel width of the image canvas.
height: Pixel height of the image canvas.
camera_model: One of `opencv_pinhole`, `opencv_fisheye`, `pd_fisheye`.
More details in :obj:`~.model.sensor.CameraModel`.
points_2d: A matrix with dimensions (nx2) containing the points that should be tested
if inside the image canvas. Points must be in image coordinate system (x,y).
points_3d: Optional array of size (nx3) which provides the 3D camera coordinates for each point. Required for
camera models `opencv_pinhole` and `opencv_fisheye`.
Returns:
An array with dimensions (n,).
"""
if camera_model in (CAMERA_MODEL_OPENCV_PINHOLE, CAMERA_MODEL_OPENCV_FISHEYE) and points_3d is None:
raise ValueError(f"`points_3d` must be provided for camera model {camera_model}")
    if points_3d is not None and len(points_2d) != len(points_3d):
raise ValueError(
f"Mismatch in length between `points_2d` and `points_3d` with {len(points_2d)} vs. {len(points_3d)}"
)
return np.where(
(points_2d[:, 0] >= 0)
& (points_2d[:, 0] < width)
& (points_2d[:, 1] >= 0)
& (points_2d[:, 1] < height)
& (points_3d[:, 2] > 0 if camera_model in (CAMERA_MODEL_OPENCV_PINHOLE, CAMERA_MODEL_OPENCV_FISHEYE) else True)
) | 95d235e475555c184e95b1e30c3cac686fe3e65f | 3,652,882 |
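# Usage sketch (illustrative; assumes numpy as np and the CAMERA_MODEL_*
# constants defined in this module).
def _points_2d_inside_image_example():
    points_2d = np.array([[10.0, 20.0], [-5.0, 40.0], [100.0, 700.0]])
    points_3d = np.array([[0.1, 0.2, 5.0], [0.0, 0.1, 4.0], [0.3, 0.2, -1.0]])
    # Only the first point lies inside a 1920x1080 canvas with positive depth.
    return points_2d_inside_image(1920, 1080, CAMERA_MODEL_OPENCV_PINHOLE,
                                  points_2d, points_3d)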
import torch
def list2tensors(some_list):
"""
:math:``
Description:
Implemented:
[True/False]
Args:
(:):
(:):
Default:
Shape:
- Input: list
- Output: list of tensors
Examples::
"""
t_list=[]
for i in some_list:
t_list.append(torch.tensor(i))
return t_list | 35efe7c13c8c4f75266eceb912e8afccd25408cf | 3,652,883 |
def interpret_input(inputs):
""" convert input entries to usable dictionaries """
for key, value in inputs.items(): # interpret each line's worth of entries
if key in ['v0', 'y0', 'angle']: # for variables, intepret distributions
converted = interpret_distribution(key, value) # use a separate method to keep things clean
elif key == 'metric': # metrics are easy, they're just a list
converted = list(x.strip().lower() for x in value.split(','))
for c in converted: # check the metrics are valid entries
if c not in ['mean', 'std', 'percentile']:
raise IOError('Unrecognized metric:', c)
else:
raise IOError('Unrecognized keyword entry: {} = {}'.format(key, value))
inputs[key] = converted # replace the key with the converted values
return inputs | 5a68f8e551ae3e31e107ab5a6a9aacc2db358263 | 3,652,884 |
def time(prompt=None, output_hour_clock=24, milli_seconds=False, fill_0s=True, allow_na=False):
"""
Repeatedly ask the user to input hours, minutes and seconds until they input valid values and return this in a defined format
:param prompt: Message to display to the user before asking them for inputs. Default: None
:param output_hour_clock: Whether to output in 24 hour clock or in 12 hour clock with AM/PM. Default: 24
:param milli_seconds: Whether or not to allow more accuracy in seconds. Default: False
    :param fill_0s: Whether or not to fill numerical times with leading 0s. Default: True
:param allow_na: Whether or not to allow empty inputs too. Default: False
"""
extras = None if allow_na else [""]
output_hour_clock = assert_valid(output_hour_clock, SpecNumList([12, 24], None, True), "param output_hour_clock")
if prompt is not None:
print(prompt, "\n")
input_hour_clock = validate_input(SpecNumList([12, 24], None, True), "Input hour clock (12/24): ")
if input_hour_clock == 12:
hours = validate_input(SpecNumRange(1, 12, None, True, extras), "Hours (12 hour clock): ")
period = validate_input(SpecStr(["am", "pm"], extra_values=extras), "AM or PM? ")
if hours == 12:
hours = 0
if period == "pm":
hours += 12
else:
hours = validate_input(SpecNumRange(0, 23, None, True, extras), "Hours (24 hour clock): ")
minutes = validate_input(SpecNumRange(0, 59, None, True, extras), "Minutes: ")
if milli_seconds:
seconds = validate_input(SpecNumRange(0, 59.999999, 6, False, extras), "Seconds including decimal: ")
else:
seconds = validate_input(SpecNumRange(0, 59, 0, True, extras), "Seconds: ")
if hours is not None and output_hour_clock == 12:
if hours < 12:
period = "AM"
else:
period = "PM"
hours %= 12
if hours == 0:
hours = 12
if fill_0s:
if hours is not None and hours < 10:
hours = "0" + str(hours)
if minutes is not None and minutes < 10:
minutes = "0" + str(minutes)
if seconds is not None and seconds < 10:
seconds = "0" + str(seconds)
to_return = "{}:{}:{}".format(hours, minutes, seconds)
if output_hour_clock == 12:
to_return += " {}".format(period)
return to_return | 82c0d8fae1f82e3f19b6af220ada5fadcea63bb3 | 3,652,885 |
def byol_a_url(ckpt, refresh=False, *args, **kwargs):
"""
The model from URL
ckpt (str): URL
"""
return byol_a_local(_urls_to_filepaths(ckpt, refresh=refresh), *args, **kwargs) | c9a8ce31ae5b6b59832d8ae9bb4e05d697f96cc9 | 3,652,886 |
from collections import deque
def bellman_ford(g, start):
"""
Given an directed graph with possibly negative edge weights and with n vertices and m edges as well
as its vertex s, compute the length of shortest paths from s to all other vertices of the graph.
Returns dictionary with vertex as key.
- If vertex not present in the dictionary, then it is not reachable from s
- If distance to vertex is None, then this vertex is reachable from a negative cycle
- Otherwise, value of a dictionary is the length of a path from s to a vertex
"""
dist = {}
prev = {}
dist[start] = 0
def __construct_path(t):
path = []
path.append(t)
u = prev[t]
while u in prev and u != t:
path.append(u)
u = prev[u]
path.reverse()
return path
c = Graph()
for _ in g.get_vertices():
relaxed = False
for e in g.get_edges():
u = e.start
v = e.end
w = e.weight
if u not in dist:
continue
if v not in dist or dist[u] + w < dist[v]:
dist[v] = dist[u] + w
prev[v] = u
relaxed = True
c.add_edge(u, v, w)
if not relaxed:
return dist
ncv = set()
for e in g.get_edges():
u = e.start
v = e.end
w = e.weight
if u not in dist:
continue
if v in dist and dist[u] + w < dist[v]:
for x in __construct_path(u):
ncv.add(x)
dist[v] = dist[u] + w
prev[v] = u
for v in ncv:
if v not in dist:
continue
if dist[v] is None:
continue
visited = set()
q = deque()
q.append(v)
while q:
x = q.popleft()
dist[x] = None
visited.add(x)
for e in c.get_edges(x):
if e.end in visited:
continue
q.append(e.end)
return dist | dd09de61d26a6ee988e549c5a0f8aafdf54b78ab | 3,652,887 |
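# Usage sketch (illustrative; assumes the module-level Graph class exposes
# add_edge(start, end, weight) as used inside bellman_ford).
def _bellman_ford_example():
    g = Graph()
    g.add_edge('s', 'a', 4)
    g.add_edge('s', 'b', 3)
    g.add_edge('b', 'a', -2)  # negative edge but no negative cycle
    return bellman_ford(g, 's')  # expected: {'s': 0, 'a': 1, 'b': 3}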
import locale
from datetime import datetime
from xml.etree import ElementTree
def _read_date(settings_file):
"""Get the data from the settings.xml file
Parameters
----------
settings_file : Path
path to settings.xml inside open-ephys folder
Returns
-------
datetime
start time of the recordings
Notes
-----
The start time is present in the header of each file. This might be useful
if 'settings.xml' is not present.
"""
locale.setlocale(locale.LC_TIME, 'en_US.utf-8')
root = ElementTree.parse(settings_file).getroot()
for e0 in root:
if e0.tag == 'INFO':
for e1 in e0:
if e1.tag == 'DATE':
break
return datetime.strptime(e1.text, '%d %b %Y %H:%M:%S') | 2f762bd7e190323acc44e5408c5f0977069d8828 | 3,652,888 |
def conv_res_step(x, hparams, padding, mask):
"""One step of convolutions and mid-residual."""
k = (hparams.kernel_height, hparams.kernel_width)
k2 = (hparams.large_kernel_size, 1)
dilations_and_kernels1 = [((1, 1), k), ((1, 1), k)]
dilations_and_kernels2 = [((1, 1), k2), ((4, 4), k2)]
with tf.variable_scope("conv_res_step"):
y = common_layers.subseparable_conv_block(
x, hparams.filter_size, dilations_and_kernels1,
padding=padding, mask=mask, separabilities=0, name="residual1")
y = tf.nn.dropout(y, 1.0 - hparams.dropout)
return common_layers.subseparable_conv_block(
y, hparams.hidden_size, dilations_and_kernels2,
padding=padding, mask=mask, separabilities=0, name="residual2") | e0d2728f4991112a0dbd504121048f8670a4406b | 3,652,889 |
import six
from typing import Any
def _get_kind_name(item):
"""Returns the kind name in CollectionDef.
Args:
item: A data item.
Returns:
The string representation of the kind in CollectionDef.
"""
if isinstance(item, (six.string_types, six.binary_type)):
kind = "bytes_list"
elif isinstance(item, six.integer_types):
kind = "int64_list"
elif isinstance(item, float):
kind = "float_list"
elif isinstance(item, Any):
kind = "any_list"
else:
kind = "node_list"
return kind | 094298763f9bf1e3e7a421c19e08016f2138b7d7 | 3,652,890 |
def Froude_number(v, h, g=9.80665):
"""
Calculate the Froude Number of the river, channel or duct flow,
to check subcritical flow assumption (if Fr <1).
Parameters
------------
v : int/float
Average velocity [m/s].
h : int/float
        Mean hydraulic depth [m].
g : int/float
Gravitational acceleration [m/s2].
Returns
---------
Fr : float
Froude Number of the river [unitless].
"""
assert isinstance(v, (int,float)), 'v must be of type int or float'
assert isinstance(h, (int,float)), 'h must be of type int or float'
assert isinstance(g, (int,float)), 'g must be of type int or float'
Fr = v / np.sqrt( g * h )
return Fr | 754225397baa6a27ae58adc63f09bba5287f18e9 | 3,652,891 |
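# Worked example: v = 2 m/s over a mean hydraulic depth of 1.5 m gives
# Fr = 2 / sqrt(9.80665 * 1.5) ≈ 0.52, i.e. subcritical flow (Fr < 1).
def _froude_number_example():
    return Froude_number(2, 1.5)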
from typing import Callable
from typing import Any
def handle_error(
func: Callable[[Command | list[Command]], Any]
) -> Callable[[str], Any]:
"""Handle tradfri api call error."""
@wraps(func)
async def wrapper(command: Command | list[Command]) -> None:
"""Decorate api call."""
try:
await func(command)
except RequestError as err:
_LOGGER.error("Unable to execute command %s: %s", command, err)
return wrapper | 1604f8ae224a9fb565f81ae70d74c24e68e60b9e | 3,652,892 |
def write(ser, command, log):
"""Write command to serial port, append what you write to log."""
ser.write(command)
summary = " I wrote: " + repr(command)
log += summary + "\n"
    print(summary)
return log | 769e345d90121d4bf2d8cc23c128c2a588cba37c | 3,652,893 |
def anscombe(x):
"""Compute Anscombe transform."""
return 2 * np.sqrt(x + 3 / 8) | 9a47318733568892c4695db2cf153e59e78bb8d7 | 3,652,894 |
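# Quick check (assumes numpy as np): Anscombe transform of small counts.
def _anscombe_example():
    return anscombe(np.array([0, 1, 10]))  # ≈ [1.2247, 2.3452, 6.4420]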
def max_accuracy(c1, c2):
"""
Relabel the predicted labels *in order* to
achieve the best accuracy, and return that
score and the best labelling
Parameters
----------
c1 : np.array
numpy array with label of predicted cluster
c2 : np.array
numpy array with label of true cluster
"""
c1 = c1.astype(str)
c2 = c2.astype(str)
match_satimage = pd.DataFrame({"Guess": c1, "True": c2})
match_satimage['match'] = match_satimage['Guess'] + '_t' + match_satimage['True']
comparison = pd.DataFrame(match_satimage['match'])
A = comparison.value_counts()
sum = 0
clusters = []
c1new = np.copy(c1).astype(int)
j = 0
for i in range(len(A)):
C_str = A[[i]].index.values[0][0]
#print(C_str)
CTL = C_str.split('_')
if CTL[0] in clusters or CTL[1] in clusters or CTL[0] == '-1':
pass
else:
c1new[c1 == CTL[0]] = CTL[1][1:]
clusters.append(CTL[0])
clusters.append(CTL[1])
sum = sum + int(A[[i]])
#print(clusters)
#print(sum)
j = j + 1
accuracy = sum/len(c1)
return accuracy, c1new.astype(int) | 7ec438b500463859c27ea94d315312b88f5954f1 | 3,652,895 |
def create_sphere():
"""Create and return a single sphere of radius 5."""
sphere = rt.sphere()
sphere.radius = 5
return sphere | a8d5e2e8c0ec7d00f75c4007214d21aa0d2b64ad | 3,652,896 |
def calc_entropy(data):
"""
Calculate the entropy of a dataset.
Input:
- data: any dataset where the last column holds the labels.
Returns the entropy of the dataset.
"""
entropy = 0.0
###########################################################################
# TODO: Implement the function. #
###########################################################################
labels = np.unique(data[:, -1])
for label in labels:
entropy -= (np.count_nonzero(data[:, -1] == label) / data.shape[0]) * np.log2(np.count_nonzero(data[:, -1] == label) / data.shape[0])
pass
###########################################################################
# END OF YOUR CODE #
###########################################################################
return entropy | 418054f9a36b100daf788814e8549bc818e2e27a | 3,652,897 |
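# Usage sketch (assumes numpy as np): a perfectly balanced binary label column
# yields an entropy of 1 bit.
def _calc_entropy_example():
    data = np.array([[0.5, 0], [1.5, 0], [2.5, 1], [3.5, 1]])
    return calc_entropy(data)  # expected: 1.0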
import time
def get_input(prompt=None):
"""Sets the prompt and waits for input.
:type prompt: None | list[Text] | str
"""
if not isinstance(prompt, type(None)):
if type(prompt) == str:
text_list = [Text(prompt, color=prompt_color,
new_line=True)]
elif type(prompt) == list:
text_list = prompt
else:
raise Exception("Must be None, str, or list[Text]")
update_textbox("events", text_list)
_user_input = check_input()
while isinstance(_user_input, type(None)):
time.sleep(.1)
if not is_running():
return None
_user_input = check_input()
return _user_input | bbcd5bbd7f97bff8d213d13afe22ae9111849e10 | 3,652,898 |
def alpha_liq(Nu, lyambda_feed, d_inner):
"""
    Calculates the coefficient of heat transfer (alpha) from the liquid to the pipe wall.
    Parameters
    ----------
    Nu : float
        The Nusselt criterion, [dimensionless]
    lyambda_feed : float
        The thermal conductivity of feed, [W / (m * degree Celsius)]
    d_inner : float
        The diameter of the inner pipe, [m]
    Returns
    -------
    alpha_liq : float
        The coefficient of heat transfer (alpha), [W / (m**2 * degree Celsius)]
    References
    ----------
    Romankov, formula 4.11, p. 150
    """
return Nu * lyambda_feed / d_inner | 13d0371248c106fb0f12d26335381675d7484000 | 3,652,899 |
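# Worked example: Nu = 120, lyambda_feed = 0.58 W/(m*°C), d_inner = 0.05 m
# gives alpha = 120 * 0.58 / 0.05 = 1392 W/(m**2 * °C).
def _alpha_liq_example():
    return alpha_liq(120, 0.58, 0.05)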