| content | sha1 | id |
|---|---|---|
| stringlengths 35 to 762k | stringlengths 40 | int64 0 to 3.66M |
def SecureBytesEqual( a, b ):
"""Returns the equivalent of 'a == b', but avoids content based short
circuiting to reduce the vulnerability to timing attacks."""
# Consistent timing matters more here than data type flexibility
# We do NOT want to support py2's str type because iterating over them
# (below) produces different results.
if type( a ) != bytes or type( b ) != bytes:
raise TypeError( "inputs must be bytes instances" )
# We assume the length of the expected digest is public knowledge,
# thus this early return isn't leaking anything an attacker wouldn't
# already know
if len( a ) != len( b ):
return False
# We assume that integers in the bytes range are all cached,
# thus timing shouldn't vary much due to integer object creation
result = 0
for x, y in zip( a, b ):
result |= x ^ y
return result == 0 | 1ba46089d94f544b53a47e09dbbcf95dd5b594a0 | 3,651,200 |
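A minimal usage sketch for the constant-time comparison above; the hashlib digests are illustrative and not part of the original snippet:
import hashlib
expected = hashlib.sha256(b"secret-token").digest()
received = hashlib.sha256(b"secret-token").digest()
SecureBytesEqual(expected, received)                                   # True
SecureBytesEqual(expected, hashlib.sha256(b"other-token").digest())    # False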
import base64
import pickle
def encode(something):
"""
We encode all messages as base64-encoded pickle objects in case
later on, we want to persist them or send them to another system.
This is extraneous for now.
"""
return base64.b64encode(pickle.dumps(something)) | 89c9c855b8b66aadc55c1602e133906d3220691a | 3,651,201 |
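A round-trip sketch for the helper above; the decoding side is an assumption, since the original snippet only encodes:
decoded = pickle.loads(base64.b64decode(encode({"answer": 42})))
assert decoded == {"answer": 42}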
def scrape_proposal_page(browser, proposal_url):
"""
Navigates to the page giving details about a piece of legislation, scrapes
that data, and adds a model to the database session. Returns the new DB
model.
"""
browser.get(proposal_url)
file_number = int(extract_text(browser.find_element_by_css_selector(
'#ctl00_ContentPlaceHolder1_lblFile2'
)))
proposal_title = extract_text(browser.find_element_by_css_selector(
'#ctl00_ContentPlaceHolder1_lblTitle2'
))
proposal_type = extract_text(browser.find_element_by_css_selector(
'#ctl00_ContentPlaceHolder1_lblIntroduced2'
))
proposal_status = extract_text(browser.find_element_by_css_selector(
'#ctl00_ContentPlaceHolder1_lblStatus2'
))
introduction_date = parse_date(extract_text(
browser.find_element_by_css_selector(
'#ctl00_ContentPlaceHolder1_lblIntroduced2'
)
))
db_proposal = db.Proposal(file_number, proposal_title)
db_proposal.status = proposal_status
db_proposal.proposal_type = proposal_type
db_proposal.introduction_date = introduction_date
db.session.add(db_proposal)
db.session.flush()
    # TODO probably should refactor this out at least
return db_proposal | a15b01721e9fea658d07a0a878df2f2ac58fa2f7 | 3,651,202 |
def installRecommendation(install, uninstall, working_set=working_set, tuples=False):
"""Human Readable advice on which modules have to be installed on
current Working Set.
"""
installList = []
for i in install:
is_in = False
for p in working_set:
if i[0] == p.key and i[1] == p.version:
is_in = True
break
if not is_in:
if not tuples:
print('~~ Install: '+i[0]+' version '+i[1])
else:
installList.append((i[0], i[1]))
for u in uninstall:
is_in = False
for p in working_set:
if u[0] == p.key and u[1] == p.version:
is_in = True
break
if is_in:
if not tuples:
print('~~ Uninstall: '+u[0]+' version '+u[1])
return installList | bf3083d4bcb50bdc27c382ccd9ea1dfc7b8cdb71 | 3,651,203 |
from numpy import sin, cos  # assumed: the snippet uses bare sin/cos on array inputs
def obsangle(thetas, phis, alpha_obs):
"""
    Return the cosine of the observer angle for the different shockwave segments and
    an observer at an angle alpha_obs with respect to the jet axis
    (contained in the yz plane).
"""
#u_obs_x, u_obs_y, u_obs_z = 0., sin(alpha_obs), cos(alpha_obs)
u_obs_y, u_obs_z = sin(alpha_obs), cos(alpha_obs)
#seg_x =
seg_y = sin(thetas)*sin(phis)
seg_z = cos(thetas)
#return arccos(u_obs_x*seg_x + u_obs_y*seg_y + u_obs_z*seg_z)
return u_obs_y*seg_y + u_obs_z*seg_z | 6fc03a386a97f63d3ad10d291d1528bf7fb45720 | 3,651,204 |
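A usage sketch, assuming numpy's sin/cos are in scope as the snippet implies and that all angles are in radians; the grid values are illustrative:
import numpy as np
thetas = np.linspace(0.0, np.pi / 6, 10)     # segment polar angles
phis = np.linspace(0.0, 2 * np.pi, 10)       # segment azimuthal angles
mu_obs = obsangle(thetas, phis, alpha_obs=np.radians(5.0))   # cosines of the observer angle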
def _image_tensor_input_placeholder(input_shape=None):
"""Returns input placeholder and a 4-D uint8 image tensor."""
if input_shape is None:
input_shape = (None, None, None, 3)
input_tensor = tf.placeholder(
dtype=tf.uint8, shape=input_shape, name='image_tensor')
return input_tensor, input_tensor | bd3c339da4b8f0eea482687cecf28a4625d3f84c | 3,651,205 |
def _load_top_bonds(f, topology, **kwargs):
"""Take a mol2 file section with the heading '@<TRIPOS>BOND' and save to the topology.bonds attribute."""
while True:
line = f.readline()
if _is_end_of_rti(line):
line = line.split()
bond = Bond(
connection_members=(
topology.sites[int(line[1]) - 1],
topology.sites[int(line[2]) - 1],
)
)
topology.add_connection(bond)
else:
break
return line | e6605428a99720ff0d773a1db2a8363d61e38ca3 | 3,651,206 |
def timelength_to_phrase(
timelength: spec.Timelength,
from_representation: spec.TimelengthRepresentation = None,
) -> spec.TimelengthPhrase:
"""convert Timelength to TimelengthPhrase
## Inputs
- timelength: Timelength
- from_representation: str representation name of input timelength
## Returns
- TimelengthPhrase timelength
"""
return convert_timelength(
timelength=timelength,
to_representation='TimelengthPhrase',
from_representation=from_representation,
) | a840b69625c968cda4a1e686a61298e2809ffde0 | 3,651,207 |
def order_columns(self: DataFrame, order: str = "asc", by_dtypes: bool = False):
"""
Rearrange the columns in alphabetical order.
An option of rearrangement by dtypes is possible.
    :param self:
    :param order: 'asc' or 'desc' ordering of the column names
:param by_dtypes: boolean to rearrange by dtypes first
"""
if order not in ['asc', 'desc']:
raise Exception("'{}' is not an acceptable ordering value, you can only use {'asc','desc'}".format(order))
if by_dtypes:
dtypes_dict = dict()
for col, dtype in self.dtypes:
dtypes_dict.setdefault(dtype, list())
dtypes_dict[dtype].append(col)
dtypes_dict = dict(sorted(dtypes_dict.items()))
columns = [col for values in dtypes_dict.values()
for col in sorted(values)]
return self.select(columns)
else:
return self.select(sorted(self.columns, reverse=False if order == "asc" else True)) | c05f4b13b26b041c86816c15a375943713a6dcdb | 3,651,208 |
import sys
import version  # project-local versioning module providing current(), checkFormat() and VERSION_FILE_PATH; sys.version would not work here
import os
def main():
""" do main work """
cmdArgs = sys.argv[1:]
if not cmdArgs:
msg = "There is no version in args. Current version: "
msg += version.current()
print(msg)
if GIT_EXE:
print("Result of 'git describe': ")
print(_runGitCmd('describe'))
msg = "\nUsage: " + sys.argv[0] + " x.y.z where x,y,z are numbers"
print(msg)
return 0
newVer = cmdArgs[0]
if not version.checkFormat(newVer):
print('Version %r has invalid format' % newVer)
return 1
if not GIT_EXE:
print("There is no 'git'. Install 'git' to use this script.")
return 2
if not _checkChangeLog(newVer):
print('There is no records for the version %r in changelog file' % newVer)
return 3
question = 'Bump version to %s?'
question += ' It will write the version to file,'
question += '\nadd it to git repo, commit it and add git tag with the version.'
answer = _getAnswerYesNo(question % newVer)
if not answer:
return 0
print("Bumping version to %r .." % newVer)
_bumpVersion(newVer)
print("Building distribution ..")
_runPyScript('setup.py clean sdist bdist_wheel')
answer = _getAnswerYesNo('Distribution was built successfully. Publish it to pypi?')
if answer:
print("Publishing distribution ..")
_runPyScript('setup.py publish')
print("Distribution was published.")
answer = _getAnswerYesNo('Publish release to github?')
if answer:
scriptPath = os.path.join('scripts', 'publish-github-release.py')
args = '%s %s' % (scriptPath, newVer)
print("Publishing release to github ..")
_runPyScript(args, tryPy3 = True)
print("Release was published on github.")
print("Writing new dev version to file %r .." % version.VERSION_FILE_PATH)
nextVer = _writeNewDevVersion(newVer)
print("New dev version %r was written to file." % nextVer)
return 0 | b6110be510bb9a8bdddbc076ada7c51eb2ad8427 | 3,651,209 |
import random
from itertools import chain
def reorderWithinGroup(players_by_wins):
"""Shuffle players with the same score.
Args:
players_by_wins: a dictionary returned by splitByScore().
Returns a list of the re-ordered player ids.
"""
for score in players_by_wins.keys():
random.shuffle(players_by_wins[score])
# players_by_wins is a dictionary with scores as keys. When
# converting to a list, need to make sure it is sorted by score,
# from highest to lowest.
players_ordered = []
    score_keys = sorted(players_by_wins.keys(), reverse=True)
for score in score_keys:
players_ordered.append(players_by_wins[score])
# Convert back to a list.
players_ordered = list(chain.from_iterable(players_ordered))
# Return the ordered ids.
ordered_ids = [x[0] for x in players_ordered]
return(ordered_ids) | bd0afe4db36bf815ab7861e53cd674bd49e81775 | 3,651,210 |
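A usage sketch with a hypothetical players_by_wins dictionary of (id, name) tuples keyed by score, as splitByScore() would produce:
players_by_wins = {2: [(1, 'a'), (4, 'd')], 1: [(2, 'b'), (3, 'c')]}
reorderWithinGroup(players_by_wins)   # e.g. [4, 1, 3, 2]; order within each score group is random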
def selection(population, method):
"""Apply selection method of a given population.
Args:
population: (list of) plans to apply the selection on.
method: (str) selection method:
- rws (Roulette Wheel Selection)
- sus (Stochastic Universal Selection)
- ts (Tournament Selection)
Returns:
(list of) plans representing the new pool
"""
if method == "rws":
return roulette_wheel_selection(population)
elif method == "sus":
return stochastic_universal_sampling(population)
elif method == "ts":
return tournament_selection(population) | e5b05c62530babfd48b5061152b9f88e4a463456 | 3,651,211 |
def PSL_prefix(row, cols):
"""Returns the prefix a domain (www.images for www.images.example.com)"""
psl_data = psl.search_tree(row[cols[0]])
if psl_data:
return(psl_data[1], psl_data[0])
return (None, None) | e5e7809acae3be60eca9f0cd65aec7a93ac087de | 3,651,212 |
def build_model(sess,t,Y,model='sde',sf0=1.0,ell0=[2,2],sfg0=1.0,ellg0=[1e5],
W=6,ktype="id",whiten=True,
fix_ell=False,fix_sf=False,fix_Z=False,fix_U=False,fix_sn=False,
fix_ellg=False,fix_sfg=False,fix_Zg=True,fix_Ug=False):
"""
Args:
        sess: TensorFlow session needed for initialization and optimization
t: Python array of numpy vectors storing observation times
Y: Python array of numpy matrices storing observations. Observations
are stored in rows.
model: 'sde' or 'ode'
sf0: Integer initial value of the signal variance of drift GP
ell0: Python/numpy array of floats for the initial value of the
lengthscale of drift GP
sfg0: Integer initial value of the signal variance of diffusion GP
ellg0: Python/numpy array of a single float for the initial value of the
lengthscale of diffusion GP
W: Integer denoting the width of the inducing point grid. If the problem
dimension is D, total number of inducing points is W**D
ktype: Kernel type. We have made experiments only with Kronecker kernel,
denoted by 'id'. The other kernels are not supported.
whiten: Boolean. Currently we perform the optimization only in the
white domain
fix_ell: Boolean - whether drift GP lengthscale is fixed or optimized
fix_sf: Boolean - whether drift GP signal variance is fixed or optimized
fix_Z: Boolean - whether drift GP inducing locations are fixed or optimized
fix_U: Boolean - whether drift GP inducing vectors are fixed or optimized
fix_sn: Boolean - whether noise variance is fixed or optimized
fix_ellg: Boolean - whether diffusion GP lengthscale is fixed or optimized
fix_sfg: Boolean - whether diffusion GP signal variance is fixed or optimized
fix_Zg: Boolean - whether diffusion GP inducing locations are fixed or optimized
fix_Ug: Boolean - whether diffusion GP inducing vectors are fixed or optimized
Returns:
npde: A new NPDE model
"""
print('Model being initialized...')
def init_U0(Y=None,t=None,kern=None,Z0=None,whiten=None):
Ug = (Y[1:,:] - Y[:-1,:]) / np.reshape(t[1:]-t[:-1],(-1,1))
with tf.name_scope("init_U0"):
tmp = NPODE(Z0=Y[:-1,:],U0=Ug,sn0=0,kern=kern,jitter=0.25,whiten=False,
fix_Z=True,fix_U=True,fix_sn=True)
U0 = tmp.f(X=Z0)
if whiten:
Lz = tf.cholesky(kern.K(Z0))
U0 = tf.matrix_triangular_solve(Lz, U0, lower=True)
U0 = sess.run(U0)
return U0
D = len(ell0)
Nt = len(Y)
x0 = np.zeros((Nt,D))
Ys = np.zeros((0,D))
for i in range(Nt):
x0[i,:] = Y[i][0,:]
Ys = np.vstack((Ys,Y[i]))
maxs = np.max(Ys,0)
mins = np.min(Ys,0)
grids = []
for i in range(D):
grids.append(np.linspace(mins[i],maxs[i],W))
vecs = np.meshgrid(*grids)
Z0 = np.zeros((0,W**D))
for i in range(D):
Z0 = np.vstack((Z0,vecs[i].T.flatten()))
Z0 = Z0.T
tmp_kern = OperatorKernel(sf0,ell0,ktype="id",fix_ell=True,fix_sf=True)
U0 = np.zeros(Z0.shape,dtype=np.float64)
for i in range(len(Y)):
U0 += init_U0(Y[i],t[i],tmp_kern,Z0,whiten)
U0 /= len(Y)
sn0 = 0.5*np.ones(D)
Ug0 = np.ones([Z0.shape[0],1])*0.01
ell0 = np.asarray(ell0,dtype=np.float64)
ellg0 = np.asarray(ellg0,dtype=np.float64)
kern = OperatorKernel(sf0=sf0, ell0=ell0, ktype=ktype, fix_ell=fix_ell, fix_sf=fix_sf)
    if model == 'ode':
npde = NPODE(Z0=Z0, U0=U0, sn0=sn0, kern=kern, whiten=whiten, fix_Z=fix_Z, fix_U=fix_U, fix_sn=fix_sn)
sess.run(tf.global_variables_initializer())
return npde
    elif model == 'sde':
diffus = BrownianMotion(sf0=sfg0, ell0=ellg0, U0=Ug0, Z0=Z0, whiten=whiten,\
fix_sf=fix_sfg, fix_ell=fix_ellg, fix_Z=fix_Zg, fix_U=fix_Ug)
npde = NPSDE(Z0=Z0, U0=U0, sn0=sn0, kern=kern, diffus=diffus, whiten=whiten,\
fix_Z=fix_Z, fix_U=fix_U, fix_sn=fix_sn)
sess.run(tf.global_variables_initializer())
return npde
else:
raise NotImplementedError("model parameter should be either 'ode' or 'sde', not {:s}\n".format(model)) | f5d691aca815df25d1c34ae9c7fa9810c3aae1ab | 3,651,213 |
import re
def paginatedUrls(pattern, view, kwargs=None, name=None):
"""
Takes a group of url tuples and adds paginated urls.
Extends a url tuple to include paginated urls.
Currently doesn't handle url() compiled patterns.
"""
results = [(pattern, view, kwargs, name)]
tail = ''
mtail = re.search('(/+\+?\\*?\??\$?)$', pattern)
if mtail:
tail = mtail.group(1)
pattern = pattern[:len(pattern) - len(tail)]
results += [(pattern + "/(?P<page_number>\d+)" + tail, view, kwargs)]
results += [(pattern + "/(?P<page_number>\d+)\|(?P<page_limit>\d+)" +
tail, view, kwargs)]
if not kwargs:
kwargs = dict()
kwargs['page_limit'] = 0
results += [(pattern + "/?\|(?P<page_limit>all)" + tail, view, kwargs)]
return results | 2102309434e02e0df49888978d41ffce2de0e2dc | 3,651,214 |
from typing import Tuple
def _to_intraday_trix(date: pd.Timestamp, provider: providers.DataProvider,
period: int)-> Tuple[nd.NDArray, nd.NDArray]:
"""
    Returns an ndarray containing the TRIX for a given +date+ and +provider+,
averaged across a given +period+.
"""
# First, get the triple-smoothed 15 period exponential moving average
data = _get_intraday_data(date, provider)
ewm1 = pd.Series.ewm(data['close'], span=period).mean()
ewm2 = pd.Series.ewm(ewm1, span=period).mean()
ewm3 = pd.Series.ewm(ewm2, span=period).mean()
# Return the percentage change from last period
ewm3_yesterday = ewm3.shift(periods=1, fill_value=ewm3[0])
trix = (ewm3 / ewm3_yesterday) - 1
return nd.array(trix.values, utils.try_gpu(0)) | 14c621afa6128fb3b33058b357e2ca79723a42f9 | 3,651,215 |
def _decode(integer):
"""
Decode the given 32-bit integer into a MAX_LENGTH character string according
to the scheme in the specification. Returns a string.
"""
if integer.bit_length() > 32:
raise ValueError("Can only decode 32-bit integers.")
decoded_int = 0
# Since each byte has its bits distributed along the given integer at
# BIT_SHIFT intervals, we'll get the bits from one byte at a time.
for input_start in range(4):
# Move to the beginning of the correct output byte.
output_pos = input_start * 8
# Read the bits from the input at BIT_SHIFT intervals, lowest-order
# bits first.
for input_bit in range(input_start, integer.bit_length(), BIT_SHIFT):
current_bit = getBit(integer, input_bit)
# If the current bit is 1, set the corresponding bit in the result.
# Otherwise, we can leave the result bit as 0.
if current_bit:
decoded_int = setBit(decoded_int, output_pos)
# Move to the next position in the output byte.
output_pos += 1
# Get a byte array from the decoded integer. We're reversing the byte order
# because we read the input integer from lowest-order bit to highest-order.
decoded_bytes = decoded_int.to_bytes(4, byteorder="little")
# Get the characters represented by each byte, ignoring empty bytes.
chars = []
for byte in decoded_bytes:
if byte:
chars.append(chr(byte))
return "".join(chars) | 156b75e8907bbcf6ae69f0a3429fb29777651f8e | 3,651,216 |
def register(name):
"""Registers a new data loader function under the given name."""
def add_to_dict(func):
_LOADERS[name] = func
return func
return add_to_dict | ea672cdf3c8d34f090d98e2498b77ea929aee6e6 | 3,651,217 |
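A usage sketch of the registry decorator above; the loader name and function are hypothetical:
@register('csv')
def load_csv(path):
    return open(path).read()
# _LOADERS['csv'] now refers to load_csv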
def get_api_host(staging):
"""If 'staging' is truthy, return staging API host instead of prod."""
return STAGING_API_HOST if staging else PROD_API_HOST | d2b0003669422ef4481ffe4db76497de1485d0f7 | 3,651,218 |
import requests
import os
def get(path, params={}):
"""Make an authenticated GET request to the GitHub API."""
return requests.get(
os.path.join("https://api.github.com/", path),
auth=(USER, PASS),
params=params
).json() | b35d416f762a7d42a97169133d686b33dac74a59 | 3,651,219 |
def delete_user(auth, client):
"""
Delete a user
:auth: dict
:client: users_client object
"""
log("What user you want to delete?")
user_to_delete = find_user_by_username(auth, client)
if user_to_delete is False:
log("Could not find user.", serv="ERROR")
return False
confirmation = yes_or_no("You really want to delete %s?" % user_to_delete["username"])
if confirmation is False:
log("Aborted...")
return False
try:
client.users_user_id_delete(int(user_to_delete["id"]))
log("Successfully deleted user %s" % user_to_delete["username"], serv="SUCCESS")
return True
except:
log("Could not delete user %s. Error by backend" % user_to_delete["username"], serv="ERROR")
return False | db51c7d7d9f5fbd164bde010f3887f43e998fbef | 3,651,220 |
import datetime
import requests
def get_buildbot_stats(time_window : datetime.datetime) -> BuildStats:
"""Get the statistics for the all builders."""
print('getting list of builders...')
stats = BuildStats()
for builder in requests.get(BASE_URL).json().keys():
# TODO: maybe filter the builds to the ones we care about
stats += get_builder_stats(builder, time_window )
return stats | dc27d0672b9c03967575bef2ccbc95791502a8ab | 3,651,221 |
def remove_empty(s):
"""\
Remove empty strings from a list.
>>> a = ['a', 2, '', 'b', '']
>>> remove_empty(a)
[{u}'a', 2, {u}'b']
"""
while True:
try:
s.remove('')
except ValueError:
break
return s | 98778e4cc90f11b9b74ac6d26b203cbfc958fd7b | 3,651,222 |
import math
from typing import Tuple
from typing import Any
import itertools
def quantum_ia(nb_stick: int, past: list, backend_sim: Aer) -> list:
"""Quantum IA.
Args:
nb_stick: nb of stick left
past: past turn
backend_sim: backend for quantum
Return: Prediction to use
"""
def quadratibot(nb_stick: int, past: list, backend_sim: Aer) -> list:
"""Quadratic + QAOA function
Args:
nb_stick: nb of stick left
past: past turn
backend_sim: backend for quantum
Return: Gates to use
"""
def get_quantum_solution_for(
quadprog: QuadraticProgram, quantumInstance: QuantumInstance, optimizer=None
):
_eval_count = 0
def callback(eval_count, parameters, mean, std):
nonlocal _eval_count
_eval_count = eval_count
# Create solver and optimizer
solver = QAOA(
optimizer=optimizer,
quantum_instance=quantumInstance,
callback=callback,
max_evals_grouped=3,
)
# Create optimizer for solver
optimizer = MinimumEigenOptimizer(solver)
# Get result from optimizer
result = optimizer.solve(quadprog)
return result, _eval_count
# Check number of stick max
if nb_stick >= 3:
max_stick = 3
else:
max_stick = nb_stick
# Check the past
poten_stick = nb_stick
for i in range(len(past)):
if past[i] == "/":
poten_stick += 0.5
if past[i] == "¬":
u = 1
if len(past) - 1 >= i + u:
while past[i + u] == "¬":
u += 1
if past[i + u] == "/":
poten_stick += 0.5
# Check last turn
last_st = 0
if past[0] == "¬":
u = 1
while past[0 + u] == "¬":
u += 1
if past[0 + u] == "/":
last_st = 0.5
if past[0] == "/":
last_st = 0.5
quadprog = QuadraticProgram(name="qnim")
quadprog.integer_var(name="x", lowerbound=0, upperbound=max_stick)
quadprog.integer_var(name="sup", lowerbound=0, upperbound=max_stick)
quadprog.integer_var(name="intric", lowerbound=0, upperbound=max_stick)
quadprog.maximize(
linear={"x": 1, "sup": 0.5, "intric": last_st},
quadratic={("sup", "intric"): 0.5},
)
# General constraints
quadprog.linear_constraint(
linear={"x": 1, "sup": 1, "intric": 1}, sense=">", rhs=0, name="gen_min"
)
quadprog.linear_constraint(
linear={"x": 1, "sup": 1, "intric": 1},
sense="<=",
rhs=max_stick,
name="gen_max",
)
# Mod4 constraints
if math.ceil(poten_stick % 4) - 0.5 > 0:
quadprog.linear_constraint(
linear={"x": 1, "sup": 1},
sense="<=",
rhs=math.ceil(poten_stick % 4),
name="qua_mod4",
)
if nb_stick % 4 - 1 > 0:
quadprog.linear_constraint(
linear={"x": 1, "sup": 1, "intric": 1},
sense="<=",
rhs=nb_stick % 4 - 1,
name="cla_mod4",
)
# Get QAOA result
final_result = []
simulator_instance = QuantumInstance(backend=backend_sim)
qaoa_result, qaoa_eval_count = get_quantum_solution_for(
quadprog, simulator_instance
)
# Format and print result
for cropHectares, cropName in zip(qaoa_result.x, qaoa_result.variable_names):
for i in range(int(cropHectares)):
final_result.append(cropName)
return final_result
def gronim(output: list, backend_sim: Aer) -> Tuple[Any, ...]:
"""Grover for best predict.
Args:
output: every possible prediction
backend_sim: backend for quantum
Return: best predict
"""
def diffuser(nqubits):
qc = QuantumCircuit(nqubits)
for qubit in range(nqubits):
qc.h(qubit)
for qubit in range(nqubits):
qc.x(qubit)
qc.h(nqubits - 1)
qc.mct(list(range(nqubits - 1)), nqubits - 1)
qc.h(nqubits - 1)
for qubit in range(nqubits):
qc.x(qubit)
for qubit in range(nqubits):
qc.h(qubit)
U_s = qc.to_gate()
U_s.name = "$Diff$"
return U_s
def ram(nqubits, lists_final):
list_qram = [i for i in range(nqubits)]
qram = QuantumRegister(nqubits, "qram")
qalgo = QuantumRegister(nqubits, "algo")
qc = QuantumCircuit(qram, qalgo)
control_h = MCMT("h", nqubits, 1).to_gate()
map_ram_2 = [["x", "x"], ["o", "x"], ["x", "o"], ["o", "o"]]
map_ram_3 = [
["x", "x", "x"],
["o", "x", "x"],
["x", "o", "x"],
["o", "o", "x"],
["x", "x", "o"],
["o", "x", "o"],
["x", "o", "o"],
["o", "o", "o"],
]
if len(bin(len(lists_final))[2:]) == 3:
map_ram = map_ram_3
if len(bin(len(lists_final))[2:]) == 2:
map_ram = map_ram_2
for i, m_ram in zip(range(len(lists_final)), map_ram):
# qc.barrier()
for index, gate in enumerate(m_ram):
if gate == "x":
qc.x(qram[index])
if lists_final[i][0] == "x" or lists_final[i][0] == "sup":
qc.mcx(qram, qalgo[0])
else:
qc.append(control_h, [*list_qram, qalgo[0]])
if len(lists_final[i]) == 3:
if lists_final[i][1] == "x":
qc.mcx(qram, qalgo[1])
elif lists_final[i][1] == "intric":
qc.mcx([qram[0], qram[1], qram[2], qalgo[0]], qalgo[1])
else:
qc.append(control_h, [*list_qram, qalgo[1]])
if lists_final[i][-1] == "x":
qc.mcx(qram, qalgo[-1])
elif lists_final[i][-1] == "intric":
if len(lists_final[i]) == 3:
qc.mcx([qram[0], qram[1], qram[2], qalgo[1]], qalgo[-1])
else:
qc.mcx([qram[0], qram[1], qalgo[0]], qalgo[-1])
else:
qc.append(control_h, [*list_qram, qalgo[-1]])
for index, gate in enumerate(m_ram):
if gate == "x":
qc.x(qram[index])
# print(qc.draw())
U_s = qc.to_gate()
U_s.name = "$Qram$"
return U_s
def algo(nqubits):
qc = QuantumCircuit(nqubits)
qc.h(0)
qc.x(0)
U_s = qc.to_gate()
U_s.name = "$Algo$"
return U_s
lists_final = []
lists_full = list(itertools.permutations(output, len(output)))
for u in lists_full:
if u not in lists_final:
lists_final.append(u)
len_qram = len(bin(len(lists_final))[2:])
qram = QuantumRegister(len_qram, "qram")
qalgo = QuantumRegister(len_qram, "algo")
oracle = QuantumRegister(1, "oracle")
c = ClassicalRegister(len_qram, "measurement")
qc = QuantumCircuit(qram, qalgo, oracle, c)
# Init
qc.h(qram)
qc.x(oracle)
qc.h(oracle)
qc.barrier()
# Qram
qc.append(ram(len_qram, lists_final), [*[i for i in range(len_qram * 2)]])
qc.barrier()
# Algorithm
qc.append(algo(len_qram), [*[i for i in range(len_qram, len_qram * 2)]])
qc.barrier()
# Oracle
qc.mcx([qalgo[0], qalgo[-1]], oracle)
qc.barrier()
# Revert Algo + Qram
qc.append(
algo(len_qram).inverse(), [*[i for i in range(len_qram, len_qram * 2)]]
)
qc.append(
ram(len_qram, lists_final).inverse(), [*[i for i in range(len_qram * 2)]]
)
qc.barrier()
# Diffuser
qc.append(diffuser(len_qram), [*[i for i in range(len_qram)]])
# Measure of the outputs
qc.barrier()
qc.measure(qram, c)
job = execute(qc, backend_sim, shots=512, memory=True)
result_job = job.result()
result_count = result_job.get_counts()
result_memory = job.result().get_memory()
if len(result_count) == 1:
final_result = int(result_memory[0], 2)
else:
final_result = max(result_count, key=result_count.get)
final_result = int(final_result, 2)
to_return = lists_final[final_result]
return to_return
gates = quadratibot(nb_stick, past, backend_sim)
if len(gates) < 2:
predict = gates
elif len(set(gates)) != len(gates):
predict = gates
else:
predict = gronim(gates, backend_sim)
return predict | 7e13f554e6eb901ec43ec80bf346005f17ec55d5 | 3,651,223 |
from tensorflow.keras.models import Sequential  # assumed tf.keras; the original may use standalone keras imports
from tensorflow.keras.layers import Conv2D, MaxPool2D, Flatten, Dense
def construct_model_cnn_gram(num_classes, input_shape):
"""
construct model architecture
    :param num_classes: number of output classes of the model [int]
    :param input_shape: shape of the model input, e.g. (height, width, channels)
:return: model - Keras model object
"""
model = Sequential()
model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same', input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
model.add(MaxPool2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
model.add(MaxPool2D((2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu', kernel_initializer='he_uniform'))
model.add(Dense(num_classes, activation='softmax'))
return model | ddffb66efe6b4f94a9c2a0ccc976206eebb503a6 | 3,651,224 |
def get_basic_data(match_info, event):
"""input: dictionary | output: dictionary updated"""
match_info['status'] = event['status']['type']['name']
match_info['start_time'] = event['date']
match_info['match_id'] = event['id']
match_info['time'] = event['status']['displayClock']
match_info['period'] = event['status']['period']
match_info['display_period'] = give_display_period(match_info['period'])
match_info['detail'] = event['status']['type']['detail']
match_info['match_type_id'] = event['status']['type']['id']
return match_info | fae755e195b5bbf12c9fccf20b9ba2c2f9e700c6 | 3,651,225 |
import os
import subprocess
from datetime import date
from django.core.exceptions import ObjectDoesNotExist  # inferred from usage below
def handle_solution(f, problem_id, user, lang):
"""
When user uploads the solution, this function takes care of it.
It runs the grader, saves the running time and output and saves
the submission info to database.
:param f: submission file (program)
:param problem_id: the id of the problem user submitted solution to
:param user: user id of user that made submission
:param lang: what language did user use
:return grade: grade that user got in this submission
:return submission.grade: best grade user got on this problem
:return error: -1 if no errors, otherwise output of agrader.sh
"""
# Get directory where files are stored.
directory = os.popen('echo $CG_FILES_UPLOADED').read().strip()
# Create directory where user's problem submission stuff will get stored.
problem_dir = "{0}/{1}/{2}".format(directory, user, problem_id)
try:
os.mkdir(problem_dir)
except OSError:
pass # directory already exists
# Write the submission file to previously created directory, rename it.
_, end = f.name.split('.')
f_local = '{2}_{0}.{1}'.format(user, end, problem_id)
with open('{0}/{1}'.format(problem_dir, f_local), 'wb+') as destination:
for chunk in f.chunks():
destination.write(chunk)
# Grade the task using agrader.sh. First compile the program if necessary and then copy
# files to docker container. Then run the program and check the grade
# with grader.
runner_ret_val = -1
grade = -1
error = -1
compiler_output = \
subprocess.check_output('bash problem/grader/compile_and_copy.sh {0} {1} {2}'.format(f_local, problem_id, user),
shell=True).split('\n')[-2]
if compiler_output == 'OK':
if end == 'py':
if lang == 'Python 2':
runner_ret_val = subprocess.call('bash problem/grader/run_py.sh {0} {1}'.format(user, problem_id),
shell=True)
elif lang == 'Python 3':
runner_ret_val = subprocess.call('bash problem/grader/run_py3.sh {0} {1}'.format(user, problem_id),
shell=True)
elif end == 'java':
runner_ret_val = subprocess.call('bash problem/grader/run_java.sh {0} {1}'.format(user, problem_id),
shell=True)
elif end == 'cs':
runner_ret_val = subprocess.call('bash problem/grader/run_cs.sh {0} {1}'.format(user, problem_id),
shell=True)
else:
runner_ret_val = subprocess.call('bash problem/grader/run_c.sh {0} {1}'.format(user, problem_id), shell=True)
if runner_ret_val == 0:
grader_out = subprocess.check_output('bash problem/grader/grade.sh {0} {1}'.format(user, problem_id),
shell=True).split('\n')[-2]
try:
grade = int(grader_out)
except ValueError:
grade = -1
error = grader_out
else:
error = "RTE"
else:
error = compiler_output
# Add submission
user = User.objects.get(username=user)
today = date.today()
today_str = '{0}-{1}-{2}'.format(today.year, today.month, today.day)
try: # if user has already submitted solution for this problem before
submission = Submission.objects.get(user=user.id, problem=problem_id)
submission.tries += 1
if grade > submission.grade:
submission.grade = grade
submission.date = today_str
# Save newer solution with same points.
if grade >= submission.grade:
os.system('bash problem/grader/move_output.sh {0} {1} {2}'.format(user.username, problem_id, 1))
else:
os.system('bash problem/grader/move_output.sh {0} {1} {2}'.format(user.username, problem_id, 0))
except ObjectDoesNotExist: # this is user's first submission
submission = Submission()
submission.user_id = user.id
submission.problem_id = problem_id
submission.grade = grade
submission.date = today_str
submission.tries = 1
os.system('bash problem/grader/move_output.sh {0} {1} {2}'.format(user.username, problem_id, 1))
finally: # at the end we need to update some data about best submissions
if grade == 10 and submission.tries_until_correct == 0:
submission.tries_until_correct = submission.tries
# Update number of people that solved this problem.
problem = Problem.objects.get(pk=problem_id)
if problem.solved_by_how_many == 0:
problem.first_solved_by = user
problem.first_solved_on = today_str
problem.solved_by_how_many += 1
problem.last_successful_try = today_str
submission.save()
return grade, submission.grade, error | 6373e5d55c8135ede8e401cc8f2c9b1464bda1bf | 3,651,226 |
def convert_blockgrad(node, **kwargs):
""" Skip operator """
return create_basic_op_node('Identity', node, kwargs) | 12803d387a30884da08779b878e9cdf3e06226a7 | 3,651,227 |
def is_complete(node):
"""
all children of a sum node have same scope as the parent
"""
assert node is not None
for sum_node in reversed(get_nodes_by_type(node, Sum)):
nscope = set(sum_node.scope)
if len(sum_node.children) == 0:
return False, "Sum node %s has no children" % sum_node.id
for child in sum_node.children:
if nscope != set(child.scope):
return False, "children of (sum) node %s do not have the same scope as parent" % sum_node.id
return True, None | a92741f4770757518e91a44e757e4d8037958066 | 3,651,228 |
import torch
def step_inplace(Ts, ae, target, weight, depth, intrinsics, lm=.0001, ep=10.0):
""" dense gauss newton update with computing similiarity matrix """
pts = pops.inv_project(depth, intrinsics)
pts = pts.permute(0,3,1,2).contiguous()
# tensor representation of SE3
se3 = Ts.data.permute(0,3,1,2).contiguous()
ae = ae / 8.0
# build the linear system
H, b = SE3BuilderInplace.apply(se3, ae, pts, target, weight, intrinsics)
I = torch.eye(6, device=H.device)[...,None,None]
H = H + (lm*H + ep) * I # damping
dx = SE3Solver.apply(H, b)
dx = dx.permute(0,3,4,1,2).squeeze(-1).contiguous()
Ts = SE3.exp(dx) * Ts
return Ts | 9f1fc6911d1fb11bc6956d63dda8f59b8f6654cd | 3,651,229 |
import numpy as np
def _compute_extent_axis(axis_range, grid_steps):
"""Compute extent for matplotlib.pyplot.imshow() along one axis.
:param axis_range: 1D numpy float array with 2 elements; axis range for plotting
:param grid_steps: positive integer, number of grid steps in each dimension
:return: 1D numpy float array with 2 elements
"""
delta = (axis_range[1] - axis_range[0]) / (2.0 * (grid_steps - 1))
# the range is covered by grid_steps - 1 pixels with one half of a pixel overlapping on each side; delta is half the
# pixel width
return np.array([axis_range[0] - delta, axis_range[1] + delta]) | e83a251b4055639435342d19960d1e75a6d33ba8 | 3,651,230 |
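A usage sketch, assuming numpy is imported as np (as the snippet itself requires); the matplotlib call in the comment is an illustration of how the result would typically be consumed:
x_extent = _compute_extent_axis(np.array([0.0, 1.0]), grid_steps=11)    # -> [-0.05, 1.05]
y_extent = _compute_extent_axis(np.array([-1.0, 1.0]), grid_steps=11)
# plt.imshow(grid, extent=(x_extent[0], x_extent[1], y_extent[0], y_extent[1]), origin='lower')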
import pathlib
import os
def normalize_path(filepath, expand_vars=False):
""" Fully normalizes a given filepath to an absolute path.
:param str filepath: The filepath to normalize
:param bool expand_vars: Expands embedded environment variables if True
    :returns: The fully normalized filepath
:rtype: str
"""
filepath = str(pathlib.Path(filepath).expanduser().resolve())
if expand_vars:
filepath = os.path.expandvars(filepath)
return filepath | d408d6c1cd86072473a52f626821fcebd380c29d | 3,651,231 |
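A usage sketch; the paths and the DATA_DIR environment variable are hypothetical:
normalize_path('~/projects/../data/file.txt')             # absolute, user-expanded path
normalize_path('$DATA_DIR/file.txt', expand_vars=True)    # embedded environment variables expanded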
import os
def _make_relative_path(base_path, full_path):
"""
Strip out the base_path from full_path and make it relative.
"""
flask.current_app.logger.debug(
'got base_path: %s and full_path: %s' % (base_path, full_path))
if base_path in full_path:
# Get the common prefix
common_prefix =\
os.path.commonprefix([base_path, full_path])
rel_path = full_path[len(common_prefix):]
# Remove '/' from the beginning
if os.path.isabs(rel_path):
rel_path = rel_path[1:]
return rel_path | c40252232bf02aa51411f4a9a50d9d71d5447348 | 3,651,232 |
import hashlib
def md5_hash_file(fh):
"""Return the md5 hash of the given file-object"""
md5 = hashlib.md5()
while True:
data = fh.read(8192)
if not data:
break
md5.update(data)
return md5.hexdigest() | f572ec27add8024e5fa8b9a82b5d694905e4d0f8 | 3,651,233 |
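A usage sketch: hash a file opened in binary mode (the filename is illustrative):
with open('archive.tar.gz', 'rb') as fh:
    digest = md5_hash_file(fh)   # hex string such as '9e107d9d372bb6826bd81d3542a419d6'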
def rule(request):
"""Administration rule content"""
if request.user.is_authenticated():
return helpers.render_page('rule.html', show_descriptions=True)
else:
return redirect('ahmia.views_admin.login') | 514f767235660b812126c5cfc2dabeec40a7b22a | 3,651,234 |
def get_tenant_info(schema_name):
"""
get_tenant_info return the first tenant object by schema_name
"""
with schema_context(schema_name):
return Pharmacy.objects.filter(schema_name=schema_name).first() | 388d51c50fec60a331822bd157da1d53c88cc170 | 3,651,235 |
def get_lr(curr_epoch, hparams, iteration=None):
"""Returns the learning rate during training based on the current epoch."""
assert iteration is not None
batches_per_epoch = int(hparams.train_size / hparams.batch_size)
if 'svhn' in hparams.dataset and 'wrn' in hparams.model_name:
lr = step_lr(hparams.lr, curr_epoch)
elif 'cifar' in hparams.dataset or ('svhn' in hparams.dataset and
'shake_shake' in hparams.model_name):
lr = cosine_lr(hparams.lr, curr_epoch, iteration, batches_per_epoch,
hparams.num_epochs)
else:
lr = hparams.lr
tf.logging.log_first_n(tf.logging.WARN, 'Default not changing learning rate.', 1)
return lr | 42df4a185dd41d0eb02845d78dbd061f4658fba8 | 3,651,236 |
import glob
import os
def delete_map():
"""
Delete all maps
Returns
-------
(response, status_code): (dict, int)
The response is a dictionary with the keys -> status, message and
result.
The status is a bool that says if the operation was successful.
The message is a str with comments for the user.
The result is an empty list.
The status_code is always 204 (deleted).
"""
# Delete all metadata_map figures
# Loop Through the folder projects all files and deleting them one by one
for file_map in glob.glob(f'{fig_folder}/metadata_map*'):
os.remove(file_map)
response = {
'status': True,
'message': 'All maps deleted',
'result': []
}
status_code = 204
return response, status_code | 0a4b8ef8975f33e37ecde4b50324d98c45f8a691 | 3,651,237 |
def delete_page(shortname):
"""Delete page from the database."""
# Check page existency
if get_page(shortname) is None:
abort(404)
if shortname is None:
flash("No parameters for page deletion!")
return redirect(url_for("admin"))
else:
query_db("DELETE FROM pages WHERE shortname = ?", (shortname,))
commit_db()
flash("Page '" + shortname + "' deleted!")
return redirect(url_for("admin")) | b1bb9526832209e1cddf1c86851aeef7c6701d3d | 3,651,238 |
def has_property(name, match=None):
"""Matches if object has a property with a given name whose value satisfies
a given matcher.
:param name: The name of the property.
:param match: Optional matcher to satisfy.
This matcher determines if the evaluated object has a property with a given
name. If no such property is found, ``has_property`` is not satisfied.
If the property is found, its value is passed to a given matcher for
evaluation. If the ``match`` argument is not a matcher, it is implicitly
wrapped in an :py:func:`~hamcrest.core.core.isequal.equal_to` matcher to
check for equality.
If the ``match`` argument is not provided, the
:py:func:`~hamcrest.core.core.isanything.anything` matcher is used so that
``has_property`` is satisfied if a matching property is found.
Examples::
has_property('name', starts_with('J'))
has_property('name', 'Jon')
has_property('name')
"""
if match is None:
match = anything()
return IsObjectWithProperty(name, wrap_shortcut(match)) | a5c562b1f5a36fc2c591d700d84b5ee9ca54ccde | 3,651,239 |
import glob
def gain_stability_task(run, det_name, fe55_files):
"""
This task fits the Fe55 clusters to the cluster data from each frame
sequence and writes a pickle file with the gains as a function of
sequence number and MJD-OBS.
Parameters
----------
run: str
Run number.
det_name: str
Sensor name in the focal plane, e.g., 'R22_S11'.
fe55_files: list
Raw Fe55 for the sensor being consider. The MJD-OBS values
will be extracted from these files.
Returns:
(pandas.DataFrame, str), i.e., a tuple of the data frame containing
the gain sequence and the file name of the output pickle file.
"""
file_prefix = make_file_prefix(run, det_name)
# Extract MJD-OBS values into a dict to provide look up table in
# case there are missing sequence frames in the psf results table.
mjd_obs = dict()
for item in fe55_files:
with fits.open(item) as hdus:
mjd_obs[hdus[0].header['SEQNUM']] = hdus[0].header['MJD-OBS']
psf_results_file = sorted(glob.glob(f'{file_prefix}_psf_results*.fits'))[0]
try:
df = sensorTest.gain_sequence(det_name, psf_results_file)
except ValueError as eobj:
print("ValueError in gain_stability_task:", eobj)
return None
df['mjd'] = [mjd_obs[seqnum] for seqnum in df['seqnum']]
outfile = f'{file_prefix}_gain_sequence.pickle'
df.to_pickle(outfile)
return df, outfile | a285879fc963342ed51b61ec9fae8ac08c089bc6 | 3,651,240 |
import datetime
import iso8601
from six import string_types  # inferred from usage below
def get_datetime(timestamp):
"""Parse several representations of time into a datetime object"""
if isinstance(timestamp, datetime.datetime):
# Timestamp is already a datetime object.
return timestamp
elif isinstance(timestamp, (int, float)):
try:
# Handle Unix timestamps.
return datetime.datetime.fromtimestamp(timestamp)
except ValueError:
pass
try:
# Handle Unix timestamps in milliseconds.
return datetime.datetime.fromtimestamp(timestamp / 1000)
except ValueError:
pass
elif isinstance(timestamp, string_types):
try:
timestamp = float(timestamp)
except (ValueError, TypeError):
pass
else:
# Timestamp is probably Unix timestamp given as string.
return get_datetime(timestamp)
try:
# Try to parse as string date in common formats.
return iso8601.parse_date(timestamp)
except:
pass
# Fuck this shit.
raise ValueError("Couldn't extract date object from %r" % timestamp) | d8277a1de3876106de02b9d75e02369061261996 | 3,651,241 |
def conda_installed_files(prefix, exclude_self_build=False):
"""
Return the set of files which have been installed (using conda) into
a given prefix.
"""
res = set()
for dist in install.linked(prefix):
meta = install.is_linked(prefix, dist)
if exclude_self_build and 'file_hash' in meta:
continue
res.update(set(meta['files']))
return res | e81051e19f7829bf149c71f0533bd51c80c7e2a1 | 3,651,242 |
def computeStarsItembased(corated, target_bid, model):
"""
corated - {bid: star, ...}
"""
if corated == None:
return None
corated.pop(target_bid, None)
bid_cor = list(corated.keys())
collect = []
for b in bid_cor:
pair = None
if b < target_bid:
pair = (b, target_bid)
else:
pair = (target_bid, b)
# if b == target_bid:
# print('same:', pair)
w = model.get(pair)
if w != None:
# pair may not have a value in the model
# when b == target_bid, pair have no value, too
collect.append((pair, w, b))
# else:
# collect.append((pair, 0, b))
# print(collect)
collect.sort(key=lambda x: x[1], reverse=True)
neighbors = collect[:N_NEIGHBORS_ITEMBASED]
sum_w = 0
n = 0
for p, w, b in neighbors:
star = corated[b]
n += star * w
sum_w += w
if sum_w == 0:
return None
else:
return n / sum_w | 7b3cd5bd103d35fe09477be96b5cbcc378927c65 | 3,651,243 |
import psutil
import re
def beacon(config):
"""
Monitor the memory usage of the minion
Specify thresholds for percent used and only emit a beacon
if it is exceeded.
.. code-block:: yaml
beacons:
memusage:
- percent: 63%
"""
ret = []
_config = {}
list(map(_config.update, config))
_current_usage = psutil.virtual_memory()
current_usage = _current_usage.percent
monitor_usage = _config["percent"]
if isinstance(monitor_usage, str) and "%" in monitor_usage:
monitor_usage = re.sub("%", "", monitor_usage)
monitor_usage = float(monitor_usage)
if current_usage >= monitor_usage:
ret.append({"memusage": current_usage})
return ret | ac10e85d47fef403148ad2e00c4e10ced2cc226c | 3,651,244 |
import numpy as np
def get_surrounding_points(search_values, point_set):
"""
    For each value p[i] in search_values, returns a pair of surrounding points from point_set.
    The surrounding points are a tuple of the form (lb[i], ub[i]) where
    - lb[i] < p[i] < ub[i] if p[i] is not in point_set and p[i] is within range
    - lb[i] == p[i] == ub[i] if p[i] is in point_set, p[i] < min(point_set), or p[i] > max(point_set)
    :param search_values: set of points that need neighbors
    :param point_set: set of points that must already be sorted
:return: list of points in point_set that surround search_values
"""
# http://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array
upper_indices = np.searchsorted(point_set, search_values, side="left")
n_points = len(point_set)
n_search = len(search_values)
neighbors = []
for i in range(n_search):
idx = upper_indices[i]
val = search_values[i]
if idx == 0:
n = (point_set[0], point_set[0])
elif idx == n_points:
n = (point_set[-1], point_set[-1])
else:
n = (point_set[idx-1], point_set[idx])
neighbors.append(n)
return neighbors | d4ec055946c19b999ed9523aa260d7bd28ffd269 | 3,651,245 |
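A usage sketch with an already-sorted point set (the values are illustrative):
points = np.array([0.0, 1.0, 2.0, 3.0])
get_surrounding_points([0.5, 5.0], points)   # -> [(0.0, 1.0), (3.0, 3.0)]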
def _scriptable_get(obj, name):
""" The getter for a scriptable trait. """
global _outermost_call
saved_outermost = _outermost_call
_outermost_call = False
try:
result = getattr(obj, '_' + name, None)
if result is None:
result = obj.trait(name).default
finally:
_outermost_call = saved_outermost
if saved_outermost:
get_script_manager().record_trait_get(obj, name, result)
return result | 53da00cb49d73065281306c90a51a95e1670e14e | 3,651,246 |
def Iq(q, intercept, slope):
"""
:param q: Input q-value
:param intercept: Intrecept in linear model
:param slope: Slope in linear model
:return: Calculated Intensity
"""
inten = intercept + slope*q
return inten | af3e580e6061089b431ef25f1f08def6f29c8ef6 | 3,651,247 |
import os
import logging
import math
import numpy
import audioop
def read_sph(input_file_name, mode='p'):
"""
Read a SPHERE audio file
:param input_file_name: name of the file to read
:param mode: specifies the following (\* =default)
.. note::
- Scaling:
- 's' Auto scale to make data peak = +-1 (use with caution if reading in chunks)
- 'r' Raw unscaled data (integer values)
- 'p' Scaled to make +-1 equal full scale
- 'o' Scale to bin centre rather than bin edge (e.g. 127 rather than 127.5 for 8 bit values,
can be combined with n+p,r,s modes)
- 'n' Scale to negative peak rather than positive peak (e.g. 128.5 rather than 127.5 for 8 bit values,
can be combined with o+p,r,s modes)
- Format
- 'l' Little endian data (Intel,DEC) (overrides indication in file)
- 'b' Big endian data (non Intel/DEC) (overrides indication in file)
- File I/O
- 'f' Do not close file on exit
- 'd' Look in data directory: voicebox('dir_data')
- 'w' Also read the annotation file \*.wrd if present (as in TIMIT)
- 't' Also read the phonetic transcription file \*.phn if present (as in TIMIT)
- NMAX maximum number of samples to read (or -1 for unlimited [default])
- NSKIP number of samples to skip from start of file (or -1 to continue from previous read when FFX
is given instead of FILENAME [default])
:return: a tupple such that (Y, FS)
.. note::
- Y data matrix of dimension (samples,channels)
- FS sample frequency in Hz
- WRD{\*,2} cell array with word annotations: WRD{\*,:)={[t_start t_end],'text'} where times are in seconds
only present if 'w' option is given
- PHN{\*,2} cell array with phoneme annotations: PHN{\*,:)={[t_start t_end],'phoneme'} where times
are in seconds only present if 't' option is present
- FFX Cell array containing
1. filename
2. header information
1. first header field name
2. first header field value
3. format string (e.g. NIST_1A)
4.
1. file id
2. current position in file
3. dataoff byte offset in file to start of data
4. order byte order (l or b)
5. nsamp number of samples
6. number of channels
7. nbytes bytes per data value
8. bits number of bits of precision
9. fs sample frequency
10. min value
11. max value
12. coding 0=PCM,1=uLAW + 0=no compression, 0=shorten,20=wavpack,30=shortpack
13. file not yet decompressed
5. temporary filename
If no output parameters are specified,
header information will be printed.
The code to decode shorten-encoded files, is
not yet released with this toolkit.
"""
codings = dict([('pcm', 1), ('ulaw', 2)])
compressions = dict([(',embedded-shorten-', 1),
(',embedded-wavpack-', 2),
(',embedded-shortpack-', 3)])
byteorder = 'l'
endianess = dict([('l', '<'), ('b', '>')])
if not mode == 'p':
mode = [mode, 'p']
k = list((m >= 'p') & (m <= 's') for m in mode)
# scale to input limits not output limits
mno = all([m != 'o' for m in mode])
sc = ''
if k[0]:
sc = mode[0]
# Get byte order (little/big endian)
if any([m == 'l' for m in mode]):
byteorder = 'l'
elif any([m == 'b' for m in mode]):
byteorder = 'b'
ffx = ['', '', '', '', '']
if isinstance(input_file_name, str):
if os.path.exists(input_file_name):
fid = open(input_file_name, 'rb')
elif os.path.exists("".join((input_file_name, '.sph'))):
input_file_name = "".join((input_file_name, '.sph'))
fid = open(input_file_name, 'rb')
else:
raise Exception('Cannot find file {}'.format(input_file_name))
ffx[0] = input_file_name
elif not isinstance(input_file_name, str):
ffx = input_file_name
else:
fid = input_file_name
# Read the header
if ffx[3] == '':
        fid.seek(0, 0)  # go to the beginning of the file
l1 = fid.readline().decode("utf-8")
l2 = fid.readline().decode("utf-8")
if not (l1 == 'NIST_1A\n') & (l2 == ' 1024\n'):
logging.warning('File does not begin with a SPHERE header')
ffx[2] = l1.rstrip()
hlen = int(l2[3:7])
hdr = {}
while True: # Read the header and fill a dictionary
st = fid.readline().decode("utf-8").rstrip()
if st[0] != ';':
elt = st.split(' ')
if elt[0] == 'end_head':
break
if elt[1][0] != '-':
logging.warning('Missing ''-'' in SPHERE header')
break
if elt[1][1] == 's':
hdr[elt[0]] = elt[2]
elif elt[1][1] == 'i':
hdr[elt[0]] = int(elt[2])
else:
hdr[elt[0]] = float(elt[2])
if 'sample_byte_format' in list(hdr.keys()):
if hdr['sample_byte_format'][0] == '0':
bord = 'l'
else:
bord = 'b'
if (bord != byteorder) & all([m != 'b' for m in mode]) \
& all([m != 'l' for m in mode]):
byteorder = bord
icode = 0 # Get encoding, default is PCM
if 'sample_coding' in list(hdr.keys()):
icode = -1 # unknown code
for coding in list(codings.keys()):
if hdr['sample_coding'].startswith(coding):
# is the signal compressed
# if len(hdr['sample_coding']) > codings[coding]:
if len(hdr['sample_coding']) > len(coding):
for compression in list(compressions.keys()):
if hdr['sample_coding'].endswith(compression):
icode = 10 * compressions[compression] \
+ codings[coding] - 1
break
else: # if the signal is not compressed
icode = codings[coding] - 1
break
# initialize info of the files with default values
info = [fid, 0, hlen, ord(byteorder), 0, 1, 2, 16, 1, 1, -1, icode]
# Get existing info from the header
if 'sample_count' in list(hdr.keys()):
info[4] = hdr['sample_count']
if not info[4]: # if no info sample_count or zero
# go to the end of the file
            fid.seek(0, 2)  # go to the end of the file
# get the sample count
info[4] = int(math.floor((fid.tell() - info[2]) / (info[5] * info[6]))) # get the sample_count
if 'channel_count' in list(hdr.keys()):
info[5] = hdr['channel_count']
if 'sample_n_bytes' in list(hdr.keys()):
info[6] = hdr['sample_n_bytes']
if 'sample_sig_bits' in list(hdr.keys()):
info[7] = hdr['sample_sig_bits']
if 'sample_rate' in list(hdr.keys()):
info[8] = hdr['sample_rate']
if 'sample_min' in list(hdr.keys()):
info[9] = hdr['sample_min']
if 'sample_max' in list(hdr.keys()):
info[10] = hdr['sample_max']
ffx[1] = hdr
ffx[3] = info
info = ffx[3]
ksamples = info[4]
if ksamples > 0:
fid = info[0]
if (icode >= 10) & (ffx[4] == ''): # read compressed signal
# need to use a script with SHORTEN
raise Exception('compressed signal, need to unpack in a script with SHORTEN')
info[1] = ksamples
# use modes o and n to determine effective peak
pk = 2 ** (8 * info[6] - 1) * (1 + (float(mno) / 2 - int(all([m != 'b'
for m in
mode]))) / 2 **
info[7])
fid.seek(1024) # jump after the header
nsamples = info[5] * ksamples
if info[6] < 3:
if info[6] < 2:
logging.debug('Sphere i1 PCM')
y = numpy.fromfile(fid, endianess[byteorder]+"i1", -1)
if info[11] % 10 == 1:
if y.shape[0] % 2:
y = numpy.frombuffer(audioop.ulaw2lin(
numpy.concatenate((y, numpy.zeros(1, 'int8'))), 2),
numpy.int16)[:-1]/32768.
else:
y = numpy.frombuffer(audioop.ulaw2lin(y, 2), numpy.int16)/32768.
pk = 1.
else:
y = y - 128
else:
logging.debug('Sphere i2')
y = numpy.fromfile(fid, endianess[byteorder]+"i2", -1)
        else:  # not verified
if info[6] < 4:
y = numpy.fromfile(fid, endianess[byteorder]+"i1", -1)
y = y.reshape(nsamples, 3).transpose()
y = (numpy.dot(numpy.array([1, 256, 65536]), y) - (numpy.dot(y[2, :], 2 ** (-7)).astype(int) * 2 ** 24))
else:
y = numpy.fromfile(fid, endianess[byteorder]+"i4", -1)
if sc != 'r':
if sc == 's':
if info[9] > info[10]:
info[9] = numpy.min(y)
info[10] = numpy.max(y)
sf = 1 / numpy.max(list(list(map(abs, info[9:11]))), axis=0)
else:
sf = 1 / pk
y = sf * y
if info[5] > 1:
y = y.reshape(ksamples, info[5])
else:
y = numpy.array([])
if mode != 'f':
fid.close()
info[0] = -1
if not ffx[4] == '':
pass # VERIFY SCRIPT, WHICH CASE IS HANDLED HERE
return y.astype(numpy.float32), int(info[8]), int(info[6]) | 4d06b14fffc8203c2ac0ec0a3c1086cc0904d193 | 3,651,248 |
import os
import pathlib
import yaml
def unpack(path, catalog_name):
"""
Place a catalog configuration file in the user configuration area.
Parameters
----------
path: Path
Path to output from pack
catalog_name: Str
A unique name for the catalog
Returns
-------
config_path: Path
Location of new catalog configuration file
"""
# Validate user input.
if not os.path.isdir(path):
raise ValueError(f"{path} is not a directory")
source_catalog_file_path = pathlib.Path(path, "catalog.yml")
if not os.path.isfile(source_catalog_file_path):
raise ValueError(f"Cold not find 'catalog.yml' in {path}")
if catalog_name in databroker.utils.list_configs():
raise CatalogNameExists(catalog_name)
config_dir = databroker.catalog_search_path()[0]
with open(source_catalog_file_path) as file:
catalog = yaml.safe_load(file)
source = catalog["sources"].pop("packed_catalog")
# Handle temporary condition where 'pack' puts absolute paths in "args"
# and puts relative paths off to the side.
if any(pathlib.Path(p).is_absolute() for p in source["args"]["paths"]):
relative_paths = source["metadata"]["relative_paths"]
new_paths = [str(pathlib.Path(path, rel_path)) for rel_path in relative_paths]
source["args"]["paths"] = new_paths
# The root_map values may be relative inside a pack, given relative to the
# catalog file. Now that we are going to use a catalog file in a config
# directory, we need to make these paths absolute.
for k, v in source["args"].get("root_map", {}).items():
if not pathlib.Path(v).is_absolute():
source["args"]["root_map"][k] = str(pathlib.Path(path, v))
catalog["sources"][catalog_name] = source
config_filename = f"databroker_unpack_{catalog_name}.yml"
config_path = pathlib.Path(config_dir, config_filename)
os.makedirs(config_dir, exist_ok=True)
with open(config_path, "xt") as file:
yaml.dump(catalog, file)
return config_path | ae40f5d9f9521e6df34b598c034d657b8239a389 | 3,651,249 |
def psi4ToStrain(mp_psi4, f0):
"""
Convert the input mp_psi4 data to the strain of the gravitational wave
mp_psi4 = Weyl scalar result from simulation
f0 = cutoff frequency
return = strain (h) of the gravitational wave
"""
#TODO: Check for uniform spacing in time
t0 = mp_psi4[:, 0]
list_len = len(t0)
complexPsi = np.zeros(list_len, dtype=np.complex_)
complexPsi = mp_psi4[:, 1]+1.j*mp_psi4[:, 2]
freq, psif = myFourierTransform(t0, complexPsi)
dhf = ffi(freq, psif, f0)
hf = ffi(freq, dhf, f0)
time, h = myFourierTransformInverse(freq, hf, t0[0])
hTable = np.column_stack((time, h))
return hTable | 4ab3320a44bd10403e73015e4a215142529b8029 | 3,651,250 |
def get_optimizer(library, solver):
"""Constructs Optimizer given and optimization library and optimization
solver specification"""
options = {
'maxiter': 100
}
if library == 'scipy':
optimizer = optimize.ScipyOptimizer(method=solver, options=options)
elif library == 'ipopt':
optimizer = optimize.IpoptOptimizer()
elif library == 'dlib':
optimizer = optimize.DlibOptimizer(options=options)
elif library == 'pyswarm':
optimizer = optimize.PyswarmOptimizer(options=options)
elif library == 'cmaes':
optimizer = optimize.CmaesOptimizer(options=options)
elif library == 'scipydiffevolopt':
optimizer = optimize.ScipyDifferentialEvolutionOptimizer(
options=options)
elif library == 'pyswarms':
optimizer = optimize.PyswarmsOptimizer(options=options)
elif library == 'nlopt':
optimizer = optimize.NLoptOptimizer(method=solver, options=options)
elif library == 'fides':
options[fides.Options.SUBSPACE_DIM] = solver[1]
optimizer = optimize.FidesOptimizer(options=options,
hessian_update=solver[0])
else:
raise ValueError(f"Optimizer not recognized: {library}")
return optimizer | be72fc9115abf0d087049debe470139b248ef47f | 3,651,251 |
def _cast_query(query, col):
"""
ALlow different query types (e.g. numerical, list, str)
"""
query = query.strip()
if col in {"t", "d"}:
return query
if query.startswith("[") and query.endswith("]"):
if "," in query:
query = ",".split(query[1:-1])
return [i.strip() for i in query]
if query.isdigit():
return int(query)
try:
return float(query)
except Exception:
return query | 4b6cfc823f8b2e78f343e73683b418112e66f43d | 3,651,252 |
import torch
def binary_loss(pred_raw,
label_raw,
loss_func,
weight=None,
class_weight=None,
class_weight_norm=False,
reduction='mean',
avg_factor=None,
smooth=1.0):
"""
:param pred: [N, C, *] scores without softmax
:param label: [N, *] in [0, C], 0 stands for background, 1~C stands for pred in 0~C-1
:return: reduction([N])
"""
pred = pred_raw.clone()
label = label_raw.clone()
num_classes = pred.shape[1]
if class_weight is not None:
class_weight = class_weight.float()
if pred.shape != label.shape:
label = _make_one_hot(label, num_classes)
pred = torch.sigmoid(pred)
loss = 0.
for i in range(num_classes):
if isinstance(loss_func, tuple):
loss_function = loss_func[i]
else:
loss_function = loss_func
class_loss = loss_function(pred[:, i], label[:, i], smooth=smooth)
if class_weight is not None:
class_loss *= class_weight[i]
loss += class_loss
if class_weight is not None and class_weight_norm:
loss = loss / torch.sum(class_weight)
else:
loss = loss / num_classes
loss = weight_reduce_loss(loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
return loss | 9154c8e46a48485e643de496554d302f9db294ac | 3,651,253 |
import os
import uuid
def picture_upload_to(instance, filename):
"""
Returns a unique filename for picture which is hard to guess.
Will use uuid.uuid4() the chances of collision are very very very low.
"""
ext = os.path.splitext(filename)[1].strip('.')
if not ext:
ext = 'jpg'
filename = '%s.%s' % (uuid.uuid4(), ext)
return os.path.join(UPLOAD_TO, filename) | 0bdaa002105cdbc85c68019eebe1c6f54ab9173f | 3,651,254 |
import numpy as np
def showerActivityModel(sol, flux_max, b, sol_max):
""" Activity model taken from: Jenniskens, P. (1994). Meteor stream activity I. The annual streams.
    Astronomy and Astrophysics, 287, equation 8.
Arguments:
sol: [float] Solar longitude for which the activity is computed (radians).
flux_max: [float] Peak relative flux.
b: [float] Slope of the shower.
sol_max: [float] Solar longitude of the peak of the shower (radians).
"""
# Compute the flux at given solar longitude
flux = flux_max*10**(-b*np.degrees(np.abs(sol - sol_max)))
return flux | 0a4cc6d8c490b36412140cfeeca0c30464c11577 | 3,651,255 |
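A worked example with hypothetical parameters, assuming numpy is imported as np: five degrees past the peak with flux_max=1.0 and slope b=0.2, the relative flux falls to 10**(-0.2*5), i.e. about 0.1:
flux = showerActivityModel(np.radians(215.0), flux_max=1.0, b=0.2, sol_max=np.radians(210.0))   # ~0.1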
import os
def create_directories(directory_name):
"""
Create directories
"""
# Create directory
try:
# Create target Directory
os.mkdir(directory_name)
logger.info("Directory %s Created", directory_name)
except FileExistsError:
logger.info("Directory %s already exists", directory_name)
exit(1)
subdirectory_name = os.path.join(directory_name, 'res')
# Create target directory & all intermediate directories if don't exists
try:
os.makedirs(subdirectory_name)
logger.info("Directory %s Created", subdirectory_name)
except FileExistsError:
logger.info("Directory %s already exists", subdirectory_name)
return subdirectory_name | 453399c5e5b4200478c14d414efdd7a4cf8ced1f | 3,651,256 |
import json
def request_slow_log(db_cluster_id, start_datetime, end_datetime, page_number, page_size):
"""
    Request slow SQL log records.
    :param db_cluster_id: ID of the DB cluster to query
    :param start_datetime: start of the time window
    :param end_datetime: end of the time window
    :param page_number: page number of the paginated result
    :param page_size: number of records per page
    :return: parsed JSON response
"""
request = DescribeSlowLogRecordsRequest()
request.set_accept_format('json')
request.set_DBClusterId(db_cluster_id)
    # Format the previous day's date
request.set_StartTime(start_datetime)
request.set_EndTime(end_datetime)
request.set_PageNumber(page_number)
request.set_PageSize(page_size)
response = client.do_action_with_exception(request)
response = str(response, encoding='utf-8')
resp_result = json.loads(response)
return resp_result | e49a006e59f04067eb43f72dfcd29fd71def4fb1 | 3,651,257 |
def pad_omni_image(image, pad_size, image_dims=None):
"""Pad an omni-directional image with the correct image wrapping at the edges.
Parameters
----------
image
Image to perform the padding on *[batch_shape,h,w,d]*
pad_size
Number of pixels to pad.
image_dims
Image dimensions. Inferred from Inputs if None. (Default value = None)
Returns
-------
ret
New padded omni-directional image *[batch_shape,h+ps,w+ps,d]*
"""
if image_dims is None:
image_dims = image.shape[-3:-1]
# BS x PS x W/2 x D
top_left = image[..., 0:pad_size, int(image_dims[1] / 2):, :]
top_right = image[..., 0:pad_size, 0:int(image_dims[1] / 2), :]
# BS x PS x W x D
top_border = _ivy.flip(_ivy.concatenate((top_left, top_right), -2), -3)
# BS x PS x W/2 x D
bottom_left = image[..., -pad_size:, int(image_dims[1] / 2):, :]
bottom_right = image[..., -pad_size:, 0:int(image_dims[1] / 2), :]
# BS x PS x W x D
bottom_border = _ivy.flip(_ivy.concatenate((bottom_left, bottom_right), -2), -3)
# BS x H+2PS x W x D
image_expanded = _ivy.concatenate((top_border, image, bottom_border), -3)
# BS x H+2PS x PS x D
left_border = image_expanded[..., -pad_size:, :]
right_border = image_expanded[..., 0:pad_size, :]
# BS x H+2PS x W+2PS x D
return _ivy.concatenate((left_border, image_expanded, right_border), -2) | e54d732508bea3f969eb2a78ec3238e88e33a30f | 3,651,258 |
from typing import Any
def add_film(
film: FilmCreate,
db: Session = Depends(get_db),
user: User = Depends(get_current_user),
) -> Any:
"""
Add new film
"""
if not user.role.can_add_films:
raise ForbiddenAction
db_film = db.query(Film).filter(Film.name == film.name).first()
if db_film is not None:
raise FilmAlreadyExists
db_film = Film(
name=film.name,
released_year=film.released_year,
owner_user=user,
)
db.add(db_film)
db.flush()
film_model = FilmGet.from_orm(db_film)
db.commit()
return {
'status': 'ok',
'data': film_model,
} | 510f80969570e186233a6313277093b6d939a9ea | 3,651,259 |
def load_cube_file(lines, target_mode=None, cls=ImageFilter.Color3DLUT):
"""Loads 3D lookup table from .cube file format.
:param lines: Filename or iterable list of strings with file content.
:param target_mode: Image mode which should be after color transformation.
The default is None, which means mode doesn't change.
:param cls: A class which handles the parsed file.
Default is ``ImageFilter.Color3DLUT``.
"""
name, size = None, None
channels = 3
    file = None
    if isinstance(lines, str):
        # treat a string argument as a filename; keep the handle so the
        # finally block below can close it
        file = lines = open(lines, 'rt')
try:
iterator = iter(lines)
for i, line in enumerate(iterator, 1):
line = line.strip()
if line.startswith('TITLE "'):
name = line.split('"')[1]
continue
if line.startswith('LUT_3D_SIZE '):
size = [int(x) for x in line.split()[1:]]
if len(size) == 1:
size = size[0]
continue
if line.startswith('CHANNELS '):
channels = int(line.split()[1])
if line.startswith('LUT_1D_SIZE '):
raise ValueError("1D LUT cube files aren't supported")
try:
float(line.partition(' ')[0])
except ValueError:
pass
else:
# Data starts
break
if size is None:
raise ValueError('No size found in the file')
table = []
for i, line in enumerate(chain([line], iterator), i):
line = line.strip()
if not line or line.startswith('#'):
continue
try:
pixel = [float(x) for x in line.split()]
except ValueError:
raise ValueError("Not a number on line {}".format(i))
if len(pixel) != channels:
raise ValueError(
"Wrong number of colors on line {}".format(i))
table.extend(pixel)
finally:
if file is not None:
file.close()
instance = cls(size, table, channels=channels,
target_mode=target_mode, _copy_table=False)
if name is not None:
instance.name = name
return instance | bf87c0e686b689a429297bae4ec84402dd12dc3d | 3,651,260 |
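A minimal usage sketch, assuming Pillow is installed: write a 2x2x2 identity LUT in the format parsed above, then load it (the file name is illustrative).
cube_text = (
    'TITLE "identity"\n'
    'LUT_3D_SIZE 2\n'
    '0.0 0.0 0.0\n1.0 0.0 0.0\n0.0 1.0 0.0\n1.0 1.0 0.0\n'
    '0.0 0.0 1.0\n1.0 0.0 1.0\n0.0 1.0 1.0\n1.0 1.0 1.0\n'
)
with open("identity.cube", "w") as f:
    f.write(cube_text)
lut = load_cube_file("identity.cube")   # an ImageFilter.Color3DLUT named "identity"
# Image.open("photo.jpg").filter(lut) would then apply it to an image.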
import numpy as np
import torch
# ComplexTensor is expected to be provided elsewhere (e.g. by the torch_complex package)
def vector_to_Hermitian(vec):
"""Construct a Hermitian matrix from a vector of N**2 independent
real-valued elements.
Args:
vec (torch.Tensor): (..., N ** 2)
Returns:
mat (ComplexTensor): (..., N, N)
""" # noqa: H405, D205, D400
N = int(np.sqrt(vec.shape[-1]))
mat = torch.zeros(size=vec.shape[:-1] + (N, N, 2), device=vec.device)
# real component
triu = np.triu_indices(N, 0)
triu2 = np.triu_indices(N, 1) # above main diagonal
tril = (triu2[1], triu2[0]) # below main diagonal; for symmetry
mat[(...,) + triu + (np.zeros(triu[0].shape[0]),)] = vec[..., : triu[0].shape[0]]
start = triu[0].shape[0]
mat[(...,) + tril + (np.zeros(tril[0].shape[0]),)] = mat[
(...,) + triu2 + (np.zeros(triu2[0].shape[0]),)
]
# imaginary component
mat[(...,) + triu2 + (np.ones(triu2[0].shape[0]),)] = vec[
..., start : start + triu2[0].shape[0]
]
mat[(...,) + tril + (np.ones(tril[0].shape[0]),)] = -mat[
(...,) + triu2 + (np.ones(triu2[0].shape[0]),)
]
return ComplexTensor(mat[..., 0], mat[..., 1]) | 8bd32d93e9865305a8f75711d72990beaea5d897 | 3,651,261 |
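To make the packing concrete, for N = 2 (vec holding N**2 = 4 independent reals):
# vec = [a, b, c, d]
#   real part (symmetric):          [[ a,  b],
#                                    [ b,  c]]
#   imaginary part (antisymmetric): [[ 0,  d],
#                                    [-d,  0]]
# i.e. the returned ComplexTensor represents [[a, b + dj], [b - dj, c]].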
import os
import yaml
import plistlib
def new_recipe(argv):
"""Makes a new recipe template"""
verb = argv[1]
parser = gen_common_parser()
parser.set_usage(
f"Usage: %prog {verb} [options] recipe_pathname\n" "Make a new template recipe."
)
# Parse arguments
parser.add_option("-i", "--identifier", help="Recipe identifier")
parser.add_option(
"-p", "--parent-identifier", help="Parent recipe identifier for this recipe."
)
parser.add_option(
"--format",
action="store",
default="plist",
help=(
"The format of the new recipe to be created. "
"Valid options include: 'plist' (default) or 'yaml'"
),
)
(options, arguments) = common_parse(parser, argv)
if len(arguments) != 1:
log_err("Must specify exactly one recipe pathname!")
log_err(parser.get_usage())
return -1
filename = arguments[0]
name = os.path.basename(filename).split(".")[0]
identifier = options.identifier or "local." + name
recipe = {
"Description": "Recipe description",
"Identifier": identifier,
"Input": {"NAME": name},
"MinimumVersion": "1.0",
"Process": [
{
"Arguments": {"Argument1": "Value1", "Argument2": "Value2"},
"Processor": "ProcessorName",
}
],
}
if options.parent_identifier:
recipe["ParentRecipe"] = options.parent_identifier
try:
if options.format == "yaml" or filename.endswith(".recipe.yaml"):
# Yaml recipes require AutoPkg 2.3 or later.
recipe["MinimumVersion"] = "2.3"
with open(filename, "wb") as f:
yaml.dump(recipe, f, encoding="utf-8")
else:
with open(filename, "wb") as f:
plistlib.dump(recipe, f)
log(f"Saved new recipe to {filename}")
except Exception as err:
log_err(f"Failed to write recipe: {err}") | 598577761487497a65f2bb6bdd16c308859af278 | 3,651,262 |
def view_payment(request):
""" A view that renders the payment page template """
user = request.user
# Check if user has already paid and redirect them to definitions app.
if user.has_perm('definitionssoftware.access_paid_definitions_app'):
return redirect(reverse('view_definitionssoftware'))
# Get stripe environment variables
stripe_public_key = settings.STRIPE_PUBLIC_KEY
stripe_secret_key = settings.STRIPE_SECRET_KEY
if request.method == 'POST':
request.session['payment_successful'] = True
return redirect(reverse('payment_success'))
# Create Stripe Payment Intent
stripe_total = 2500
stripe.api_key = stripe_secret_key
intent = stripe.PaymentIntent.create(
amount=stripe_total,
currency=settings.STRIPE_CURRENCY,
)
print(intent)
if not stripe_public_key:
messages.warning(request, 'Stripe public key is missing. \
Did you forget to set it in your environment?')
template = 'payment/payment.html'
context = {
'stripe_public_key': stripe_public_key,
'client_secret': intent.client_secret,
}
return render(request, template, context) | fc86a79c759bc4e4005d634b5c9a204473a3a3a7 | 3,651,263 |
def augment_bag(store, bag, username=None):
"""
Augment a bag object with information about it's policy type.
"""
if not bag.store:
bag = store.get(bag)
if not username:
username = bag.policy.owner
policy_type = determine_tank_type(bag, username)
bag.icon = POLICY_ICONS[policy_type]
bag.type = policy_type
return bag | 1fbda85f3db346e46e52b86d2d4b5784f8c4d2ab | 3,651,264 |
from typing import Dict
def province_id_to_home_sc_power() -> Dict[utils.ProvinceID, int]:
"""Which power is this a home sc for?"""
content = get_mdf_content(MapMDF.STANDARD_MAP)
home_sc_line = content.splitlines()[2]
tag_to_id = _tag_to_id(get_mdf_content(MapMDF.STANDARD_MAP))
# Assume powers are ordered correctly
id_to_power = {}
power = -1
words = str(home_sc_line).split(' ')
for w in words:
if w in ['(', ')']:
pass
elif w in tag_to_id: # Is a province
id_to_power[tag_to_id[w]] = power
else: # Must be a power tag
power += 1
return id_to_power | f87081ce053e3a50bb48deaae28b0e919e224216 | 3,651,265 |
def evaluate_cubic_spline(x, y, r, t):
"""Evaluate cubic spline at points.
Parameters:
x : rank-1 np.array of np.float64
data x coordinates
y : rank-1 np.array of np.float64
data y coordinates
r : rank-1 np.array of np.float64
output of solve_coeffs() for your data
t : rank-1 np.array of np.float64
points where to evaluate. Must satisfy (x[0] <= t <= x[-1]).all().
Returns:
s : rank-1 np.array of np.float64
Value of the spline at the points t.
"""
return _evaluate_generic(x,y,r,t, _evaluate_cubic_spline_one) | 52d6c4ac0440da88ee908bc0a6cfa2b755ca606f | 3,651,266 |
def get_username(host, meta_host, config):
"""Find username from sources db/metadata/config."""
username = host.username or meta_host.get("username")
if is_windows_host(meta_host):
username = username or "Administrator"
default_user = get_config_value(config["users"], meta_host["os"])
username = username or default_user
return username | 4e220816442e64d43f1da15aa0bd19508e186f19 | 3,651,267 |
def get_all_tests():
"""
Collect all tests and return them
:return: A test suite as returned by xunitparser with all the tests
available in the w3af framework source code, without any selectors.
"""
return _get_tests('all.xml') | 11acff501fb717ac5c9bdc16343742f124f2a120 | 3,651,268 |
def main():
"""Builds OSS-Fuzz project's fuzzers for CI tools.
Note: The resulting fuzz target binaries of this build are placed in
the directory: ${GITHUB_WORKSPACE}/out
Returns:
0 on success or nonzero on failure.
"""
return build_fuzzers_entrypoint() | cd7c386c2a5d126c0abc1504a1cb3dfe6026173c | 3,651,269 |
def find_first_img_dim(import_gen):
"""
Loads in the first image in a provided data set and returns its dimensions
Intentionally returns on first iteration of the loop
:param import_gen: PyTorch DataLoader utilizing ImageFolderWithPaths for its dataset
:return: dimensions of image
"""
for x, _, _ in import_gen:
return x[0].shape[-2], x[0].shape[-1] | 3ccaccdfb20d7b2ca4d339adacd3c706a460fdef | 3,651,270 |
def restaurantJSON():
""" Returns all restaurants by JSON call """
restaurants = session.query(Restaurant)
return jsonify(Restaurants=[r.serialize for r in restaurants]) | 350df909de7798da9567a7fe0a972d660c40ff8c | 3,651,271 |
import os.path as op
def _to_histogram_plotgroup(use_spec, plotgroup_id, plot_id, read_type, bincounts, output_dir, png_name):
"""
Create a histogram of length distribution.
"""
plot_spec = use_spec.get_plot_spec(plotgroup_id, plot_id)
png_file = op.join(output_dir, png_name)
png, thumb = plot_read_lengths_binned(bincounts,
png_file,
read_type=read_type,
title=plot_spec.title,
color=get_blue(3),
edgecolor=get_blue(2))
return to_plotgroup(plotgroup_id, plot_id, png, thumb) | 18ae412af24800098ec2c01c9ba5c456455540f5 | 3,651,272 |
def prepare_string(x, max_length=None):
""" Converts a string from LaTeX escapes to UTF8 and truncates it to max_length """
# data = latex2text(x, tolerant_parsing=True)
try:
data = latex_to_unicode(filter_using_re(x))
if max_length is not None:
data = (data[:max_length-5] + '[...]') if len(data) > max_length else data
return smart_text(data)
except TypeError:
logger.warning("Encountered a TypeError which may be linked to unicode handling "
"in bibtexparser when processing the following string: %s."%x)
return "" | 043d0d063e22ef18943459a7ba0a8928244bca12 | 3,651,273 |
import math
def q_b(m0, m1, m2, n0, n1, n2):
"""Stretch"""
return math.sqrt((m0 - n0)**2 + (m1 - n1)**2 + (m2 - n2)**2) | 61cf1b5eec6c89be7f822cbdbc03564b805a1920 | 3,651,274 |
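For example, the stretch between m = (0, 0, 0) and n = (1, 2, 2) is sqrt(1 + 4 + 4) = 3:
assert q_b(0, 0, 0, 1, 2, 2) == 3.0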
import Polygon as plg  # the Polygon3 package
def poly_union(poly_det, poly_gt):
"""Calculate the union area between two polygon.
Args:
poly_det (Polygon): A polygon predicted by detector.
poly_gt (Polygon): A gt polygon.
Returns:
union_area (float): The union area between two polygons.
"""
assert isinstance(poly_det, plg.Polygon)
assert isinstance(poly_gt, plg.Polygon)
area_det = poly_det.area()
area_gt = poly_gt.area()
area_inters, _ = poly_intersection(poly_det, poly_gt)
return area_det + area_gt - area_inters | fbd13a9b1ef4acee27fac7d04b00fc1cfc46ca08 | 3,651,275 |
def get_stoich(geom_i, geom_j):
""" get the overall combined stoichiometry
"""
form_i = automol.geom.formula(geom_i)
form_j = automol.geom.formula(geom_j)
form = automol.formula.join(form_i, form_j)
stoich = ''
for key, val in form.items():
stoich += key + str(val)
return stoich | eaba89508d7c913a77ebf91097d620dc6fdff5a6 | 3,651,276 |
import requests
from requests.exceptions import HTTPError
from bs4 import BeautifulSoup
def get_all_text(url):
"""Retrieves all text in paragraphs.
:param str url: The URL to scrap.
:rtype: str :return: Text in the URL.
"""
try:
response = requests.get(url)
# If the response was successful, no Exception will be raised
response.raise_for_status()
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}') # Python 3.6
return None
# sys.exit()
except Exception as err:
print(f'Other error occurred: {err}') # Python 3.6
return None
# sys.exit()
soup = BeautifulSoup(response.text, "lxml")
text = ""
for i in soup.find_all('p'): # soup.select
# i.encode("utf-8") # default
# Delete citations (e.g. "The Alhambra is a UNESCO World Heritage Site.[2]")
text += i.get_text() + '\n'
text = clean_text.del_nonAscii(clean_text.del_refs(text))
return text | 364150aee7c1c093367d3d95bc5c0836dde978db | 3,651,277 |
from typing import List
def metadata_partitioner(rx_txt: str) -> List[str]:
"""Extract Relax program and metadata section.
Parameters
----------
rx_txt : str
The input relax text.
Returns
-------
output : List[str]
The result list of partitioned text, the first element
is the relax program, and the second is metadata section.
"""
partitions = []
left_curly = 0
meta_start = 0
meta_end = 0
for i, char in enumerate(rx_txt):
if i < 0:
raise ValueError("The program is invalid.")
if char == "{":
if meta_start == 0:
meta_start = i
left_curly += 1
elif char == "}":
left_curly -= 1
if left_curly == 0:
meta_end = i + 1
break
if meta_end == 0:
raise ValueError("The metadata section was not found.")
metadata = rx_txt[meta_start:meta_end]
rx_program = rx_txt[meta_end:-1]
partitions.append(rx_program)
partitions.append(metadata)
return partitions | dd09aff9ea517813d43ff307fb9fc425b7338943 | 3,651,278 |
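A small illustration of the brace matching, using made-up text rather than a real Relax script:
text = '{"attrs": {"device": 0}}\n@R.function\ndef main(): ...\n'
program, metadata = metadata_partitioner(text)
# metadata == '{"attrs": {"device": 0}}'
# program  == '\n@R.function\ndef main(): ...'   (the final character of the input is dropped)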
def make_aware(value, timezone):
"""
Makes a naive datetime.datetime in a given time zone aware.
"""
if hasattr(timezone, 'localize'):
# available for pytz time zones
return timezone.localize(value, is_dst=None)
else:
# may be wrong around DST changes
return value.replace(tzinfo=timezone) | b466b4fda2daf54b7aa5e8f00ad7b10397e61c7b | 3,651,279 |
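A usage sketch with pytz, whose zone objects expose the localize() branch above:
from datetime import datetime
import pytz
naive = datetime(2021, 3, 14, 2, 30)
aware = make_aware(naive, pytz.timezone("Europe/Paris"))
# localize(..., is_dst=None) raises AmbiguousTimeError/NonExistentTimeError around
# DST transitions instead of silently guessing; plain tzinfo objects take the
# replace() fallback.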
def to_dict(funs):
"""Convert an object to a dict using a dictionary of functions.
to_dict(funs)(an_object) => a dictionary with keys calculated from functions on an_object
Note the dictionary is copied, not modified in-place.
If you want to modify a dictionary in-place, do adict.update(to_dict(funs)(a_dict))
Use to_dict(funs) in a map, and you can generate a list of dictionaries from a list of objects (which could also be dictionaries).
:: K is hashable type => {K: (X -> V)} -> [X] -> {K: V}
Equivalent to the following in Python 3:
{k: f(an_object) for (k, f) in funs.items()}
>>> from operator import itemgetter
>>> funs = {'id': itemgetter('id'), 'fullname': lambda x: '%(forename)s %(surname)s' % x}
>>> an_object = {'id': 1, 'forename': 'Fred', 'surname': 'Bloggs'}
>>> result = to_dict(funs)(an_object)
>>> result['id']
1
>>> result['fullname']
'Fred Bloggs'
>>> 'forename' in result # Original keys are left out
False
"""
def to_dict_funs(an_object):
return dict((k, f(an_object)) for (k, f) in funs.items())
return to_dict_funs | d22bbcb3c1913361c3906fd2e7f3d254dc67de28 | 3,651,280 |
import re
def parse_duration_string_ms(duration):
"""Parses a duration string of the form 1h2h3m4s5.6ms4.5us7.8ns into milliseconds."""
pattern = r'(?P<value>[0-9]+\.?[0-9]*?)(?P<units>\D+)'
matches = list(re.finditer(pattern, duration))
assert matches, 'Failed to parse duration string %s' % duration
times = {'h': 0, 'm': 0, 's': 0, 'ms': 0}
for match in matches:
parsed = match.groupdict()
times[parsed['units']] = float(parsed['value'])
return (times['h'] * 60 * 60 + times['m'] * 60 + times['s']) * 1000 + times['ms'] | da2981590d70f32ee3514873602621a77b70cbe2 | 3,651,281 |
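Worked example: "1h2m3s" is 3600000 + 120000 + 3000 = 3723000 ms.
assert parse_duration_string_ms("1h2m3s") == 3723000.0
assert parse_duration_string_ms("1.5s20ms") == 1520.0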
def fin(activity):
"""Return the end time of the activity. """
return activity.finish | ed5b1d1e0f29f403cfee357a264d05d5cc88093e | 3,651,282 |
def unfreeze_map(obj):
"""
Unfreezes all elements of mappables
"""
return {key: unfreeze(value) for key, value in obj.items()} | 2ba48f6cf89f44001b7940076c4763dc820d9aa1 | 3,651,283 |
from typing import Optional
from typing import Union
from datetime import datetime
def get_date(
value: Optional[Union[date, datetime, str]],
raise_error=False
) -> Optional[date]:
"""
Convert a given value to a date.
Args:
raise_error: flag to raise error if return is None or not
value: to be converted. Can be date/datetime obj as well as str formatted in date/datetime
Returns:
date obj
Raises:
ValueError: If raise_error flag is True and parsed_date is None
Examples:
>>> get_date(date(2021, 1, 1))
datetime.date(2021, 1, 1)
>>> get_date(datetime(2021, 1, 1, 0, 2))
datetime.date(2021, 1, 1)
>>> get_date('2020-01-01 13:12:13')
datetime.date(2020, 1, 1)
>>> get_date('sadasadasdas') is None
True
>>> get_date(None) is None
True
>>> get_date('2021-20-20-20-20', raise_error=True)
Traceback (most recent call last):
...
ValueError: Invalid date 2021-20-20-20-20
"""
if isinstance(value, datetime):
return value.date()
if isinstance(value, date):
return value
if value is not None:
# A common date is in the form "2020-01-01", 10 characters
if len(value) > 10:
parsed_date = parse_datetime(value)
parsed_date = parsed_date.date() if parsed_date else None
else:
parsed_date = parse_date(value)
else:
parsed_date = None
if parsed_date is None and raise_error:
raise ValueError(f"Invalid date {value}")
return parsed_date | 501b2363aa2d40f16f6144995db8d840e62f750a | 3,651,284 |
import os.path, os
def make_working_directories ():
""" Creates directories that we will be working in.
In particular, we will have DOC_ROOT/stage-PID and
DOC_ROOT/packages-PID """
global doc_root
stage_dir = os.path.join (doc_root, "stage-" + str (os.getpid ()))
package_dir = os.path.join (doc_root, "package-" + str (os.getpid ()))
os.mkdir (stage_dir)
os.mkdir (package_dir)
return (stage_dir, package_dir) | e92fb9f27c5c7b13d1666175ea89705c473d8323 | 3,651,285 |
import numpy as np
import scipy.special as sps
def k_param(kguess, s):
"""
Finds the root of the maximum likelihood estimator
for k using Newton's method. Routines for using Newton's method
exist within the scipy package but they were not explored. This
function is sufficiently well behaved such that we should not
have problems solving for k, especially since we have a good
estimate of k to use as a starting point.
"""
k = kguess
val = np.log(k) - sps.psi(k) - s
counter = 0
while np.abs(val) >= 0.0001:
k = k - (np.log(k)-sps.psi(k)-s)/(1/k-sps.polygamma(1, k))
val = np.log(k) - sps.psi(k) - s
# sps.polygamma(1,k) is first derivative of sps.psi(k)
counter += 1
if counter > MAX_NEWTON_ITERATIONS:
raise Exception("Max Newton's method iterations exceeded")
return k | 24df48746d53fd4573db10093065e7b49d5c7bfe | 3,651,286 |
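k_param solves log(k) - psi(k) = s, the maximum-likelihood condition for the gamma shape
parameter, where s = log(mean(x)) - mean(log(x)). A self-consistency sketch (assumes the
module constant MAX_NEWTON_ITERATIONS is defined; 100 below is only a placeholder value):
MAX_NEWTON_ITERATIONS = 100   # placeholder for the module-level constant
k_true = 2.5
s = np.log(k_true) - sps.psi(k_true)   # construct s so the exact root is k_true
k_est = k_param(kguess=2.0, s=s)
assert abs(k_est - k_true) < 1e-2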
def hex_to_bin(value: hex) -> bin:
"""
convert a hexadecimal to binary
0xf -> '0b1111'
"""
return bin(value) | b82c4fea08fc258a3b50be9a5e77b3d076a33459 | 3,651,287 |
def four_oneports_2_twoport(s11: Network, s12: Network, s21: Network, s22: Network, *args, **kwargs) -> Network:
"""
Builds a 2-port Network from list of four 1-ports
Parameters
----------
s11 : one-port :class:`Network`
s11
s12 : one-port :class:`Network`
s12
s21 : one-port :class:`Network`
s21
s22 : one-port :class:`Network`
s22
\*args, \*\*kwargs :
passed to :func:`Network.__init__` for the twoport
Returns
-------
twoport : two-port :class:`Network`
result
See Also
--------
n_oneports_2_nport
three_twoports_2_threeport
"""
return n_oneports_2_nport([s11, s12, s21, s22], *args, **kwargs) | 2f8b365b2ccb06c252337630f6e34b794a3a3eba | 3,651,288 |
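A usage sketch with scikit-rf, splitting a measured two-port into its one-port sub-networks
and rebuilding it (the touchstone file name is hypothetical):
import skrf as rf
ntwk = rf.Network("measured.s2p")
rebuilt = four_oneports_2_twoport(ntwk.s11, ntwk.s12, ntwk.s21, ntwk.s22, name="rebuilt")
# rebuilt.s should match ntwk.s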
import os.path as op
import xml.etree.ElementTree as ETree
from collections import defaultdict
def find_xml_command(rvt_version, xml_path):
"""
Finds name index src path and group of Commands in RevitPythonShell.xml configuration.
:param rvt_version: rvt version to find the appropriate RevitPythonShell.xml.
:param xml_path: path where RevitPythonShell.xml resides.
:return: Commands dictionary: {com_name:[index, src_path, group]}
"""
if not xml_path:
xml_path = op.join(op.expanduser("~"),
"AppData\\Roaming\\RevitPythonShell{0}\\RevitPythonShell.xml").format(rvt_version)
xml_tree = ETree.parse(xml_path)
xml_root = xml_tree.getroot()
commands = defaultdict(list)
for child in xml_root:
if child.tag == 'Commands':
            com_children = list(child)  # Element.getchildren() was removed in Python 3.9
for i, com_child in enumerate(com_children):
com_name = com_child.attrib["name"]
commands[com_name].append(i)
commands[com_name].append(com_child.attrib["src"])
commands[com_name].append(com_child.attrib["group"])
return commands | 1effc1b313d93e92b25deef1d62fc65c8f3e6975 | 3,651,289 |
import mimetypes
import boto3
def put_data_to_s3(data, bucket, key, acl=None):
"""data is bytes not string"""
content_type = mimetypes.guess_type(key)[0]
if content_type is None:
content_type = 'binary/octet-stream'
put_object_args = {'Bucket': bucket, 'Key': key, 'Body': data,
'ContentType': content_type}
if acl:
put_object_args.update({'ACL': acl})
return boto3.client('s3').put_object(**put_object_args) | 042fc8eea230559efdc60ca9f18db2e9d1766286 | 3,651,290 |
from pathlib import Path
def join_analysis_json_path(data_path: Path, analysis_id: str, sample_id: str) -> Path:
"""
Join the path to an analysis JSON file for the given sample-analysis ID combination.
Analysis JSON files are created when the analysis data is too large for a MongoDB document.
:param data_path: the path to the application data
:param analysis_id: the ID of the analysis
:param sample_id: the ID of the sample
:return: a path
"""
return join_analysis_path(data_path, analysis_id, sample_id) / "results.json" | 5ae25e5c0df4801b23a34cdac09db709733844ca | 3,651,291 |
def user_profile(uname=None):
"""
Frontend gets user's profile by user name or modify user profile (to do).
Return user's complete profile and the recommendations for him (brief events).
:param uname: user's name, a string
:return: a json structured as {'user': [(0, 'who', 'password', '[email protected]', 'address', 'Limoges')],
'event': [{'event_id': 1234, 'title': '...', ...},{'event_id': 2345, ...}, ...]}
"""
verify_headers(request.headers)
if request.method == 'GET':
user = user_manager.return_user_data(uname)
if len(user) == 0:
abort(404)
preferred_events_id = rcmd_manager.get_recommendations_for_user(user_manager.return_user_id(uname))
preferred_events = []
for pair in preferred_events_id:
preferred_events.append({'activity': event_manager.get_event_with_nearest(pair[0]),
'score': pair[1]})
return jsonify({'user': user, 'event': preferred_events})
elif request.method == 'POST':
if not request.json:
abort(400)
# to do for user profile modification | b43ab64b0d44e7d19342a90da261bf96489fed3a | 3,651,292 |
def circuit_status(self, handle: ResultHandle) -> CircuitStatus:
"""
Return a CircuitStatus reporting the status of the circuit execution
corresponding to the ResultHandle
"""
if handle in self._cache:
return CircuitStatus(StatusEnum.COMPLETED)
raise CircuitNotRunError(handle) | 7de17e03e3177f7b7c2de31650a0c341ab7e4fa6 | 3,651,293 |
import os
import json
import pandas as pd
import pyfunnel
def run_pyfunnel(test_dir):
"""Run pyfunnel compareAndReport function.
The test is run:
* with the parameters, reference and test values from the test directory
passed as argument;
* from current directory (to which output directory path is relative).
Args:
test_dir (str): path of test directory
Returns:
int: exit status of funnel binary
"""
with open(os.path.join(test_dir, 'param.json'), 'r') as f:
par = json.load(f)
ref = pd.read_csv(os.path.join(test_dir, par['reference']))
test = pd.read_csv(os.path.join(test_dir, par['test']))
par['outputDirectory'] = par['output']
for t in ['atolx', 'atoly', 'ltolx', 'ltoly', 'rtolx', 'rtoly']:
try:
par[t]
except KeyError:
par[t] = None
rc = pyfunnel.compareAndReport(
ref.iloc(axis=1)[0],
ref.iloc(axis=1)[1],
test.iloc(axis=1)[0],
test.iloc(axis=1)[1],
**{k: par[k] for k in ['outputDirectory', 'atolx', 'atoly', 'ltolx', 'ltoly', 'rtolx', 'rtoly']}
)
return rc | ef88e40aad9be05dbfdb4f281b9cb1c5b934d49e | 3,651,294 |
import pandas as pd
def get_portfolio() -> pd.DataFrame:
"""
Get complete user portfolio
Returns:
pd.DataFrame: complete portfolio
"""
portfolio = get_simple_portfolio()
full_portfolio = pd.DataFrame()
for ticket in portfolio.index:
full_portfolio = full_portfolio.append(
_clear_individual_information(get_individual_information(ticket)))
return full_portfolio | c04df5cf88e936cab9bd7f30a63ab3e695efd771 | 3,651,295 |
import numpy as np
def findZeros( vec, tol = 0.00001 ):
"""Given a vector of a data, finds all the zeros
returns a Nx2 array of data
each row is a zero, first column is the time of the zero, second column indicates increasing
or decreasing (+1 or -1 respectively)"""
zeros = []
for i in range( vec.size - 1 ):
a = float( vec[ i ] )
b = float( vec[ i + 1] )
increasing = 1
if ( b < a ):
increasing = -1
if ( a * b < 0 ):
t = -a / ( b - a )
zeros.append( ( i + t, increasing ) )
if ( abs( vec[ -1 ] ) < tol ):
if ( vec[-1] > vec[-2] ):
zeros.append( ( vec.size - 1, 1 ) )
else:
zeros.append( ( vec.size - 1, -1 ) )
    # crossing times are generally fractional, so keep them as floats
    # (np.int was also removed from modern NumPy releases)
    return np.array( zeros, dtype=float ) | 173f734c9b3abf876b48d194e691b517fd0ec816 | 3,651,296
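A small check on one period of a sampled sine wave; crossing times are fractional sample
indices, which is why the result is kept as floats:
zeros = findZeros(np.sin(np.linspace(0.0, 2.0 * np.pi, 21)))
# ~[[10., -1.], [20., 1.]]: a descending crossing near sample 10 (sin passes through 0
# at pi) and an ascending zero flagged at the last sample, where |sin(2*pi)| < tol.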
from typing import List
def get_index(square_num: int) -> List[int]:
"""
Gets the indices of a square given the square number
:param square_num: An integer representing a square
    :return: A list containing the row and column index of the square
"""
for i in range(4):
for j in range(4):
if puzzle_state[i][j] == square_num:
return [i, j] | e9896ba58b76ea43069b408a445720f0b418488d | 3,651,297 |
def _ResolveName(item):
"""Apply custom name info if provided by metadata"""
# ----------------------------------------------------------------------
def IsValidName(value):
return bool(value)
# ----------------------------------------------------------------------
if Attributes.UNIVERSAL_NAME_OVERRIDE_ATTRIBUTE_NAME in item.metadata.Values:
metadata_value = item.metadata.Values[Attributes.UNIVERSAL_NAME_OVERRIDE_ATTRIBUTE_NAME]
if not IsValidName(metadata_value.Value):
raise Exceptions.ResolveInvalidCustomNameException(
metadata_value.Source,
metadata_value.Line,
metadata_value.Column,
name=metadata_value.Value,
)
item.name = metadata_value.Value
del item.metadata.Values[Attributes.UNIVERSAL_NAME_OVERRIDE_ATTRIBUTE_NAME] | 0d303cb4577503b4e39f14da699cf77c9adf462f | 3,651,298 |
import re
def extract_errno(errstr):
"""
Given an error response from a proxyfs RPC, extracts the error number
from it, or None if the error isn't in the usual format.
"""
# A proxyfs error response looks like "errno: 18"
m = re.match(PFS_ERRNO_RE, errstr)
if m:
return int(m.group(1)) | adff11595d391a1bb4403c3c93a0bb4ab182254a | 3,651,299 |