import matplotlib.pyplot as plt
from matplotlib import animation

def play_image_stack(array_tiff):
    """
    Display a tiff image stack as a looping video.
    Arguments:
    array_tiff -- multidimensional array of tiff images
    """
    fig = plt.figure()
    images = []
    # Loop through each slice of the 3D array, wrap it in an Artist and store it in a list
    for frame in range(len(array_tiff)):
        image = plt.imshow(array_tiff[frame], animated=True)
        images.append([image])
    # Build the animation; keep a reference so it is not garbage collected before plt.show()
    ani = animation.ArtistAnimation(fig, images, interval=50)
    plt.show() | 5,357,200 |
def _ser_batch_ixs(num_samples, batch_size):
"""A generator which yields a list of tuples (offset, size) in serial order.
:param num_samples: Number of available samples.
:param batch_size: The size of the batch to fill.
"""
current_index = 0
batch, batch_count = [], 0
while True:
next_fetch = current_index
next_fetch_size = min(batch_size - batch_count, num_samples - next_fetch)
batch.append((next_fetch, next_fetch_size))
batch_count += next_fetch_size
if batch_count == batch_size:
# If we have enough samples to fill the batch size, yield
# the indices and reset the batch count.
yield batch
batch, batch_count = [], 0
current_index += next_fetch_size
if current_index == num_samples:
current_index = 0 | 5,357,201 |
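A minimal usage sketch for the generator above; the sample and batch sizes are illustrative, chosen so the wrap-around behaviour is visible.

gen = _ser_batch_ixs(num_samples=10, batch_size=4)
for _ in range(3):
    batch = next(gen)
    # e.g. [(0, 4)], then [(4, 4)], then [(8, 2), (0, 2)] after wrapping around
    print(batch, sum(size for _, size in batch))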
def cal_sort_key(cal):
"""
Sort key for the list of calendars: primary calendar first,
then other selected calendars, then unselected calendars.
(" " sorts before "X", and tuples are compared piecewise)
"""
if cal["selected"]:
selected_key = " "
else:
selected_key = "X"
if cal["primary"]:
primary_key = " "
else:
primary_key = "X"
return (primary_key, selected_key, cal["summary"]) | 5,357,202 |
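A usage sketch for cal_sort_key with Python's built-in sorted(); the calendar dicts are illustrative and only carry the keys the sort key reads.

calendars = [
    {"summary": "Work", "selected": False, "primary": False},
    {"summary": "Personal", "selected": True, "primary": False},
    {"summary": "Main", "selected": True, "primary": True},
]
ordered = sorted(calendars, key=cal_sort_key)
print([c["summary"] for c in ordered])  # ['Main', 'Personal', 'Work']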
from datetime import datetime
from zipfile import ZipFile

def select_zip_info(sample: bytes) -> tuple:
"""Print a list of items contained within the ZIP file, along with
their last modified times, CRC32 checksums, and file sizes. Return
info on the item selected by the user as a tuple.
"""
t = []
w = 0
z = ZipFile(sample)
for i in z.infolist():
if len(i.filename) > w:
w = len(i.filename)
t.append((i.filename, datetime(*i.date_time), i.CRC, i.file_size))
for i in range(len(t)):
dt = t[i][1].strftime('%Y-%m-%d %H:%M:%S')
crc = t[i][2].to_bytes(4, 'big').hex()
print(f'{i + 1: >2}. {t[i][0]: <{w}} {dt} {crc} {t[i][3]}')
n = input('\nEnter a number corresponding to the desired entry: ')
print()
return t[int(n) - 1] | 5,357,203 |
def create_ionosphere_layers(base_name, fp_id, requested_timestamp):
"""
Create a layers profile.
:param None: determined from :obj:`request.args`
:return: array
:rtype: array
"""
    function_str = 'ionosphere_backend.py :: create_ionosphere_layers'
trace = 'none'
fail_msg = 'none'
layers_algorithms = None
layers_added = None
value_conditions = ['<', '>', '==', '!=', '<=', '>=']
conditions = ['<', '>', '==', '!=', '<=', '>=', 'in', 'not in']
if 'd_condition' in request.args:
d_condition = request.args.get('d_condition', '==')
else:
logger.error('no d_condition argument passed')
fail_msg = 'error :: no d_condition argument passed'
return False, False, layers_algorithms, layers_added, fail_msg, trace
if not str(d_condition) in conditions:
        logger.error('d_condition not a valid condition - %s' % str(d_condition))
        fail_msg = 'error :: d_condition not a valid condition - %s' % str(d_condition)
return False, False, layers_algorithms, layers_added, fail_msg, trace
if 'd_boundary_limit' in request.args:
d_boundary_limit = request.args.get('d_boundary_limit', '0')
else:
logger.error('no d_boundary_limit argument passed')
fail_msg = 'error :: no d_boundary_limit argument passed'
return False, False, layers_algorithms, layers_added, fail_msg, trace
try:
# @modified 20170317 - Feature #1960: ionosphere_layers - allow for floats
# test_d_boundary_limit = int(d_boundary_limit) + 1
test_d_boundary_limit = float(d_boundary_limit) + 1
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
        fail_msg = 'error :: d_boundary_limit is not a float'
return False, False, layers_algorithms, layers_added, fail_msg, trace
# @modified 20160315 - Feature #1972: ionosphere_layers - use D layer boundary for upper limit
# Added d_boundary_times
if 'd_boundary_times' in request.args:
d_boundary_times = request.args.get('d_boundary_times', '1')
else:
logger.error('no d_boundary_times argument passed')
fail_msg = 'error :: no d_boundary_times argument passed'
return False, False, layers_algorithms, layers_added, fail_msg, trace
try:
test_d_boundary_times = int(d_boundary_times) + 1
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: d_boundary_times is not an int'
return False, False, layers_algorithms, layers_added, fail_msg, trace
# @added 20170616 - Feature #2048: D1 ionosphere layer
if 'd1_condition' in request.args:
d1_condition = request.args.get('d1_condition', 'none')
else:
logger.error('no d1_condition argument passed')
fail_msg = 'error :: no d1_condition argument passed'
return False, False, layers_algorithms, layers_added, fail_msg, trace
if str(d1_condition) == 'none':
d1_condition = 'none'
d1_boundary_limit = 0
d1_boundary_times = 0
else:
if not str(d1_condition) in conditions:
            logger.error('d1_condition not a valid condition - %s' % str(d1_condition))
            fail_msg = 'error :: d1_condition not a valid condition - %s' % str(d1_condition)
return False, False, layers_algorithms, layers_added, fail_msg, trace
if 'd1_boundary_limit' in request.args:
d1_boundary_limit = request.args.get('d1_boundary_limit', '0')
else:
logger.error('no d1_boundary_limit argument passed')
fail_msg = 'error :: no d1_boundary_limit argument passed'
return False, False, layers_algorithms, layers_added, fail_msg, trace
try:
test_d1_boundary_limit = float(d1_boundary_limit) + 1
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
            fail_msg = 'error :: d1_boundary_limit is not a float'
return False, False, layers_algorithms, layers_added, fail_msg, trace
if 'd1_boundary_times' in request.args:
d1_boundary_times = request.args.get('d1_boundary_times', '1')
else:
logger.error('no d1_boundary_times argument passed')
fail_msg = 'error :: no d1_boundary_times argument passed'
return False, False, layers_algorithms, layers_added, fail_msg, trace
try:
test_d1_boundary_times = int(d1_boundary_times) + 1
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: d1_boundary_times is not an int'
return False, False, layers_algorithms, layers_added, fail_msg, trace
if 'e_condition' in request.args:
e_condition = request.args.get('e_condition', None)
else:
logger.error('no e_condition argument passed')
fail_msg = 'error :: no e_condition argument passed'
return False, False, layers_algorithms, layers_added, fail_msg, trace
if not str(e_condition) in value_conditions:
        logger.error('e_condition not a valid value condition - %s' % str(e_condition))
        fail_msg = 'error :: e_condition not a valid value condition - %s' % str(e_condition)
return False, False, layers_algorithms, layers_added, fail_msg, trace
if 'e_boundary_limit' in request.args:
e_boundary_limit = request.args.get('e_boundary_limit')
else:
logger.error('no e_boundary_limit argument passed')
fail_msg = 'error :: no e_boundary_limit argument passed'
return False, False, layers_algorithms, layers_added, fail_msg, trace
try:
# @modified 20170317 - Feature #1960: ionosphere_layers - allow for floats
# test_e_boundary_limit = int(e_boundary_limit) + 1
test_e_boundary_limit = float(e_boundary_limit) + 1
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
        fail_msg = 'error :: e_boundary_limit is not a float'
return False, False, layers_algorithms, layers_added, fail_msg, trace
if 'e_boundary_times' in request.args:
e_boundary_times = request.args.get('e_boundary_times')
else:
logger.error('no e_boundary_times argument passed')
fail_msg = 'error :: no e_boundary_times argument passed'
return False, False, layers_algorithms, layers_added, fail_msg, trace
try:
test_e_boundary_times = int(e_boundary_times) + 1
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: e_boundary_times is not an int'
return False, False, layers_algorithms, layers_added, fail_msg, trace
es_layer = False
if 'es_layer' in request.args:
es_layer_arg = request.args.get('es_layer')
if es_layer_arg == 'true':
es_layer = True
if es_layer:
es_day = None
if 'es_day' in request.args:
es_day = request.args.get('es_day')
else:
logger.error('no es_day argument passed')
fail_msg = 'error :: no es_day argument passed'
return False, False, layers_algorithms, layers_added, fail_msg, trace
f1_layer = False
if 'f1_layer' in request.args:
f1_layer_arg = request.args.get('f1_layer')
if f1_layer_arg == 'true':
f1_layer = True
if f1_layer:
from_time = None
valid_f1_from_time = False
if 'from_time' in request.args:
from_time = request.args.get('from_time')
if from_time:
values_valid = True
if len(from_time) == 4:
for digit in from_time:
try:
int(digit) + 1
except:
values_valid = False
if values_valid:
if int(from_time) < 2400:
valid_f1_from_time = True
if not valid_f1_from_time:
logger.error('no valid f1_layer from_time argument passed - %s' % str(from_time))
fail_msg = 'error :: no valid f1_layer from_time argument passed - %s' % str(from_time)
return False, False, layers_algorithms, layers_added, fail_msg, trace
f2_layer = False
if 'f2_layer' in request.args:
f2_layer_arg = request.args.get('f2_layer')
if f2_layer_arg == 'true':
f2_layer = True
if f2_layer:
until_time = None
valid_f2_until_time = False
if 'until_time' in request.args:
until_time = request.args.get('until_time')
if until_time:
values_valid = True
if len(until_time) == 4:
for digit in until_time:
try:
int(digit) + 1
except:
values_valid = False
if values_valid:
if int(until_time) < 2400:
valid_f2_until_time = True
if not valid_f2_until_time:
logger.error('no valid f2_layer until_time argument passed - %s' % str(until_time))
fail_msg = 'error :: no valid f2_layer until_time argument passed - %s' % str(until_time)
return False, False, layers_algorithms, layers_added, fail_msg, trace
label = False
if 'fp_layer_label' in request.args:
label_arg = request.args.get('fp_layer_label')
label = label_arg[:255]
engine_needed = True
engine = None
if engine_needed:
logger.info('%s :: getting MySQL engine' % function_str)
try:
engine, fail_msg, trace = get_an_engine()
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not get a MySQL engine'
logger.error('%s' % fail_msg)
raise
if not engine:
trace = 'none'
fail_msg = 'error :: engine not obtained'
logger.error(fail_msg)
raise
try:
metrics_table, log_msg, trace = metrics_table_meta(skyline_app, engine)
logger.info(log_msg)
logger.info('metrics_table OK')
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get metrics_table meta')
if engine:
engine_disposal(engine)
raise # to webapp to return in the UI
metrics_id = 0
try:
connection = engine.connect()
stmt = select([metrics_table]).where(metrics_table.c.metric == base_name)
result = connection.execute(stmt)
for row in result:
metrics_id = int(row['id'])
connection.close()
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not determine metric id from metrics table'
if engine:
engine_disposal(engine)
raise
# Create layer profile
ionosphere_layers_table = None
try:
ionosphere_layers_table, fail_msg, trace = ionosphere_layers_table_meta(skyline_app, engine)
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: ionosphere_backend :: failed to get ionosphere_layers_table meta for %s' % base_name
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise
layer_id = 0
try:
connection = engine.connect()
stmt = select([ionosphere_layers_table]).where(ionosphere_layers_table.c.fp_id == fp_id)
result = connection.execute(stmt)
for row in result:
layer_id = int(row['id'])
connection.close()
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not determine id from ionosphere_layers_table'
if engine:
engine_disposal(engine)
raise
if layer_id > 0:
return layer_id, True, None, None, fail_msg, trace
new_layer_id = False
try:
connection = engine.connect()
ins = ionosphere_layers_table.insert().values(
fp_id=fp_id, metric_id=int(metrics_id), enabled=1, label=label)
result = connection.execute(ins)
connection.close()
new_layer_id = result.inserted_primary_key[0]
logger.info('new ionosphere layer_id: %s' % str(new_layer_id))
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: failed to insert a new record into the ionosphere_layers table for %s' % base_name
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise
# Create layer profile
layers_algorithms_table = None
try:
layers_algorithms_table, fail_msg, trace = layers_algorithms_table_meta(skyline_app, engine)
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: ionosphere_backend :: failed to get layers_algorithms_table meta for %s' % base_name
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise
new_layer_algorithm_ids = []
layers_added = []
# D layer
try:
connection = engine.connect()
ins = layers_algorithms_table.insert().values(
layer_id=new_layer_id, fp_id=fp_id, metric_id=int(metrics_id),
layer='D', type='value', condition=d_condition,
# @modified 20170317 - Feature #1960: ionosphere_layers - allow for floats
# layer_boundary=int(d_boundary_limit),
layer_boundary=str(d_boundary_limit),
# @modified 20160315 - Feature #1972: ionosphere_layers - use D layer boundary for upper limit
# Added d_boundary_times
times_in_row=int(d_boundary_times))
result = connection.execute(ins)
connection.close()
new_layer_algorithm_id = result.inserted_primary_key[0]
logger.info('new ionosphere_algorithms D layer id: %s' % str(new_layer_algorithm_id))
new_layer_algorithm_ids.append(new_layer_algorithm_id)
layers_added.append('D')
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: failed to insert a new D layer record into the layers_algorithms table for %s' % base_name
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise
# E layer
try:
connection = engine.connect()
ins = layers_algorithms_table.insert().values(
layer_id=new_layer_id, fp_id=fp_id, metric_id=int(metrics_id),
layer='E', type='value', condition=e_condition,
# @modified 20170317 - Feature #1960: ionosphere_layers - allow for floats
# layer_boundary=int(e_boundary_limit),
layer_boundary=str(e_boundary_limit),
times_in_row=int(e_boundary_times))
result = connection.execute(ins)
connection.close()
new_layer_algorithm_id = result.inserted_primary_key[0]
logger.info('new ionosphere_algorithms E layer id: %s' % str(new_layer_algorithm_id))
new_layer_algorithm_ids.append(new_layer_algorithm_id)
layers_added.append('E')
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: failed to insert a new E layer record into the layers_algorithms table for %s' % base_name
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise
# @added 20170616 - Feature #2048: D1 ionosphere layer
# This must be the third created algorithm layer as in the frontend list
# D is [0], E is [1], so D1 has to be [2]
if d1_condition:
try:
connection = engine.connect()
ins = layers_algorithms_table.insert().values(
layer_id=new_layer_id, fp_id=fp_id, metric_id=int(metrics_id),
layer='D1', type='value', condition=d1_condition,
layer_boundary=str(d1_boundary_limit),
times_in_row=int(d1_boundary_times))
result = connection.execute(ins)
connection.close()
new_layer_algorithm_id = result.inserted_primary_key[0]
logger.info('new ionosphere_algorithms D1 layer id: %s' % str(new_layer_algorithm_id))
new_layer_algorithm_ids.append(new_layer_algorithm_id)
layers_added.append('D1')
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: failed to insert a new D1 layer record into the layers_algorithms table for %s' % base_name
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise
# Es layer
if es_layer:
try:
connection = engine.connect()
ins = layers_algorithms_table.insert().values(
layer_id=new_layer_id, fp_id=fp_id, metric_id=int(metrics_id),
layer='Es', type='day', condition='in', layer_boundary=es_day)
result = connection.execute(ins)
connection.close()
new_layer_algorithm_id = result.inserted_primary_key[0]
logger.info('new ionosphere_algorithms Es layer id: %s' % str(new_layer_algorithm_id))
new_layer_algorithm_ids.append(new_layer_algorithm_id)
layers_added.append('Es')
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: failed to insert a new Es layer record into the layers_algorithms table for %s' % base_name
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise
# F1 layer
if f1_layer:
try:
connection = engine.connect()
ins = layers_algorithms_table.insert().values(
layer_id=new_layer_id, fp_id=fp_id, metric_id=int(metrics_id),
layer='F1', type='time', condition='>',
layer_boundary=str(from_time))
result = connection.execute(ins)
connection.close()
new_layer_algorithm_id = result.inserted_primary_key[0]
logger.info('new ionosphere_algorithms F1 layer id: %s' % str(new_layer_algorithm_id))
new_layer_algorithm_ids.append(new_layer_algorithm_id)
layers_added.append('F1')
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: failed to insert a new F1 layer record into the layers_algorithms table for %s' % base_name
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise
# F2 layer
if f2_layer:
try:
connection = engine.connect()
ins = layers_algorithms_table.insert().values(
layer_id=new_layer_id, fp_id=fp_id, metric_id=int(metrics_id),
layer='F2', type='time', condition='<',
layer_boundary=str(until_time))
result = connection.execute(ins)
connection.close()
new_layer_algorithm_id = result.inserted_primary_key[0]
logger.info('new ionosphere_algorithms F2 layer id: %s' % str(new_layer_algorithm_id))
new_layer_algorithm_ids.append(new_layer_algorithm_id)
layers_added.append('F2')
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: failed to insert a new F2 layer record into the layers_algorithms table for %s' % base_name
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise
ionosphere_table = None
try:
ionosphere_table, fail_msg, trace = ionosphere_table_meta(skyline_app, engine)
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: failed to get ionosphere_table meta for options'
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise
logger.info('%s :: ionosphere_table OK' % function_str)
try:
connection = engine.connect()
connection.execute(
ionosphere_table.update(
ionosphere_table.c.id == fp_id).
values(layers_id=new_layer_id))
connection.close()
logger.info('updated layers_id for %s' % str(fp_id))
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: could not update layers_id for %s ' % str(fp_id)
logger.error(fail_msg)
# @added 20170806 - Bug #2130: MySQL - Aborted_clients
# Added missing disposal
if engine:
engine_disposal(engine)
raise
if engine:
engine_disposal(engine)
return new_layer_id, True, layers_added, new_layer_algorithm_ids, fail_msg, trace | 5,357,204 |
import numpy as np

def unitary_ifft2(y):
    """
    A unitary version of the ifft2 (assumes module-level grid dimensions ni, nj).
    """
    return np.fft.ifft2(y) * np.sqrt(ni * nj) | 5,357,205 |
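A small self-contained check of the unitarity claim, assuming ni and nj are module-level grid dimensions (set to 8 here for the sketch); the forward transform below is added only for the round-trip test and is not part of the original module.

import numpy as np

ni, nj = 8, 8  # assumed module-level grid dimensions

def unitary_fft2(x):
    # matching unitary forward transform (sketch)
    return np.fft.fft2(x) / np.sqrt(ni * nj)

def unitary_ifft2(y):
    # mirrors the function above
    return np.fft.ifft2(y) * np.sqrt(ni * nj)

y = np.random.randn(ni, nj) + 1j * np.random.randn(ni, nj)
x = unitary_ifft2(y)
# A unitary transform preserves the l2 norm and round-trips exactly.
assert np.isclose(np.linalg.norm(x), np.linalg.norm(y))
assert np.allclose(unitary_fft2(x), y)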
def orb_scf_input(sdmc):
""" find the scf inputs used to generate sdmc """
myinputs = None # this is the goal
sdep = 'dependencies' # string representation of the dependencies entry
# step 1: find the p2q simulation id
p2q_id = None
for key in sdmc[sdep].keys():
if sdmc[sdep][key].result_names[0] == 'orbitals':
p2q_id = key
# end if
# end for dep
# step 2: find the nscf simulation
    nscf_id_list = list(sdmc[sdep][p2q_id]['sim'][sdep].keys())  # list() so the view is indexable in Python 3
assert len(nscf_id_list) == 1
nscf_id = nscf_id_list[0]
nscf = sdmc[sdep][p2q_id]['sim'][sdep][nscf_id]
myinputs = nscf['sim']['input']
# step 3: find the scf simulation
calc = myinputs['control']['calculation']
if (calc=='scf'): # scf may actually be the scf simulation
pass # myinputs is already set
elif (calc=='nscf'): # if nscf is not the scf, then we need to go deeper
        scf_id = list(nscf['sim'][sdep].keys())[0]
scf = nscf['sim'][sdep][scf_id]
myinputs = scf['sim']['input'] # this is it!
scalc = myinputs['control']['calculation']
if scalc != 'scf':
            raise RuntimeError('nscf depends on %s instead of scf' % scalc)
# end if
else:
raise RuntimeError('unknown simulation type %s'%calc)
# end if
return myinputs.to_dict() | 5,357,206 |
def is_success(code):
""" Returns the expected response codes for HTTP GET requests
:param code: HTTP response codes
:type code: int
"""
if (200 <= code < 300) or code in [404, 500]:
return True
return False | 5,357,207 |
import h5py
import numpy as np
import pyBigWig
import tqdm

def create_hdf5(
    bigwig_paths, chrom_sizes_path, out_path, chunk_size, batch_size=100
):
"""
Creates an HDF5 file containing all BigWig tracks.
Arguments:
`bigwig_paths`: a list of pairs of paths, as returned by
`fetch_bigwig_paths`
`chrom_sizes_path`: path to canonical chromosome sizes
`out_path`: where to write the HDF5
`chunk_size`: chunk size to use in HDF5 along the chromosome size
dimension; this is recommended to be the expected size of the
queries made
`batch_size`: number of chunks to write at a time
This creates an HDF5 file, containing a dataset for each chromosome. Each
dataset will be a large array of shape L x 2T x 2, where L is the length of
the chromosome, T is the number of tasks (i.e. T experiment/cell lines, one
for each TF and one for matched control), 2 is for both strands. The HDF5
will also contain a dataset which has the paths to the corresponding source
BigWigs, stored as a 2T x 2 array of paths.
"""
bigwig_readers = [
[pyBigWig.open(path1), pyBigWig.open(path2)]
for path1, path2 in bigwig_paths
]
# Read in chromosome sizes
with open(chrom_sizes_path, "r") as f:
chrom_sizes = {}
for line in f:
tokens = line.strip().split("\t")
chrom_sizes[tokens[0]] = int(tokens[1])
# Convert batch size to be in terms of rows, not number of chunks
batch_size = batch_size * chunk_size
with h5py.File(out_path, "w") as f:
# Store source paths
f.create_dataset("bigwig_paths", data=np.array(bigwig_paths, dtype="S"))
for chrom in sorted(chrom_sizes.keys()):
chrom_size = chrom_sizes[chrom]
num_batches = int(np.ceil(chrom_size / batch_size))
chrom_dset = f.create_dataset(
chrom, (chrom_size, len(bigwig_paths), 2), dtype="f",
compression="gzip", chunks=(chunk_size, len(bigwig_paths), 2)
)
for i in tqdm.trange(num_batches, desc=chrom):
start = i * batch_size
end = min(chrom_size, (i + 1) * batch_size)
values = np.stack([
np.stack([
np.nan_to_num(reader1.values(chrom, start, end)),
np.nan_to_num(reader2.values(chrom, start, end))
], axis=1) for reader1, reader2 in bigwig_readers
], axis=1)
chrom_dset[start : end] = values | 5,357,208 |
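A hedged sketch of reading a window back out of an HDF5 file produced by create_hdf5; the output path and chromosome name are hypothetical.

import h5py

with h5py.File("tracks.h5", "r") as f:           # hypothetical output path
    paths = f["bigwig_paths"][:]                 # (num_pairs, 2) array of byte-string paths
    window = f["chr1"][1000000:1001000]          # (1000, num_pairs, 2) float array
    print(paths.shape, window.shape, window.max())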
async def asyncio(
*,
client: AuthenticatedClient,
json_body: SearchEventIn,
) -> Optional[Union[ErrorResponse, SearchEventOut]]:
"""Search Event
Dado um Trecho, uma lista de Grupos que resultam da pesquisa
por esse Trecho e um price token, atualiza os preços dos Grupos e o token.
Contabiliza visita (se passar na validação).
Args:
json_body (SearchEventIn):
Returns:
Response[Union[ErrorResponse, SearchEventOut]]
"""
return (
await asyncio_detailed(
client=client,
json_body=json_body,
)
).parsed | 5,357,209 |
def get_symmtrafo(newstruct_sub):
"""???
Parameters
----------
newstruct_sub : pymatgen structure
pymatgen structure of the bulk material
Returns
-------
trafo : ???
???
"""
sg = SpacegroupAnalyzer(newstruct_sub)
trr = sg.get_symmetry_dataset()
trafo = []
for index, op in enumerate(trr['rotations']):
if np.linalg.norm(np.array([0,0,-1]) - op[2]) < 0.0000001 and np.linalg.det(op) > 0 :
#print('transformation found' ,op, index, trr['translations'][index])
trafo ={'rot_frac': op.tolist(), 'trans_frac': trr['translations'][index].tolist() }
break
# Now we have the trafo (to be used on fractional coordinates)
if trafo == []:
for index, op in enumerate(trr['rotations']):
if np.linalg.norm(np.array([0,0,-1]) - op[2]) < 0.0000001:
#print('transformation found' ,op, index, trr['translations'][index])
trafo ={'rot_frac': op.tolist(), 'trans_frac': trr['translations'][index].tolist() }
break
return trafo | 5,357,210 |
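A sketch of how the returned operation might be applied to fractional coordinates, as the in-code comment suggests; it assumes a pymatgen Structure named newstruct_sub is in scope.

import numpy as np

trafo = get_symmtrafo(newstruct_sub)             # newstruct_sub: a pymatgen Structure (assumed)
rot = np.array(trafo['rot_frac'])
trans = np.array(trafo['trans_frac'])

frac = newstruct_sub.frac_coords                 # (n_sites, 3) fractional coordinates
transformed = (frac @ rot.T + trans) % 1.0       # x' = R x + t, wrapped back into the unit cell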
def convert_sentence_into_byte_sequence(words, tags, space_idx=32, other='O'):
""" Convert a list of words and their tags into a sequence of bytes, and
the corresponding tag of each byte.
"""
byte_list = []
tag_list = []
for word_index, (word, tag) in enumerate(zip(words, tags)):
tag_type = get_tag_type(tag)
if is_inside_tag(tag) and word_index > 0:
byte_list += [space_idx]
tag_list += [tag_type]
elif word_index > 0:
byte_list += [space_idx]
tag_list += [other]
b_seq = bytes(word, encoding='utf-8')
nbytes = len(b_seq)
byte_list += b_seq
tag_list += [tag_type] * nbytes
assert len(byte_list) == len(tag_list)
return byte_list, tag_list | 5,357,211 |
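A usage sketch; get_tag_type and is_inside_tag are not shown above, so the stand-ins below assume BIO-style tags ('B-PER'/'I-PER'/'O') and may differ from the real helpers.

def get_tag_type(tag):
    # stand-in: 'B-PER' / 'I-PER' -> 'PER', 'O' -> 'O'
    return tag.split('-', 1)[-1]

def is_inside_tag(tag):
    # stand-in: continuation tags start with 'I-'
    return tag.startswith('I-')

words = ['John', 'Smith', 'sings']
tags = ['B-PER', 'I-PER', 'O']
byte_list, tag_list = convert_sentence_into_byte_sequence(words, tags)
print(bytes(byte_list).decode('utf-8'))  # John Smith sings
print(tag_list)  # 'PER' over the name bytes (including its internal space), 'O' elsewhere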
def corr_cov(data, sample, xdata, xlabel='x', plabels=None, interpolation=None,
fname=None):
"""Correlation and covariance matrices.
Compute the covariance regarding YY and XY as well as the correlation
regarding YY.
:param array_like data: function evaluations (n_samples, n_features).
    :param array_like sample: sample (n_samples, n_features).
:param array_like xdata: 1D discretization of the function (n_features,).
:param str xlabel: label of the discretization parameter.
:param list(str) plabels: parameters' labels.
:param str interpolation: If None, does not interpolate correlation and
        covariance matrices (YY). Otherwise use Matplotlib methods from
`imshow` such as `['bilinear', 'lanczos', 'spline16', 'hermite', ...]`.
:param str fname: whether to export to filename or display the figures.
:returns: figure.
:rtype: Matplotlib figure instances, Matplotlib AxesSubplot instances.
"""
p_len = np.asarray(sample).shape[1]
data = ot.Sample(data)
corr_yy = np.array(data.computePearsonCorrelation())
cov_yy = np.array(data.computeCovariance())
cov_matrix_xy = np.dot((np.mean(sample) - sample).T,
np.mean(data, axis=0) - data) / (len(sample) - 1)
x_2d_yy, y_2d_yy = np.meshgrid(xdata, xdata)
x_2d_xy, y_2d_xy = np.meshgrid(xdata, np.arange(p_len))
c_map = cm.viridis
figures, axs = [], []
# Covariance matrix YY
fig, ax = plt.subplots()
figures.append(fig)
axs.append(ax)
cax = ax.imshow(cov_yy, cmap=c_map, interpolation=interpolation, origin='lower')
cbar = fig.colorbar(cax)
cbar.set_label(r"Covariance", size=26)
cbar.ax.tick_params(labelsize=23)
ax.set_xlabel(xlabel, fontsize=26)
ax.set_ylabel(xlabel, fontsize=26)
ax.tick_params(axis='x', labelsize=23)
ax.tick_params(axis='y', labelsize=23)
# Correlation matrix YY
    fig, ax = plt.subplots()
    figures.append(fig)
    axs.append(ax)
    cax = ax.imshow(corr_yy, cmap=c_map, interpolation=interpolation, origin='lower')
cbar = fig.colorbar(cax)
cbar.set_label(r"Correlation", size=26)
cbar.ax.tick_params(labelsize=23)
ax.set_xlabel(xlabel, fontsize=26)
ax.set_ylabel(xlabel, fontsize=26)
ax.tick_params(axis='x', labelsize=23)
ax.tick_params(axis='y', labelsize=23)
if plabels is None:
plabels = ['x' + str(i) for i in range(p_len + 1)]
else:
plabels.insert(0, 0)
# Covariance matrix XY
fig, ax = plt.subplots()
figures.append(fig)
axs.append(ax)
cax = ax.imshow(cov_matrix_xy, cmap=c_map, interpolation='nearest')
ax.set_yticklabels(plabels, fontsize=6)
cbar = fig.colorbar(cax)
cbar.set_label(r"Covariance", size=26)
cbar.ax.tick_params(labelsize=23)
ax.set_xlabel(xlabel, fontsize=26)
ax.set_ylabel('Input parameters', fontsize=26)
ax.tick_params(axis='x', labelsize=23)
ax.tick_params(axis='y', labelsize=23)
if fname is not None:
io = formater('json')
filename, _ = os.path.splitext(fname)
data = np.append(x_2d_yy, [y_2d_yy, corr_yy, cov_yy])
names = ['x', 'y', 'Correlation-YY', 'Covariance']
sizes = [np.size(x_2d_yy), np.size(y_2d_yy), np.size(corr_yy), np.size(cov_yy)]
io.write(filename + '-correlation_covariance.json', data, names, sizes)
data = np.append(x_2d_xy, [y_2d_xy, cov_matrix_xy])
names = ['x', 'y', 'Correlation-XY']
sizes = [np.size(x_2d_xy), np.size(y_2d_xy), np.size(cov_matrix_xy)]
io.write(filename + '-correlation_XY.json', data, names, sizes)
bat.visualization.save_show(fname, figures)
return figures, axs | 5,357,212 |
def get_number_rows(ai_settings, ship_height, alien_height):
"""Determina o numero de linhas com alienigenas que cabem na tela."""
available_space_y = (ai_settings.screen_height -
(3 * alien_height) - ship_height)
number_rows = int(available_space_y / (2 * alien_height))
return number_rows | 5,357,213 |
def generate_trial_betas(bc, bh, bcrange, bhrange, step_multiplier, random_state_debug_value=None):
"""Generate trial beta values for an MC move. Move sizes are scaled by the 'step_multiplier' argument,
and individually by the 'bcrange' or 'bhrange' arguments for the beta_c and beta_h values respectively.
Negative beta values are not allowed; moves resulting in negative values will be resampled.
Requires current values of beta_c and beta_h, bcrange, bhrange, and step_multiplier.
Usage: generate_trial_betas(bc, bh, bcrange, bhrange, step_multiplier)
Returns: trial_bc, trial_bh
"""
# Data cleanup, just in case
try:
assert bc >= 0
except AssertionError:
print("Warning: Negative value of beta_C detected in MC sampling. Resetting to 0")
bc = 0
try:
assert bh >= 0
except AssertionError:
print("Warning: Negative value of beta_H detected in MC sampling. Resetting to 0")
bh = 0
# Make move in betas scaled by step size and desired 'range' of sampling. -ve beta values are not allowed
trial_bv_bh, trial_bv_bc = -1, -1
# Note that this regenerates the numpy random state from /dev/urandom
# or the clock if random_state_debug_value is not set
state = np.random.RandomState(random_state_debug_value)
while trial_bv_bh < 0:
trial_bv_bh = bh + ((state.random_sample()) - 0.5) \
* step_multiplier * bhrange
while trial_bv_bc < 0:
trial_bv_bc = bc + ((state.random_sample()) - 0.5) \
* step_multiplier * bcrange
return trial_bv_bc, trial_bv_bh | 5,357,214 |
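A usage sketch; the current beta values and move ranges are illustrative, and numpy must be importable as np for the function above to run.

import numpy as np  # required by generate_trial_betas

trial_bc, trial_bh = generate_trial_betas(
    bc=0.5, bh=1.2, bcrange=0.2, bhrange=0.4, step_multiplier=1.0)
assert trial_bc >= 0 and trial_bh >= 0   # negative proposals are resampled internally
print(trial_bc, trial_bh)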
def map_ref_sites(routed: xr.Dataset, gauge_reference: xr.Dataset,
gauge_sites=None, route_var='IRFroutedRunoff',
fill_method='r2', min_kge=-0.41):
"""
Assigns segs within routed boolean 'is_gauge' "identifiers" and
what each seg's upstream and downstream reference seg designations are.
Parameters
----------
routed: xr.Dataset
Contains the input flow timeseries data.
gauge_reference: xr.Dataset
Contains reference flow timeseries data for the same watershed
as the routed dataset.
gauge_sites: list, optional
If None, gauge_sites will be taken as all those listed in
gauge_reference.
route_var: str
Variable name of flows used for fill_method purposes within routed.
This is defaulted as 'IRFroutedRunoff'.
fill_method: str
While finding some upstream/downstream reference segs may be simple,
(segs with 'is_gauge' = True are their own reference segs, others
may be easy to find looking directly up or downstream), some river
networks may have multiple options to select gauge sites and may fail
to have upstream/downstream reference segs designated. 'fill_method'
specifies how segs should be assigned upstream/downstream reference
segs for bias correction if they are missed walking upstream or downstream.
Currently supported methods:
'leave_null'
nothing is done to fill missing reference segs, np.nan values are
replaced with a -1 seg designation and that's it
'forward_fill'
xarray's ffill method is used to fill in any np.nan values
'r2'
reference segs are selected based on which reference site that
seg's flows has the greatest r2 value with
'kldiv'
reference segs are selected based on which reference site that
seg's flows has the smallest KL Divergence value with
'kge'
reference segs are selected based on which reference site that
seg's flows has the greatest KGE value with
Returns
-------
routed: xr.Dataset
        Routed timeseries with reference gauge site river segments assigned to
        each river segment in the original routed.
"""
if isinstance(gauge_sites, type(None)):
gauge_sites = gauge_reference['site'].values
else:
# need to typecheck since we do a for loop later and don't
# want to end up iterating through a string by accident
assert isinstance(gauge_sites, list)
gauge_segs = gauge_reference.sel(site=gauge_sites)['seg'].values
routed['is_gauge'] = False * routed['seg']
routed['down_ref_seg'] = np.nan * routed['seg']
routed['up_ref_seg'] = np.nan * routed['seg']
routed['up_seg'] = 0 * routed['is_headwaters']
routed['up_seg'].values = [find_up(routed, s, sel_method=fill_method) for s in routed['seg'].values]
for s in routed['seg']:
if s in list(gauge_segs):
routed['is_gauge'].loc[{'seg':s}] = True
routed['down_ref_seg'].loc[{'seg': s}] = s
routed['up_ref_seg'].loc[{'seg': s}] = s
for seg in routed['seg']:
cur_seg = seg.values[()]
while cur_seg in routed['seg'].values and np.isnan(routed['down_ref_seg'].sel(seg=cur_seg)):
cur_seg = routed['down_seg'].sel(seg=cur_seg).values[()]
if cur_seg in routed['seg'].values:
routed['down_ref_seg'].loc[{'seg':seg}] = routed['down_ref_seg'].sel(seg=cur_seg).values[()]
for seg in routed['seg']:
cur_seg = seg.values[()]
while cur_seg in routed['seg'].values and np.isnan(routed['up_ref_seg'].sel(seg=cur_seg)):
cur_seg = routed['up_seg'].sel(seg=cur_seg).values[()]
if cur_seg in routed['seg'].values:
routed['up_ref_seg'].loc[{'seg':seg}] = routed['up_ref_seg'].sel(seg=cur_seg).values[()]
# Fill in any remaining nulls (head/tailwaters)
if fill_method == 'leave_null':
# since there should be no -1 segs from mizuroute, we can set nan's to -1 to acknowledge
# that they have been addressed and still set them apart from the rest of the data
routed['up_ref_seg'] = (routed['up_ref_seg'].where(~np.isnan(routed['up_ref_seg']), other=-1))
routed['down_ref_seg'] = (routed['down_ref_seg'].where(~np.isnan(routed['down_ref_seg']), other=-1))
elif fill_method == 'forward_fill':
routed['up_ref_seg'] = (routed['up_ref_seg'].where(
~np.isnan(routed['up_ref_seg']), other=routed['down_ref_seg'])).ffill('seg')
routed['down_ref_seg'] = (routed['down_ref_seg'].where(
~np.isnan(routed['down_ref_seg']), other=routed['up_ref_seg'])).ffill('seg')
elif fill_method == 'r2':
fill_up_isegs = np.where(np.isnan(routed['up_ref_seg'].values))[0]
fill_down_isegs = np.where(np.isnan(routed['down_ref_seg'].values))[0]
routed['r2_up_gauge'] = 0 * routed['is_gauge']
routed['r2_down_gauge'] = 0 * routed['is_gauge']
for curr_seg in routed['seg'].values:
up_ref_seg = np.nan
curr_seg_flow = routed[route_var].sel(seg=curr_seg).values
if np.isnan(routed['up_ref_seg'].sel(seg=curr_seg).values):
up_ref_r2, up_ref_seg = find_max_r2(routed[route_var].sel(seg=gauge_segs), curr_seg_flow)
routed['r2_up_gauge'].loc[{'seg':curr_seg}] = up_ref_r2
routed['up_ref_seg'].loc[{'seg':curr_seg}] = up_ref_seg
else:
# this seg has already been filled in, but r2 still needs to be calculated
ref_flow = routed[route_var].sel(seg=routed['up_ref_seg'].sel(seg=curr_seg)).values
up_ref_r2 = np.corrcoef(curr_seg_flow, ref_flow)[0, 1]**2
routed['r2_up_gauge'].loc[{'seg':curr_seg}] = up_ref_r2
for curr_seg in routed['seg'].values:
down_ref_seg = np.nan
curr_seg_flow = routed[route_var].sel(seg=curr_seg).values
if np.isnan(routed['down_ref_seg'].sel(seg=curr_seg).values):
down_ref_r2, down_ref_seg = find_max_r2(routed[route_var].sel(seg=gauge_segs), curr_seg_flow)
routed['r2_down_gauge'].loc[{'seg':curr_seg}] = down_ref_r2
routed['down_ref_seg'].loc[{'seg':curr_seg}] = down_ref_seg
else:
# this seg has already been filled in, but r2 still needs to be calculated
ref_flow = routed[route_var].sel(seg=routed['down_ref_seg'].sel(seg=curr_seg)).values
down_ref_r2 = np.corrcoef(curr_seg_flow, ref_flow)[0, 1]**2
routed['r2_down_gauge'].loc[{'seg':curr_seg}] = down_ref_r2
elif fill_method == 'kldiv':
fill_up_isegs = np.where(np.isnan(routed['up_ref_seg'].values))[0]
fill_down_isegs = np.where(np.isnan(routed['down_ref_seg'].values))[0]
routed['kldiv_up_gauge'] = 0 * routed['is_gauge']
routed['kldiv_down_gauge'] = 0 * routed['is_gauge']
for curr_seg in routed['seg'].values:
curr_seg_flow = routed[route_var].sel(seg=curr_seg).values
if np.isnan(routed['up_ref_seg'].sel(seg=curr_seg).values):
up_ref_kldiv, up_ref_seg = find_min_kldiv(routed[route_var].sel(seg=gauge_segs), curr_seg_flow)
routed['kldiv_up_gauge'].loc[{'seg':curr_seg}] = up_ref_kldiv
routed['up_ref_seg'].loc[{'seg':curr_seg}] = up_ref_seg
else:
# this seg has already been filled in, but kldiv still needs to be calculated
                # kldiv computation could probably be gutted in the future ...
TINY_VAL = 1e-6
total_bins = int(np.sqrt(len(curr_seg_flow)))
curr_seg_flow_pdf, curr_seg_flow_edges = np.histogram(
curr_seg_flow, bins=total_bins, density=True)
curr_seg_flow_pdf[curr_seg_flow_pdf == 0] = TINY_VAL
ref_flow = routed[route_var].sel(seg=routed['up_ref_seg'].sel(seg=curr_seg).values).values
ref_flow_pdf = np.histogram(ref_flow, bins=curr_seg_flow_edges, density=True)[0]
ref_flow_pdf[ref_flow_pdf == 0] = TINY_VAL
up_ref_kldiv = entropy(pk=ref_flow_pdf, qk=curr_seg_flow_pdf)
routed['kldiv_up_gauge'].loc[{'seg':curr_seg}] = up_ref_kldiv
for curr_seg in routed['seg'].values:
curr_seg_flow = routed[route_var].sel(seg=curr_seg).values
if np.isnan(routed['down_ref_seg'].sel(seg=curr_seg).values):
down_ref_kldiv, down_ref_seg = find_min_kldiv(routed[route_var].sel(seg=gauge_segs), curr_seg_flow)
routed['kldiv_down_gauge'].loc[{'seg':curr_seg}] = down_ref_kldiv
routed['down_ref_seg'].loc[{'seg':curr_seg}] = down_ref_seg
else:
# this seg has already been filled in, but kldiv still needs to be calculated
                # kldiv computation could probably be gutted in the future ...
TINY_VAL = 1e-6
total_bins = int(np.sqrt(len(curr_seg_flow)))
curr_seg_flow_pdf, curr_seg_flow_edges = np.histogram(
curr_seg_flow, bins=total_bins, density=True)
curr_seg_flow_pdf[curr_seg_flow_pdf == 0] = TINY_VAL
ref_flow = routed[route_var].sel(seg=routed['down_ref_seg'].sel(seg=curr_seg).values).values
ref_flow_pdf = np.histogram(ref_flow, bins=curr_seg_flow_edges, density=True)[0]
ref_flow_pdf[ref_flow_pdf == 0] = TINY_VAL
down_ref_kldiv = entropy(pk=ref_flow_pdf, qk=curr_seg_flow_pdf)
routed['kldiv_down_gauge'].loc[{'seg':curr_seg}] = down_ref_kldiv
elif fill_method == 'kge':
fill_up_isegs = np.where(np.isnan(routed['up_ref_seg'].values))[0]
fill_down_isegs = np.where(np.isnan(routed['down_ref_seg'].values))[0]
routed['kge_up_gauge'] = min_kge + 0.0 * routed['is_gauge']
routed['kge_down_gauge'] = min_kge + 0.0 * routed['is_gauge']
for curr_seg in routed['seg'].values:
up_ref_seg = np.nan
curr_seg_flow = routed[route_var].sel(seg=curr_seg).values
if np.isnan(routed['up_ref_seg'].sel(seg=curr_seg).values):
up_ref_kge, up_ref_seg = find_max_kge(routed[route_var].sel(seg=gauge_segs), curr_seg_flow)
routed['kge_up_gauge'].loc[{'seg':curr_seg}] = up_ref_kge
routed['up_ref_seg'].loc[{'seg':curr_seg}] = up_ref_seg
else:
# this seg has already been filled in, but kge still needs to be calculated
ref_flow = routed[route_var].sel(seg=routed['up_ref_seg'].sel(seg=curr_seg)).values
up_ref_kge = kling_gupta_efficiency(curr_seg_flow, ref_flow)
routed['kge_up_gauge'].loc[{'seg':curr_seg}] = up_ref_kge
for curr_seg in routed['seg'].values:
down_ref_seg = np.nan
curr_seg_flow = routed[route_var].sel(seg=curr_seg).values
if np.isnan(routed['down_ref_seg'].sel(seg=curr_seg).values):
down_ref_kge, down_ref_seg = find_max_kge(routed[route_var].sel(seg=gauge_segs), curr_seg_flow)
routed['kge_down_gauge'].loc[{'seg':curr_seg}] = down_ref_kge
routed['down_ref_seg'].loc[{'seg':curr_seg}] = down_ref_seg
else:
# this seg has already been filled in, but kge still needs to be calculated
ref_flow = routed[route_var].sel(seg=routed['down_ref_seg'].sel(seg=curr_seg)).values
down_ref_kge = kling_gupta_efficiency(curr_seg_flow, ref_flow)
if down_ref_kge < min_kge:
down_ref_kge, down_ref_seg = find_max_kge(routed[route_var].sel(seg=gauge_segs), curr_seg_flow)
routed['kge_down_gauge'].loc[{'seg':curr_seg}] = down_ref_kge
else:
raise ValueError('Invalid method provided for "fill_method"')
return routed | 5,357,215 |
def get_error(est_track, true_track):
"""
"""
if est_track.ndim > 1:
true_track = true_track.reshape((true_track.shape[0],1))
error = np.recarray(shape=est_track.shape,
dtype=[('position', float),
('orientation', float),
('orientation_weighted', float)])
# Position error
pos_err = (true_track.x - est_track.x)**2 + (true_track.y - est_track.y)**2
error.position = np.sqrt(pos_err)
# Orientation error
error.orientation = anglediff(true_track.angle, est_track.angle, units='deg')
error.orientation_weighted = anglediff(true_track.angle, est_track.angle_w, units='deg')
descr = {}
bix = np.logical_not(np.isnan(error.orientation))
descr['orientation_median'] = np.median(np.abs(error.orientation[bix]))
descr['orientation_mean'] = np.mean(np.abs(error.orientation[bix]))
bix = np.logical_not(np.isnan(error.orientation_weighted))
descr['orientation_weighted_median'] = np.nanmedian(np.abs(error.orientation_weighted[bix]))
descr['orientation_weighted_mean'] = np.nanmean(np.abs(error.orientation_weighted[bix]))
# no angle
true_no_angle = np.isnan(true_track.angle)
est_no_angle = np.isnan(est_track.angle)
agree = np.logical_and(true_no_angle, est_no_angle)
disagree = np.logical_xor(true_no_angle, est_no_angle)
both = np.logical_or(true_no_angle, est_no_angle)
#ipdb.set_trace()
descr['no_angle_auc'] = roc_auc_score(true_no_angle, est_no_angle)
descr['no_angle_mcc'] = matthews_corrcoef(true_no_angle, est_no_angle)
descr['no_angle_brier'] = brier_score_loss(true_no_angle, est_no_angle)
descr['no_angle_acc'] = agree.sum()/both.sum()
descr['no_angle_p_per_frame'] = disagree.sum()/disagree.shape[0]
descr['position_median'] = np.median(error.position)
descr['position_mean'] = np.mean(error.position)
#print('True frequency of angle-does-not-apply:',
# true_no_angle.sum()/true_no_angle.shape[0])
#print('Estimated frequency of angle-does-not-apply:',
# est_no_angle.sum()/est_no_angle.shape[0])
return error, descr | 5,357,216 |
def test_conflict():
"""
Tiles that have extras that conflict with indices should produce an error.
"""
def tile_extras_provider(hyb: int, ch: int, z: int) -> Any:
return {
Indices.HYB: hyb,
Indices.CH: ch,
Indices.Z: z,
}
stack = synthetic_stack(
tile_extras_provider=tile_extras_provider,
)
with pytest.raises(ValueError):
stack.tile_metadata | 5,357,217 |
def get_node_to_srn_mapping(match_config_filename):
"""
Returns the node-to-srn map from match_conf.json
"""
with open(match_config_filename) as config_file:
config_json = json.loads(config_file.read())
if "node_to_srn_mapping" in config_json:
return config_json["node_to_srn_mapping"]
else:
node_to_srn = {}
for node_info in config_json["NodeData"]:
node_id = node_info["TrafficNode"]
srn_num = node_info["srn_number"]
node_to_srn[node_id] = srn_num
return node_to_srn | 5,357,218 |
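A small self-contained example of the "NodeData" form handled above; the config content is illustrative.

import json
import tempfile

config = {"NodeData": [{"TrafficNode": 1, "srn_number": 101},
                       {"TrafficNode": 2, "srn_number": 102}]}
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
    json.dump(config, f)
print(get_node_to_srn_mapping(f.name))   # {1: 101, 2: 102}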
def write_cache(cache: Cache, sources: Iterable[Path], mode: Mode) -> None:
"""Update the cache file."""
cache_file = get_cache_file(mode)
try:
CACHE_DIR.mkdir(parents=True, exist_ok=True)
new_cache = {**cache, **{src.resolve(): get_cache_info(src) for src in sources}}
with tempfile.NamedTemporaryFile(dir=str(cache_file.parent), delete=False) as f:
pickle.dump(new_cache, f, protocol=4)
os.replace(f.name, cache_file)
except OSError:
pass | 5,357,219 |
def interpolate(values, color_map=None, dtype=np.uint8):
"""
Given a 1D list of values, return interpolated colors
for the range.
Parameters
---------------
values : (n, ) float
Values to be interpolated over
color_map : None, or str
Key to a colormap contained in:
matplotlib.pyplot.colormaps()
e.g: 'viridis'
Returns
-------------
interpolated : (n, 4) dtype
Interpolated RGBA colors
"""
# get a color interpolation function
if color_map is None:
cmap = linear_color_map
else:
from matplotlib.pyplot import get_cmap
cmap = get_cmap(color_map)
# make input always float
values = np.asanyarray(values, dtype=np.float64).ravel()
# scale values to 0.0 - 1.0 and get colors
colors = cmap((values - values.min()) / values.ptp())
# convert to 0-255 RGBA
rgba = to_rgba(colors, dtype=dtype)
return rgba | 5,357,220 |
def nest_dictionary(flat_dict, separator):
""" Nests a given flat dictionary.
Nested keys are created by splitting given keys around the `separator`.
"""
nested_dict = {}
for key, val in flat_dict.items():
split_key = key.split(separator)
act_dict = nested_dict
final_key = split_key.pop()
for new_key in split_key:
            if new_key not in act_dict:
act_dict[new_key] = {}
act_dict = act_dict[new_key]
act_dict[final_key] = val
return nested_dict | 5,357,221 |
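A quick self-contained example of the nesting behaviour:

flat = {"model.name": "resnet", "model.depth": 50, "optimizer.lr": 0.1}
print(nest_dictionary(flat, "."))
# {'model': {'name': 'resnet', 'depth': 50}, 'optimizer': {'lr': 0.1}}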
def test_normal_df(test_dataframe, expected_test_return):
"""
Test.
"""
pd.testing.assert_frame_equal(sample_row(test_dataframe), expected_test_return) | 5,357,222 |
def test_classifier_gait_example():
"""Verify that the gait classifier can be packaged and used."""
perform_capsule_tests(
Path("vcap", "examples", "classifier_gait_example"),
ALL_IMAGE_PATHS) | 5,357,223 |
def write(path_, *write_):
"""Overwrites file with passed data. Data can be a string, number or boolean type. Returns True, None if writing operation was successful, False and reason message otherwise."""
return _writeOrAppend(False, path_, *write_) | 5,357,224 |
def annotate(wsdjar: Binary = Binary("[wsd.jar]"),
sense_model: Model = Model("[wsd.sense_model]"),
context_model: Model = Model("[wsd.context_model]"),
out: Output = Output("<token>:wsd.sense", cls="token:sense",
description="Sense disambiguated SALDO identifiers"),
sentence: Annotation = Annotation("<sentence>"),
word: Annotation = Annotation("<token:word>"),
ref: Annotation = Annotation("<token>:misc.number_rel_<sentence>"),
lemgram: Annotation = Annotation("<token>:saldo.lemgram"),
saldo: Annotation = Annotation("<token>:saldo.sense"),
pos: Annotation = Annotation("<token:pos>"),
token: Annotation = Annotation("<token>"),
prob_format: str = Config("wsd.prob_format"),
default_prob: float = Config("wsd.default_prob"),
encoding: str = util.UTF8):
"""Run the word sense disambiguation tool (saldowsd.jar) to add probabilities to the saldo annotation.
Unanalyzed senses (e.g. multiword expressions) receive the probability value given by default_prob.
- wsdjar is the name of the java programme to be used for the wsd
- sense_model and context_model are the models to be used with wsdjar
- out is the resulting annotation file
- sentence is an existing annotation for sentences and their children (words)
- word is an existing annotations for wordforms
- ref is an existing annotation for word references
- lemgram and saldo are existing annotations for inflection tables and meanings
- pos is an existing annotations for part-of-speech
- prob_format is a format string for how to print the sense probability
- default_prob is the default value for unanalyzed senses
"""
word_annotation = list(word.read())
ref_annotation = list(ref.read())
lemgram_annotation = list(lemgram.read())
saldo_annotation = list(saldo.read())
pos_annotation = list(pos.read())
sentences, orphans = sentence.get_children(token)
sentences.append(orphans)
# Start WSD process
process = wsd_start(wsdjar, sense_model.path, context_model.path, encoding)
# Construct input and send to WSD
stdin = build_input(sentences, word_annotation, ref_annotation, lemgram_annotation, saldo_annotation,
pos_annotation)
if encoding:
stdin = stdin.encode(encoding)
stdout, stderr = process.communicate(stdin)
# TODO: Solve hack line below!
# Problem is that regular messages "Reading sense vectors.." are also piped to stderr.
if len(stderr) > 52:
util.system.kill_process(process)
log.error(str(stderr))
return
if encoding:
stdout = stdout.decode(encoding)
process_output(word, out, stdout, sentences, saldo_annotation, prob_format, default_prob)
# Kill running subprocess
util.system.kill_process(process)
return | 5,357,225 |
def get_all(url):
"""A wrapper for `get_many()`: a generator getting and iterating through all results"""
data = get_many(url, limit=50)
yield from data['data']
while 'paging' in data and 'next' in data['paging']:
data = get_many(data['paging']['next'])
yield from data['data'] | 5,357,226 |
def prepare_saab_data(sequence):
"""
Processing data after anarci parsing.
Preparing data for SAAB+
------------
Parameters
sequence - sequence object ( OAS database format )
------------
Return
sequence.Sequence - full (not-numbered) antibody sequence
oas_output_parser(Numbered) - antibody sequence that is imgt numbered
to comply with SAAB+ input format
sequence_info_dict - Dictionary that contains sequence metadata
                        which is required for SAAB+ to run
"""
cdr3sequence = sequence.CDRH3
VGene = sequence.VGene[:5]
Numbered = json.loads( sequence.Numbered )
CDRs = [ loop for loop in Numbered.keys() if "cdr" in loop ]
sequence_info_dict = { formatLoops[loop] : Numbered[loop] if "3" not in loop else cdr3sequence
for loop in CDRs }
sequence_info_dict["V"] = VGene
sequence_info_dict["Redundancy"] = find_redundancy( sequence.Redundancy )
return sequence_obj( sequence.Sequence, oas_output_parser(Numbered), sequence_info_dict ) | 5,357,227 |
def covariance_align(data):
"""Covariance align continuous or windowed data in-place.
Parameters
----------
data: np.ndarray (n_channels, n_times) or (n_windows, n_channels, n_times)
continuous or windowed signal
Returns
-------
aligned: np.ndarray (n_channels x n_times) or (n_windows x n_channels x
n_times)
aligned continuous or windowed data
..note:
If this function is supposed to preprocess continuous data, it should be
given to raw.apply_function().
"""
aligned = data.copy()
if len(data.shape)==3:
for i_window in range(aligned.shape[0]):
covar = np.cov(aligned[i_window])
proj = pinv(sqrtm(covar))
aligned[i_window] = np.matmul(proj, aligned[i_window])
elif len(data.shape)==2:
covar = np.cov(aligned)
proj = pinv(sqrtm(covar))
aligned = np.matmul(proj, aligned)
# TODO: the overriding of protected '_data' should be implemented in the
# TODO: dataset when transforms are applied to windows
if hasattr(data, '_data'):
data._data = aligned
return aligned | 5,357,228 |
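A verification sketch with synthetic windowed data; it assumes the function above resolves pinv and sqrtm from numpy.linalg and scipy.linalg respectively, which is an assumption since the original imports are not shown.

import numpy as np
from numpy.linalg import pinv      # assumed source of pinv used above
from scipy.linalg import sqrtm     # assumed source of sqrtm used above

rng = np.random.default_rng(0)
windows = rng.standard_normal((5, 4, 200))   # 5 windows, 4 channels, 200 samples
aligned = covariance_align(windows)
# After alignment, each window's channel covariance is (numerically) the identity.
print(np.allclose(np.cov(aligned[0]), np.eye(4), atol=1e-6))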
def test_scope_dunder_methods(sample_scope_object):
"""Tests dunder methods for a scope object."""
assert repr(sample_scope_object) == "Scope(arg='kw', base='select')"
assert str(sample_scope_object) == "Scope(arg='kw', base='select', included=False)"
assert not bool(sample_scope_object) | 5,357,229 |
def test_theme_eq():
"""Test Theme.__eq__"""
theme1 = Theme(
name="test", description="Test theme", styles={"test": Style(color="red")}
)
theme2 = Theme(
name="test", description="Test theme", styles={"test": Style(color="red")}
)
assert theme1 == theme2
theme3 = Theme(
name="test", description="Test theme", styles={"test": Style(color="blue")}
)
assert theme1 != theme3
assert theme1 != "foo" | 5,357,230 |
def _is_equidistant(array: np.ndarray) -> bool:
"""
Check if the given 1D array is equidistant. E.g. the
distance between all elements of the array should be equal.
:param array: The array that should be equidistant
"""
step = abs(array[1] - array[0])
for i in range(0, len(array) - 1):
curr_step = abs(array[i + 1] - array[i])
if not math.isclose(curr_step, step, rel_tol=1e-3):
return False
return True | 5,357,231 |
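Two quick checks of the helper, assuming numpy and the math import used above are available:

import numpy as np

print(_is_equidistant(np.linspace(0.0, 1.0, 11)))        # True  (constant step of 0.1)
print(_is_equidistant(np.array([0.0, 0.1, 0.25, 0.5])))  # False (steps 0.1, 0.15, 0.25)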
def list_a_minus_b(list1, list2):
    """Given two lists, A and B, returns A-B (the elements of A not in B) as a list."""
    return [x for x in list1 if x not in list2] | 5,357,232 |
def is_equivalent(a, b):
"""Compares two strings and returns whether they are the same R code
This is unable to determine if a and b are different code, however. If this returns True you may assume that they
are the same, but if this returns False you must not assume that they are different.
is_equivalent("0 + 1", "1") is False, for example, even though those two commands do the same thing.
"""
# String pointers
ap = 0
bp = 0
ps = 0
an_comp = False
while ap < len(a) and bp < len(b):
# If none of the current chars are alphanumeric or the last character match is not alphanumeric then skip
# whitespace forward
if (a[ap] not in _an and b[bp] not in _an) or not an_comp:
while ap < len(a) and a[ap] in _ws and not _is_a_number(a, ap):
ap += 1
while bp < len(b) and b[bp] in _ws and not _is_a_number(b, bp):
bp += 1
if ap >= len(a) or bp >= len(b):
# Reached end of string
break
an_comp = False
if a[ap] != b[bp]:
# They must be equal
# print("Failed {}:{} / {}:{}".format(a, ap, b, bp))
return False
if a[ap] in _an:
# This is comparing two alphanumeric values
an_comp = True
if a[ap] in _quotes:
opener = a[ap]
# String; must match exactly
ap += 1
bp += 1
while ap < len(a) and bp < len(b) and a[ap] == b[bp]:
if a[ap] == opener and a[ap-1] not in _esc:
break
ap += 1
bp += 1
else:
# print("Failed {}:{} / {}:{} in string".format(a, ap, b, bp))
return False
ap += 1
bp += 1
# Clean up ending whitespace
while ap < len(a) and a[ap] in _ws:
ap += 1
while bp < len(b) and b[bp] in _ws:
bp += 1
if ap >= len(a) and bp >= len(b):
return True
else:
return False | 5,357,233 |
def solve(lines, n):
"""Solve the problem."""
grid = Grid(lines)
for _ in range(n):
grid.step()
return grid.new_infections | 5,357,234 |
def get_ingredient_id():
"""Need to get ingredient ID in order to access all attributes"""
query = request.args["text"]
resp = requests.get(f"{BASE_URL_SP}/food/ingredients/search?", params={"apiKey":APP_KEY,"query":query})
res = resp.json()
lst = {res['results'][i]["name"]:res['results'][i]["id"] for i in range(len(res['results']))}
return jsonify(lst) | 5,357,235 |
def xticks(ticks=None, labels=None, **kwargs):
"""
Get or set the current tick locations and labels of the x-axis.
Call signatures::
locs, labels = xticks() # Get locations and labels
xticks(ticks, [labels], **kwargs) # Set locations and labels
Parameters
----------
ticks : array_like
A list of positions at which ticks should be placed. You can pass an
empty list to disable xticks.
labels : array_like, optional
A list of explicit labels to place at the given *locs*.
**kwargs
:class:`.Text` properties can be used to control the appearance of
the labels.
Returns
-------
locs
An array of label locations.
labels
A list of `.Text` objects.
Notes
-----
Calling this function with no arguments (e.g. ``xticks()``) is the pyplot
equivalent of calling `~.Axes.get_xticks` and `~.Axes.get_xticklabels` on
the current axes.
Calling this function with arguments is the pyplot equivalent of calling
`~.Axes.set_xticks` and `~.Axes.set_xticklabels` on the current axes.
Examples
--------
Get the current locations and labels:
>>> locs, labels = xticks()
Set label locations:
>>> xticks(np.arange(0, 1, step=0.2))
Set text labels:
>>> xticks(np.arange(5), ('Tom', 'Dick', 'Harry', 'Sally', 'Sue'))
Set text labels and properties:
>>> xticks(np.arange(12), calendar.month_name[1:13], rotation=20)
Disable xticks:
>>> xticks([])
"""
ax = gca()
if ticks is None and labels is None:
locs = ax.get_xticks()
labels = ax.get_xticklabels()
elif labels is None:
locs = ax.set_xticks(ticks)
labels = ax.get_xticklabels()
else:
locs = ax.set_xticks(ticks)
labels = ax.set_xticklabels(labels, **kwargs)
for l in labels:
l.update(kwargs)
return locs, silent_list('Text xticklabel', labels) | 5,357,236 |
def main() -> None:
"""
Program entry point.
:return: Nothing
"""
    connection = None
    try:
        connection = connect_to_db2()
        kwargs = {'year_to_schedule': 2018}
        start = timer()
        result = run(connection, **kwargs)
        output_results(result, connection)
        end = timer()
        print(f'time elapsed: {end - start}')
        connection.close()
    except Exception as e:
        print(f'Something broke ...\n\tReason:{str(e)}')
        if connection is not None:
            connection.close()
        exit(1)
return None | 5,357,237 |
def _generate_cfg_dir(cfg_dir: Path = None) -> Path:
"""Make sure there is a working directory.
Args:
cfg_dir: If cfg dir is None or does not exist then create sub-directory
in CFG['output_dir']
"""
if cfg_dir is None:
scratch_dir = CFG["output_dir"]
        # TODO this timestamp is not safe for parallel processing
timestamp = datetime.datetime.now(datetime.timezone.utc).strftime(
"%Y%m%d_%H%M%S"
)
cfg_dir = to_absolute_path(f"marrmot_{timestamp}", parent=Path(scratch_dir))
cfg_dir.mkdir(parents=True, exist_ok=True)
return cfg_dir | 5,357,238 |
def p_identifier_list_singleton(p: yacc.YaccProduction) -> yacc.YaccProduction:
"""
identifier_list : IDENTIFIER
"""
p[0] = [p[1]] | 5,357,239 |
def test_get_novelty_mask():
"""Test `get_novelty_mask()`."""
num_triples = 7
base = torch.arange(num_triples)
mapped_triples = torch.stack([base, base, 3 * base], dim=-1)
query_ids = torch.randperm(num_triples).numpy()[:num_triples // 2]
exp_novel = query_ids != 0
col = 2
other_col_ids = numpy.asarray([0, 0])
mask = get_novelty_mask(
mapped_triples=mapped_triples,
query_ids=query_ids,
col=col,
other_col_ids=other_col_ids,
)
assert mask.shape == query_ids.shape
assert (mask == exp_novel).all() | 5,357,240 |
def test_md041_good_heading_top_level_setext():
"""
Test to make sure this rule does not trigger with a document that
contains a good top level setext heading.
"""
# Arrange
scanner = MarkdownScanner()
supplied_arguments = [
"scan",
"test/resources/rules/md041/good_heading_top_level_setext.md",
]
expected_return_code = 0
expected_output = ""
expected_error = ""
# Act
execute_results = scanner.invoke_main(
arguments=supplied_arguments, suppress_first_line_heading_rule=False
)
# Assert
execute_results.assert_results(
expected_output, expected_error, expected_return_code
) | 5,357,241 |
def delete(home_id):
"""
    Delete a home record by its id.
---
"""
try:
return custom_response({"message":"deleted", "id":home_id}, 200)
except Exception as error:
return custom_response(str(error), 500) | 5,357,242 |
def round_to_sigfigs(x, sigfigs=1):
"""
>>> round_to_sigfigs(12345.6789, 7) # doctest: +ELLIPSIS
12345.68
>>> round_to_sigfigs(12345.6789, 1) # doctest: +ELLIPSIS
10000.0
>>> round_to_sigfigs(12345.6789, 0) # doctest: +ELLIPSIS
100000.0
>>> round_to_sigfigs(12345.6789, -1) # doctest: +ELLIPSIS
1000000.0
"""
place = int(log(x, 10))
if sigfigs <= 0:
additional_place = x > 10. ** place
return 10. ** (-sigfigs + place + additional_place)
return round_to_place(x, sigfigs - 1 - place) | 5,357,243 |
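# A minimal sketch of the round_to_place helper assumed above (it is not shown in this
# snippet; the snippet also presumably relies on `from math import log`). It rounds x to
# the given number of decimal places, which may be negative, via the built-in round.
def round_to_place(x, place):
    # round(12345.6789, 2) -> 12345.68 ; round(12345.6789, -4) -> 10000.0
    return round(x, place)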
def filter_netcdf(filename1, filename2, first=0, last=None, step=1):
"""Filter data file, selecting timesteps first:step:last.
Read netcdf filename1, pick timesteps first:step:last and save to
nettcdf file filename2
"""
from Scientific.IO.NetCDF import NetCDFFile
# Get NetCDF
infile = NetCDFFile(filename1, netcdf_mode_r) #Open existing file for read
outfile = NetCDFFile(filename2, netcdf_mode_w) #Open new file
# Copy dimensions
for d in infile.dimensions:
outfile.createDimension(d, infile.dimensions[d])
# Copy variable definitions
for name in infile.variables:
var = infile.variables[name]
outfile.createVariable(name, var.dtype.char, var.dimensions)
# Copy the static variables
for name in infile.variables:
if name == 'time' or name == 'stage':
pass
else:
outfile.variables[name][:] = infile.variables[name][:]
# Copy selected timesteps
time = infile.variables['time']
stage = infile.variables['stage']
newtime = outfile.variables['time']
newstage = outfile.variables['stage']
if last is None:
last = len(time)
selection = list(range(first, last, step))
for i, j in enumerate(selection):
log.critical('Copying timestep %d of %d (%f)'
% (j, last-first, time[j]))
newtime[i] = time[j]
newstage[i,:] = stage[j,:]
# Close
infile.close()
outfile.close() | 5,357,244 |
def load_api_data (API_URL):
"""
Download data from API_URL
return: json
"""
#actual download
with urllib.request.urlopen(API_URL) as url:
api_data = json.loads(url.read().decode())
#testing data
##with open('nrw.json', 'r') as testing_set:
## api_data = json.load(testing_set)
return api_data | 5,357,245 |
def test_parsing(monkeypatch, capfd, configuration, expected_record_keys):
"""Verifies the feed is parsed as expected"""
def mock_get(*args, **kwargs):
return MockResponse()
test_tap: Tap = TapFeed(config=configuration)
monkeypatch.setattr(test_tap.streams["feed"]._requests_session, "send", mock_get)
test_tap.sync_all()
out, err = capfd.readouterr()
tap_records = get_parsed_records(out)
assert len(tap_records) == 10
for record in tap_records:
print(record)
assert record["type"] == "RECORD"
assert record["stream"] == "feed"
assert record["record"]["feed_url"] == MockResponse.url
assert list(record["record"].keys()) == expected_record_keys | 5,357,246 |
def hsl(h, s, l):
"""Converts an Hsl(h, s, l) triplet into a color."""
return Color.from_hsl(h, s, l) | 5,357,247 |
def factor(afunc):
"""decompose the string m.f or m.f(parms) and return function and parameter dictionaries
afunc has the form xxx or xxx(p1=value, p2=value,...)
create a dictionary from the parameters consisting of at least _first:True.
parameter must have the form name=value, name=value,...
"""
firstparen = afunc.find("(")
if firstparen >0: # parameters found, make a dictionary of them
try:
f = afunc[:firstparen]
afunc = "_customfunction" + afunc[firstparen:]
co = compile(afunc, "<string>", "eval")
spssparams = set(co.co_names)
except :
raise ValueError(_("The formula syntax given is invalid:\n") + str(sys.exc_info()[1]))
else:
spssparams = set()
f = afunc
co = compile("_customfunction()", "<string>", "eval")
return f, co, spssparams | 5,357,248 |
def spline(xyz, s=3, k=2, nest=-1):
""" Generate B-splines as documented in
http://www.scipy.org/Cookbook/Interpolation
The scipy.interpolate packages wraps the netlib FITPACK routines
(Dierckx) for calculating smoothing splines for various kinds of
data and geometries. Although the data is evenly spaced in this
example, it need not be so to use this routine.
Parameters
---------------
xyz : array, shape (N,3)
array representing x,y,z of N points in 3d space
    s : float, optional
        A smoothing condition. The amount of smoothness is determined by
        satisfying the conditions: sum((w * (y - g))**2,axis=0) <= s
        where g(x) is the smoothed interpolation of (x,y). The user can
        use s to control the tradeoff between closeness and smoothness of
        fit. Larger s means more smoothing while smaller values of s
        indicate less smoothing. Recommended values of s depend on the
        weights, w. If the weights represent the inverse of the
        standard-deviation of y, then a good s value should be found in
        the range (m-sqrt(2*m),m+sqrt(2*m)) where m is the number of
        datapoints in x, y, and w.
    k : int, optional
        Degree of the spline. Cubic splines are recommended. Even
        values of k should be avoided especially with a small s-value.
nest : None or int, optional
An over-estimate of the total number of knots of the spline to
help in determining the storage space. None results in value
m+2*k. -1 results in m+k+1. Always large enough is nest=m+k+1.
Default is -1.
Returns
----------
xyzn : array, shape (M,3)
        array representing x,y,z of the M points of the smoothed, interpolated curve
Examples
----------
>>> import numpy as np
>>> t=np.linspace(0,1.75*2*np.pi,100)# make ascending spiral in 3-space
>>> x = np.sin(t)
>>> y = np.cos(t)
>>> z = t
>>> x+= np.random.normal(scale=0.1, size=x.shape) # add noise
>>> y+= np.random.normal(scale=0.1, size=y.shape)
>>> z+= np.random.normal(scale=0.1, size=z.shape)
>>> xyz=np.vstack((x,y,z)).T
>>> xyzn=spline(xyz,3,2,-1)
>>> len(xyzn) > len(xyz)
True
See also
----------
scipy.interpolate.splprep
scipy.interpolate.splev
"""
# find the knot points
tckp, u = splprep([xyz[:, 0], xyz[:, 1], xyz[:, 2]], s=s, k=k, nest=nest)
# evaluate spline, including interpolated points
xnew, ynew, znew = splev(np.linspace(0, 1, 400), tckp)
return np.vstack((xnew, ynew, znew)).T | 5,357,249 |
def inputs_def():
"""
# Date-x: str: dates of the sample (Date-0 is today and we aim to predict price action in the next 4 days)
# averageDailyVolume10Day: Average volume of the last 10 days. Notice that due to limitation of Yahoo, we could not
put the true average at Date-0. Rather, we just put the current daily avg volume.
Should be fine in reality since Yahoo gives these info for actual today
(which we need in the operation mode).
# averageVolume: float: Average volume of the ticker. Notice that due to limitation of Yahoo, we could not
put the true average at Date-0. Rather, we just put the current daily avg volume.
Should be fine in reality since Yahoo gives these info for actual today
(which we need in the operation mode).
# VolumeNormalized-x: Volume of Date-x divided by the averageVolume
# IntraDayVolumeIndicator-x: binary (1: could keep its high volume and strength during Date-x)
# ChartPatterns-x: categorical: chart pattern for Date-x.
    1: flat: not much change during the day, mostly consolidating
2: downfall shape, strong start-of-day and weak end-of-day
3: bell shape closer to the market open with some high strength during regular hours but not close to the bells
4: bell shape closer to the market close with some high strength during regular hours but not close to the bells
5: uprise: almost constant growth during the day and strong close near high of the day
6: recovery: going down and trying to recover toward the end
7: mexican hat: going down and trying to recover but again going down toward the end-of-day
# marketCap: float: market cap in 100M
# Open-x: float: Price at the Open of the Date-x (Open-1 means opening price of the last trading day)
# Close-x: float: Price at the End of the Date-x (Close-1 means EoD of last trading day)
# High-x: float: Price at High of the Date-x (High-1 means high of last trading day)
# EoDtoHoD-x: float: EoD/HoD for Date-x
    # OverNighCahange-x: float: relative change at tomorrow's opening (captures after-hours and premarket moves)
# fiftyDayAverage: float: suffers from the same problem as averageVolume.
Yahoo gives most recent value not the one on Date-0
# fiftyTwoWeekHigh: float: 52-weeks high of the price
# heldPercentInstitutions: float in [0,1]
# Labels: max potential gain in Day 1 (tomorrow), Day 2, 3, and 4, and max over all these 4 days.
""" | 5,357,250 |
def plot_sample_imgs(get_imgs_fun, img_shape, plot_side=5, savepath=None, cmap='gray'):
"""
Generate visual samples and plot on a grid
:param get_imgs_fun: function that given a int return a corresponding number of generated samples
:param img_shape: shape of image to plot
    :param plot_side: samples per row (and column). Generates plot_side x plot_side samples
:param savepath: if given, save plot to such filepath, otherwise show plot
:param cmap: matplotlib specific cmap to use for the plot
"""
f, axarr = plt.subplots(plot_side, plot_side)
samples = get_imgs_fun(plot_side*plot_side)
for row in range(plot_side):
for col in range(plot_side):
axarr[row, col].imshow(samples[plot_side*row+col].reshape(img_shape), cmap=cmap)
axarr[row, col].set_title('')
axarr[row, col].axis('off')
if savepath:
f.savefig(savepath)
plt.close()
else:
plt.show() | 5,357,251 |
def download_image(path, image_url):
"""
    Download an image.
    Parameters:
        path - str: directory in which to save the image
        image_url - str: URL of the image to download
Returns:
None
"""
print(image_url)
filename = image_url.split('/')[-1]
image_path = os.path.join(path, filename)
download_headers = {'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.167 Safari/537.36'}
size = 0
with closing(requests.get(image_url, headers=download_headers, stream=True)) as response:
chunk_size = 1024
content_size = int(response.headers['content-length'])
if response.status_code == 200:
            sys.stdout.write(filename + ' downloading:\n')
            sys.stdout.write(' [File size]: %0.2f MB\n' % (content_size / chunk_size / 1024))
with open(image_path, 'wb') as file:
for data in response.iter_content(chunk_size = chunk_size):
file.write(data)
size += len(data)
file.flush()
                    sys.stdout.write(' [Progress]: %.2f%%' % float(size / content_size * 100) + '\r')
sys.stdout.flush() | 5,357,252 |
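# Usage sketch for download_image above: the target directory must already exist, and the
# URL is an illustrative placeholder, not a real asset.
download_image('downloads', 'https://example.com/images/cover.jpg')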
def main():
""" Entry point """
moves = "U D"
cube = Cube()
for move_string in moves.split():
move_to_make = CubeMove.parse(move_string)
cube.make_move(move_to_make)
print(cube) | 5,357,253 |
def load_supercomputers(log_file, train_ratio=0.5, windows_size=20, step_size=0, e_type='bert', mode="balance",
no_word_piece=0):
""" Load BGL, Thunderbird, and Spirit unstructured log into train and test data
Parameters
----------
log_file: str, the file path of raw log (extension: .log).
train_ratio: float, the ratio of training data for train/test split.
windows_size: int, the window size for sliding window
step_size: int, the step size for sliding window. if step_size is equal to window_size then fixed window is applied.
e_type: str, embedding type (choose from BERT, XLM, and GPT2).
mode: str, split train/testing in balance or not
    no_word_piece: bool, whether to split words into wordpieces or not.
Returns
-------
(x_tr, y_tr): the training data
(x_te, y_te): the testing data
"""
print("Loading", log_file)
with open(log_file, mode="r", encoding='utf8') as f:
logs = f.readlines()
logs = [x.strip() for x in logs]
E = {}
e_type = e_type.lower()
if e_type == "bert":
encoder = bert_encoder
elif e_type == "xlm":
encoder = xlm_encoder
else:
if e_type == "gpt2":
encoder = gpt2_encoder
else:
raise ValueError('Embedding type {0} is not in BERT, XLM, and GPT2'.format(e_type.upper()))
print("Loaded", len(logs), "lines!")
x_tr, y_tr = [], []
i = 0
failure_count = 0
n_train = int(len(logs) * train_ratio)
c = 0
t0 = time.time()
while i < n_train - windows_size:
c += 1
if c % 1000 == 0:
print("\rLoading {0:.2f}% - {1} unique logs".format(i * 100 / n_train, len(E.keys())), end="")
if logs[i][0] != "-":
failure_count += 1
seq = []
label = 0
for j in range(i, i + windows_size):
if logs[j][0] != "-":
label = 1
content = logs[j]
# remove label from log messages
content = content[content.find(' ') + 1:]
content = clean(content.lower())
if content not in E.keys():
try:
E[content] = encoder(content, no_word_piece)
except Exception as _:
print(content)
emb = E[content]
seq.append(emb)
x_tr.append(seq.copy())
y_tr.append(label)
i = i + step_size
print("\nlast train index:", i)
x_te = []
y_te = []
#
for i in range(n_train, len(logs) - windows_size, step_size):
if i % 1000 == 0:
print("Loading {:.2f}".format(i * 100 / n_train))
if logs[i][0] != "-":
failure_count += 1
seq = []
label = 0
for j in range(i, i + windows_size):
if logs[j][0] != "-":
label = 1
content = logs[j]
# remove label from log messages
content = content[content.find(' ') + 1:]
content = clean(content.lower())
if content not in E.keys():
E[content] = encoder(content, no_word_piece)
emb = E[content]
seq.append(emb)
x_te.append(seq.copy())
y_te.append(label)
(x_tr, y_tr) = shuffle(x_tr, y_tr)
print("Total failure logs: {0}".format(failure_count))
if mode == 'balance':
x_tr, y_tr = balancing(x_tr, y_tr)
num_train = len(x_tr)
num_test = len(x_te)
num_total = num_train + num_test
num_train_pos = sum(y_tr)
num_test_pos = sum(y_te)
num_pos = num_train_pos + num_test_pos
print('Total: {} instances, {} anomaly, {} normal' \
.format(num_total, num_pos, num_total - num_pos))
print('Train: {} instances, {} anomaly, {} normal' \
.format(num_train, num_train_pos, num_train - num_train_pos))
print('Test: {} instances, {} anomaly, {} normal\n' \
.format(num_test, num_test_pos, num_test - num_test_pos))
return (x_tr, y_tr), (x_te, y_te) | 5,357,254 |
def is_running(service: Service) -> bool:
"""Is the given pyodine daemon currently running?
:raises ValueError: Unknown `service`.
"""
try:
return bool(TASKS[service]) and not TASKS[service].done()
except KeyError:
raise ValueError("Unknown service type.") | 5,357,255 |
def test_is_not_positive_semidefinite():
"""Test that non-positive semidefinite matrix returns False."""
mat = np.array([[-1, -1], [-1, -1]])
np.testing.assert_equal(is_positive_semidefinite(mat), False) | 5,357,256 |
def PropertyWrapper(prop):
"""Wrapper for db.Property to make it look like a Django model Property"""
if isinstance(prop, db.Reference):
prop.rel = Relation(prop.reference_class)
else:
prop.rel = None
prop.serialize = True
return prop | 5,357,257 |
def plot_metric(
path_csv: str,
metric: str,
path_save: Optional[str] = None,
title: Optional[str] = None,
ymin: Optional[float] = None,
ymax: Optional[float] = None,
) -> None:
"""Plots box-plot of model performance according to some metric.
Parameters
----------
path_csv
Path to csv where each row is a dataset item.
metric
Name of metric. Should be within one or more CSV column names.
path_save
If not None, specifies where to save figure and figure will not be
displayed.
title
Plot title.
ymin
Y-axis minimum value.
ymax
Y-axis maximum value.
"""
if path_save is not None:
plt.switch_backend("Agg")
df = pd.read_csv(path_csv)
cols = [c for c in df.columns if metric in c]
cols_rename = {c: c.split(metric)[-1] for c in cols}
df = df.loc[:, cols].rename(columns=cols_rename)
fig, ax = plt.subplots()
df.boxplot(ax=ax)
if title is not None:
ax.set_title(title)
ax.set_ylim([ymin, ymax])
ax.set_ylabel("Pearson correlation coefficient (r)")
if path_save is not None:
fig.savefig(path_save, bbox_inches="tight")
logger.info(f"Saved: {path_save}")
return
plt.show() | 5,357,258 |
def drowLine(cord,orient,size):
"""
The function provides the coordinates of the line.
Arguments:
starting x or y coordinate of the line, orientation
        (string, "vert" or "hor") and length of the line
Return:
list of two points (start and end of the line)
"""
global cv2
if orient == "vert":
x1 = cord
x2 = cord
y1 = 0
y2 = size
elif orient == "hor":
x1 = 0
x2 = size
y1 = cord
y2 = cord
else:
print("not hor not vert")
return 0
return [(x1, y1), (x2, y2)] | 5,357,259 |
def bluetoothRead():
""" Returns the bluetooth address of the robot (if it has been previously stored)
arguments:
none
returns:
string - the bluetooth address of the robot, if it has been previously stored; None otherwise
"""
global EEPROM_BLUETOOTH_ADDRESS
bt = EEPROMread(EEPROM_BLUETOOTH_ADDRESS, 17)
if bluetoothValidate(bt):
return bt
else:
return None | 5,357,260 |
def is_step_done(client, step_name):
"""Query the trail status using the client and return True if step_name has completed.
Arguments:
client -- A TrailClient or similar object.
step_name -- The 'name' tag of the step to check for completion.
Returns:
True -- if the step has succeeded.
False -- otherwise.
"""
# To understand the structure of the result returned by the API calls, please see the documentation of the
# TrailClient class.
statuses = client.status(fields=[StatusField.STATE], name=step_name)
# In this case, the status call returns a list of step statuses.
# Since we have exactly one step with each name and we are querying the status of steps with the given name,
# there will be only one element in the result list. Hence we refer to the zeroth element of results.
if statuses and statuses[0][StatusField.STATE] == Step.SUCCESS:
return True
return False | 5,357,261 |
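# Hypothetical usage of is_step_done above: poll until a named step completes. The step
# name, client object, and poll interval are assumptions for illustration only.
import time
while not is_step_done(client, "build-artifacts"):
    time.sleep(5)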
def counter(filename:Path, window:int):
"""Part one (without window) and part two (with window) of day one."""
if window:
print(
f"sum (window = {window}): "
f"{count_increases(number_from_window(filename, window))}"
)
else:
print(f"sum: {count_increases(number_from_line(filename))}") | 5,357,262 |
def bubblesort_2(list_):
"""
Sort the items in list_ in non-decreasing order.
@param list list_: list to sort
@rtype: None
"""
j = len(list_) - 1
swapped = True
# Stop when no elements are swapped.
while swapped and j != 0:
swapped = False
# Swap every item that is out of order.
for i in range(j):
if list_[i] > list_[i + 1]:
swapped = True
list_[i], list_[i + 1] = list_[i + 1], list_[i]
j -= 1 | 5,357,263 |
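# Usage example for bubblesort_2 above: the list is sorted in place and the function
# returns None.
data = [5, 1, 4, 2, 8]
bubblesort_2(data)
assert data == [1, 2, 4, 5, 8]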
def on_chat_send(message):
"""Broadcast chat message to a watch room"""
# Check if params are correct
if 'roomId' not in message:
return {'status_code': 400}, request.sid
room_token = message['roomId']
    # Check if the room exists
    if not db.hexists('rooms', room_token):
        return {'status_code': 404}, request.sid
    # Check that the user has joined the room
if not room_token in rooms(sid=request.sid):
return {'status_code': 403}, request.sid
# Add current sever timestamp to the state
message = add_current_time_to_state(message)
# Send message to everybody in the room
emit('message_update', message, room=room_token)
# Response
return {'status_code': 200}, 200 | 5,357,264 |
def arima(size: int = 100,
phi: Union[float, ndarray] = 0,
theta: Union[float, ndarray] = 0,
d: int = 0,
var: float = 0.01,
random_state: float = None) -> ndarray:
# inherit from arima_with_seasonality
"""Simulate a realization from an ARIMA characteristic.
Acts like `tswge::gen.arima.wge()`
Parameters
----------
size: scalar int
Number of samples to generate.
phi: scalar float or list-like
AR process order
theta: scalar float or list-like
MA process order
d: scalar int
ARIMA process difference order
var: scalar float, optional
        Noise variance level.
random_state: scalar int, optional
Seed the random number generator.
Returns
-------
signal: np.ndarray
Simulated ARIMA.
"""
return arima_with_seasonality(size = size,
phi = phi,
theta = theta,
d = d,
s = 0,
var = var,
random_state = random_state) | 5,357,265 |
def gt_dosage(gt):
"""Convert unphased genotype to dosage"""
x = gt.split(b'/')
return int(x[0])+int(x[1]) | 5,357,266 |
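# Usage example for gt_dosage above, with unphased VCF-style genotype fields given as
# bytes (int() accepts bytes digits in Python 3).
assert gt_dosage(b'0/0') == 0
assert gt_dosage(b'0/1') == 1
assert gt_dosage(b'1/1') == 2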
def check_cwd_is_repo_root():
"""Check that script is being called from root of Git repo."""
cwd = os.getcwd()
cwd_name = os.path.split(cwd)[1]
if not os.path.exists(join(cwd, '.git')) or cwd_name != 'wge':
raise RuntimeError('Must run script from root of the Git repository.') | 5,357,267 |
def create_key_pair_in_ssm(
ec2: EC2Client,
ssm: SSMClient,
keypair_name: str,
parameter_name: str,
kms_key_id: Optional[str] = None,
) -> Optional[KeyPairInfo]:
"""Create keypair in SSM."""
keypair = create_key_pair(ec2, keypair_name)
try:
kms_key_label = "default"
kms_args: Dict[str, Any] = {}
if kms_key_id:
kms_key_label = kms_key_id
kms_args = {"KeyId": kms_key_id}
LOGGER.info(
'storing generated key in SSM parameter "%s" using KMS key "%s"',
parameter_name,
kms_key_label,
)
ssm.put_parameter(
Name=parameter_name,
Description='SSH private key for KeyPair "{}" '
"(generated by Runway)".format(keypair_name),
Value=keypair["KeyMaterial"],
Type="SecureString",
Overwrite=False,
**kms_args,
)
except ClientError:
# Erase the key pair if we failed to store it in SSM, since the
# private key will be lost anyway
LOGGER.exception(
"failed to store generated key in SSM; deleting "
"created key pair as private key will be lost"
)
ec2.delete_key_pair(KeyName=keypair_name, DryRun=False)
return None
return {
"status": "created",
"key_name": keypair.get("KeyName", ""),
"fingerprint": keypair.get("KeyFingerprint", ""),
} | 5,357,268 |
def test__string():
""" test graph.string and graph.from_string
"""
for sgr in C8H13O_SGRS:
assert sgr == automol.graph.from_string(automol.graph.string(sgr)) | 5,357,269 |
def sumofsq(im, axis=0):
"""Compute square root of sum of squares.
Args:
im: Raw image.
axis: Channel axis.
Returns:
Square root of sum of squares of input image.
"""
out = np.sqrt(np.sum(im.real * im.real + im.imag * im.imag, axis=axis))
return out | 5,357,270 |
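# Usage sketch for sumofsq above: combine an 8-channel complex coil image of shape
# (8, ny, nx) into a single (ny, nx) magnitude image.
import numpy as np
coil_ims = np.random.randn(8, 128, 128) + 1j * np.random.randn(8, 128, 128)
combined = sumofsq(coil_ims, axis=0)
assert combined.shape == (128, 128)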
def get_airflow_home():
"""Get path to Airflow Home"""
return expand_env_var(os.environ.get('AIRFLOW_HOME', '~/airflow')) | 5,357,271 |
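# A minimal sketch of the expand_env_var helper that get_airflow_home relies on. It is
# assumed (not shown in the snippet) to repeatedly expand ~ and $VARIABLES until the
# value stops changing.
import os
def expand_env_var(env_var):
    if not env_var:
        return env_var
    while True:
        interpolated = os.path.expanduser(os.path.expandvars(str(env_var)))
        if interpolated == env_var:
            return interpolated
        env_var = interpolated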
def read_mapping_file(map_file):
"""
Mappings are simply a CSV file with three columns.
The first is a string to be matched against an entry description.
The second is the payee against which such entries should be posted.
The third is the account against which such entries should be posted.
If the match string begins and ends with '/' it is taken to be a
regular expression.
"""
mappings = []
with open(map_file, "r", encoding='utf-8', newline='') as f:
map_reader = csv.reader(f)
for row in map_reader:
if len(row) > 1:
pattern = row[0].strip()
payee = row[1].strip()
account = row[2].strip()
tags = row[3:]
if pattern.startswith('/') and pattern.endswith('/'):
try:
pattern = re.compile(pattern[1:-1])
except re.error as e:
print("Invalid regex '{0}' in '{1}': {2}"
.format(pattern, map_file, e),
file=sys.stderr)
sys.exit(1)
mappings.append((pattern, payee, account, tags))
return mappings | 5,357,272 |
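# Hypothetical usage of read_mapping_file above: write a two-rule mapping file (one plain
# substring rule, one /regex/ rule) and load it. File name and rule contents are
# illustrative only.
from pathlib import Path
Path("mappings.csv").write_text(
    "SAFEWAY,Safeway,Expenses:Groceries,food\n"
    "/AMZN.*MKTP/,Amazon,Expenses:Shopping\n",
    encoding="utf-8",
)
mappings = read_mapping_file("mappings.csv")
pattern, payee, account, tags = mappings[0]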
def biquad_bp2nd(fm, q, fs, q_warp_method="cos"):
"""Calc coeff for bandpass 2nd order.
input:
fm...mid frequency in Hz
q...bandpass quality
fs...sampling frequency in Hz
q_warp_method..."sin", "cos", "tan"
output:
B...numerator coefficients Laplace transfer function
A...denominator coefficients Laplace transfer function
b...numerator coefficients z-transfer function
a...denominator coefficients z-transfer function
"""
wm = 2*np.pi*fm
B = np.array([0, 1 / (q*wm), 0])
A = np.array([1 / wm**2, 1 / (q*wm), 1])
wmpre = f_prewarping(fm, fs)
qpre = q_prewarping(q, fm, fs, q_warp_method)
Bp = 0., 1 / (qpre*wmpre), 0.
Ap = 1 / wmpre**2, 1 / (qpre*wmpre), 1.
b, a = bilinear_biquad(Bp, Ap, fs)
return B, A, b, a | 5,357,273 |
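# Usage sketch for biquad_bp2nd above: a 2nd-order bandpass centred at 1 kHz with Q = 2
# at a 48 kHz sampling rate. B, A are the analog (Laplace) coefficients; b, a are the
# bilinear-transformed digital (z-domain) coefficients.
B, A, b, a = biquad_bp2nd(fm=1000, q=2, fs=48000)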
def pullAllData():
""" Pulls all available data from the database
Sends all analyzed data back in a json with fileNames and list of list
of all "spots" intensities and backgrounds.
Args:
db.d4Images (Mongo db collection): Mongo DB collection with processed
data
Returns:
payload (jsonify(dict)): data dictionary with filename, spots, and
background info
statusCode (int): HTTP status code
"""
pullFileNames = []
pullSpotData = []
pullBgData = []
for eachEntry in db.d4Images.find():
pullFileNames.append(eachEntry["filename"])
pullSpotData.append(eachEntry["spots"])
pullBgData.append(eachEntry["background"])
payload = {"filename": pullFileNames,
"spots": pullSpotData,
"background": pullBgData}
statusCode = 200
return jsonify(payload), statusCode | 5,357,274 |
def f2():
"""
>>> # +--------------+-----------+-----------+------------+-----------+--------------+
>>> # | Chromosome | Start | End | Name | Score | Strand |
>>> # | (category) | (int32) | (int32) | (object) | (int64) | (category) |
>>> # |--------------+-----------+-----------+------------+-----------+--------------|
>>> # | chr1 | 1 | 2 | a | 0 | + |
>>> # | chr1 | 6 | 7 | b | 0 | - |
>>> # +--------------+-----------+-----------+------------+-----------+--------------+
>>> # Stranded PyRanges object has 2 rows and 6 columns from 1 chromosomes.
>>> # For printing, the PyRanges was sorted on Chromosome and Strand.
"""
full_path = get_example_path("f2.bed")
return pr.read_bed(full_path) | 5,357,275 |
def latin(n, d):
"""
Build latin hypercube.
Parameters
----------
n : int
Number of points.
d : int
Size of space.
Returns
-------
lh : ndarray
Array of points uniformly placed in d-dimensional unit cube.
"""
# spread function
def spread(points):
return sum(1./np.linalg.norm(np.subtract(points[i], points[j])) for i in range(n) for j in range(n) if i > j)
# starting with diagonal shape
lh = [[i/(n-1.)]*d for i in range(n)]
# minimizing spread function by shuffling
minspread = spread(lh)
for i in range(1000):
point1 = np.random.randint(n)
point2 = np.random.randint(n)
dim = np.random.randint(d)
newlh = np.copy(lh)
newlh[point1, dim], newlh[point2, dim] = newlh[point2, dim], newlh[point1, dim]
newspread = spread(newlh)
if newspread < minspread:
lh = np.copy(newlh)
minspread = newspread
return lh | 5,357,276 |
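# Usage example for latin above: 20 points spread over the 3-dimensional unit cube,
# one point per row with coordinates in [0, 1].
lh = latin(20, 3)
assert np.shape(lh) == (20, 3)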
def test_read_ego_SE3_sensor(test_data_root_dir: Path) -> None:
"""Read sensor extrinsics for a particular log."""
ego_SE3_sensor_path = test_data_root_dir / "sensor_dataset_logs" / "test_log"
sensor_name_to_sensor_pose = read_ego_SE3_sensor(ego_SE3_sensor_path)
assert sensor_name_to_sensor_pose is not None
assert len(sensor_name_to_sensor_pose) > 0 | 5,357,277 |
def _get_valid_dtype(series_type, logical_type):
"""Return the dtype that is considered valid for a series
with the given logical_type"""
backup_dtype = logical_type.backup_dtype
if ks and series_type == ks.Series and backup_dtype:
valid_dtype = backup_dtype
else:
valid_dtype = logical_type.primary_dtype
return valid_dtype | 5,357,278 |
def geometric_mean_longitude(t='now'):
"""
Returns the geometric mean longitude (in degrees).
Parameters
----------
t : {parse_time_types}
A time (usually the start time) specified as a parse_time-compatible
time string, number, or a datetime object.
"""
T = julian_centuries(t)
result = 279.696680 + 36000.76892 * T + 0.0003025 * T**2
result = result * u.deg
return Longitude(result) | 5,357,279 |
def calculate_value_functions_and_flow_utilities(
wages,
nonpec,
continuation_values,
draws,
delta,
is_inadmissible,
value_functions,
flow_utilities,
):
"""Calculate the choice-specific value functions and flow utilities.
Parameters
----------
wages : numpy.ndarray
Array with shape (n_choices,).
nonpec : numpy.ndarray
Array with shape (n_choices,).
continuation_values : numpy.ndarray
Array with shape (n_choices,)
draws : numpy.ndarray
Array with shape (n_draws, n_choices)
delta : float
Discount rate.
is_inadmissible: numpy.ndarray
Array with shape (n_choices,) containing indicator for whether the following
state is inadmissible.
Returns
-------
value_functions : numpy.ndarray
Array with shape (n_choices, n_draws).
flow_utilities : numpy.ndarray
Array with shape (n_choices, n_draws).
"""
n_draws, n_choices = draws.shape
for i in range(n_draws):
for j in range(n_choices):
value_function, flow_utility = aggregate_keane_wolpin_utility(
wages[j],
nonpec[j],
continuation_values[j],
draws[i, j],
delta,
is_inadmissible[j],
)
flow_utilities[j, i] = flow_utility
value_functions[j, i] = value_function | 5,357,280 |
def kill(update: Update, context: CallbackContext):
"""Juega a la ruleta rusa"""
try:
user = update.effective_user
context.bot.sendChatAction(chat_id=update.message.chat_id, action=ChatAction.TYPING, timeout=10)
time = datetime.datetime.now()
context.bot.restrictChatMember(chat_id=update.message.chat_id, user_id=user.id, until_date=time,
permissions=ban_chat_permissions)
        context.bot.send_message(chat_id=update.effective_chat.id, text="The self-mute has been applied!")
except Exception:
        context.bot.send_message(chat_id=update.effective_chat.id, text="I wish I could do it, but I don't have permission...")
def create_feature_data_batch(im_dir,video_ids):
"""
create_feature_data_batch
    Similar to create_feature_data, but uses the batch versions of the functions from the original,
    making it better suited to larger sets of images.
    Input : directory of thumbnails, list of video ids
    Output : dataframe containing facial features for all images that contain a face
"""
cols = ['videoId', 'numFaces', 'emotions', 'age', 'gender', 'race', 'face_locations']
#Create empty dataframe
df = pd.DataFrame(columns=cols)
#Initialize variables
batch = 0 #Current batch size
videoIds = []
face_locations_batch = []
faces_batch = []
img_obj_batch = []
img_objs = []
files = [f + ".jpg" for f in video_ids if os.path.exists(im_dir+f+".jpg")]
last_file = files[-1]
num_batch = 0
for filename in files:
#Append image objects from files into lists until batch size of 50 or last file
image = face_recognition.load_image_file(im_dir + '/' + filename)
img_obj_batch.append(image)
img_objs.append(image)
videoIds.append(filename[:-4])
batch += 1
if batch == 50 or filename == last_file:
num_batch += 1
#Extract faces from all images in batch
print("Batch {0} Facial Recognition Start!".format(num_batch))
face_locations_batch += face_recognition.batch_face_locations(img_obj_batch,
number_of_times_to_upsample=1,
batch_size=batch)
#Get index where no faces were detected in image
empty_indices = [empty_ix for empty_ix, element in enumerate(face_locations_batch) if element == []]
#Remove those entries from all our lists
for index in sorted(empty_indices, reverse=True):
del face_locations_batch[index]
del videoIds[index]
del img_objs[index]
batch = 0
img_obj_batch = []
print("Batch {0} Facial Recognition Finished!".format(num_batch))
print("Face Image Extraction Start!")
#For each image, crop out all the faces and append to faces_batch
for ix in range(len(face_locations_batch)):
im = Image.fromarray(img_objs[ix])
for f in face_locations_batch[ix]:
face = im.crop((f[3], f[0], f[1], f[2]))
face = np.asarray(face)
faces_batch.append(face)
print("Face Image Extraction Finished!")
print("Facial Analysis Begin!")
#Do analysis of all faces in faces_batch
analysis_counter = 0
"""
    Note : On DSMLP servers and other machines, running DeepFace.analyze multiple times would reload the model
    over and over, causing memory issues, so instead we run one large analysis job over all faces
"""
analysis = DeepFace.analyze(faces_batch)
print("Facial Analysis Finished!")
#Append all features into the dataframe
for i in range(len(face_locations_batch)):
f = face_locations_batch[i]
emotions = []
age = []
gender = []
race = []
for j in range(len(f)):
analysis_counter += 1
curr_analysis = analysis['instance_' + str(analysis_counter)]
emotions.append(curr_analysis['dominant_emotion'])
age.append(curr_analysis['age'])
gender.append(curr_analysis['gender'])
race.append(curr_analysis['dominant_race'])
df = df.append({'videoId': videoIds[i], 'numFaces': len(f), 'emotions': emotions, 'age': age,
'gender': gender, 'race': race, 'face_locations': f}, ignore_index=True)
return df | 5,357,282 |
def test_tick_fontsize_setter():
"""Assert that the tick_fontsize setter works as intended."""
with pytest.raises(ValueError):
BasePlotter().tick_fontsize = 0 | 5,357,283 |
def export_to_sunrise_from_halolist(ds,fni,star_particle_type,
halo_list,domains_list=None,**kwargs):
"""
Using the center of mass and the virial radius
for a halo, calculate the regions to extract for sunrise.
The regions are defined on the root grid, and so individual
octs may span a large range encompassing many halos
and subhalos. Instead of repeating the oct extraction for each
halo, arrange halos such that we only calculate what we need to.
Parameters
----------
ds : `Dataset`
The dataset to convert. We use the root grid to specify the domain.
fni : string
The filename of the output FITS file, but depends on the domain. The
dle and dre are appended to the name.
particle_type : int
The particle index for stars
halo_list : list of halo objects
The halo list objects must have halo.CoM and halo.Rvir,
both of which are assumed to be in unitary length units.
frvir (optional) : float
Ensure that CoM +/- frvir*Rvir is contained within each domain
    domains_list (optional): dict of halos
Organize halos into a dict of domains. Keys are DLE/DRE tuple
values are a list of halos
"""
dn = ds.domain_dimensions
if domains_list is None:
domains_list = domains_from_halos(ds,halo_list,**kwargs)
if fni.endswith('.fits'):
fni = fni.replace('.fits','')
for (num_halos, domain, halos) in domains_list:
dle,dre = domain
print('exporting: ')
print("[%03i %03i %03i] -"%tuple(dle), end=' ')
print("[%03i %03i %03i] "%tuple(dre), end=' ')
print(" with %i halos"%num_halos)
dle,dre = domain
dle, dre = np.array(dle),np.array(dre)
fn = fni
fn += "%03i_%03i_%03i-"%tuple(dle)
fn += "%03i_%03i_%03i"%tuple(dre)
fnf = fn + '.fits'
fnt = fn + '.halos'
if os.path.exists(fnt):
os.remove(fnt)
fh = open(fnt,'w')
for halo in halos:
fh.write("%i "%halo.ID)
fh.write("%6.6e "%(halo.CoM[0]*ds['kpc']))
fh.write("%6.6e "%(halo.CoM[1]*ds['kpc']))
fh.write("%6.6e "%(halo.CoM[2]*ds['kpc']))
fh.write("%6.6e "%(halo.Mvir))
fh.write("%6.6e \n"%(halo.Rvir*ds['kpc']))
fh.close()
export_to_sunrise(ds, fnf, star_particle_type, dle*1.0/dn, dre*1.0/dn) | 5,357,284 |
def GetEnvironFallback(var_list, default):
"""Look up a key in the environment, with fallback to secondary keys
and finally falling back to a default value."""
for var in var_list:
if var in os.environ:
return os.environ[var]
return default | 5,357,285 |
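# Usage example for GetEnvironFallback above: prefer CC_target over CC, falling back to
# "cc" when neither environment variable is set.
cc = GetEnvironFallback(["CC_target", "CC"], "cc")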
def produce_quick_fig_mol(
molecules, filename, labels=True, mpr=5, ims=200
):
"""
Produce figure showing all the 2D coordinates of molecules.
"""
DrawingOptions.bondLineWidth = 1.8
DrawingOptions.atomLabelFontSize = 16
mols = [Chem.MolFromSmiles(x) for x in molecules.values()]
for m in mols:
_ = Chem.Compute2DCoords(m)
# Draw.MolToFile(mols[0], output_dir+'mol1.png')
if len(mols) > 20:
im = 1
M = []
for i in mols:
M.append(i)
if len(M) == 20:
if labels:
img = Draw.MolsToGridImage(
M,
molsPerRow=mpr,
subImgSize=(ims, ims),
legends=[x for x in molecules.keys()]
)
else:
img = Draw.MolsToGridImage(M, molsPerRow=mpr,
subImgSize=(ims, ims))
out_name = filename.replace('.pdf', '_'+str(im)+'.pdf')
print(out_name)
img.save(out_name)
im += 1
M = []
# final figure with remaining
if len(M) > 0:
if labels:
img = Draw.MolsToGridImage(
M, molsPerRow=mpr,
subImgSize=(ims, ims),
legends=[x for x in molecules.keys()]
)
else:
img = Draw.MolsToGridImage(M, molsPerRow=mpr,
subImgSize=(ims, ims))
out_name = filename.replace('.pdf', '_'+str(im)+'.pdf')
print(out_name)
img.save(out_name)
else:
out_name = filename
M = mols
if labels:
img = Draw.MolsToGridImage(
M, molsPerRow=mpr,
subImgSize=(ims, ims),
legends=[x for x in molecules.keys()]
)
else:
img = Draw.MolsToGridImage(M, molsPerRow=mpr,
subImgSize=(ims, ims))
img.save(out_name) | 5,357,286 |
def write_band_table(results,filename,band_definition,fields=None,default=np.nan):
"""Writes level energy, moment, and in-band transition data.
With default arguments, recovers behavior of write_level_table.
Each output line is written in the form
"seq"=0 J p n T Eabs <"E2_moments"> <"M1_moments"> <"E2_transitions_dJ1"> <"E2_transitions_dJ2"> <"M1_transitions_dJ1">
The field seq is included for historical reasons but written as a
dummy zero.
Any missing or undefined transition RMEs are written as a NaN (or
as the numerical value given by the argument default).
Entries within a field are written in the order RME(lp,ln,sp,sn)
for M1 or RME(p,n) for E2. Note that the electromagnetic M1
moment is *not* included in the list of moments, even though it
was in the original tabulation format for berotor. The
electromagnetic M1 RME can be recovered by taking the linear
combination with the standard gyromagnetic ratios as coefficients.
Some of these fields may optionally be omitted.
The given levels are assumed to be unique by J.
RME lookup is done with lower-J state as final state (presumes
lower-J state served as a reference state in the MFDn
calculation).
Hint: If all the transitions come out as missing, have you set the correct M value?
Args:
results (MFDnRunData): results object containing the levels
filename (str): output filename
band_definition (BandDefinition): band definition
fields (set of str): fields to include, else all fields written if None (default: None)
default (float): value to use for missing numerical entries (default: np.nan)
"""
# resolve special values of fields argument
if (fields is None):
fields = {"E2_moments","M1_moments","E2_transitions_dJ1","E2_transitions_dJ2","M1_transitions_dJ1"}
# assemble table lines
value_format = " {:9.4f}" # format string for numerical values
lines = []
for J in band_definition.J_values:
# determine level
qn = band_definition.members[J]
# initial state data
line = "{:1d} {:6.3f} {:1d} {:2d} {:6.3f} {:8.3f}".format(
0,
results.get_property(qn,"J"),
results.get_property(qn,"g"),
results.get_property(qn,"n"),
results.get_property(qn,"T"),
results.get_energy(qn)
)
# loop over moment fields
# (field,op,entries)
field_definitions = [
("E2_moments","E2",2),
("M1_moments","M1",4)
]
for (field,op,entries) in field_definitions:
if (field in fields):
values = default*np.ones(entries)
if ((qn,op) in results.moments):
# values exist
values = np.array(results.moments[(qn,op)])
line += (entries*value_format).format(*values)
# loop over transition fields
# (field,op,dJ,entries)
field_definitions = [
("E2_transitions_dJ1","E2",1,2),
("E2_transitions_dJ2","E2",2,2),
("M1_transitions_dJ1","M1",1,4)
]
for (field,op,dJ,entries) in field_definitions:
if (field in fields):
Ji = J
Jf = Ji-dJ
qni = qn
values = default*np.ones(entries)
if ((Jf in band_definition.members) and (Ji in band_definition.M)):
# final level and appropriate M calculation are defined
M = band_definition.M[Ji]
qnf = band_definition.members[Jf]
values = results.get_rme(qnf,qni,op,M,default=default)
if ((Ji,M) in band_definition.signs) and ((Jf,M) in band_definition.signs):
# phases are defined for these states at the required M
values *= band_definition.signs[(Ji,M)]*band_definition.signs[(Jf,M)]
line += (entries*value_format).format(*values)
# finalize line
line += "\n"
lines.append(line)
# write to file
with open(filename,"wt") as fout:
fout.writelines(lines) | 5,357,287 |
def get_or_create(model, **kwargs):
"""Get or a create a database model."""
instance = model.query.filter_by(**kwargs)
if instance:
return instance
else:
instance = model(**kwargs)
db.session.add(instance)
return instance | 5,357,288 |
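# Hypothetical usage of get_or_create above inside a Flask-SQLAlchemy view; the Tag model
# is an assumption for illustration, and the caller commits the session afterwards.
tag = get_or_create(Tag, name="python")
db.session.commit()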
def test_class_id_cube_strategy_elliptic_paraboloid(experiment_enviroment,
renormalize,
thread_flag):
""" """
tm, dataset, experiment, dictionary = experiment_enviroment
class_id_params = {
"class_ids" + MAIN_MODALITY: list(np.arange(0, 1.0, 0.25)),
"class_ids" + NGRAM_MODALITY: list(np.arange(0, 2.05, 0.25)),
}
def retrieve_elliptic_paraboloid_score(topic_model):
""" """
model = topic_model._model
return -((model.class_ids[MAIN_MODALITY]-0.6-model.class_ids[NGRAM_MODALITY]) ** 2 +
(model.class_ids[MAIN_MODALITY]-0.6+model.class_ids[NGRAM_MODALITY]/2) ** 2)
cube = CubeCreator(
num_iter=1,
parameters=class_id_params,
reg_search="grid",
strategy=GreedyStrategy(renormalize),
tracked_score_function=retrieve_elliptic_paraboloid_score,
separate_thread=thread_flag
)
dummies = cube(tm, dataset)
tmodels_lvl2 = [dummy.restore() for dummy in dummies]
if not renormalize:
assert len(tmodels_lvl2) == sum(len(m) for m in class_id_params.values())
else:
assert len(tmodels_lvl2) == 10
if renormalize:
CLASS_IDS_FOR_CHECKING = [(1.0, 0.0), (1.0, 0.0), (0.8, 0.2), (0.667, 0.333),
(0.571, 0.429), (0.5, 0.5), (0.444, 0.556),
(0.4, 0.6), (0.364, 0.636), (0.333, 0.667)]
for i, one_model in enumerate(tmodels_lvl2):
assert np.round(one_model.class_ids[MAIN_MODALITY], 3) == CLASS_IDS_FOR_CHECKING[i][0]
assert np.round(one_model.class_ids[NGRAM_MODALITY], 3) == CLASS_IDS_FOR_CHECKING[i][1]
else:
one_model = tmodels_lvl2[len(class_id_params["class_ids" + MAIN_MODALITY])]
assert np.round(one_model.class_ids[MAIN_MODALITY], 3) == 0.5
assert np.round(one_model.class_ids[NGRAM_MODALITY], 3) == 0
assert cube.strategy.best_score >= -0.09 | 5,357,289 |
def apply_filters(filters: Dict, colnames: List, row: List) -> List:
"""
Process data based on filter chains
:param filters:
:param colnames:
:param row:
:return:
"""
if filters:
new_row = []
for col, data in zip(colnames, row):
if col in filters:
params = filters[col][:]
for f in params:
current_filter = f[:] # copy so that pop does not break next iteration
filter_name = current_filter.pop(0)
if filter_name not in FILTERS:
raise FilterError(f"Error: Invalid filter name: {filter_name}")
func, num_params = FILTERS[filter_name][:2]
if len(current_filter) != num_params:
raise FilterError(
f"Error: Incorrect number of params for {filter_name}. Expected {num_params}, got {len(current_filter)})")
data = func(data, *current_filter)
new_row.append(data)
return new_row
return row | 5,357,290 |
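# Hypothetical filter chain for apply_filters above. The available filter names depend on
# the FILTERS registry, which is not shown here; "strip" and "upper" (each taking zero
# parameters) are assumptions for illustration.
filters = {"name": [["strip"], ["upper"]]}
new_row = apply_filters(filters, ["id", "name"], ["1", "  alice "])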
def kaiming(shape, dtype, partition_info=None):
"""Kaiming initialization as described in https://arxiv.org/pdf/1502.01852.pdf"""
return tf.random.truncated_normal(shape) * tf.sqrt(2 / float(shape[0])) | 5,357,291 |
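# Usage example for kaiming above: initialise a dense-layer weight matrix, where shape[0]
# (here 784) is treated as the fan-in.
w = kaiming([784, 256], tf.float32)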
def version():
"""Show version or update meta descriptor"""
pass | 5,357,292 |
def save_all_detection(im_array, detections, imdb_classes=None, thresh=0.7):
"""
save all detections in one image with result.png
:param im_array: [b=1 c h w] in rgb
:param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
:param imdb_classes: list of names in imdb
:param thresh: threshold for valid detections
:return:
"""
import random
im = image_processing.transform_inverse(im_array, config.PIXEL_MEANS)
im = im[:, :, ::-1].copy() # back to b,g,r
for j in range(1, len(imdb_classes)):
color = (255*random.random(), 255*random.random(), 255*random.random()) # generate a random color
dets = detections[j]
for i in range(dets.shape[0]):
bbox = dets[i, :4]
score = dets[i, -1]
if score > thresh:
cv2.rectangle(im, (int(round(bbox[0])), int(round(bbox[1]))),
(int(round(bbox[2])), int(round(bbox[3]))), color, 2)
cv2.putText(im, '%s'%imdb_classes[j], (bbox[0], bbox[1]),
cv2.FONT_HERSHEY_SIMPLEX, 1.0, color, 2)
cv2.imwrite("result.jpg", im) | 5,357,293 |
def trunc_artist(df: pd.DataFrame, artist: str, keep: float = 0.5, random_state: int = None):
"""
Keeps only the requested portion of songs by the artist
(this method is not in use anymore)
"""
data = df.copy()
df_artist = data[data.artist == artist]
data = data[data.artist != artist]
orig_length = len(df_artist)
try:
df_artist = df_artist.sample(int(len(df_artist) * keep), random_state=random_state)
except ValueError:
pass
new_length = len(df_artist)
print("Truncating data for {artist}, original length = {orig}, new length = {new}".format(artist=artist,
orig=orig_length,
new=new_length))
data = data.append(df_artist)
return data.reset_index(drop=True) | 5,357,294 |
async def test_postprocess_results(original, expected):
"""Test Application._postprocess_results."""
callback1_called = False
callback2_called = False
app = Application("testing")
@app.result_postprocessor
async def callback1(app, message):
nonlocal callback1_called
callback1_called = True
return message + 1
@app.result_postprocessor
async def callback2(app, message):
nonlocal callback2_called
callback2_called = True
# Nothing is returned out of Application._postprocess_results so
# the assertion needs to happen inside a callback.
assert message == expected
await app._postprocess_results([original])
assert callback1_called
assert callback2_called | 5,357,295 |
def toContinuousCategory(
oX: pd.DataFrame,
features: list = [],
drop: bool = True,
int_: bool = True,
float_: bool = True,
quantile: bool = True,
nbin: int = 10,
inplace: bool = True,
verbose: bool = True,
) -> pd.DataFrame:
"""
Transforms any float, continuous integer values of
a pandas dataframe to category values.
Parameters:
X: dataset
Keywords:
features: default: []
The column names to be transform from continuous to category.
drop: default: True)
If True then the datetime feature/column will be removed.
int_: Default: True
set integer=False if not continuous and not to transform into category.
float_: Default: True
set floaty=False if not continuous and not to transform into category.
quantile: Default: True use quantile bin.
quantile is simular to v/(maxy-miny), works on any any scale.
False, use fixed-width bin. miny,maxy arguments are ignored.
nbin: default: 10
Alternately ``nbins`` can be integer for number of bins. Or it can be
array of quantiles, e.g. [0, .25, .5, .75, 1.]
or array of fixed-width bin boundaries i.e. [0., 4., 10, 100].
verbose: Default True
True: output
False: silent
inplace: Default: True
True: replace 1st argument with resulting dataframe
            False: leave the dataframe X unchanged and return a transformed copy
Returns: pd.DataFrame
Raises:
TypeError('" requires boolean type.")
Note:
Binning, also known as quantization is used for
transforming continuous numeric features
(``np.number`` type) into ``category`` type.
These categories group the continuous values
into bins. Each bin represents a range of continuous numeric values.
Specific strategies of binning data include fixed-width
(``quantile_bins=False``) and adaptive binning (``quantile_bins = True``).
Datasets that are used as ``train``, ``valid``, and ``test``
must have same bin widths and labels and thus the
same categories.
Assumes **paso** data
cleaning steps (such as removal of Null and NA values)
have already been applied.
        Fixed-width binning only works, WITHOUT SCALING, on datasets with multiple features
        for tree-based models such as CART, random forest, xgboost, lightgbm,
        catboost, etc. It will not work for deep learning with neural nets.
quantile is similar to min-max scaling: v/(maxy-miny)
works on any any scale
**Statistical problems with linear binning.**
Binning increases type I and type II error; (simple proof is that as number
of bins approaches infinity then information loss approaches zero).
        In addition, changing the number of bins will alter the bin distribution shape,
        unless the distribution is uniformly flat.
**Quantile binning can only be used with a singular data set.**
        Transforming a continuous feature into a category feature based on percentiles (QUANTILES) is WRONG
        if you have train and test data sets. Quantiles are based on the data set and will be different unless
        each data set's distribution is equal. In the limit where there are only two bins,
        almost no relationship can be modeled. We are essentially doing a t-test.
**if there are nonlinear or even nonmonotonic relationships between features**
If you need linear binning, not quantile, use
``quantile_bins=False`` and specify the bin width (``delta``) or fixed bin boundaries
of any distribution of cuts you wish with ``nbin`` = [ cut-1, cut-2...cut-n ]
**If you want Quantile-binning.**
        Despite the above warnings, your use case may require quantile binning.
        Quantile-based binning is a fairly good strategy to use for adaptive binning.
Quantiles are specific values or cut-points which partition
the continuous valued distribution of a feature into
discrete contiguous bins or intervals. Thus, q-Quantiles
        partition a numeric attribute into q equal (percentage-width) partitions.
        Well-known examples of quantiles include the 2-Quantile (median), which
        divides the data distribution into two equal (percentage-width) bins, the 4-Quantiles
        (standard quartiles), 4 equal (percentage-width) bins, and the 10-Quantiles
        (deciles), 10 equal-width (percentage-width) bins.
**You should maybe looking for outliers AFTER applying a Gaussian transformation.**
"""
_fun_name = toContinuousCategory.__name__
# todo put in decorator
if inplace:
X = oX
else:
X = oX.copy()
validate_bool_kwarg(int_, "int_")
validate_bool_kwarg(float_, "float_")
    # handles float, continuous integer. set int_=False if not continuous
# any other dataframe value type left as is.
if features == []:
features = X.columns
for nth, feature in enumerate(features):
if (float_ and X[feature].dtype == float) or (int_ and X[feature].dtype == int):
nbin = _must_be_list_tuple_int(nbin)
# import pdb; pdb.set_trace() # debugging starts here
if quantile:
# quantile is similar to min-max scaling: v/(maxy-miny)
# works on any any scale
X[feature + "q"] = pd.qcut(X[feature], nbin, duplicates="drop")
else:
# fixed-width bin, only works, WITHOUT SCALING, with datasets with multiple features
# for tree-based models such as CART, random forest, xgboost, lightgbm,
X[feature + "fw"] = pd.cut(X[feature], nbin, duplicates="drop")
# drop feature, if a list and its short, then their is an error.
# no drop for integer=False or float_=False
if drop:
X.drop(features, axis=1, inplace=True)
if verbose:
logger.info("{} features:: {}".format(_fun_name, features))
return X | 5,357,296 |
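# Usage sketch for toContinuousCategory above (df is an assumed pandas DataFrame with
# numeric "age" and "income" columns): bin both features into 5 quantile-based
# categories, dropping the original columns.
df_binned = toContinuousCategory(df, features=["age", "income"], nbin=5)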
def test_command_line_interface():
"""Test the CLI."""
runner = CliRunner()
result = runner.invoke(cli.main)
assert result.exit_code == 0
assert 'fibonacci_calculator.cli.main' in result.output
help_result = runner.invoke(cli.main, ['--help'])
assert help_result.exit_code == 0
assert '--help Show this message and exit.' in help_result.output | 5,357,297 |
def task_migrate():
"""Create django databases"""
return {
'actions': ['''cd CCwebsite && python3 manage.py migrate''']
} | 5,357,298 |
def distance_to_arc(alon, alat, aazimuth, plons, plats):
"""
Calculate a closest distance between a great circle arc and a point
(or a collection of points).
:param float alon, alat:
Arc reference point longitude and latitude, in decimal degrees.
    :param aazimuth:
Arc azimuth (an angle between direction to a north and arc in clockwise
direction), measured in a reference point, in decimal degrees.
:param float plons, plats:
Longitudes and latitudes of points to measure distance. Either scalar
values or numpy arrays of decimal degrees.
:returns:
Distance in km, a scalar value or numpy array depending on ``plons``
and ``plats``. A distance is negative if the target point lies on the
right hand side of the arc.
Solves a spherical triangle formed by reference point, target point and
a projection of target point to a reference great circle arc.
"""
azimuth_to_target = azimuth(alon, alat, plons, plats)
distance_to_target = geodetic_distance(alon, alat, plons, plats)
# find an angle between an arc and a great circle arc connecting
# arc's reference point and a target point
t_angle = (azimuth_to_target - aazimuth + 360) % 360
# in a spherical right triangle cosine of the angle of a cathetus
# augmented to pi/2 is equal to sine of an opposite angle times
# sine of hypotenuse, see
# http://en.wikipedia.org/wiki/Spherical_trigonometry#Napier.27s_Pentagon
angle = numpy.arccos(
(numpy.sin(numpy.radians(t_angle))
* numpy.sin(distance_to_target / EARTH_RADIUS))
)
return (numpy.pi / 2 - angle) * EARTH_RADIUS | 5,357,299 |