content (string, lengths 22–815k) | id (int64, 0–4.91M) |
---|---|
def test_to_scio_submit_post_data():
"""Test that the maps to send to scio is on the correct form"""
tests = [(b'\x00\x00', 'test.txt', 'AAA='),
(b'test', 'something.bin', 'dGVzdA=='),
(b'\x00\xFF\x0A', 'thisisafile.pck', 'AP8K')]
for byte_content, file_name, encoded_content in tests:
file_like = io.BytesIO(byte_content)
my_map = upload.to_scio_submit_post_data(file_like, file_name)
assert my_map == {'content': encoded_content, 'filename': file_name} | 5,356,000 |
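A minimal sketch of an `upload.to_scio_submit_post_data` implementation that would satisfy this test; the body below is an assumption inferred from the expected outputs (plain base64 of the file contents):
import base64
import io

def to_scio_submit_post_data(file_like, file_name):
    """Hypothetical implementation: base64-encode the file-like object's bytes."""
    encoded = base64.b64encode(file_like.read()).decode('ascii')
    return {'content': encoded, 'filename': file_name}

# Matches the expected values used in the test above.
assert to_scio_submit_post_data(io.BytesIO(b'test'), 'something.bin') == \
    {'content': 'dGVzdA==', 'filename': 'something.bin'}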
def fix_unused(model, signal):
"""Unused states decided MAP or viterbi usage"""
# model.algorithm = 'map'
# pred = model.predict(signal)
# usage = np.bincount(pred,minlength=model.n_components)
# threshold = np.sort(usage)[model.n_components//10]
#
# ids = np.argwhere(usage <= threshold).flatten()
# used = np.argwhere(usage > threshold).flatten()
# probs = usage/float(sum(usage))
"""Unused states decided on average state probability"""
logprob, posterior = model.score_samples(signal)
usage = np.sum(posterior.T,axis=1)
threshold = np.sort(usage)[model.n_components//10]
ids = np.argwhere(usage <= threshold).flatten()
used = np.argwhere(usage > threshold).flatten()
probs = usage/float(sum(usage))
ids = np.argwhere(probs <= 0.001).flatten()
used = np.argwhere(usage > 0.001).flatten()
mapped = {}
# model.algorithm = 'map'
import random
import sklearn.mixture
ids = ids[0:len(used)]
# ids = ids[0:model.n_components//10]
for id in ids:
# replace_id = np.random.choice(used)
# randomly select node to clone according to its "information weight"
# replace_id = np.random.choice(model.n_components,p=probs)
replace_id = random.choices(range(model.n_components),weights=probs)[0]
mapped[id] = [replace_id, int(probs[id]*1000)/1000, int(probs[replace_id]*1000)/1000, int(model.transmat_[replace_id,replace_id]*1000)/1000]
# if np.sum(model.transmat_[:,replace_id]) > 3:
# unroll tight self loop
if model.transmat_[replace_id,replace_id] > 0.1:
# cannot clone this state any more
probs[replace_id] = 0
probs[id] = probs[replace_id]
mapped[id].append('s')
in_trans = model.transmat_[:,id].copy()
model.transmat_[id,:] = model.transmat_[replace_id,:]
model.transmat_[replace_id,id] += model.transmat_[replace_id,replace_id]
model.transmat_[id,id] += model.transmat_[replace_id,replace_id]
model.transmat_[replace_id,replace_id] = 2e-290
# staying in the giver state is forbidden
# in place of that transit to cloned state
# model.transmat_[replace_id,id] += model.transmat_[replace_id,replace_id]
# model.transmat_[replace_id,replace_id] = 0.0001
utils.normalize(model.transmat_, 1)
model.startprob_[replace_id] /= 2.
model.startprob_[id] += model.startprob_[replace_id]
model.means_[id] = model.means_[replace_id]
# diverge them slightly to cover more ground
# model.means_[replace_id] *= 1.001
model._covars_[id] = model._covars_[replace_id]
#TODO: unroll longer loops
#refit to general node
# too many ins, too many outs, too large emission - coverage
elif random.random() > 0.5:
# lower prob of used node
# allow cloning of both
probs[replace_id] /= 2
probs[id] = probs[replace_id]
size = model.n_components
ord = np.random.binomial(1,0.5,model.n_components)
nord = 1 - ord
mapped[id].append('i')
in_trans = model.transmat_[:,id].copy()
# clone the not used node
# out transitions (row) like in original
model.transmat_[id,:] = model.transmat_[replace_id,:]
# in transitions (column) half for each of two (original and clone)
model.transmat_[:,id][ord == 1] = model.transmat_[:,replace_id][ord == 1]
model.transmat_[:,id][ord == 0] = 2e-290
model.transmat_[:,replace_id][ord == 1] = 2e-290
# original trans should be small, add to them to keep row normalization to 1
utils.normalize(model.transmat_, 1)
model.startprob_[replace_id] /= 2.
model.startprob_[id] += model.startprob_[replace_id]
model.means_[id] = model.means_[replace_id]
model._covars_[id] = model._covars_[replace_id]
else:
# lower prob of used node
# allow cloning of both
probs[replace_id] /= 2
probs[id] = probs[replace_id]
size = model.n_components
ord = np.random.binomial(1,0.5,model.n_components)
nord = 1 - ord
mapped[id].append('o')
in_trans = model.transmat_[:,id].copy()
# clone the not used node
# out transitions (row) like in original
model.transmat_[id,:][ord == 1] = model.transmat_[replace_id,:][ord == 1]
model.transmat_[id,:][ord == 0] = 2e-290
model.transmat_[replace_id,:][ord == 1] = 2e-290
# in transitions (column) half for each of two (original and clone)
model.transmat_[:,replace_id] /= 2.
model.transmat_[:,id] = in_trans/2. + model.transmat_[:,replace_id]
# model.transmat_[:,replace_id] += in_trans/2.
# original trans should be small, add to them to keep row normalization to 1
utils.normalize(model.transmat_, 1)
model.startprob_[replace_id] /= 2.
model.startprob_[id] += model.startprob_[replace_id]
model.means_[id] = model.means_[replace_id]
model._covars_[id] = model._covars_[replace_id]
print("fixed {} nodes of used {} and unused {}, with map {}".format(len(ids), len(used), model.n_components - len(used), mapped)) | 5,356,001 |
def search_transitions_in_freq_range(freq_min, freq_max, atomic_number,
atomic_mass, n_min=1, n_max=1000,
dn_min=1, dn_max=10, z=0.0,
screening=False, extendsearch=None):
"""
---------------------------------------------------------------------------
Search for electronic transitions of recombination lines at a specified
redshift that lie within the specified frequency range
Inputs:
freq_min [scalar] Minimum in the frequency range (Hz)
freq_max [scalar] Maximum in the frequency range (Hz)
atomic_number [integer] Atomic number of the atom. It is equal to the
number of protons in the nucleus. Must be positive and
greater than or equal to unity.
atomic_mass [integer] Atomic mass of the atom. It is equal to the sum
of the number of protons and neutrons in the nucleus. Must
be positive and greater than or equal to unity.
n_min [scalar] Minimum in the range of principal quantum numbers
of lower electron orbit to search for transitions.
Must be positive and greater than or equal to unity.
n_max [scalar] Maximum in the range of principal quantum numbers
of lower electron orbit to search for transitions.
Must be positive and greater than or equal to unity.
dn_min [scalar] Minimum in the range of difference in principal
quantum numbers to search for transitions. Must be positive
and greater than or equal to unity.
dn_max [scalar] Maximum in the range of difference in principal
quantum numbers to search for transitions. Must be positive
and greater than or equal to unity.
z [scalar or numpy array] The redshift (when positive) or
blueshift (when negative) by which the recombination lines
are shifted. Default=0
screening [boolean] If set to False (default), assume the effective
charge is equal to the number of protons. If set to True,
assume the charges from the nucleus are screened and the
effective nuclear charge is equal to unity.
extendsearch [None or dictionary] Specifies if the search should be
extended beyond the ranges for n and dn by calling this
function recursively. If set to None (default), the search
will not be extended. Otherwise, search will extend along n
and/or dn if in-range frequencies are found at the
specified boundaries of n and dn. This parameter must be
specified as a dictionary with the following keys and
values:
'n' [None or list] If set to None, do not extend search
for more values of n. Otherwise it must be a list
containing one or both of the strings 'up' and
'down'. If 'up' is present, extend search for
higher values of n from the previous iteration. If
'down' is present in the list, extend search for
values of n lower than specified in the range in
previous iteration.
'dn' [None or list] If set to None, do not extend search
for more values of dn. Otherwise it must be a list
containing one or both of the strings 'up' and
'down'. If 'up' is present, extend search for
higher values of dn from the previous iteration. If
'down' is present in the list, extend search for
values of dn lower than specified in the range in
previous iteration.
Output:
Tuple of (n, dn, freq) where each of the elements in the tuple is an array
such that the transitions of combinations of n and dn produces
recombination lines for a given redshift in the specified frequency range.
freq will be returned as an instance of class astropy.units.Quantity
---------------------------------------------------------------------------
"""
try:
freq_min, freq_max, atomic_number, atomic_mass
except NameError:
raise NameError('Inputs freq_min, freq_max, atomic_number, atomic_mass must be specified')
if not isinstance(n_min, int):
raise TypeError('Input n_min must be an integer')
if n_min < 1:
raise ValueError('Input n_min must be greater than or equal to 1')
if not isinstance(n_max, int):
raise TypeError('Input n_max must be an integer')
if n_max < n_min:
raise ValueError('Input n_max must be greater than or equal to n_min')
if not isinstance(dn_min, int):
raise TypeError('Input dn_min must be an integer')
if dn_min < 1:
raise ValueError('Input dn_min must be greater than or equal to 1')
if not isinstance(dn_max, int):
raise TypeError('Input dn_max must be an integer')
if dn_max < dn_min:
raise ValueError('Input dn_max must be greater than or equal to dn_min')
if not isinstance(z, (int,float)):
if isinstance(z, NP.ndarray):
if z.size != 1:
raise TypeError('Input z must be a scalar')
else:
raise TypeError('Input z must be a scalar')
if not isinstance(freq_min, (int,float,units.Quantity)):
raise TypeError('Input freq_min must be a scalar')
if not isinstance(freq_min, units.Quantity):
freq_min = freq_min * units.Hertz
if freq_min <= 0.0 * units.Hertz:
raise ValueError('Input freq_min must be positive')
if not isinstance(freq_max, (int,float,units.Quantity)):
raise TypeError('Input freq_max must be a scalar')
if not isinstance(freq_max, units.Quantity):
freq_max = freq_max * units.Hertz
if freq_max <= freq_min:
raise ValueError('Input freq_max must be greater than freq_min')
if extendsearch is not None:
if not isinstance(extendsearch, dict):
raise TypeError('Input extendsearch must be a dictionary')
for key in extendsearch:
if extendsearch[key] is not None:
if not isinstance(extendsearch[key], list):
raise TypeError('Value under key {0} of input dictionary extendsearch must be a list'.format(key))
nvect = NP.arange(n_min, n_max+1)
dnvect = NP.arange(dn_min, dn_max+1)
ngrid, dngrid = NP.meshgrid(nvect, dnvect, indexing='ij')
nu = redshifted_freq_recomb(atomic_number, atomic_mass, ngrid.reshape(-1), dngrid.reshape(-1), z=z, screening=screening)
nu = nu.reshape(nvect.size, dnvect.size, -1)
ind_select = NP.where(NP.logical_and(nu >= freq_min, nu <= freq_max))
nu_select = nu[ind_select]
n_select = ngrid[:,:,NP.newaxis][ind_select]
dn_select = dngrid[:,:,NP.newaxis][ind_select]
nu_in_range = None
n_in_range = None
dn_in_range = None
if nu_select.size > 0:
if nu_in_range is not None:
nu_in_range = units.Quantity(NP.concatenate((nu_in_range.value, nu_select.value)), nu_select.unit)
n_in_range = NP.concatenate((n_in_range, n_select))
dn_in_range = NP.concatenate((dn_in_range, dn_select))
else:
nu_in_range = nu_select.copy()
n_in_range = NP.copy(n_select)
dn_in_range = NP.copy(dn_select)
if extendsearch is not None:
new_extendsearch = None
for key in extendsearch:
if extendsearch[key] is not None:
if key == 'n':
if n_select.max() == n_max:
if 'up' in extendsearch[key]:
new_n_min = n_max + 1
new_n_max = 2 * n_max + 1 - n_min
if new_extendsearch is None:
new_extendsearch = {key: ['up']}
elif key not in new_extendsearch:
new_extendsearch[key] = ['up']
else:
new_extendsearch[key] += ['up']
new_n_select, new_dn_select, new_nu_select = search_transitions_in_freq_range(freq_min, freq_max, atomic_number, atomic_mass, n_min=new_n_min, n_max=new_n_max, dn_min=dn_min, dn_max=dn_max, z=z, screening=screening, extendsearch=new_extendsearch)
if new_nu_select.size > 0:
if nu_in_range is not None:
nu_in_range = units.Quantity(NP.concatenate((nu_in_range.value, new_nu_select.value)), new_nu_select.unit)
n_in_range = NP.concatenate((n_in_range, new_n_select))
dn_in_range = NP.concatenate((dn_in_range, new_dn_select))
else:
nu_in_range = new_nu_select.copy()
n_in_range = NP.copy(new_n_select)
dn_in_range = NP.copy(new_dn_select)
if n_select.min() == n_min:
if 'down' in extendsearch[key]:
if n_min > 1:
new_n_min = max([1, 2*n_min - n_max - 1])
new_n_max = n_max - 1
if new_extendsearch is None:
new_extendsearch = {key: ['down']}
elif key not in new_extendsearch:
new_extendsearch[key] = ['down']
else:
new_extendsearch[key] += ['down']
new_n_select, new_dn_select, new_nu_select = search_transitions_in_freq_range(freq_min, freq_max, atomic_number, atomic_mass, n_min=new_n_min, n_max=new_n_max, dn_min=dn_min, dn_max=dn_max, z=z, screening=screening, extendsearch=new_extendsearch)
if new_nu_select.size > 0:
if nu_in_range is not None:
nu_in_range = units.Quantity(NP.concatenate((new_nu_select.value, nu_in_range.value)), new_nu_select.unit)
n_in_range = NP.concatenate((new_n_select, n_in_range))
dn_in_range = NP.concatenate((new_dn_select, dn_in_range))
else:
nu_in_range = new_nu_select.copy()
n_in_range = NP.copy(new_n_select)
dn_in_range = NP.copy(new_dn_select)
if key == 'dn':
if dn_select.max() == dn_max:
if 'up' in extendsearch[key]:
new_dn_min = dn_max + 1
new_dn_max = 2 * dn_max + 1 - dn_min
if new_extendsearch is None:
new_extendsearch = {key: ['up']}
elif key not in new_extendsearch:
new_extendsearch[key] = ['up']
else:
new_extendsearch[key] += ['up']
new_n_select, new_dn_select, new_nu_select = search_transitions_in_freq_range(freq_min, freq_max, atomic_number, atomic_mass, n_min=n_min, n_max=n_max, dn_min=new_dn_min, dn_max=new_dn_max, z=z, screening=screening, extendsearch=new_extendsearch)
if new_nu_select.size > 0:
if nu_in_range is not None:
nu_in_range = units.Quantity(NP.concatenate((nu_in_range.value, new_nu_select.value)), new_nu_select.unit)
n_in_range = NP.concatenate((n_in_range, new_n_select))
dn_in_range = NP.concatenate((dn_in_range, new_dn_select))
else:
nu_in_range = new_nu_select.copy()
n_in_range = NP.copy(new_n_select)
dn_in_range = NP.copy(new_dn_select)
if dn_select.min() == dn_min:
if 'down' in extendsearch[key]:
if dn_min > 1:
new_dn_min = max([1, 2*dn_min - dn_max - 1])
new_dn_max = dn_max - 1
if new_extendsearch is None:
new_extendsearch = {key: ['down']}
elif key not in new_extendsearch:
new_extendsearch[key] = ['down']
else:
new_extendsearch[key] += ['down']
new_n_select, new_dn_select, new_nu_select = search_transitions_in_freq_range(freq_min, freq_max, atomic_number, atomic_mass, n_min=n_min, n_max=n_max, dn_min=new_dn_min, dn_max=new_dn_max, z=z, screening=screening, extendsearch=new_extendsearch)
if new_nu_select.size > 0:
if nu_in_range is not None:
nu_in_range = units.Quantity(NP.concatenate((new_nu_select.value, nu_in_range.value)), new_nu_select.unit)
n_in_range = NP.concatenate((new_n_select, n_in_range))
dn_in_range = NP.concatenate((new_dn_select, dn_in_range))
else:
nu_in_range = new_nu_select.copy()
n_in_range = NP.copy(new_n_select)
dn_in_range = NP.copy(new_dn_select)
return (n_in_range, dn_in_range, nu_in_range) | 5,356,002 |
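A hedged usage sketch (the argument values are illustrative, not taken from the original module): searching for hydrogen recombination lines in a narrow band around 1.4 GHz, extending the search along n if in-range hits sit on the boundary of the n range.
# Hydrogen: atomic_number=1, atomic_mass=1; plain floats are interpreted as Hz.
n, dn, freq = search_transitions_in_freq_range(
    freq_min=1.40e9, freq_max=1.45e9,
    atomic_number=1, atomic_mass=1,
    n_min=150, n_max=250, dn_min=1, dn_max=4,
    z=0.0, screening=False,
    extendsearch={'n': ['up', 'down'], 'dn': None})
# n and dn are integer arrays (one entry per in-range transition); freq is an astropy Quantity.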
async def test_async__rollback():
"""Should rollback basic async actions"""
state = {"counter": 0}
async def incr():
state["counter"] += 1
return state["counter"]
async def decr():
state["counter"] -= 1
async def fail():
raise ValueError("oops")
try:
with Saga() as saga:
counter = await saga.action(incr, decr)
assert counter == 1
counter = await saga.action(incr, decr)
assert counter == 2
await saga.action(fail, noop)
except SagaFailed as e:
assert state["counter"] == 0
assert e.transaction.name == "3"
assert e.__cause__.args == ("oops",) | 5,356,003 |
def _solve_checkpoint_challenge(_bot):
"""Solve the annoying checkpoint_challenge"""
# --- Start challenge
time.sleep(3)
challenge_url = _bot.last_json['challenge']['api_path'][1:]
try:
_bot.send_request(
challenge_url, None, login=True, with_signature=False)
except Exception as e:
_bot.logger.error(e)
return False
# --- Choose and send back the choice
# TODO: Sometimes ask to confirm phone or email.
# TODO: TESTS NEEDED
time.sleep(3)
choices = _get_challenge_choices(_bot.last_json)
for choice in choices:
print(choice)
code = input('Insert choice:\n')
data = json.dumps({'choice': code})
try:
_bot.send_request(challenge_url, data, login=True)
except Exception as e:
_bot.logger.error(e)
return False
# Print output for testing
_print_bot_last_state(_bot)
# --- Wait for the code, insert the code
time.sleep(3)
print("A code has been sent to the method selected, please check.")
code = input('Insert code:\n')
data = json.dumps({'security_code': code})
try:
_bot.send_request(challenge_url, data, login=True)
except Exception as e:
_bot.logger.error(e)
return False
# Print output for testing
_print_bot_last_state(_bot)
# --- If user logged in, save cookie, otherwise PASS
worked = (
('logged_in_user' in _bot.last_json)
and (_bot.last_json.get('action', '') == 'close')
and (_bot.last_json.get('status', '') == 'ok'))
if worked:
# IMPORTANT, save the cookie at this step!
_bot.save_cookie(COOKIE_FNAME)
return True
else:
_bot.logger.error('Not possible to log in. Reset and try again')
return False | 5,356,004 |
def write_to_csv(csv_name, df, append=True, index=True, sep=';'):
"""Create CSV file or append data to it.
Parameters
----------
csv_name : str
Name of file.
df : DataFrame
Data saved to file.
append : bool
If True (default) append to an existing CSV file, else create a new one.
index : bool
If False do not write the index into the CSV file.
sep : str
Separator to be used while writing the CSV. Semicolon ';' is the default.
"""
#if os.path.exists(os.path.dirname(csv_name)):
# os.remove(os.path.dirname(csv_name))
if append:
mode = 'a'
else:
mode = 'w'
if not os.path.exists(os.path.dirname(csv_name)):
os.makedirs(os.path.dirname(csv_name))
with open(csv_name, mode=mode, encoding='utf-8') as file:
df.to_csv(file, sep=sep,
mode=mode,
header=file.tell() == 0,
line_terminator='\n',
encoding='utf-8',
index=index
)
log.info(f'Write data to file: {csv_name} with append-mode={append}') | 5,356,005 |
def simplify(func, cfg):
"""
Simplify control flow. Merge consecutive blocks where the parent has one
child, the child one parent, and both have compatible instruction leaders.
"""
for block in reversed(list(func.blocks)):
if len(cfg.predecessors(block)) == 1 and not list(block.leaders):
[pred] = cfg.predecessors(block)
exc_block = any(op.opcode in ('exc_setup',) for op in pred.leaders)
if not exc_block and len(cfg[pred]) == 1:
merge_blocks(func, pred, block) | 5,356,006 |
def is_referenced(url, id, catalog_info):
"""Given the url of a resource from the catalog, this function returns True
if the resource is referenced by data.gouv.fr, False otherwise.
:param url: url of a resource in the catalog
:type url: string"""
dgf_page = catalog_info['url_dgf']
headers = requests.head(url).headers
downloadable = 'attachment' in headers.get('Content-Disposition', '')
if not downloadable:
raise Exception(f'This id is associated to a dataset not referenced by data.gouv.fr. \n '
f'Please download the dataset from here: {dgf_page}\n'
f'Then manually upload it in the corresponding folder and name it: {id}.csv')
return downloadable | 5,356,007 |
def tags_to_ascii(filepaths):
"""
Receives a list of mp3 files (by their paths) and updates all their tags, so
that name of the Artists, Album, Genre, etc. use only ASCII characters.
It also removes some unnecessary information like composer or comments. This is
a program to automate the process of adding music to my Garmin Watch, so I
don't need a whole lot of details about the track.
"""
# Iterate through all the music files and update their tags.
for filepath in filepaths:
audiofile = eyed3.load(filepath)
songtag = audiofile.tag
songtag.title = unidecode(songtag.title)
songtag.artist = unidecode(songtag.artist)
songtag.album = unidecode(songtag.album)
songtag.album_artist = unidecode(songtag.album_artist)
songtag.comments.set("Music for Gelin's Garmin Watch.")
songtag.save() | 5,356,008 |
def create_sema3d_datasets(args, test_seed_offset=0):
""" Gets training and test datasets. """
train_names = ['bildstein_station1', 'bildstein_station5', 'domfountain_station1', 'domfountain_station3', 'neugasse_station1', 'sg27_station1', 'sg27_station2', 'sg27_station5', 'sg27_station9', 'sg28_station4', 'untermaederbrunnen_station1']
valid_names = ['bildstein_station3', 'domfountain_station2', 'sg27_station4', 'untermaederbrunnen_station3']
#train_names = ['bildstein_station1', 'domfountain_station1', 'untermaederbrunnen_station1']
#valid_names = ['domfountain_station2', 'untermaederbrunnen_station3']
path = '{}/features_supervision/'.format(args.ROOT_PATH)
if args.db_train_name == 'train':
trainlist = [path + 'train/' + f + '.h5' for f in train_names]
elif args.db_train_name == 'trainval':
trainlist = [path + 'train/' + f + '.h5' for f in train_names + valid_names]
testlist = []
if 'train' in args.db_test_name:
testlist += [path + 'train/' + f + '.h5' for f in train_names]
if 'val' in args.db_test_name:
testlist += [path + 'train/' + f + '.h5' for f in valid_names]
if 'testred' in args.db_test_name:
testlist += [f for f in glob.glob(path + 'test_reduced/*.h5')]
if 'testfull' in args.db_test_name:
testlist += [f for f in glob.glob(path + 'test_full/*.h5')]
return tnt.dataset.ListDataset(trainlist,
functools.partial(graph_loader, train=True, args=args, db_path=args.ROOT_PATH)), \
tnt.dataset.ListDataset(testlist,
functools.partial(graph_loader, train=False, args=args, db_path=args.ROOT_PATH, full_cpu = True)) | 5,356,009 |
def adjust_learning_rate(optimizer, step, args):
"""
Sets the learning rate to the initial LR decayed by gamma
at every specified step/epoch
Adapted from PyTorch Imagenet example:
https://github.com/pytorch/examples/blob/master/imagenet/main.py
step could also be epoch
"""
schedule_list = np.array(args.schedule)
decay = args.gamma ** (sum(step >= schedule_list))
lr = args.lr * decay
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr | 5,356,010 |
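A short usage sketch of the decay arithmetic (the namespace and dummy optimizer below are illustrative assumptions): with schedule=[30, 60, 90], gamma=0.1 and lr=0.1, step 75 has passed two milestones, so the returned rate is 0.1 * 0.1**2 ≈ 0.001.
import torch
from types import SimpleNamespace

args = SimpleNamespace(schedule=[30, 60, 90], gamma=0.1, lr=0.1)
optimizer = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=args.lr)

lr = adjust_learning_rate(optimizer, step=75, args=args)
print(lr)  # ~0.001: two milestones passed, so decay = 0.1 ** 2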
def sigmoid(z):
"""sigmoid函数
"""
return 1.0/(1.0+np.exp(-z)) | 5,356,011 |
def worker_init_fn(worker_id):
"""Pytorch worker initialization function."""
np.random.seed(np.random.get_state()[1][0] + worker_id) | 5,356,012 |
def main(parseinfo):
"""Start a parser"""
parseengine = LiveParser()
parseengine.parse_file(parseinfo) | 5,356,013 |
def export_fixtures(app=None):
"""Export fixtures as JSON to `[app]/fixtures`"""
if app:
apps = [app]
else:
apps = frappe.get_installed_apps()
for app in apps:
for fixture in frappe.get_hooks("fixtures", app_name=app):
filters = None
or_filters = None
if isinstance(fixture, dict):
filters = fixture.get("filters")
or_filters = fixture.get("or_filters")
fixture = fixture.get("doctype") or fixture.get("dt")
print("Exporting {0} app {1} filters {2}".format(fixture, app, (filters if filters else or_filters)))
if not os.path.exists(frappe.get_app_path(app, "fixtures")):
os.mkdir(frappe.get_app_path(app, "fixtures"))
export_json(fixture, frappe.get_app_path(app, "fixtures", frappe.scrub(fixture) + ".json"),
filters=filters, or_filters=or_filters, order_by="idx asc, creation asc") | 5,356,014 |
def beamformerFreq(steerVecType, boolRemovedDiagOfCSM, normFactor, inputTupleSteer, inputTupleCsm):
""" Conventional beamformer in frequency domain. Use either a predefined
steering vector formulation (see Sarradj 2012) or pass your own
steering vector.
Parameters
----------
steerVecType : (one of the following strings: 'classic' (I), 'inverse' (II), 'true level' (III), 'true location' (IV), 'custom')
Either build the steering vector via the predefined formulations
I - IV (see :ref:`Sarradj, 2012<Sarradj2012>`) or pass it directly.
boolRemovedDiagOfCSM : bool
Should the diagonal of the csm be removed?
normFactor : float
Here both the signal energy loss factor (due to removal of the csm diagonal) as well as
beamforming algorithm (music, capon, ...) dependent normalization factors are handled.
inputTupleSteer : contains the information needed to create the steering vector. Is dependent of steerVecType. There are 2 cases:
steerVecType != 'custom' :
inputTupleSteer = (distGridToArrayCenter, distGridToAllMics, waveNumber) , with
distGridToArrayCenter : float64[nGridpoints]
Distance of all gridpoints to the center of sensor array
distGridToAllMics : float64[nGridpoints, nMics]
Distance of all gridpoints to all sensors of array
waveNumber : float64
The wave number
steerVecType == 'custom' :
inputTupleSteer = steeringVector , with
steeringVector : complex128[nGridPoints, nMics]
The steering vector of each gridpoint for the same frequency as the CSM
inputTupleCsm : contains the data of measurement as a tuple. There are 2 cases:
perform standard CSM-beamformer:
inputTupleCsm = csm
csm : complex128[ nMics, nMics]
The cross spectral matrix for one frequency
perform beamformer on eigenvalue decomposition of csm:
inputTupleCsm = (eigValues, eigVectors) , with
eigValues : float64[nEV]
nEV is the number of eigenvalues which should be taken into account.
All passed eigenvalues will be evaluated.
eigVectors : complex128[nMics, nEV]
Eigen vectors corresponding to eigValues. All passed eigenvector slices will be evaluated.
Returns
-------
*Autopower spectrum beamforming map [nGridPoints]
*steer normalization factor [nGridPoints]... contains the values the autopower needs to be multiplied with, in order to
fulfill 'steer^H * steer = 1' as needed for functional beamforming.
Some Notes on the optimization of all subroutines
-------------------------------------------------
Reducing beamforming equation:
Let the csm be C and the steering vector be h; then, using linear algebra, the conventional beamformer can be written as
.. math:: B = h^H \\cdot C \\cdot h,
with ^H meaning the complex conjugated transpose.
When using that C is a hermitian matrix one can reduce the equation to
.. math:: B = h^H \\cdot C_D \\cdot h + 2 \\cdot Real(h^H \\cdot C_U \\cdot h),
where C_D and C_U are the diagonal part and upper part of C respectively.
Steering vector:
Theoretically the steering vector always includes the term "exp(distMicsGrid - distArrayCenterGrid)",
but as the steering vector gets multiplied with its complex conjugation in all beamformer routines,
the constant "distArrayCenterGrid" cancels out --> In order to save operations, it is not implemented.
Spectral decomposition of the CSM:
In Linear Algebra the spectral decomposition of the CSM matrix would be:
.. math:: CSM = \\sum_{i=1}^{nEigenvalues} \\lambda_i (v_i \\cdot v_i^H) ,
where lambda_i is the i-th eigenvalue and
v_i is the eigenvector[nEigVal,1] belonging to lambda_i and ^H denotes the complex conjugate transpose.
Using this, one does not need to build the whole CSM (which would be time consuming), but can pull the
steering vector into the sum of the spectral decomp. This saves a lot of operations.
Squares:
Seemingly "a * a" is slightly faster than "a**2" in numba
Square of abs():
Even though "a.real**2 + a.imag**2" would have fewer operations, modern processors seem to be optimized
for "a * a.conj" and are slightly faster the latter way. Both Versions are much faster than "abs(a)**2".
Using Cascading Sums:
When using the Spectral-Decomposition-Beamformer one could use numpys cascading sums for the scalar product
"eigenVec.conj * steeringVector". BUT (at the moment) this only brings benefits in comp-time for a very
small range of nMics (approx. 250) --> Therefore it is not implemented here.
"""
boolIsEigValProb = isinstance(inputTupleCsm, tuple)# len(inputTupleCsm) > 1
# get the beamformer type (key-tuple = (isEigValProblem, formulationOfSteeringVector, RemovalOfCSMDiag))
beamformerDict = {(False, 'classic', False) : _freqBeamformer_Formulation1AkaClassic_FullCSM,
(False, 'classic', True) : _freqBeamformer_Formulation1AkaClassic_CsmRemovedDiag,
(False, 'inverse', False) : _freqBeamformer_Formulation2AkaInverse_FullCSM,
(False, 'inverse', True) : _freqBeamformer_Formulation2AkaInverse_CsmRemovedDiag,
(False, 'true level', False) : _freqBeamformer_Formulation3AkaTrueLevel_FullCSM,
(False, 'true level', True) : _freqBeamformer_Formulation3AkaTrueLevel_CsmRemovedDiag,
(False, 'true location', False) : _freqBeamformer_Formulation4AkaTrueLocation_FullCSM,
(False, 'true location', True) : _freqBeamformer_Formulation4AkaTrueLocation_CsmRemovedDiag,
(False, 'custom', False) : _freqBeamformer_SpecificSteerVec_FullCSM,
(False, 'custom', True) : _freqBeamformer_SpecificSteerVec_CsmRemovedDiag,
(True, 'classic', False) : _freqBeamformer_EigValProb_Formulation1AkaClassic_FullCSM,
(True, 'classic', True) : _freqBeamformer_EigValProb_Formulation1AkaClassic_CsmRemovedDiag,
(True, 'inverse', False) : _freqBeamformer_EigValProb_Formulation2AkaInverse_FullCSM,
(True, 'inverse', True) : _freqBeamformer_EigValProb_Formulation2AkaInverse_CsmRemovedDiag,
(True, 'true level', False) : _freqBeamformer_EigValProb_Formulation3AkaTrueLevel_FullCSM,
(True, 'true level', True) : _freqBeamformer_EigValProb_Formulation3AkaTrueLevel_CsmRemovedDiag,
(True, 'true location', False) : _freqBeamformer_EigValProb_Formulation4AkaTrueLocation_FullCSM,
(True, 'true location', True) : _freqBeamformer_EigValProb_Formulation4AkaTrueLocation_CsmRemovedDiag,
(True, 'custom', False) : _freqBeamformer_EigValProb_SpecificSteerVec_FullCSM,
(True, 'custom', True) : _freqBeamformer_EigValProb_SpecificSteerVec_CsmRemovedDiag}
coreFunc = beamformerDict[(boolIsEigValProb, steerVecType, boolRemovedDiagOfCSM)]
# prepare Input
if steerVecType == 'custom': # beamformer with custom steering vector
steerVec = inputTupleSteer
#nFreqs, nGridPoints = steerVec.shape[0], steerVec.shape[1]
nGridPoints = steerVec.shape[0]
else: # predefined beamformers (Formulation I - IV)
distGridToArrayCenter, distGridToAllMics, waveNumber = inputTupleSteer#[0], inputTupleSteer[1], inputTupleSteer[2]
if not isinstance(waveNumber, np.ndarray): waveNumber = np.array([waveNumber])
#nFreqs, nGridPoints = waveNumber.shape[0], distGridToAllMics.shape[0]
nGridPoints = distGridToAllMics.shape[0]
if boolIsEigValProb:
eigVal, eigVec = inputTupleCsm#[0], inputTupleCsm[1]
else:
csm = inputTupleCsm
# beamformer routine: parallelized over Gridpoints
beamformOutput = np.zeros(nGridPoints, np.float64)
steerNormalizeOutput = np.zeros_like(beamformOutput)
result = np.zeros(nGridPoints, np.float64)
normalHelp = np.zeros_like(result)
if steerVecType == 'custom': # beamformer with custom steering vector
if boolIsEigValProb:
coreFunc(eigVal, eigVec, steerVec, normFactor, result, normalHelp)
else:
coreFunc(csm, steerVec, normFactor, result, normalHelp)
else: # predefined beamformers (Formulation I - IV)
if boolIsEigValProb:
coreFunc(eigVal, eigVec, distGridToArrayCenter, distGridToAllMics, waveNumber, normFactor, result, normalHelp)
else:
coreFunc(csm, distGridToArrayCenter, distGridToAllMics, waveNumber, normFactor, result, normalHelp)
beamformOutput = result
steerNormalizeOutput = normalHelp
return beamformOutput, steerNormalizeOutput | 5,356,015 |
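A small stand-alone NumPy check (not part of the module) of the docstring's reduction for a Hermitian CSM: h^H · C · h equals h^H · C_D · h + 2 · Real(h^H · C_U · h), with C_D the diagonal and C_U the strictly upper-triangular part of C.
import numpy as np

rng = np.random.default_rng(0)
nMics = 8
A = rng.normal(size=(nMics, nMics)) + 1j * rng.normal(size=(nMics, nMics))
C = A @ A.conj().T                                  # Hermitian, CSM-like matrix
h = rng.normal(size=nMics) + 1j * rng.normal(size=nMics)

full = (h.conj() @ C @ h).real                      # h^H C h
C_D = np.diag(np.diag(C))
C_U = np.triu(C, k=1)
reduced = (h.conj() @ C_D @ h).real + 2.0 * (h.conj() @ C_U @ h).real
assert np.isclose(full, reduced)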
def createNewPY():
"""trans normal pinyin to TTS pinyin"""
py_trans = {}
input_pinyin_list = IO.readList(r'docs/transTTSPinyin.txt')
for line in input_pinyin_list:
line_array = line.split(',')
py_trans[line_array[0]] = line_array[1]
return py_trans | 5,356,016 |
def state_processing_do(app, cfg):
"""Blink smile display
"""
app.pimoroni_11x7.blink_scroll_display(interval=0.5) | 5,356,017 |
def search_wheelmap (lat, lng, interval, name, n):
"""Searches for a place which matches the given name in the
given coordinates range. Returns false if nothing found"""
# Calculate the bbox for the API call
from_lat = lat - interval
to_lat = lat + interval
from_lng = lng - interval
to_lng = lng + interval
# Remove parentheses (better for search, generally)
name = re.sub(r'\([^)]*\)', '', name)
wheelmap_client = wheelmap.Wheelmap(env['WHEELMAP_API_KEY'])
bbox= (from_lng, from_lat, to_lng, to_lat)
nodes = wheelmap_client.nodes_collection(bbox=bbox, per_page=n)
# max_node and max_name_match are holding the
# best match through the SequenceMatcher after the loop
max_name_match = 0.0
for node in nodes:
if node.name and name:
name_match = SequenceMatcher(None, node.name, name).ratio()
if name_match > max_name_match:
max_node = node
max_name_match = name_match
# Is the best match better than 60% ?
# If yes, let's take it. Otherwise nothing was found.
if max_name_match > 0.6:
return max_node
else:
return False | 5,356,018 |
def get_entity_contents(entity: Dict) -> Dict:
"""
:param entity: Entity is a dictionary
:return: A dict representation of the contents of entity
"""
return {
'ID': entity.get('id'),
'Name': entity.get('name'),
'EmailAddress': entity.get('email_address'),
'Organization': entity.get('organization'),
'Tags': entity.get('labels'),
'StrictNameMatching': entity.get('strict_name_matching'),
'PolicyID': entity.get('policy_id'),
'Profile': entity.get('profile'),
'EntityGroupID': entity.get('entity_group', {}).get('id') if entity.get('entity_group') else None,
'EntityGroupName': entity.get('entity_group', {}).get('name') if entity.get('entity_group') else None,
'TypeID': entity.get('type', {}).get('id') if entity.get('type') else None,
'TypeName': entity.get('type', {}).get('name') if entity.get('type') else None
} | 5,356,019 |
def t68tot90(t68):
"""Convert from IPTS-68 to ITS-90 temperature scales,
as specified in the CF Standard Name information for
sea_water_temperature
http://cfconventions.org/Data/cf-standard-names/27/build/cf-standard-name-table.html
temperatures are in degrees C"""
t90 = 0.99976 * t68
return t90 | 5,356,020 |
def get_sort_accuracy_together(fake_ys, y):
"""
Args:
fake_ys (np.ndarray): with shape (n_results, n_sample,).
y (np.ndarray): with shape (n_sample,).
Returns:
corr (np.ndarray): with shape (n_result,)
"""
y_sort = np.sort(y)
y_sort2 = np.sort(y)[::-1]
fake_ys = np.nan_to_num(fake_ys, nan=np.nan, posinf=np.nan, neginf=np.nan)
mark = np.any(np.isnan(fake_ys), axis=1)
fake_ys = np.nan_to_num(fake_ys, nan=-1, posinf=-1, neginf=-1)
index = np.argsort(fake_ys, axis=1)
y_pre_sort = y[index]
acc1 = 1 - np.mean(np.abs(y_pre_sort - y_sort), axis=1)
acc2 = 1 - np.mean(np.abs(y_pre_sort - y_sort2), axis=1)
score = np.max(np.concatenate((acc1.reshape(1, -1), acc2.reshape(1, -1)), axis=0), axis=0)
score[mark] = 0.0
return score | 5,356,021 |
def random_account_number():
"""
Generate random encoded account number for testing
"""
_, account_number = create_account()
return encode_verify_key(verify_key=account_number) | 5,356,022 |
def get_recommendation_summary_of_projects(project_ids, state, credentials):
"""Returns the summary of recommendations on all the given projects.
Args:
project_ids: List(str) of projects for which recommendations are needed.
state: state of recommendations
credentials: client credentials.
"""
recommender = build("recommender",
"v1",
credentials=credentials,
cache_discovery=False)
def get_metric(project_id):
recommendation_metric = common.get_recommendations(
project_id,
recommender=recommender,
state=state,
credentials=credentials)
return accounts_can_made_safe(project_id, state, recommendation_metric)
recommendation_stats = common.rate_limit_execution(get_metric, RATE_LIMIT,
project_ids)
recommendation_stats_sorted = sorted(
recommendation_stats, key=lambda metric: -sum(metric["stats"].values()))
return recommendation_stats_sorted | 5,356,023 |
def suppress_stdout():
"""
Suppress standard output and standard error messages.
"""
with open(os.devnull, "w") as devnull:
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = devnull
sys.stderr = devnull
try:
yield
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr | 5,356,024 |
def selection_filter(file_path):
"""
Return the feature subset obtained via filter-based selection
(f_classif, chi2, mutual_info_classif).
"""
df = pd.read_csv(file_path)
delete_list = ['id']
df.drop(delete_list, axis=1, inplace=True)
feature_attr = [i for i in df.columns if i not in ['label']]
df.fillna(0, inplace=True)
# Feature preprocessing
obj_attrs = []
for attr in feature_attr:
if df.dtypes[attr] == np.dtype(object): # collect categorical (object-dtype) columns
obj_attrs.append(attr)
if len(obj_attrs) > 0:
df = pd.get_dummies(df, columns=obj_attrs) # convert to dummy variables
y = df.label
X = df.drop('label', axis=1)
model = SelectKBest(f_classif, k=108)
X_new = model.fit_transform(X, y)
df_X_new = pd.DataFrame(X_new)
selected = []
for i in X.columns:
for j in df_X_new.columns:
if np.sum(np.abs(X[i].values - df_X_new[j].values)) == 0:
selected.append(i)
break
useful_list = sorted(set(X.columns.to_list()) - set(selected), key = X.columns.to_list().index)
print(useful_list)
selected.append('label')
return selected | 5,356,025 |
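The value-matching loop above recovers the selected column names indirectly; scikit-learn's SelectKBest also exposes this directly via get_support(), e.g. (a sketch assuming the same X and fitted model):
kept = X.columns[model.get_support()].tolist()      # selected feature names, in X's column order
dropped = X.columns[~model.get_support()].tolist()  # the complement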
def cnndm_eval(ratio):
"""Evaluation for the CNN/DailyMail dataset"""
for model_name in MODELS:
print("Evaluating \"{}\"".format(model_name))
print("=" * 20)
with open(str(RESULTS_DIR / "cnndm_use_{}_{}.pred".format(
model_name, int(ratio*100)))) as fin:
predictions = fin.read().split("\n")[:-1]
with open(str(DATA_DIR / "cnndm" / "test.txt.tgt.tagged")) as fin:
references = fin.read().replace("<t> ", "").replace(
"</t> ", "").split("\n")[:-1]
assert all([len(x) > 0 for x in predictions])
scores = Rouge().get_scores(predictions, references, avg=True)
pprint(scores) | 5,356,026 |
def setup():
"""
Install Supervisor and enable/disable configured programs
"""
install()
configure() | 5,356,027 |
def _process_voucher_data_for_order(cart):
"""Fetch, process and return voucher/discount data from cart."""
vouchers = Voucher.objects.active(date=date.today()).select_for_update()
voucher = get_voucher_for_cart(cart, vouchers)
if cart.voucher_code and not voucher:
msg = pgettext(
'Voucher not applicable',
'Voucher expired in meantime. Task placement aborted.')
raise NotApplicable(msg)
if not voucher:
return {}
increase_voucher_usage(voucher)
return {
'voucher': voucher,
'discount_amount': cart.discount_amount,
'discount_name': cart.discount_name,
'translated_discount_name': cart.translated_discount_name} | 5,356,028 |
def is_insertion(ref, alt):
"""Is alt an insertion w.r.t. ref?
Args:
ref: A string of the reference allele.
alt: A string of the alternative allele.
Returns:
True if alt is an insertion w.r.t. ref.
"""
return len(ref) < len(alt) | 5,356,029 |
def rm_network(c):
"""Destroy local test network."""
print('Stopping local test network and removing containers')
with c.cd('images'):
c.run('sudo docker-compose down -v', hide='stderr')
c.run('sudo rm -rf volumes/stellar-core/opt/stellar-core/buckets')
c.run('sudo rm -f volumes/stellar-core/opt/stellar-core/*.log')
c.run('sudo rm -rf volumes/stellar-core/tmp') | 5,356,030 |
def identify_fast_board(switches: int, drivers: int) -> Optional[FastIOBoard]:
"""Instantiate and return a FAST board capable of accommodating the given number of switches and drivers."""
if switches > 32 or drivers > 16:
return None
if switches > 16:
return None if drivers > 8 else FastIO3208()
if drivers <= 4:
return FastIO0804()
if switches <= 8:
return FastIO1616()
return None | 5,356,031 |
def show():
"""Show the registered controllers."""
Registry().show_controllers() | 5,356,032 |
def fit(
model_fn,
train_input_fn,
epochs,
verbose,
callbacks,
eval_input_fn,
class_weight,
steps_per_epoch,
validation_steps,
**kwargs
):
"""Trains networks using Keras models."""
log_parameters(logger)
# Train
save_callback = [
c for c in callbacks if isinstance(c, tf.keras.callbacks.ModelCheckpoint)
]
model_dir = None
if save_callback:
model_dir = save_callback[0].filepath
logger.info("Training a model in %s", model_dir)
model = model_fn()
history = model.fit(
x=train_input_fn(),
epochs=epochs,
verbose=max(verbose, 2),
callbacks=list(callbacks) if callbacks else None,
validation_data=None if eval_input_fn is None else eval_input_fn(),
class_weight=class_weight,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps,
)
click.echo(history.history)
if model_dir is not None:
with open(os.path.join(model_dir, "keras_fit_history.json"), "w") as f:
json.dump(history.history, f) | 5,356,033 |
def encode_hop_data(
short_channel_id: bytes, amt_to_forward: int, outgoing_cltv_value: int
) -> bytes:
"""Encode a legacy 'hop_data' payload to bytes
https://github.com/lightningnetwork/lightning-rfc/blob/master/04-onion-routing.md#legacy-hop_data-payload-format
:param short_channel_id: the short channel id this hop relates to
:param amt_to_forward: the amount to forward on this hop
:param outgoing_cltv_value: the outgoing cltv value to use for this hop
:return: the hop_data payload
"""
# Bolt #7: The hop_data format is identified by a single 0x00-byte length, for
# backward compatibility.
hop_data = struct.pack(config.be_u8, 0x00)
hop_data += short_channel_id
hop_data += struct.pack(config.be_u64, amt_to_forward)
hop_data += struct.pack(config.be_u32, outgoing_cltv_value)
# [12*byte:padding]
hop_data += b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
return hop_data | 5,356,034 |
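A hedged usage sketch; the `config.be_*` names are constants of the surrounding project, assumed here to be the usual big-endian struct formats ('>B', '>Q', '>I'), which makes the legacy payload 33 bytes long (realm + scid + u64 + u32 + 12 bytes of padding):
scid = bytes.fromhex("0000010000020003")            # illustrative 8-byte short_channel_id
payload = encode_hop_data(scid, amt_to_forward=50_000, outgoing_cltv_value=600_000)
assert len(payload) == 1 + 8 + 8 + 4 + 12           # 33 bytes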
def extract_all_patterns(game_state, action, mask, span):
""" Extracting the local forward model pattern for each cell of the grid's game-state and returning a numpy array
:param prev_game_state: game-state at time t
:param action: players action at time t
:param game_state: resulting game-state at time t+1
:param mask: square pattern mask (boolean array to mark which tiles should be included.
:param span: The span of the mask.
:return: np.ndarray of observed patterns
"""
data_set = np.zeros((game_state.shape[0]*game_state.shape[1], np.sum(mask)+1))
# only iterate over positions that were affected by the game state's changes
positions = [(x, y) for x in range(game_state.shape[0]) for y in range(game_state.shape[1])]
ext_game_state_grid = np.pad(game_state, span, "constant", constant_values=1)
for i, (x, y) in enumerate(positions):
el = ext_game_state_grid[span + x - span: span + x + span + 1, span + y - span: span + y + span + 1][mask].tolist()
el.append(action)
data_set[i, :] = el
return data_set | 5,356,035 |
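A hedged usage sketch: for a 3x3 grid with a full 3x3 mask and span=1, each of the nine cells contributes its padded 3x3 neighbourhood plus the action, so the returned array has shape (9, 10).
import numpy as np

game_state = np.arange(9).reshape(3, 3)
mask = np.ones((3, 3), dtype=bool)                  # keep every tile of the neighbourhood
patterns = extract_all_patterns(game_state, action=2, mask=mask, span=1)
print(patterns.shape)                               # (9, 10)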
def wrapper_subcavities(final_cavities, cav_of_interest, grid_min, grid_shape, cavities, code, out, sourcedir, list_ligands,
seeds_mindist = 3, merge_subcavs = True, minsize_subcavs = 50, min_contacts = 0.667, v = False,
printv = False, print_pphores_subcavs = False, export_subcavs = False, gridspace = 1.0, frame = None):
"""
Wraps transform_cav2im3d, find_subcav_watershed, map_subcav_in_cav
merge_small_enclosed_subcavs, print_subcavs_pphores and export_pdb_subcavities
as one function
"""
# Convert to a 3D image for skimage
im3d = transform_cav2im3d(final_cavities[cav_of_interest], grid_min,
grid_shape) #filtered_pharma[order][cav_of_interest])
# Perform the watershed algorithm, including entropy of pharmacophores
labels = find_subcav_watershed(im3d, seeds_mindist)
# Map results of watershed to grid points of cavity
#subcavs = map_subcav_in_cav(cavities, cav_of_interest, labels, args.code, grid_min, grid_shape)
subcavs = map_subcav_in_cav(labels, grid_min)
if merge_subcavs == True:
subcavs = merge_small_enclosed_subcavs(subcavs, minsize_subcavs = minsize_subcavs,
min_contacts = min_contacts, v = v)
subcavs_table = print_subcavs_pphores(cavities, subcavs, cav_of_interest, code, grid_min, grid_shape, frame)
# Export
if export_subcavs:
try:
os.mkdir(out)
except:
pass
if frame:
export_pdb_subcavities(subcavs, code[:-4]+"_"+str(frame), grid_min, grid_shape,
cavid = cav_of_interest, gridspace = gridspace, outdir = out,
listlig = list_ligands, oridir = sourcedir)
else:
export_pdb_subcavities(subcavs, code[:-4], grid_min, grid_shape,
cavid = cav_of_interest, gridspace = gridspace, outdir = out,
listlig = list_ligands, oridir = sourcedir)
return subcavs_table | 5,356,036 |
def generate_datafile(lists_of_systems, output_dir, filename):
"""
take in a list of lists which contains systems
generate one input data file per list
"""
result = []
for index, list_of_sys in enumerate(lists_of_systems):
output_filename = filename + "_" + str(index) + ".xml"
output_file = os.path.join(output_dir, output_filename)
fd = file_Utils.open_file(output_file, "w+")
if fd is not None:
root = xml_Utils.create_element("root")
for system in list_of_sys:
root.append(system)
fd.write(xml_Utils.convert_element_to_string(root))
result.append(output_file)
return result | 5,356,037 |
def cond(*args, **kwargs):
"""Conditional computation to run on accelerators."""
return backend()['cond'](*args, **kwargs) | 5,356,038 |
def get_testfile_paths():
"""
return the necessary paths for the testfile tests
Returns
-------
str
absolute file path to the test file
str
absolute folder path to the expected output folder
"""
testfile = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'test_data', '0009_20170523_181119_FA2806.all')
expected_output = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'test_data', 'converted')
return testfile, expected_output | 5,356,039 |
def singleton(cls):
"""Decorator that provides singleton functionality.
>>> @singleton
... class Foo(object):
... pass
...
>>> a = Foo()
>>> b = Foo()
>>> a is b
True
"""
_inst = [None]
def decorated(*args, **kwargs):
if _inst[0] is None:
_inst[0] = cls(*args, **kwargs)
return _inst[0]
return decorated | 5,356,040 |
def _with_factory(make_makers):
"""Return a decorator for test methods or classes.
Args:
make_makers (callable): Return an iterable over (name, maker) pairs,
where maker (callable): Return a fixture (arbitrary object) given
Factory as single argument
"""
def wrap(test_func):
def wrapper(self, *args, **kwargs):
factory = make_factory(
self.addCleanup, test=self, root=None, makers=make_makers())
return test_func(self, factory, *args, **kwargs)
return wrapper
def deco(test_func_or_class):
if inspect.isclass(test_func_or_class):
class_ = test_func_or_class
for name, method in inspect.getmembers(class_, is_test_method):
wrapped_method = wrap(method)
setattr(class_, name, wrapped_method)
return class_
else:
method = test_func_or_class
return wrap(method)
return deco | 5,356,041 |
def timeItDeco(func):
""" Decorator which times the given function. """
def timing(*args, **kwargs):
""" This function will replace the original function. """
# Start the clock
t1 = time.perf_counter()
# Run the original function and collect results
result = func(*args, **kwargs)
# Print out the execution time
print('Execution time', time.perf_counter() - t1)
return result
# Return the function that was modified
return timing | 5,356,042 |
def apply_haste(self: Player, target: Player, rules: dict, left: bool) -> EffectReturn:
"""
Apply the effects of haste to the target:
attack beats attack
"""
# "attack": {"beats": ["disrupt", "area", "attack"], "loses": ["block", "dodge"]}
if left:
# Remove attack from the attack: loses dict
if "attack" in rules["attack"]["loses"]:
rules["attack"]["loses"].remove("attack")
# Add attack to the attack: beats dict
if "attack" not in rules["attack"]["beats"]:
rules["attack"]["beats"].append("attack")
# "attack": {"beats": ["disrupt", "area"], "loses": ["block", "dodge", "attack"]}
else:
# Remove attack from the attack: beats dict
if "attack" in rules["attack"]["beats"]:
rules["attack"]["beats"].remove("attack")
# Add attack to the attack: loses dict
if "attack" not in rules["attack"]["loses"]:
rules["attack"]["loses"].append("attack")
return self, target, rules | 5,356,043 |
def get_mean_cube(datasets):
"""Get mean cube of a list of datasets.
Parameters
----------
datasets : list of dict
List of datasets (given as metadata :obj:`dict`).
Returns
-------
iris.cube.Cube
Mean cube.
"""
cubes = iris.cube.CubeList()
for dataset in datasets:
path = dataset['filename']
cube = iris.load_cube(path)
prepare_cube_for_merging(cube, path)
cubes.append(cube)
mean_cube = cubes.merge_cube()
if len(cubes) > 1:
mean_cube = mean_cube.collapsed(['cube_label'], iris.analysis.MEAN)
mean_cube.remove_coord('cube_label')
return mean_cube | 5,356,044 |
def main(args=None):
"""
Main function
:param args:
:return:
"""
# get parser args
if args is None:
args = None if sys.argv[1:] else ['--help']
parser = get_parser()
arguments = parser.parse_args(args=args)
param = Param()
param.fname_data = os.path.abspath(arguments.i)
if arguments.p is not None:
param.process = (arguments.p).split(',')
if param.process[0] not in param.process_list:
sct.printv(parser.usage.generate(error='ERROR: Process ' + param.process[0] + ' is not recognized.'))
if arguments.size is not None:
param.size = arguments.size
if arguments.f is not None:
param.shape = arguments.f
if arguments.o is not None:
param.fname_out = os.path.abspath(arguments.o)
if arguments.r is not None:
param.remove_temp_files = arguments.r
param.verbose = arguments.v
sct.init_sct(log_level=param.verbose, update=True) # Update log level
# run main program
create_mask(param) | 5,356,045 |
async def gen_unique_chk_sum(phone, message, first_dial):
"""Generates a checksum in order to identify every single call"""
return blake2b(
bytes(phone, encoding="utf-8")
+ bytes(message, encoding="utf-8")
+ bytes(str(first_dial), encoding="utf-8"),
digest_size=4,
).hexdigest() | 5,356,046 |
def test_quant_maxpool2d_argmax():
""" Testing """
t_input = get_input_data()
q_input, input_scale, input_zero_point = get_quant_data(t_input.numpy())
pool_size = (2, 2)
strides = (2, 2)
padding = (0, 0)
torch_out, _ = fn.max_pool2d_with_indices(
t_input, kernel_size=2, stride=2, padding=0, ceil_mode=True
)
_, output_scale, output_zero_point = get_quant_data(torch_out)
q_model = quant_maxpool2d_argmax_model(
input_scale, input_zero_point, output_scale, output_zero_point, pool_size, strides, padding
)
q_out = run_model(q_model, q_input)
tvm_out = dequant_data(q_out, output_scale, output_zero_point)
assert np.allclose(torch_out, tvm_out, atol=input_scale) | 5,356,047 |
def getQtipResults(version, installer):
"""
Get QTIP results
"""
period = get_config('qtip.period')
url_base = get_config('testapi.url')
url = ("http://" + url_base + "?project=qtip" +
"&installer=" + installer +
"&version=" + version + "&period=" + str(period))
request = Request(url)
try:
response = urlopen(request)
k = response.read()
response.close()
results = json.loads(k)['results']
except URLError as err:
print 'Got an error code: {}'.format(err)
result_dict = {}
if results:
for r in results:
key = '{}/{}'.format(r['pod_name'], r['scenario'])
if key not in result_dict.keys():
result_dict[key] = []
result_dict[key].append(r['details']['score'])
# return scenario_results
return result_dict | 5,356,048 |
def scaled_softplus(x, alpha, name=None):
"""Returns `alpha * ln(1 + exp(x / alpha))`, for scalar `alpha > 0`.
This can be seen as a softplus applied to the scaled input, with the output
appropriately scaled. As `alpha` tends to 0, `scaled_softplus(x, alpha)` tends
to `relu(x)`.
Note: the gradient for this operation is defined to depend on the backprop
inputs as well as the outputs of this operation.
Args:
x: A `Tensor` of inputs.
alpha: A scalar `Tensor`, indicating the amount of smoothness. The caller
must ensure that `alpha > 0`.
name: A name for the scope of the operations (optional).
Returns:
A tensor of same size and type as `x`.
"""
with ops.name_scope(name, 'scaled_softplus', [x, alpha]):
x = ops.convert_to_tensor(x, name='x')
dtype = x.dtype
alpha = ops.convert_to_tensor(alpha, dtype=dtype, name='alpha')
# Verify that alpha is a scalar.
alpha.get_shape().assert_has_rank(0)
def _grad(op, g):
"""Backprop for scaled softplus."""
y = op.outputs[0]
alpha = op.inputs[1]
# Prevent the expensive computations from happening before g is available.
with ops.control_dependencies([g]):
y /= alpha
emy = math_ops.exp(-y)
dy_dx = 1. - emy
# The eps below avoids log(0). Note that t*log(t) -> 0 as t->0.
eps = 1e-8
dy_dalpha = y * emy - dy_dx * math_ops.log(dy_dx + eps)
return g * dy_dx, math_ops.reduce_sum(g * dy_dalpha)
@function.Defun(dtype, dtype,
func_name='ScaledSoftplus_%s' % dtype.name,
shape_func=lambda op: [op.inputs[0].get_shape()],
python_grad_func=_grad)
def _forward(x, alpha):
"""Forward computation of scaled softplus."""
return alpha * nn.softplus(x / alpha)
return _forward(x, alpha) | 5,356,049 |
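A quick stand-alone NumPy illustration of the documented limit: alpha * ln(1 + exp(x / alpha)) approaches relu(x) as alpha tends to 0 (using the numerically stable rewrite max(x, 0) + alpha * log1p(exp(-|x| / alpha))).
import numpy as np

def scaled_softplus_np(x, alpha):
    # Stable form of alpha * ln(1 + exp(x / alpha)).
    return np.maximum(x, 0.0) + alpha * np.log1p(np.exp(-np.abs(x) / alpha))

x = np.array([-3.0, -0.5, 0.0, 0.5, 3.0])
relu = np.maximum(x, 0.0)
for alpha in (1.0, 0.1, 0.01):
    gap = np.max(np.abs(scaled_softplus_np(x, alpha) - relu))
    print(alpha, gap)   # the largest gap, alpha * ln(2) at x = 0, vanishes with alpha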
def get_choice(options):
"""Devuelve como entero la opcion seleccionada para el input con mensaje message"""
print(options)
try:
return int(input("Por favor, escoja una opción: "))
except ValueError:
return 0 | 5,356,050 |
def _listminus(list1, list2):
"""
"""
return [a for a in list1 if a not in list2] | 5,356,051 |
def pdf_to_hocr(path, lang="fra+deu+ita+eng", config="--psm 4"):
"""Loads and transform a pdf into an hOCR file.
Parameters
----------
path : str, required
The pdf's path
lang: str, optional (default="fra+deu+ita+eng")
Supported languages for pytesseract.
config: str, optional (default = "--psm 4")
Custom configuration flag used by Tesseract
"""
try:
import pytesseract
from pdf2image import convert_from_bytes
except ImportError:
logger.error(
"pytesseract and pdf2image have to be installed to use this function\n run `pip install -U pytesseract pdf2image`"
)
return
with open(path, "rb") as f:
images = convert_from_bytes(f.read(), dpi=300)
return images_to_hocr(images) | 5,356,052 |
async def fetch_cart_response(cart_id: str) -> httpx.Response:
"""Fetches cart response."""
headers = await get_headers()
async with httpx.AsyncClient(base_url=CART_BASE_URL) as client:
response = await client.get(
url=f'/{cart_id}',
headers=headers,
)
try:
response.raise_for_status()
except httpx.HTTPStatusError:
raise MoltinError(response.json()) # type: ignore
return response | 5,356,053 |
def checkHardware(binary, silent=False, transaction=None):
"""
probe caffe continuously for incrementing until missing id
structure:
[
{ "id": 0,
"name": "..",
"log": ["..", "..", "..", ... ]
},
{ "id": 1,
"name": "..",
"log": ["..", "..", "..", ... ]
},
...
]
"""
gid = 0
hw = []
if not silent:
stdout.write("Checking Hardware...\n")
logging.info("Checking Hardware...")
cpu = _getCPU()
name = _getCPUName(cpu)
hw.append({"name": name, "log": cpu})
if not silent:
stdout.write("CPU found: " + name + "\n")
logging.info("CPU found: %s", name)
if transaction:
msg = {"key": Protocol.SCANHARDWARE, "finished": False, "name": name}
transaction.send(msg)
while True:
log = _getId(gid, binary)
if not _isValid(log) or _isCpuOnly(log):
if not silent and gid == 0:
stdout.write("No GPU found, CPU mode\n")
logging.info("No GPU found, CPU mode")
break
name = _getName(log)
if not silent:
stdout.write("GPU " + str(gid) + " found: " + name + "\n")
if transaction:
msg = {"key": Protocol.SCANHARDWARE, "finished": False, "name": name, "id": gid}
transaction.send(msg)
hw.append({"id": gid, "name": name, "log": _parseLog(log)})
gid += 1
return hw | 5,356,054 |
def is_namespace_mutable(context, namespace):
"""Return True if the namespace is mutable in this context."""
if context.is_admin:
return True
if context.owner is None:
return False
return namespace.owner == context.owner | 5,356,055 |
def get_schularten_by_veranst_iq_id(veranst_iq_id):
""" liefert die Liste der zu der Veranstaltung veranst_iq_id passenden Schularten """
query = session.query(Veranstaltung).add_entity(Schulart).join('rel_schulart')
query = query.reset_joinpoint()
query = query.filter_by(veranst_iq_id=veranst_iq_id)
return query.all() | 5,356,056 |
def get_station_freqs(df, method='median'):
"""
apply to df after applying group_by_days and group_by_station
"""
#df['DATE'] = df.index.get_level_values('DATE')
df['DAY'] = [d.dayofweek for d in df.index.get_level_values('DATE')]
df['DAYNAME'] = [d.day_name() for d in df.index.get_level_values('DATE')]
return df.groupby(['STATION', 'DAY','DAYNAME']).agg({'INS':method, 'OUTS':method}) | 5,356,057 |
def faster_symbol_array(genome, symbol):
"""A faster calculation method for counting a symbol in genome.
Args:
genome (str): a DNA string as the search space.
symbol (str): the single base to query in the search space.
Returns:
dict: position-count pairs of the symbol in each sliding window over genome.
Examples:
The symbol array for genome equal to "AAAAGGGG" and symbol equal to "A".
>>> genome = 'AAAAGGGG'
>>> symbol = 'A'
>>> position_symbolcount_dict = faster_symbol_array(genome, symbol)
>>> position_symbolcount_dict
{0: 4, 1: 3, 2: 2, 3: 1, 4: 0, 5: 1, 6: 2, 7: 3}
"""
array = {}
n = len(genome)
extended_genome = genome + genome[0:n//2]
# look at the first half of Genome to compute first array value
array[0] = pattern_count(symbol, genome[0:n//2])
for i in range(1, n):
# start by setting the current array value equal to the previous array value
array[i] = array[i-1]
# the current array value can differ from the previous array value by at most 1
if extended_genome[i-1] == symbol:
array[i] = array[i]-1
if extended_genome[i+(n//2)-1] == symbol:
array[i] = array[i]+1
return array | 5,356,058 |
def crosswalk_patient_id(user):
""" Get patient/id from Crosswalk for user """
logger.debug("\ncrosswalk_patient_id User:%s" % user)
try:
patient = Crosswalk.objects.get(user=user)
if patient.fhir_id:
return patient.fhir_id
except Crosswalk.DoesNotExist:
pass
return None | 5,356,059 |
def makeArg(segID: int, N, CA, C, O, geo: ArgGeo) -> Residue:
"""Creates an Arginie residue"""
##R-Group
CA_CB_length = geo.CA_CB_length
C_CA_CB_angle = geo.C_CA_CB_angle
N_C_CA_CB_diangle = geo.N_C_CA_CB_diangle
CB_CG_length = geo.CB_CG_length
CA_CB_CG_angle = geo.CA_CB_CG_angle
N_CA_CB_CG_diangle = geo.N_CA_CB_CG_diangle
CG_CD_length = geo.CG_CD_length
CB_CG_CD_angle = geo.CB_CG_CD_angle
CA_CB_CG_CD_diangle = geo.CA_CB_CG_CD_diangle
CD_NE_length = geo.CD_NE_length
CG_CD_NE_angle = geo.CG_CD_NE_angle
CB_CG_CD_NE_diangle = geo.CB_CG_CD_NE_diangle
NE_CZ_length = geo.NE_CZ_length
CD_NE_CZ_angle = geo.CD_NE_CZ_angle
CG_CD_NE_CZ_diangle = geo.CG_CD_NE_CZ_diangle
CZ_NH1_length = geo.CZ_NH1_length
NE_CZ_NH1_angle = geo.NE_CZ_NH1_angle
CD_NE_CZ_NH1_diangle = geo.CD_NE_CZ_NH1_diangle
CZ_NH2_length = geo.CZ_NH2_length
NE_CZ_NH2_angle = geo.NE_CZ_NH2_angle
CD_NE_CZ_NH2_diangle = geo.CD_NE_CZ_NH2_diangle
carbon_b = calculateCoordinates(
N, C, CA, CA_CB_length, C_CA_CB_angle, N_C_CA_CB_diangle
)
CB = Atom("CB", carbon_b, 0.0, 1.0, " ", " CB", 0, "C")
carbon_g = calculateCoordinates(
N, CA, CB, CB_CG_length, CA_CB_CG_angle, N_CA_CB_CG_diangle
)
CG = Atom("CG", carbon_g, 0.0, 1.0, " ", " CG", 0, "C")
carbon_d = calculateCoordinates(
CA, CB, CG, CG_CD_length, CB_CG_CD_angle, CA_CB_CG_CD_diangle
)
CD = Atom("CD", carbon_d, 0.0, 1.0, " ", " CD", 0, "C")
nitrogen_e = calculateCoordinates(
CB, CG, CD, CD_NE_length, CG_CD_NE_angle, CB_CG_CD_NE_diangle
)
NE = Atom("NE", nitrogen_e, 0.0, 1.0, " ", " NE", 0, "N")
carbon_z = calculateCoordinates(
CG, CD, NE, NE_CZ_length, CD_NE_CZ_angle, CG_CD_NE_CZ_diangle
)
CZ = Atom("CZ", carbon_z, 0.0, 1.0, " ", " CZ", 0, "C")
nitrogen_h1 = calculateCoordinates(
CD, NE, CZ, CZ_NH1_length, NE_CZ_NH1_angle, CD_NE_CZ_NH1_diangle
)
NH1 = Atom("NH1", nitrogen_h1, 0.0, 1.0, " ", " NH1", 0, "N")
nitrogen_h2 = calculateCoordinates(
CD, NE, CZ, CZ_NH2_length, NE_CZ_NH2_angle, CD_NE_CZ_NH2_diangle
)
NH2 = Atom("NH2", nitrogen_h2, 0.0, 1.0, " ", " NH2", 0, "N")
res = Residue((" ", segID, " "), "ARG", " ")
res.add(N)
res.add(CA)
res.add(C)
res.add(O)
res.add(CB)
res.add(CG)
res.add(CD)
res.add(NE)
res.add(CZ)
res.add(NH1)
res.add(NH2)
return res | 5,356,060 |
def uploadAssignment(req, courseId, assignmentId, archiveFile):
""" Saves a temp file of the uploaded archive and calls
vmchecker.submit.submit method to put the homework in
the testing queue"""
websutil.sanityCheckAssignmentId(assignmentId)
websutil.sanityCheckCourseId(courseId)
# Check permission
req.content_type = 'text/html'
s = Session.Session(req)
if s.is_new():
s.invalidate()
return json.dumps({'errorType':websutil.ERR_AUTH,
'errorMessage':"",
'errorTrace':""})
strout = websutil.OutputString()
try:
s.load()
username = s['username']
except:
traceback.print_exc(file = strout)
return json.dumps({'errorType':websutil.ERR_EXCEPTION,
'errorMessage':"",
'errorTrace':strout.get()})
# Reset the timeout
s.save()
if not hasattr(archiveFile, "filename") or \
archiveFile.filename == None:
return json.dumps({'errorType':websutil.ERR_OTHER,
'errorMessage':"File not uploaded.",
'errorTrace':""})
# Save file in a temp
(fd, tmpname) = tempfile.mkstemp('.zip')
f = open(tmpname, 'wb', 10000)
## Read the file in chunks
for chunk in websutil.fbuffer(archiveFile.file):
f.write(chunk)
f.close()
os.close(fd)
# Call submit.py
## Redirect stdout to catch logging messages from submit
strout = websutil.OutputString()
sys.stdout = strout
try:
submit.submit(tmpname, assignmentId, username, courseId)
update_db.update_grades(courseId, user=username, assignment=assignmentId)
except submit.SubmittedTooSoonError:
traceback.print_exc(file = strout)
return json.dumps({'errorType':websutil.ERR_EXCEPTION,
'errorMessage':"The assignment was submitted too soon",
'errorTrace':strout.get()})
except submit.SubmittedTooLateError:
traceback.print_exc(file = strout)
return json.dumps({'errorType':websutil.ERR_EXCEPTION,
'errorMessage':"The assignment was submitted too late",
'errorTrace':strout.get()})
except:
traceback.print_exc(file = strout)
return json.dumps({'errorType':websutil.ERR_EXCEPTION,
'errorMessage':"",
'errorTrace':strout.get()})
return json.dumps({'status':True,
'dumpLog':strout.get(),
'file': tmpname}) | 5,356,061 |
def apply_tags(datasets, tags):
"""
Modify datasets using the tags system
Parameters
----------
datasets : PickleableTinyDB
Datasets to modify
tags : dict
Dictionary of {tag: update_dict}
Returns
-------
PickleableTinyDB
Notes
-----
In general, everything replaces or is additive. We use the following update rules:
1. If the update value is a list, extend the existing list (empty list if key does not exist)
2. If the update value is scalar, override the previous (deleting any old value, if present)
    3. If the update value is a dict, update the existing dict (empty dict if dict does not exist)
4. Otherwise, the value is updated, overriding the previous
Examples
--------
>>> from espei.utils import PickleableTinyDB
>>> from tinydb.storages import MemoryStorage
>>> ds = PickleableTinyDB(storage=MemoryStorage)
>>> doc_id = ds.insert({'tags': ['dft'], 'excluded_model_contributions': ['contrib']})
>>> my_tags = {'dft': {'excluded_model_contributions': ['idmix', 'mag'], 'weight': 5.0}}
>>> from espei.datasets import apply_tags
>>> apply_tags(ds, my_tags)
>>> all_data = ds.all()
>>> all(d['excluded_model_contributions'] == ['contrib', 'idmix', 'mag'] for d in all_data)
True
>>> all(d['weight'] == 5.0 for d in all_data)
True
"""
for tag, update_dict in tags.items():
matching_datasets = datasets.search(where("tags").test(lambda x: tag in x))
for newkey, newval in update_dict.items():
for match in matching_datasets:
if isinstance(newval, list):
match[newkey] = match.get(newkey, []) + newval
elif np.isscalar(newval):
match[newkey] = newval
elif isinstance(newval, dict):
d = match.get(newkey, dict())
d.update(newval)
match[newkey] = d
else:
match[newkey] = newval
datasets.write_back(matching_datasets) | 5,356,062 |
def shows_monthly_aggregate_score_heatmap():
"""Monthly Aggregate Score Heatmap Graph"""
database_connection.reconnect()
all_scores = show_scores.retrieve_monthly_aggregate_scores(database_connection)
if not all_scores:
return render_template("shows/monthly-aggregate-score-heatmap/graph.html",
years=None,
scores=None)
scores_list = []
years = list(all_scores.keys())
for year in all_scores:
scores_list.append(list(all_scores[year].values()))
return render_template("shows/monthly-aggregate-score-heatmap/graph.html",
years=years,
scores=scores_list) | 5,356,063 |
def update_logs(user_id,answers,deck):
""" update DB logs
    Parameters:
    -----------
    user_id : id of the user whose answer history is stored
    answers : list of answer dicts ("question", "answer", "number_attempts", "timestamp", "timer")
    deck : deck object whose json maps each question to its expected answer
    Returns:
    --------
    None
    Modify:
    -------
    inserts one row per answer into the 'history' table
"""
con = sqlite3.connect(db_path)
cursor = con.cursor()
cursor.execute('PRAGMA encoding="UTF-8";')
# [{"question":"2 · 7","answer":"14","number_attempts":"0","timestamp":"1601660293044","timer":"9"}
many_inserts = []
logger.warning("update logs for user id: %s",user_id)
for a in answers:
ts = int(int(a["timestamp"])/1000)
answer = a["answer"]
expected = deck.json[a["question"]]["answer"]
logger.warning("expected: %s",expected)
time_to_answer = a["timer"]
number_attempts = a["number_attempts"]
ttta = 15 #target time to answer
qid = 0
uid = str(user_id)
many_inserts.append((ts, answer,expected, time_to_answer, ttta,number_attempts,qid,uid))
logger.warning("many inserts")
logger.warning(many_inserts)
cursor.executemany(" INSERT INTO 'history' values ( ?, ?, ?, ?, ?, ?, ?, ? ); ", many_inserts )
con.commit()
con.close()
logging.info("db update done") | 5,356,064 |
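# A minimal standalone sketch of the parameterized executemany pattern used in
# update_logs, with an in-memory database and a simplified, assumed table layout
# (the real 'history' schema is not shown in this snippet).
import sqlite3

con = sqlite3.connect(":memory:")
cur = con.cursor()
cur.execute("CREATE TABLE history (ts INTEGER, answer TEXT, expected TEXT)")
rows = [(1601660293, "14", "14"), (1601660300, "15", "14")]
cur.executemany("INSERT INTO history VALUES (?, ?, ?)", rows)
con.commit()
print(cur.execute("SELECT COUNT(*) FROM history").fetchone()[0])  # 2
con.close()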
def run(string, entities):
"""Call a url to create a api in github"""
# db = utils.db()['db']
# query = utils.db()['query']
# operations = utils.db()['operations']
# apikey = utils.config('api_key')
# playlistid = utils.config('playlist_id')
# https://developers.google.com/youtube/v3/docs/playlistItems/list
# url = 'https://www.googleapis.com/youtube/v3/playlistItems?part=snippet&maxResults=50&playlistId=' + playlistid + '&key=' + apikey
nombreapi = ''
nombredata = ''
result = ''
for item in entities:
if item['entity'] == 'elapi':
nombreapi = item['sourceText'].lower()
for item in entities:
if item['entity'] == 'eldata':
            nombredata = item['sourceText'].lower()
url = 'https://youtochipizarron.herokuapp.com/' + nombreapi + '_' + nombredata
utils.output('inter', 'checking', utils.translate('checking',{
'website_name': url
}))
# call the url to create a github api branch/repository
try:
r = utils.http('GET', url)
# In case there is a problem like wrong settings
#if 'error' in r.json():
# error = r.json()['error']['errors'][0]
# return utils.output('settings_error', 'settings_error', utils.translate('settings_errors', {
# 'reason': error['reason'],
# 'message': error['message']
# }))
# items = r.json()['rooms']
result += utils.translate('list_element', {
'repository_url': url,
'repository_name': nombreapi + '_' + nombredata
}
)
except requests.exceptions.RequestException as e:
return utils.output('request_error', 'request_error', utils.translate('request_errors'))
# Will synchronize the content (because "end" type) if synchronization enabled
return utils.output('end', 'success', utils.translate('success', {
'nuevoapi': nombreapi,
'nuevodata': nombredata,
'result': result
})) | 5,356,065 |
def _sudoku_update(grid, possibilities_list, row, col, paths=None):
"""incremental possibilities update"""
num = grid[row, col]
for row2 in range(9):
for col2 in range(9):
if possibilities_list[row2][col2] is None: # cell already filled
continue
if row == row2 and col == col2: # we just updated this
possibilities_list[row][col] = None
            elif (
                col2 == col
                or row2 == row
                or (col2 // 3 == col // 3 and row2 // 3 == row // 3)
            ):  # same row, column, or 3x3 box
possibilities_list[row2][col2] = [
x for x in possibilities_list[row2][col2] if x != num
]
# path check
if paths is None:
return
for path in paths:
if (row, col) not in path: # not a relevant path
continue
for row2, col2 in path:
if row == row2 and col == col2: # same cell, continue
continue
if possibilities_list[row2][col2] is None: # already filled, continue
continue
else: # remove num from this cell's possibility list
possibilities_list[row2][col2] = [
x for x in possibilities_list[row2][col2] if x != num
] | 5,356,066 |
def gamma(surface_potential, temperature):
"""Calculate term from Gouy-Chapmann theory.
Arguments:
surface_potential: Electrostatic potential at the metal/solution boundary in Volts, e.g. 0.05 [V]
temperature: Temperature of the solution in Kelvin, e.g. 300 [K]
Returns:
float
"""
    # Thermal-voltage denominator uses the Boltzmann constant k_B (sc.Boltzmann), not the Stefan-Boltzmann constant.
    product = sc.elementary_charge * surface_potential / (4 * sc.Boltzmann * temperature)
return np.tanh(product) | 5,356,067 |
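# Worked numeric check under assumed inputs (50 mV, 300 K): the argument
# e*psi/(4*k_B*T) is about 0.48, so gamma = tanh(0.48) is roughly 0.45.
import numpy as np
import scipy.constants as sc

x = sc.elementary_charge * 0.05 / (4 * sc.Boltzmann * 300)
print(x, np.tanh(x))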
def import_cmd(project, namespace, data_dir, project_placeholder,
namespace_placeholder, kinds, chunk):
"""Import data to database using previously exported data as input."""
execute_tasks({
'type_task': 'import',
'project': project,
'namespace': namespace,
'data_dir': data_dir,
'project_placeholder': project_placeholder,
'namespace_placeholder': namespace_placeholder,
'kinds': kinds,
'chunk': chunk,
'target': execute_import,
}) | 5,356,068 |
def calculate_mask(maskimage, masks):
"""Extracts watershed seeds from data."""
dims = list(maskimage.slices2shape())
maskdata = np.ones(dims, dtype='bool')
if masks:
dataslices = utils.slices2dataslices(maskimage.slices)
maskdata = utils.string_masks(masks, maskdata, dataslices)
maskimage.write(data=maskdata, slices=maskimage.slices)
return maskdata | 5,356,069 |
def Range(lo, hi, ctx = None):
"""Create the range regular expression over two sequences of length 1
>>> range = Range("a","z")
>>> print(simplify(InRe("b", range)))
True
>>> print(simplify(InRe("bb", range)))
False
"""
lo = _coerce_seq(lo, ctx)
hi = _coerce_seq(hi, ctx)
return ReRef(Z3_mk_re_range(lo.ctx_ref(), lo.ast, hi.ast), lo.ctx) | 5,356,070 |
def quantile(data, num_breaks):
"""
Calculate quantile breaks.
Arguments:
data -- Array of values to classify.
num_breaks -- Number of breaks to perform.
"""
def scipy_mquantiles(a, prob=list([.25,.5,.75]), alphap=.4, betap=.4, axis=None, limit=()):
""" function copied from scipy 0.13.3::scipy.stats.mstats.mquantiles """
def _quantiles1D(data,m,p):
x = numpy.sort(data.compressed())
n = len(x)
if n == 0:
return numpy.ma.array(numpy.empty(len(p), dtype=float), mask=True)
elif n == 1:
return numpy.ma.array(numpy.resize(x, p.shape), mask=numpy.ma.nomask)
aleph = (n*p + m)
k = numpy.floor(aleph.clip(1, n-1)).astype(int)
gamma = (aleph-k).clip(0,1)
return (1.-gamma)*x[(k-1).tolist()] + gamma*x[k.tolist()]
# Initialization & checks ---------
data = numpy.ma.array(a, copy=False)
if data.ndim > 2:
raise TypeError("Array should be 2D at most !")
#
if limit:
condition = (limit[0] < data) & (data < limit[1])
data[~condition.filled(True)] = numpy.ma.masked
#
p = numpy.array(prob, copy=False, ndmin=1)
m = alphap + p*(1.-alphap-betap)
# Computes quantiles along axis (or globally)
if (axis is None):
return _quantiles1D(data, m, p)
return numpy.ma.apply_along_axis(_quantiles1D, axis, data, m, p)
return scipy_mquantiles(data, numpy.linspace(1.0 / num_breaks, 1, num_breaks)) | 5,356,071 |
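# Usage sketch, assuming the quantile() function above and the `numpy` import it
# relies on are in scope: with 4 breaks the probabilities are
# linspace(0.25, 1.0, 4), i.e. roughly the 25th/50th/75th/100th percentiles.
sample = numpy.arange(1, 101)
print(quantile(sample, 4))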
def sort_films(film_list):
"""
This function takes a list of film dictionaries as an argument and sorts each film by category.
Create a dictionary where the keys are the names of the film categories and the values are initially empty lists,
    then loop through the list of films and add each film to the list with the appropriate category.
The final dictionary should have the following format:
{
'Animated Feature Film': < list of Animated Feature Film nominees >,
'Best Picture': < list of Best Picture nominees >,
'Documentary (Feature)': < list of Documentary (Feature) nominees >,
'International Feature Film': < list of International Feature Film nominees >,
'Short Film (Live Action)': < list of Short Film (Live Action) nominees >
}
Parameters:
film_list (list): A list of dictionaries, each representing a film
Returns:
(dict): A dictionary with five key:value pairs
"""
pass | 5,356,072 |
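# One possible implementation of the behaviour described in the docstring above --
# a sketch, not the exercise's official solution. It assumes each film dict
# carries a 'category' key naming one of the five award categories; that key name
# is an assumption, since the film dict layout is not shown here.
def sort_films_sketch(film_list):
    categories = [
        "Animated Feature Film",
        "Best Picture",
        "Documentary (Feature)",
        "International Feature Film",
        "Short Film (Live Action)",
    ]
    films_by_category = {category: [] for category in categories}
    for film in film_list:
        films_by_category[film["category"]].append(film)  # assumed 'category' key
    return films_by_category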
def extract_conformers_from_rdkit_mol_object(mol_obj, conf_ids):
"""
Generate xyz lists for all the conformers in conf_ids
:param mol_obj: Molecule object
:param conf_ids: (list) list of conformer ids to convert to xyz
:return: (list(list(cgbind.atoms.Atom)))
"""
conformers = []
for i in range(len(conf_ids)):
mol_block_lines = Chem.MolToMolBlock(mol_obj, confId=conf_ids[i]).split('\n')
atoms = []
for line in mol_block_lines:
split_line = line.split()
if len(split_line) == 16:
atom_label, x, y, z = split_line[3], split_line[0], split_line[1], split_line[2]
atoms.append(Atom(atom_label, float(x), float(y), float(z)))
conformer = BaseStruct()
conformer.set_atoms(atoms)
conformers.append(conformer)
if len(conformers) == 0:
raise CgbindCritical('Length of conformer xyz list was 0. RDKit failed')
return conformers | 5,356,073 |
def create_embedding(name: str, env_spec: EnvSpec, *args, **kwargs) -> Embedding:
"""
Create an embedding to use with sbi.
:param name: identifier of the embedding
:param env_spec: environment specification
:param args: positional arguments forwarded to the embedding's constructor
:param kwargs: keyword arguments forwarded to the embedding's constructor
:return: embedding instance
"""
if name == LastStepEmbedding.name:
embedding = LastStepEmbedding(env_spec, RolloutSamplerForSBI.get_dim_data(env_spec), *args, **kwargs)
elif name == DeltaStepsEmbedding.name:
embedding = DeltaStepsEmbedding(env_spec, RolloutSamplerForSBI.get_dim_data(env_spec), *args, **kwargs)
elif name == BayesSimEmbedding.name:
embedding = BayesSimEmbedding(env_spec, RolloutSamplerForSBI.get_dim_data(env_spec), *args, **kwargs)
elif name == DynamicTimeWarpingEmbedding.name:
embedding = DynamicTimeWarpingEmbedding(env_spec, RolloutSamplerForSBI.get_dim_data(env_spec), *args, **kwargs)
elif name == RNNEmbedding.name:
embedding = RNNEmbedding(env_spec, RolloutSamplerForSBI.get_dim_data(env_spec), *args, **kwargs)
elif name == AllStepsEmbedding.name:
embedding = AllStepsEmbedding(env_spec, RolloutSamplerForSBI.get_dim_data(env_spec), *args, **kwargs)
else:
raise pyrado.ValueErr(
given_name=name,
eq_constraint=f"{LastStepEmbedding.name}, {DeltaStepsEmbedding.name}, {BayesSimEmbedding.name}, "
f"{DynamicTimeWarpingEmbedding.name}, or {RNNEmbedding.name}",
)
return embedding | 5,356,074 |
def _init_allreduce_operators(length, split_indices):
""" initialize allreduce communication operators"""
indices = split_indices[0]
fusion = split_indices[1]
op_list = ()
j = 0
for i in range(length):
if j <= len(indices)-1:
temp = indices[j]
else:
temp = length
if i >= temp:
j = j + 1
fusion = fusion + 1
op = AllReduce('sum', GlobalComm.WORLD_COMM_GROUP)
op.add_prim_attr('fusion', fusion)
op_list = op_list + (op,)
return op_list | 5,356,075 |
def plot_load_shape_yd(daily_load_shape):
"""With input 2 dim array plot daily load"""
x_values = range(24)
y_values = list(daily_load_shape[:, 0] * 100) # to get percentages
plt.plot(x_values, y_values)
plt.xlabel("Hours")
plt.ylabel("Percentage of daily demand")
plt.title("Load curve of a day")
plt.legend()
#plt.show() | 5,356,076 |
def get_valid_fields(val: int, cs: dict) -> set:
"""
A value is valid if there's at least one field's interval which contains it.
"""
return {
field
for field, intervals in cs.items()
if any(map(lambda i: i[0] <= val <= i[1], intervals))
} | 5,356,077 |
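# Usage sketch with a toy constraints dict (hypothetical field names): a value
# is valid for every field that has at least one interval containing it.
toy_cs = {"row": [(1, 5), (10, 15)], "seat": [(3, 7)]}
print(get_valid_fields(4, toy_cs))   # {'row', 'seat'}
print(get_valid_fields(12, toy_cs))  # {'row'}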
def load_data_multiview(_path_features, _path_lables, coords, joints, cycles=3, test_size=0.1):
"""Generate multi-view train/test data from gait cycles.
Args:
_path_features (str): Path to gait sequence file
_path_lables (str): Path to labels of corresponding gait sequence
coords (int): Number of co-ordinates representing each joint in gait cycle
joints (int)): Number of joints in the gait sequence
cycles (int, optional): Time duration of gait cycle. Defaults to 3.
test_size (float, optional): Ratio of test data. Defaults to 0.1.
Returns:
[list]: train and test data
"""
feature_files = glob.glob(_path_features)
label_files = glob.glob(_path_lables)
print(f'---> Number of files = {len(feature_files)}')
# sorting files so that features and labels files match
feature_files.sort()
label_files.sort()
    angle_regex = re.compile(r'(\d*).h5')
    folder_regex = re.compile(r'(\w*)\/')
all_data_train = []
all_data_test = []
all_labels_train = []
all_labels_test = []
all_angles_train = []
all_angles_test = []
for feature_file, label_file in zip(feature_files, label_files):
ff = h5py.File(feature_file, 'r')
fl = h5py.File(label_file, 'r')
angle = int(angle_regex.search(feature_file).group(1))
folder = folder_regex.findall(feature_file)[-1]
print(f"--->> processing - {folder} - {angle}")
data_list = []
num_samples = len(ff.keys())
time_steps = 0
labels = np.empty(num_samples)
for si in range(num_samples):
ff_group_key = list(ff.keys())[si]
data_list.append(list(ff[ff_group_key])) # Get the data
time_steps_curr = len(ff[ff_group_key])
if time_steps_curr > time_steps:
time_steps = time_steps_curr
labels[si] = fl[list(fl.keys())[si]][()]
data = np.empty((num_samples, time_steps*cycles, joints*coords))
for si in range(num_samples):
data_list_curr = np.tile(
data_list[si], (int(np.ceil(time_steps / len(data_list[si]))), 1))
for ci in range(cycles):
data[si, time_steps * ci:time_steps *
(ci + 1), :] = data_list_curr[0:time_steps]
data_train, data_test, labels_train, labels_test = train_test_split(data,
labels,
test_size=test_size)
all_data_train.extend(data_train)
all_data_test.extend(data_test)
all_labels_train.extend(labels_train)
all_labels_test.extend(labels_test)
all_angles_train.extend([angle]*len(labels_train))
all_angles_test.extend([angle]*len(labels_test))
return data, labels, \
all_data_train, all_labels_train, \
all_data_test, all_labels_test, \
all_angles_train, all_angles_test | 5,356,078 |
def dir_to_spectrogram(audio_dir, spectrogram_dir, spectrogram_dimensions=(64, 64), noverlap=16, cmap='gray_r'):
""" Creates spectrograms of all the audio files in a dir
:param audio_dir: path of directory with audio files
:param spectrogram_dir: path to save spectrograms
    :param spectrogram_dimensions: tuple specifying the dimensions in pixels of the created spectrogram. default:(64,64)
:param noverlap: See http://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.spectrogram.html
:param cmap: the color scheme to use for the spectrogram. Defaults to 'gray_r'
:return:
"""
file_names = [f for f in listdir(audio_dir) if isfile(join(audio_dir, f)) and '.wav' in f]
for file_name in file_names:
        print(file_name)
audio_path = audio_dir + file_name
spectogram_path = spectrogram_dir + file_name.replace('.wav', '.png')
wav_to_spectrogram(audio_path, spectogram_path, spectrogram_dimensions=spectrogram_dimensions, noverlap=noverlap, cmap=cmap) | 5,356,079 |
def T_ncdm(omega_ncdm, m_ncdm):
# RELICS ONLY?
"""Returns T_ncdm as a function of omega_ncdm, m_ncdm.
omega_ncdm : relative relic abundance. Unitless.
m_ncdm : relic mass in units [eV].
T_ncdm : relic temperature in units [K]
"""
T_ncdm = (np.power( cf.NEUTRINO_SCALE_FACTOR * omega_ncdm / m_ncdm, 1./3.)
* cf.RELIC_TEMP_SCALE)
return T_ncdm | 5,356,080 |
def main(config: Config, dry_run: bool = False) -> int:
"""
    Main entrypoint into the program. Takes the specified snapshots if they don't exist and deletes old entries as specified.
:param config: The backup manager configuration.
:param dry_run: Flag to indicate that no commands should be run
:return: 0 on success, non-zero on failure
"""
zfs_path = which("zfs")
if zfs_path is None:
logging.critical("zfs command cannot be found")
return 2
try:
dataset_configs = get_dataset_configs(config)
except RuntimeError as exc:
logging.critical(exc)
return 3
logging.debug(
"Parsed dataset configs: \n\t%s", "\n\t".join((dumps(config) for config in dataset_configs)),
)
today = datetime.now().date()
for dataset_config in dataset_configs:
if not (
dataset_config["keep_days"] > 0
or (dataset_config["keep_weeks"] > 0 and today.isoweekday() == dataset_config["dow"])
or (dataset_config["keep_months"] > 0 and today.day == dataset_config["dom"])
):
logging.debug("No snapshot scheduled for dataset %s", dataset_config["name"])
continue
today_snapshot_name = "{}@{}{}".format(
dataset_config["name"], config.get("snapshot_prefix", ""), today.strftime("%Y%m%d")
)
if today in get_sorted_snapshots(config)[dataset_config["name"]]:
logging.warning("Snapshot %s already exists", today_snapshot_name)
continue
cmd = ["zfs", "snapshot", today_snapshot_name]
if dataset_config["recursive"]:
cmd.insert(2, "-r")
logging.info("Creating snapshot %s", today_snapshot_name)
logging.debug("Running command: %s", cmd)
if not dry_run:
try:
subprocess.check_output(cmd, stderr=subprocess.PIPE, encoding="utf-8")
except subprocess.CalledProcessError as exc:
logging.error("zfs command failed with error: %s", exc.stderr)
# Cleanup snapshots
dataset_snapshots = get_sorted_snapshots(config)[dataset_config["name"]]
keep_daily_set = set(dataset_snapshots[: dataset_config["keep_days"]])
keep_weekly_set = set(
[snapshot for snapshot in dataset_snapshots if snapshot.isoweekday() == dataset_config["dow"]][
: dataset_config["keep_weeks"]
]
)
keep_monthly_set = set(
[snapshot for snapshot in dataset_snapshots if snapshot.day == dataset_config["dom"]][
: dataset_config["keep_months"]
]
)
keep_set = keep_daily_set | keep_weekly_set | keep_monthly_set
for snapshot in set(dataset_snapshots) - keep_set:
delete_snapshot_name = "{}@{}{}".format(
dataset_config["name"], config.get("snapshot_prefix", ""), snapshot.strftime("%Y%m%d")
)
cmd = [
"zfs",
"destroy",
delete_snapshot_name,
]
if dataset_config["recursive"]:
cmd.insert(2, "-r")
logging.info("Destroying snapshot %s", delete_snapshot_name)
logging.debug("Running command: %s", cmd)
if not dry_run:
try:
subprocess.check_output(cmd, stderr=subprocess.PIPE, encoding="utf-8")
except subprocess.CalledProcessError as exc:
logging.error("zfs command failed with error: %s", exc.stderr)
return 0 | 5,356,081 |
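# Standalone sketch of the retention rule above, with hypothetical settings:
# keep the newest keep_days daily snapshots, the newest keep_weeks snapshots
# taken on ISO weekday `dow`, and the newest keep_months snapshots taken on
# day-of-month `dom`. Snapshot dates are assumed to be sorted newest-first,
# matching what get_sorted_snapshots() is expected to return.
from datetime import date, timedelta

snapshots = sorted((date(2024, 1, 1) + timedelta(days=i) for i in range(60)), reverse=True)
keep_days, keep_weeks, keep_months, dow, dom = 7, 4, 3, 7, 1

keep_daily = set(snapshots[:keep_days])
keep_weekly = set([s for s in snapshots if s.isoweekday() == dow][:keep_weeks])
keep_monthly = set([s for s in snapshots if s.day == dom][:keep_months])
keep = keep_daily | keep_weekly | keep_monthly
print(len(keep), "kept,", len(set(snapshots) - keep), "to destroy")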
def generate_initialisation_vector():
"""Generates an initialisation vector for encryption."""
initialisation_vector = Random.new().read(AES.block_size)
return (initialisation_vector, int(binascii.hexlify(initialisation_vector), 16)) | 5,356,082 |
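# Related sketch: the hexlify-then-int conversion above is equivalent to
# int.from_bytes(iv, "big"); os.urandom is used here as a stand-in source of
# random bytes so the check does not depend on the PyCrypto Random module.
import os
import binascii

iv = os.urandom(16)
assert int(binascii.hexlify(iv), 16) == int.from_bytes(iv, "big")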
def test_take_18():
"""
    Test take: take first, then do filter, skip, batch and repeat operations
"""
logger.info("test_take_18")
data1 = ds.GeneratorDataset(generator_10, ["data"])
data1 = data1.take(8)
data1 = data1.filter(predicate=filter_func_ge, num_parallel_workers=4)
data1 = data1.skip(2)
data1 = data1.batch(2)
data1 = data1.repeat(2)
# Here i refers to index, d refers to data element
for _, d in enumerate(data1):
assert d[0].asnumpy()[0] == 2
assert sum([1 for _ in data1]) == 2 | 5,356,083 |
def assign_score(relevant_set):
"""Assign score to each relevant element in descending order and return the score list."""
section = len(relevance[0])//3
score = []
s = 3
for i in range(3):
if s == 1:
num = len(relevance[0]) - len(score)
score.extend([s]*num)
else:
score.extend([s]*section)
s -= 1
return score | 5,356,084 |
def visualize_img(img,
cam,
kp_pred,
vert,
renderer,
kp_gt=None,
text={},
rotated_view=False,
mesh_color='blue',
pad_vals=None,
no_text=False):
"""
Visualizes the image with the ground truth keypoints and
predicted keypoints on left and image with mesh on right.
Keypoints should be in normalized coordinates, not image coordinates.
Args:
img: Image.
cam (3x1): Camera parameters.
kp_gt: Ground truth keypoints.
kp_pred: Predicted keypoints.
vert: Vertices.
renderer: SMPL renderer.
text (dict): Optional information to include in the image.
rotated_view (bool): If True, also visualizes mesh from another angle.
if pad_vals (2,) is not None, removes those values from the image
(undo img pad to make square)
Returns:
Combined image.
"""
img_size = img.shape[0]
text.update({'sc': cam[0], 'tx': cam[1], 'ty': cam[2]})
if kp_gt is not None:
gt_vis = kp_gt[:, 2].astype(bool)
loss = np.sum((kp_gt[gt_vis, :2] - kp_pred[gt_vis])**2)
text['kpl'] = loss
# Undo pre-processing.
# Make sure img is [0-255]
input_img = ((img + 1) * 0.5) * 255.
rend_img = renderer(vert, cam=cam, img=input_img, color_name=mesh_color)
if not no_text:
rend_img = vis_util.draw_text(rend_img, text)
# Draw skeletons
pred_joint = ((kp_pred + 1) * 0.5) * img_size
skel_img = vis_util.draw_skeleton(input_img, pred_joint)
if kp_gt is not None:
gt_joint = ((kp_gt[:, :2] + 1) * 0.5) * img_size
skel_img = vis_util.draw_skeleton(
skel_img, gt_joint, draw_edges=False, vis=gt_vis)
if pad_vals is not None:
skel_img = remove_pads(skel_img, pad_vals)
rend_img = remove_pads(rend_img, pad_vals)
if rotated_view:
rot_img = renderer.rotated(
vert, 90, cam=cam, alpha=False, color_name=mesh_color)
if pad_vals is not None:
rot_img = remove_pads(rot_img, pad_vals)
return skel_img / 255, rend_img / 255, rot_img / 255
else:
return skel_img / 255, rend_img / 255 | 5,356,085 |
def synchronize_photos(albums, command):
"""
Synchronize photos from the filesystem to the database.
``albums`` is the result of ``scan_photo_storage``.
"""
for (category, dirpath), filenames in albums.items():
album = Album.objects.get(category=category, dirpath=dirpath)
new_keys = set(filenames.keys())
old_keys = set(p.filename for p in album.photo_set.all())
for filename in sorted(new_keys - old_keys):
date = get_photo_info(albums[category, dirpath][filename], command)
command.write_out(
f"Adding photo {filename} to album {dirpath} ({category})",
verbosity=2)
photo = Photo.objects.create(album=album, filename=filename, date=date)
for preset in command.resize_presets:
photo.thumbnail(preset)
for filename in sorted(old_keys - new_keys):
command.write_out(
f"Removing photo {filename} from album {dirpath} ({category})",
verbosity=2)
photo = Photo.objects.get(album=album, filename=filename)
photo.delete()
if not command.full_sync:
continue
for filename in sorted(old_keys & new_keys):
date = get_photo_info(albums[category, dirpath][filename], command)
photo = Photo.objects.get(album=album, filename=filename)
if date != photo.date:
command.write_out(
f"Fixing date of photo {filename} from album {dirpath} ({category})",
verbosity=2)
photo.date = date
photo.save() | 5,356,086 |
def _check_file_type_specific_bad_pattern(filepath, content):
"""Check the file content based on the file's extension.
Args:
filepath: str. Path of the file.
content: str. Contents of the file.
Returns:
failed: bool. True if there is bad pattern else false.
total_error_count: int. The number of errors.
"""
_, extension = os.path.splitext(filepath)
pattern = BAD_PATTERNS_MAP.get(extension)
failed = False
total_error_count = 0
if pattern:
for regexp in pattern:
if _check_bad_pattern_in_file(filepath, content, regexp):
failed = True
total_error_count += 1
return failed, total_error_count | 5,356,087 |
def _update(__version__, __code_name__, language, socks_proxy):
"""
update the framework
Args:
__version__: version number
__code_name__: code name
language: language
socks_proxy: socks proxy
Returns:
True if success otherwise None
"""
try:
if socks_proxy is not None:
socks_version = socks.SOCKS5 if socks_proxy.startswith(
'socks5://') else socks.SOCKS4
socks_proxy = socks_proxy.rsplit('://')[1]
socks.set_default_proxy(socks_version, str(
socks_proxy.rsplit(':')[0]), int(socks_proxy.rsplit(':')[1]))
socket.socket = socks.socksocket
socket.getaddrinfo = getaddrinfo
data = requests.get(
url, headers={"User-Agent": "OWASP Nettacker"}).content
if version() is 3:
data = data.decode("utf-8")
if __version__ + ' ' + __code_name__ == data.rsplit('\n')[0]:
info(messages(language, "last_version"))
else:
warn(messages(language, "not_last_version"))
warn(messages(language, "feature_unavailable"))
except:
warn(messages(language, "cannot_update"))
return True | 5,356,088 |
def conv_noncart_to_cart(points, values, xrange, yrange, zrange):
"""
:param points: Data point locations (non-cartesian system)
    :param values: Values corresponding to each data point
    :param xrange: Range of x values to include on output cartesian grid
    :param yrange: Range of y values to include on output cartesian grid
    :param zrange: Range of z values to include on output cartesian grid
:return: 3d array with sides (xrange, yrange, zrange) of values
"""
# Get all points on cartesian grid specified
xv, yv, zv = np.meshgrid(xrange, yrange, zrange)
print(xv)
print(yv)
print(zv)
# Determine interpolated values of points on the cartesian grid
valarray = scipy.interpolate.griddata(points=points, values=values, xi=(xv, yv, zv), method="linear")
# Returns 3D array of vals on cartesian grid
return(valarray) | 5,356,089 |
def createColumnsFromJson(json_file, defaultMaximumSize=250):
"""Create a list of Synapse Table Columns from a Synapse annotations JSON file.
This creates a list of columns; if the column is a 'STRING' and
defaultMaximumSize is specified, change the default maximum size for that
column.
"""
f = urllib.urlopen(path2url(json_file))
data = json.load(f)
cols = []
for d in data:
d['enumValues'] = [a['value'] for a in d['enumValues']]
if d['columnType'] == 'STRING' and defaultMaximumSize:
d['maximumSize'] = defaultMaximumSize
cols.append(synapseclient.Column(**d))
return cols | 5,356,090 |
def record(
fn: Callable[..., T], error_handler: Optional[ErrorHandler] = None
) -> Callable[..., T]:
"""
Syntactic sugar to record errors/exceptions that happened in the decorated
function using the provided ``error_handler``.
Using this decorator is equivalent to:
::
error_handler = get_error_handler()
error_handler.initialize()
try:
foobar()
except ChildFailedError as e:
_, failure = e.get_first_failure()
error_handler.dump_error_file(failure.error_file, failure.exitcode)
raise
except Exception as e:
error_handler.record(e)
raise
.. important:: use this decorator once per process at the top level method,
typically this is the main method.
Example
::
@record
def main():
pass
if __name__=="__main__":
main()
"""
if not error_handler:
error_handler = get_error_handler()
def wrap(f):
@wraps(f)
def wrapper(*args, **kwargs):
assert error_handler is not None # assertion for mypy type checker
error_handler.initialize()
try:
return f(*args, **kwargs)
except ChildFailedError as e:
rank, failure = e.get_first_failure()
if failure.error_file != _NOT_AVAILABLE:
error_handler.dump_error_file(failure.error_file, failure.exitcode)
else:
warnings.warn(_no_error_file_warning_msg(rank, failure))
raise
except Exception as e:
error_handler.record_exception(e)
raise
return wrapper
return wrap(fn) | 5,356,091 |
def bbox_overlaps_2D(boxes1, boxes2):
"""Computes IoU overlaps between two sets of boxes.
boxes1, boxes2: [N, (y1, x1, y2, x2)].
"""
# 1. Tile boxes2 and repeate boxes1. This allows us to compare
# every boxes1 against every boxes2 without loops.
# TF doesn't have an equivalent to np.repeate() so simulate it
# using tf.tile() and tf.reshape.
boxes1_repeat = boxes2.size()[0]
boxes2_repeat = boxes1.size()[0]
boxes1 = boxes1.repeat(1,boxes1_repeat).view(-1,4)
boxes2 = boxes2.repeat(boxes2_repeat,1)
# 2. Compute intersections
b1_y1, b1_x1, b1_y2, b1_x2 = boxes1.chunk(4, dim=1)
b2_y1, b2_x1, b2_y2, b2_x2 = boxes2.chunk(4, dim=1)
y1 = torch.max(b1_y1, b2_y1)[:, 0]
x1 = torch.max(b1_x1, b2_x1)[:, 0]
y2 = torch.min(b1_y2, b2_y2)[:, 0]
x2 = torch.min(b1_x2, b2_x2)[:, 0]
zeros = Variable(torch.zeros(y1.size()[0]), requires_grad=False)
if y1.is_cuda:
zeros = zeros.cuda()
intersection = torch.max(x2 - x1, zeros) * torch.max(y2 - y1, zeros)
# 3. Compute unions
b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)
b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)
union = b1_area[:,0] + b2_area[:,0] - intersection
# 4. Compute IoU and reshape to [boxes1, boxes2]
iou = intersection / union
overlaps = iou.view(boxes2_repeat, boxes1_repeat)
return overlaps | 5,356,092 |
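# A hedged NumPy sketch of the same pairwise-IoU idea: broadcasting over a
# [N, 1, 4] x [1, M, 4] pair plays the role of the tile/repeat trick above.
# This is an illustration, not a drop-in replacement for the torch version.
import numpy as np

def iou_matrix(boxes1, boxes2):
    b1 = boxes1[:, None, :]  # [N, 1, 4] in (y1, x1, y2, x2) order
    b2 = boxes2[None, :, :]  # [1, M, 4]
    y1 = np.maximum(b1[..., 0], b2[..., 0])
    x1 = np.maximum(b1[..., 1], b2[..., 1])
    y2 = np.minimum(b1[..., 2], b2[..., 2])
    x2 = np.minimum(b1[..., 3], b2[..., 3])
    inter = np.clip(y2 - y1, 0, None) * np.clip(x2 - x1, 0, None)
    area1 = (b1[..., 2] - b1[..., 0]) * (b1[..., 3] - b1[..., 1])
    area2 = (b2[..., 2] - b2[..., 0]) * (b2[..., 3] - b2[..., 1])
    return inter / (area1 + area2 - inter)

a = np.array([[0.0, 0.0, 2.0, 2.0]])
b = np.array([[1.0, 1.0, 3.0, 3.0], [0.0, 0.0, 2.0, 2.0]])
print(iou_matrix(a, b))  # approximately [[0.1429, 1.0]]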
def _noisy_action_impl(
scene_node: hsim.SceneNode,
translate_amount: float,
rotate_amount: float,
multiplier: float,
model: MotionNoiseModel,
motion_type: str,
):
"""
multiplier is the
ROTATION STD NOISE in case of rotational motion type,
MOTION STD NOISE in case of linear motion type
"""
# Perform the action in the coordinate system of the node
transform = scene_node.transformation
move_ax = -transform[_Z_AXIS].xyz
perp_ax = transform[_X_AXIS].xyz
# if the action is rotational, introduce a rotation error but NOT a translation error
if motion_type == "rotational":
translation_noise=np.array([0., 0.], dtype=np.float32)
else: # is linear
translation_noise = ( (model.linear.sample() * 10.0) * multiplier )/10.0
# apply the noise along the 2 axis
scene_node.translate_local(
move_ax * (translate_amount + translation_noise[0])
+ perp_ax * (translation_noise[1])
)
if motion_type == "linear":
# if the movement was straight, add a bit of noise to rotation
rot_noise = 10.0 * translate_amount * model.rotation.sample()
else:
rot_noise = ( (model.rotation.sample() * 10.0) * multiplier )/10.0
scene_node.rotate_y_local(mn.Deg(rotate_amount) + mn.Deg(rot_noise))
scene_node.rotation = scene_node.rotation.normalized() | 5,356,093 |
def decoder(data):
"""
This generator processes a sequence of bytes in Modified UTF-8 encoding
and produces a sequence of unicode string characters.
It takes bits from the byte until it matches one of the known encoding
sequences.
It uses ``DecodeMap`` to mask, compare and generate values.
:param data: a string of bytes in Modified UTF-8 encoding.
:return: a generator producing a string of unicode characters
:raises UnicodeDecodeError: unrecognised byte in sequence encountered.
"""
def next_byte(_it, start, count):
try:
return next(_it)[1]
except StopIteration:
raise UnicodeDecodeError(
NAME, data, start, start + count, "incomplete byte sequence"
)
it = iter(enumerate(data))
for i, d in it:
if d == 0x00: # 00000000
raise UnicodeDecodeError(
NAME, data, i, i + 1, "embedded zero-byte not allowed"
)
if d & 0x80: # 1xxxxxxx
if d & 0x40: # 11xxxxxx
if d & 0x20: # 111xxxxx
if d & 0x10: # 1111xxxx
raise UnicodeDecodeError(
NAME, data, i, i + 1, "invalid encoding character"
)
if d == 0xED:
value = 0
for i1, dm in enumerate(DECODE_MAP[6]):
d1 = next_byte(it, i, i1 + 1)
value = dm.apply(d1, value, data, i, i1 + 1)
else: # 1110xxxx
value = d & 0x0F
for i1, dm in enumerate(DECODE_MAP[3]):
d1 = next_byte(it, i, i1 + 1)
value = dm.apply(d1, value, data, i, i1 + 1)
else: # 110xxxxx
value = d & 0x1F
for i1, dm in enumerate(DECODE_MAP[2]):
d1 = next_byte(it, i, i1 + 1)
value = dm.apply(d1, value, data, i, i1 + 1)
else: # 10xxxxxx
raise UnicodeDecodeError(
NAME, data, i, i + 1, "misplaced continuation character"
)
else: # 0xxxxxxx
value = d
# noinspection PyCompatibility
yield mutf8_unichr(value) | 5,356,094 |
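# Standalone illustration of why a dedicated decoder is needed (it does not use
# the decoder above, which depends on DECODE_MAP): Modified UTF-8 encodes U+0000
# as the two-byte sequence C0 80, which strict standard UTF-8 rejects as an
# overlong form.
mutf8_null = b"\xc0\x80"

try:
    mutf8_null.decode("utf-8")
except UnicodeDecodeError as exc:
    print("standard UTF-8 rejects it:", exc.reason)

# Manual decode of the 110xxxxx 10xxxxxx form, mirroring the 0x1F/0x3F masks above.
value = ((mutf8_null[0] & 0x1F) << 6) | (mutf8_null[1] & 0x3F)
print(value == 0)  # True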
def calculate_exvolume_redfactor():
"""
Calculates DEER background reduction factor alpha(d)
See
Kattnig et al
J.Phys. Chem. B, 117, 16542 (2013)
https://doi.org/10.1021/jp408338q
The background reduction factor alpha(d) is defined in Eq.(18)
For large d, one can use the limiting expression
alpha = (3/2/pi)*(2*pi/3-sqrt(3)./d)
as an excellent approximation (error at d
"""
def KK(d):
q = np.sqrt(6*d/pi)
S,C = scp.special.fresnel(q)
y = 1 - (np.cos(d)*C+np.sin(d)*S)/q
y[y==0] = 0
return y
def h(d):
d = np.atleast_1d(d)
y = np.zeros(np.shape(d))
for k in range(len(d)):
y[k],_ = scp.integrate.quad(lambda x:(1-x**2)*Si((1-x**2)*d[k]),0,np.sqrt(3))
return y
def Si(t):
t = np.atleast_1d(t)
y = np.zeros(np.shape(t))
for k in range(len(t)):
y[k],_ = scp.integrate.quad(lambda x:np.sin(x)/(x+np.finfo(float).eps),0,t[k],limit=1000)
y[y==0] = 0
return y
# Set up dR range
#-------------------------------------------------------------------------------
# dR = A*t/R^3, where t is time, R is excluded-volume radius, and A is the
# dipolar constant (in units compatible with t and R)
dRlin = np.arange(0,20,0.05)
dRlog = 10**(np.arange(1,3,0.05))
dRlog = np.delete(dRlog, np.where(dRlog < max(dRlin)))
dR = np.concatenate((dRlin, dRlog))
# Evaluate reduction factor alpha as a function of dR
#-------------------------------------------------------------------------------
h_ = h(dR)
K_ = KK(dR)
alpha = (3/2/pi)*(h_ - np.sqrt(3)/dR*K_)
alpha[dR==0] = 0
return alpha | 5,356,095 |
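# Quick check of the large-d limit quoted in the docstring, assuming the np and
# pi names used by the function above are in scope:
# alpha ~ (3/2/pi) * (2*pi/3 - sqrt(3)/d), which tends to 1 as d grows.
d_vals = np.array([50.0, 100.0, 500.0])
print((3 / (2 * pi)) * (2 * pi / 3 - np.sqrt(3) / d_vals))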
def get_successors(graph):
"""Returns a dict of all successors of each node."""
d = {}
for e in graph.get_edge_list():
src = e.get_source()
dst = e.get_destination()
if src in d.keys():
d[src].add(dst)
else:
d[src] = set([dst])
return d | 5,356,096 |
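# Usage sketch (assumes the pydot package is installed and provides Dot/Edge
# with the get_edge_list/get_source/get_destination accessors used above).
import pydot

g = pydot.Dot(graph_type="digraph")
g.add_edge(pydot.Edge("a", "b"))
g.add_edge(pydot.Edge("a", "c"))
g.add_edge(pydot.Edge("b", "c"))
print(get_successors(g))  # {'a': {'b', 'c'}, 'b': {'c'}}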
def main(options):
"""
The primary modes of operation are:
* run query & save output, read cache, plot results
--query <foo>
* read cache or csv file, plot results
--csvfile <foo.csv>
* merge csv files, noplot
--mergefiles <foo1.csv> --mergefiles <foo2.csv> --output <merge.csv>
"""
cache_file = None
if options.query:
# RUN QUERY
(query_file,cache_file) = get_filenames(options.query,
options.define_values)
query_str = process_macros(read_file(query_file), options.define_values)
if options.refresh or not has_recent_mtime(cache_file, query_file):
if options.api:
bigquery_api(query_str, cache_file, options)
else:
bigquery_exec(query_str, cache_file, options)
elif len(options.mergefiles) >= 2:
# HANDLE MERGE (if applicable)
success = merge_csvfiles(options)
return
elif options.csvfile is not None:
# USE EXPLICIT CSV FILE (instead of running a query first)
cache_file = options.csvfile
else:
print "Error: failed to identify operation."
sys.exit(1)
if not options.plot:
return
# READ RECORDS
records = read_csvfile(cache_file, True)
# INITIALIZE
x_list = []
y_lists = {}
y_errs = {}
c_list = []
split_column_names = map(split_column_name, options.columns)
for y_col,y_err_col in split_column_names:
y_lists[y_col] = []
y_errs[y_err_col] = []
# SORT DATA
# TODO/NOTE: expects 'timestamp' values are sorted in ascending order
for row in records:
        if options.verbose: print(row)
try:
# NOTE: First convert all values in this row.
x = float(row[options.timestamp])
if options.count_column: c = float(row[options.count_column])
y = {col:None for col,err in split_column_names}
e = {err:None for col,err in split_column_names if err is not None}
for y_col,y_err_col in split_column_names:
y[y_col] = float(row[y_col])
if y_err_col: e[y_err_col] = float(row[y_err_col])
# NOTE: only save values if all converted successfully
x_list.append(x)
if options.count_column: c_list.append(c)
for y_col,y_err_col in split_column_names:
y_lists[y_col].append(y[y_col])
if y_err_col: y_errs[y_err_col].append(e[y_err_col])
except ValueError:
# NOTE: a conversion failed. and, b/c conversion & save are
# separate, the data saved so far is still valid.
continue
# VISUALIZE
plot_data(x_list, y_lists, y_errs, c_list, options) | 5,356,097 |
def readDataTable2o2(request):
"""Vuetify練習"""
form1Textarea1 = request.POST["textarea1"]
template = loader.get_template(
'webapp1/practice/vuetify-data-table2.html')
# -----------------------------------------
# 1
    # 1. Fetch host1/webapp1/templates/webapp1/practice/vuetify-data-table2.html.
# -----------------------------------------
context = {
'dessertsJson': form1Textarea1
}
return HttpResponse(template.render(context, request)) | 5,356,098 |
def dump_variables2json(var_list, write_file):
"""Dump the variable list object to JSON."""
with open(write_file, 'w') as file:
json.dump(var_list, file, indent=2) | 5,356,099 |