content (stringlengths 35-762k) | sha1 (stringlengths 40) | id (int64 0-3.66M) |
---|---|---|
def check_credentials(username):
"""
Check whether a Credentials record exists with that username and return True or False.
"""
return Credentials.if_credential_exist(username) | 8515bbc39afd003fc193cbb80c97f5f718657fa6 | 3,656,629 |
def rpc_category_to_super_category(category_id, num_classes):
"""Map category to super-category id
Args:
category_id: list of category ids, 1-based
num_classes: 1, 17, 200
Returns:
super-category id, 0-based
"""
cat_id = -1
assert num_classes in RPC_SUPPORT_CATEGORIES, \
'Not support {} density categories'.format(num_classes)
if num_classes == 17:
cat_id = _categories[category_id]
elif num_classes == 1:
cat_id = 0
elif num_classes == 200:
cat_id = category_id - 1
assert 199 >= cat_id >= 0
return cat_id | 8056aea308f66a65a4135a6fc7f061873d990624 | 3,656,630 |
def setup_integration():
"""Set up a test resource."""
print('Setting up a test integration for an API')
return Integration(name='myapi',
base_url='https://jsonplaceholder.typicode.com') | d2720db6ae520e21edc555ad0c899652c6584406 | 3,656,631 |
def secondsToHMS(intervalInSeconds):
"""converts time in seconds to a string representing time in hours, minutes, and seconds
:param intervalInSeconds: a time measured in seconds
:returns: time in HH:MM:SS format
"""
hours, remainder = divmod(intervalInSeconds, 3600)
minutes, seconds = divmod(remainder, 60)
intervalString = '{0:02.0f}:{1:02.0f}:{2:02.0f}'.format(hours, minutes, seconds)
return intervalString | b38d4b886eaabd1361c162b6b7f55e11493dfb60 | 3,656,632 |
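A quick sanity check of the conversion (the input values are illustrative):
# 3725 seconds is 1 hour, 2 minutes and 5 seconds
assert secondsToHMS(3725) == '01:02:05'
assert secondsToHMS(45) == '00:00:45'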
import itertools
import numpy as np
from sklearn.neighbors import BallTree
def build_rdn(coords, r, **kwargs):
"""
Reconstruct edges between nodes by radial distance neighbors (rdn) method.
An edge is drawn between each node and the nodes closer
than a threshold distance (within a radius).
Parameters
----------
coords : ndarray
Coordinates of points where each column corresponds to an axis (x, y, ...)
r : float
Radius within which nodes are connected.
Examples
--------
>>> coords = make_simple_coords()
>>> pairs = build_rdn(coords, r=60)
Returns
-------
pairs : ndarray
The (n_pairs x 2) matrix of neighbors indices.
"""
tree = BallTree(coords, **kwargs)
ind = tree.query_radius(coords, r=r)
# clean arrays of neighbors from self referencing neighbors
# and aggregate at the same time
source_nodes = []
target_nodes = []
for i, arr in enumerate(ind):
neigh = arr[arr != i]
source_nodes.append([i]*(neigh.size))
target_nodes.append(neigh)
# flatten arrays of arrays
source_nodes = np.fromiter(itertools.chain.from_iterable(source_nodes), int).reshape(-1,1)
target_nodes = np.fromiter(itertools.chain.from_iterable(target_nodes), int).reshape(-1,1)
# remove duplicate pairs
pairs = np.hstack((source_nodes, target_nodes))
pairs = np.sort(pairs, axis=1)
pairs = np.unique(pairs, axis=0)
return pairs | 83f2d68fbb854e2ef25e03f5d58d6c96c02c0127 | 3,656,633 |
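A minimal usage sketch, assuming numpy and scikit-learn (the source of BallTree) are installed; the random coordinates are illustrative:
import numpy as np

rng = np.random.default_rng(0)
coords = rng.uniform(0, 100, size=(20, 2))   # 20 points in 2D
pairs = build_rdn(coords, r=30)
print(pairs.shape)   # (n_pairs, 2): each row is a sorted, de-duplicated pair of node indices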
def find_layer(model, type, order=0):
"""
Given a model, find the Nth layer of the specified type.
:param model: the model that will be searched
:param type: the lowercase type, as it is automatically saved by keras in the layer's name (e.g. conv2d, dense)
:param order: 0 by default (the first matching layer will be returned)
:return: The matching layer, or None if it was not found.
"""
num_found = 0
for layer in model.layers:
if type + '_' in layer.get_config()['name']:
if order == num_found:
return layer
num_found += 1
return None | 6d4e08c181900774b9e5666a11df9767f68a10ca | 3,656,634 |
def _interpretable(model):
# type: (Union[str, h2o.model.ModelBase]) -> bool
"""
Returns True if model_id is easily interpretable.
:param model: model or a string containing a model_id
:returns: bool
"""
return _get_algorithm(model) in ["glm", "gam", "rulefit"] | 4ae73e5b7ed98b61b56920985128212e3051c789 | 3,656,635 |
def apply_pb_correction(obs,
pb_sensitivity_curve,
cutoff_radius):
"""
Updates the primary beam response maps for cleaned images in an ObsInfo object.
Args:
obs (ObsInfo): Observation to generate maps for.
pb_sensitivity_curve: Primary beam sensitivity as a function of radius
in units of image pixels. (Should be 1.0 at the exact centre).
cutoff_radius: Radius at which to mask the output image (avoids
extremely high corrected values for noise fluctuations at large
radii). Units: image pixels.
"""
assert isinstance(obs, ObsInfo)
def update_pb_map_for_img(flux_map_path):
pbmap = generate_primary_beam_response_map(flux_map_path,
pb_sensitivity_curve,
cutoff_radius)
return pbmap
def process_clean_maps(clean_maps):
pbmap = update_pb_map_for_img(clean_maps.flux)
img_path = clean_maps.image
pb_img_path = img_path+'.pbcor'
generate_pb_corrected_image(img_path, pb_img_path,
pbmap)
clean_maps.pbcor = pb_img_path
if obs.maps_masked.ms.image:
process_clean_maps(obs.maps_masked.ms)
if obs.maps_open.ms.image:
process_clean_maps(obs.maps_open.ms)
if obs.maps_hybrid.ms.image:
process_clean_maps(obs.maps_hybrid.ms) | 02ee2913ce781f4a02e85910c69cfe5b534e62f4 | 3,656,636 |
def makeLoadParams(args):
"""
Create load parameters for start load request out of command line arguments.
Args:
args (dict): Parsed command line arguments.
"""
load_params = {'target': {},
'format': {'date_time': {},
'boolean': {}},
'load_options': {},
'advanced_options': {}}
add_param(load_params['target'], 'database', args.target_database)
add_param(load_params['target'], 'schema', args.target_schema)
add_param(load_params['target'], 'table', args.target_table)
if len(load_params['target']) == 0:
del load_params['target']
add_param(load_params['format'], 'type', args.type)
add_param(load_params['format'], 'field_separator', args.field_separator)
add_param(load_params['format'], 'trailing_field_separator',
args.trailing_field_separator, False)
add_param(load_params['format'], 'enclosing_character',
args.enclosing_character)
add_param(load_params['format'], 'escape_character', args.escape_character)
add_param(load_params['format'], 'null_value', args.null_value)
add_param(load_params['format'], 'has_header_row',
args.has_header_row, False)
add_param(load_params['format'], 'flexible', args.flexible, False)
add_param(load_params['format']['date_time'], 'converted_to_epoch',
args.date_converted_to_epoch, False)
add_param(load_params['format']['date_time'], 'date_format',
args.date_format)
add_param(load_params['format']['date_time'], 'time_format',
args.time_format)
add_param(load_params['format']['date_time'], 'date_time_format',
args.date_time_format)
add_param(load_params['format']['date_time'], 'second_fraction_start',
args.second_fraction_start)
add_param(load_params['format']['date_time'], 'skip_second_fraction',
args.skip_second_fraction, False)
if len(load_params['format']['date_time']) == 0:
del load_params['format']['date_time']
add_param(load_params['format']['boolean'], 'use_bit_values',
args.use_bit_boolean_values, False)
add_param(load_params['format']['boolean'], 'true_format', args.true_format)
add_param(load_params['format']['boolean'], 'false_format',
args.false_format)
if len(load_params['format']['boolean']) == 0:
del load_params['format']['boolean']
if len(load_params['format']) == 0:
del load_params['format']
add_param(load_params['load_options'], 'empty_target',
args.empty_target, False)
add_param(load_params['load_options'], 'max_ignored_rows',
args.max_ignored_rows)
if len(load_params['load_options']) == 0:
del load_params['load_options']
add_param(load_params['advanced_options'], 'validate_only',
args.validate_only, False)
add_param(load_params['advanced_options'], 'file_target_dir',
args.file_target_dir)
if len(load_params['advanced_options']) == 0:
del load_params['advanced_options']
print('Created load params: ', load_params)
return load_params | f1c0e9297775305c36acbb950bfc05e785bde87c | 3,656,637 |
from hash import HashTable
def empty_hash():
"""Initialize empty hash table."""
test_hash = HashTable()
return test_hash | 02700169c89427af4d2db123e110ec383d9332eb | 3,656,638 |
def denoise_sim(image, std, denoiser):
"""Simulate denoising problem
Args:
image (torch.Tensor): image tensor with shape (C, H, W).
std (float): standard deviation of additive Gaussian noise
on the scale [0., 1.].
denoiser: a denoiser instance (as in algorithms.denoiser).
The std argument for this denoiser is already specified
if applicable.
Returns:
denoised_image (torch.Tensor): tensor of denoised image
noisy_image (torch.Tensor): tensor of noisy image
"""
print('deploy.sim.denoise_sim: Simulating noisy image...')
noisy_image = gutil.add_noise(image, std)
print('deploy.sim.denoise_sim: Begin image denoising...')
denoised_image = denoiser(noisy_image, std=std)
return denoised_image, noisy_image | 216944b26c3ca0e04b8b5801766321fe60ee7e02 | 3,656,639 |
def _find_weektime(datetime, time_type='min'):
"""
Finds the minutes/seconds elapsed since midnight between Sunday and Monday (the start of the week).
Parameters
----------
datetime : datetime
The date and time that needs to be converted.
time_type : 'min' or 'sec'
States whether the time difference should be specified in seconds or minutes.
"""
if time_type == 'sec':
return datetime.weekday() * 24 * 60 * 60 + datetime.hour * 60 * 60 + datetime.minute * 60 + datetime.second
elif time_type == 'min':
return datetime.weekday() * 24 * 60 + datetime.hour * 60 + datetime.minute
else:
raise ValueError("Invalid time type specified.") | 2ed28166d239dabdc9f8811812e472810b10c7d7 | 3,656,640 |
import numpy as np
from typing import List
from typing import Tuple
def linear_to_image_array(pixels:List[List[int]], size:Tuple[int,int]) -> np.ndarray:
"""\
Converts a linear array ( shape=(width*height, channels) ) into an array
usable by PIL ( shape=(height, width, channels) )."""
a = np.array(pixels, dtype=np.uint8)
split = np.split(a, [i*size[0] for i in range(1,size[1])])
return np.array(split, dtype=np.uint8) | 431170c71a3d6464be5dd5b9d248b2866ba3ac6a | 3,656,641 |
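A small usage sketch (pixel values are illustrative); `size` is given as (width, height):
pixels = [[255, 0, 0], [0, 255, 0], [0, 0, 255], [255, 255, 255]]   # 4 RGB pixels in row-major order
img = linear_to_image_array(pixels, size=(2, 2))
print(img.shape)   # (2, 2, 3) -> (height, width, channels)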
def stop_processes(hosts, pattern, verbose=True, timeout=60):
"""Stop the processes on each hosts that match the pattern.
Args:
hosts (list): hosts on which to stop the processes
pattern (str): regular expression used to find process names to stop
verbose (bool, optional): display command output. Defaults to True.
timeout (int, optional): command timeout in seconds. Defaults to 60
seconds.
Returns:
dict: a dictionary of return codes keys and accompanying NodeSet
values indicating which hosts yielded the return code.
Return code keys:
0 No processes matched the criteria / No processes killed.
1 One or more processes matched the criteria and a kill was
attempted.
"""
result = {}
log = getLogger()
log.info("Killing any processes on %s that match: %s", hosts, pattern)
if hosts is not None:
commands = [
"rc=0",
"if pgrep --list-full {}".format(pattern),
"then rc=1",
"sudo pkill {}".format(pattern),
"if pgrep --list-full {}".format(pattern),
"then sleep 5",
"pkill --signal KILL {}".format(pattern),
"fi",
"fi",
"exit $rc",
]
result = pcmd(hosts, "; ".join(commands), verbose, timeout, None)
return result | 898a358b5e61952d72be15eecb10b00ce8bd2efd | 3,656,642 |
def field_as_table_row(field):
"""Prints a newforms field as a table row.
This function actually does very little, simply passing the supplied
form field instance in a simple context used by the _field_as_table_row.html
template (which is actually doing all of the work).
See soc/templates/soc/templatetags/_field_as_table_row.html for the CSS
styles used by this template tag.
Usage:
{% load forms_helpers %}
...
<table>
{% field_as_table_row form.fieldname %}
...
</table>
Args:
field: a Django newforms field instance
Returns:
a simple context containing the supplied newforms field instance:
{ 'field': field }
"""
return {'field': field} | 74d120e2a46ae8465832d98ddf02848b5b2cc936 | 3,656,643 |
def get_samples(select_samples: list, avail_samples: list) -> list:
"""Get while checking the validity of the requested samples
:param select_samples: The selected samples
:param avail_samples: The list of all available samples based on the range
:return: The selected samples, verified
"""
# Sample number has to be positive
if True in [_ < 0 for _ in select_samples]:
raise ValueError(
"Number of samples with -ns has to be strictly positive!")
# Sample number has to be within the available sample
elif False in [_ in avail_samples for _ in select_samples]:
raise ValueError(
"Some or all selected samples are not available in the design")
return select_samples | e1c0c98697d2c504d315064cbdfbad379165d317 | 3,656,644 |
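A brief usage sketch with illustrative sample numbers:
available = list(range(10))                      # samples 0..9
print(get_samples([1, 3, 5], available))         # [1, 3, 5]
# get_samples([-1, 3], available) raises ValueError (negative sample)
# get_samples([3, 42], available) raises ValueError (42 not in the design)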
def createMemoLayer(type="", crs=4326, name="", fields={"id":"integer"}, index="no"):
"""
Create an in-memory layer from the given parameters.
:param type (string): the geometry type: "point", "linestring",
"polygon", "multipoint", "multilinestring", "multipolygon"
:param crs (int): the CRS projection system code
:param fields (dict): {field_name: field_type(length)} field=name : type(length,precision)
types : "integer", "double", "string(length)"
:param name (string): the name of the layer as it will appear in the legend
:param index (string): whether to create a spatial index
:return (QgsVectorLayer): a QgsVectorLayer object
"""
# build the uri and add all the fields
uri="%s?crs=epsg:%s"%(type,crs)
for key, value in fields.items():
uri="%s&field=%s:%s"%(uri,key, value)
uri="%s&index=%s"%(uri,index)
# create the QgsVectorLayer object
memLayer = QgsVectorLayer(uri, name, "memory")
return memLayer | 713823d9b59b7c4ccf7bdd938a720d385629e02f | 3,656,645 |
import json
from importlib import resources
def load_templates(package):
"""
Returns a dictionary {name: template} for the given instrument.
Templates are defined as JSON objects, with stored in a file named
"<instrument>.<name>.json". All templates for an instrument should
be stored in a templates subdirectory, made into a package by inclusion
of an empty __init__.py file. They can then be loaded using::
from dataflow import core as df
from . import templates
...
instrument = df.Instrument(
...
templates=df.load_templates(templates),
)
"""
templates = {}
for filename in resources.contents(package):
if filename.endswith('.json'):
name = filename.split('.')[-2]
template = json.loads(resources.read_text(package, filename))
templates[name] = template
return templates | 6213eb6e8b7be0bb7057da49d02fe495d7db6660 | 3,656,646 |
def get_count_matrix(args):
"""首先获取数据库中全部文档的id,然后遍历id获取文档内容,再逐文档
进行分词,生成计数矩阵。"""
global DOC2IDX
with DocDB(args.db_path) as doc_db:
doc_ids = doc_db.get_doc_ids()
DOC2IDX = {doc_id: i for i, doc_id in enumerate(doc_ids)}
row, col, data = [], [], []
_count = partial(count, args)
for i in doc_ids:
b_row, b_col, b_data = _count(i)
row.extend(b_row)
col.extend(b_col)
data.extend(b_data)
# Build the sparse matrix in Compressed Sparse Row (CSR) format.
# For background on what a csr_matrix is, see:
# https://www.pianshen.com/article/7967656077/
# https://zhuanlan.zhihu.com/p/342942385
count_matrix = sp.csr_matrix((data, (row, col)), shape=(args.hash_size, len(doc_ids)))
count_matrix.sum_duplicates()
return count_matrix, (DOC2IDX, doc_ids) | 6279666c6dfdf66dba13edfe57e55525de15d894 | 3,656,647 |
def communication_round(model, clients, train_data, train_labels, train_people, val_data, val_labels, val_people,
val_all_labels, local_epochs, weights_accountant, individual_validation, local_operation):
"""
One round of communication between a 'server' and the 'clients'. Each client 'downloads' a global model and trains
a local model, updating its weights locally. When all clients have updated their weights, they are 'uploaded' to
the server and averaged.
:param model: Tensorflow Graph
:param clients: numpy array, array of unique client IDs
:param train_data: numpy array
:param train_labels: numpy array
:param train_people: numpy array
:param val_data: numpy array
:param val_labels: numpy array
:param val_people: numpy array
:param val_all_labels: numpy array
:param local_epochs: int, local epochs to be trained
:param weights_accountant: WeightsAccountant object
:param individual_validation: bool, if true, validation history for every local epoch in a federated setting
is stored (typically not necessary)
:param local_operation: string, valid arguments are "global_averaging", "localized_learning",
and "local_models"
:return:
Pandas DataFrame, training history
"""
# Split train and validation data into clients
train_data, train_labels = dL.split_data_into_clients_dict(train_people, train_data, train_labels)
if val_data is not None:
val_data, val_labels, val_people, val_all_labels = \
dL.split_data_into_clients_dict(val_people, val_data, val_labels, val_people, val_all_labels)
# Train each client
history = {}
for client in clients:
Output.print_client_id(client)
results = client_learning(model, client, local_epochs, train_data, train_labels, val_data, val_labels,
val_people, val_all_labels, weights_accountant, individual_validation)
# Append each client's results to the history dictionary
for key, val in results.items():
history.setdefault(key, []).extend(val)
# Pop general metrics from history as these are duplicated with client metrics, e.g. 'loss' == 'subject_43_loss'
for metric in model.metrics_names:
history.pop(metric, None)
history.pop("val_" + metric, None)
# If there is localization (e.g. the last layer of the model is not being averaged, indicated by less "shared
# weights" compared to total "default weights"), then we adapt local models to the new shared layers
if local_operation == 'localized_learning':
# Average all updates marked as "global"
weights_accountant.federated_averaging(layer_type='global')
# Decrease the learning rate for local adaptation only
K.set_value(model.optimizer.lr, K.get_value(model.optimizer.lr) / LR_FACTOR)
# Freeze the global layers
change_layer_status(model, 'global', 'freeze')
# Reconnect the Convolutional layers
for client in clients:
Output.print_client_id(client)
client_learning(model, client, local_epochs, train_data, train_labels, val_data, val_labels,
val_people, val_all_labels, weights_accountant, individual_validation)
# Unfreeze the global layers
change_layer_status(model, 'global', 'unfreeze')
# Increase the learning rate again
K.set_value(model.optimizer.lr, K.get_value(model.optimizer.lr) * LR_FACTOR)
elif local_operation == 'local_models':
print("No federated averaging.")
pass
elif local_operation == 'global_averaging':
weights_accountant.federated_averaging()
else:
raise ValueError('local_operation only accepts "global_averaging", "localized_learning", and "local_models"'
' as arguments. "{}" was given.'.format(local_operation))
return history | f8a8ef93845e09394cea6a2f6077a0ae2dfaed18 | 3,656,648 |
import collections
def _find_stop_area_mode(query_result, ref):
""" Finds the mode of references for each stop area.
The query results must have 3 columns: primary key, foreign key
reference and number of stop points within each area matching that
reference, in that order.
:param ref: Name of the reference column.
:returns: A list of mappings to be used with `bulk_update_mappings`,
and a dict mapping each invalid stop area to its tied references.
"""
# Group by stop area and reference
stop_areas = collections.defaultdict(dict)
for row in query_result:
stop_areas[row[0]][row[1]] = row[2]
# Check each area and find mode matching reference
update_areas = []
invalid_areas = {}
for sa, count in stop_areas.items():
max_count = [k for k, v in count.items() if v == max(count.values())]
if len(max_count) == 1:
update_areas.append({"code": sa, ref: max_count[0]})
else:
invalid_areas[sa] = max_count
return update_areas, invalid_areas | e4677638b272e67d2ae21ee97f71f1f1700fd072 | 3,656,649 |
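A small illustration with made-up query rows (stop area code, reference, count); the reference column name `admin_area_ref` is hypothetical:
rows = [
    ("A1", "490G01", 3),
    ("A1", "490G02", 1),
    ("B2", "510G01", 2),
    ("B2", "510G02", 2),
]
update, invalid = _find_stop_area_mode(rows, "admin_area_ref")
print(update)    # [{'code': 'A1', 'admin_area_ref': '490G01'}]
print(invalid)   # {'B2': ['510G01', '510G02']}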
def get_all_funds_ranking(fund_type: str = 'all',
start_date: str = '-1y',
end_date: str = arrow.now(),
sort: str = 'desc',
subopts: str = '',
available: str = 1):
"""Get all funds ranking from 'fund.eastmoney.com'. (基金排行)
:param fund_type: (optional) fund type, default is `all`.
value: ct场内 gp股票 hh混合 zq债券 zs指数 bb保本 qdii lof fof
:param start_date: (optional) start date of the custom return, default is `-1y`.
value: -nd -nw -nm -ny cyear or YYYY-MM-DD
:param end_date: (optional) the end date of the results, default is `now`.
:param sort: (optional) results order, default is `desc`.
:param subopts: (optional) some suboptions. format is a list of options(`first,second`).
Suboptions for bonds(有关债券的子选项):
- first option is bonds type(债券类型).
value: cz长债 dz短债 hz混债 dkz定开债 kzz可转债
- second option is leverage ratio(杠杆比例).
value: 0-100 100-150 150-200 200+
Suboptions for stock index(有关指数的子选项):
- first option is index type(标的).
value: hs沪深 hy行业 dp大盘 zxp中小盘 gz股指 zz债指
- second option is stock index operation(运作方式).
value: bd被动 zq增强
Suboptions for QDII fonds.
- first option is fond type(基金类型).
vaule: qqgp全球股票 ytgp亚太股票 dzh大中华区 xxsc新兴市场 jzgj金砖国家
cssc成熟市场 us美国股票 qqidx全球指数 etf hh股债混合 zq债券 sp商品
:param available: (optional) `1` can buy, `0` including both, default is `1`.
:return: a list of the funds.
:rtype: `pd.DataFrame`.
"""
dtype = fund_type == 'ct' and 'fb' or 'kf'
begin = str2date(start_date).format('YYYY-MM-DD')
end = arrow.get(end_date).format('YYYY-MM-DD')
opt1, opt2 = _funds_ranking_subopts(fund_type, subopts)
params = dict(op='ph',dt=dtype,ft=fund_type,rs='',gs=0,sc='zzf',st=sort,pi=1,pn=10000) # exchange-traded funds
fund_type != 'ct' and params.update(dict(sd=begin,ed=end,qdii=opt1,tabSubtype=opt2,dx=available))
resp = sess.get(api.all_funds_rank, params=params)
obj = js2obj(resp.text, 'rankData')
# dataframe
if fund_type == 'ct': # exchange-traded funds
cols = 'code,name,1,date,nav,cnav,-1week,-1month,-3month,-6month,-1year,-2year,'\
'-3year,current_year,since_create,issue_date,,,,,,type'
newcols = cols.replace('1','type,issue_date',1).split(',issue_date,,')[0]
else: # fund ranking
cols = 'code,name,1,date,nav,cnav,percent,-1week,-1month,-3month,-6month,-1year,-2year,'\
'-3year,current_year,since_create,issue_date,,custom,2,,,,'
newcols = cols.replace('1','issue_date',1).replace('issue_date,,','').split(',2')[0]
df = pd.DataFrame([i.split(',')[:-1] for i in obj['datas']],
columns=cols.split(',')).ffill(None)[newcols.split(',')]
df['date'] = pd.to_datetime(df['date'])
df['issue_date'] = pd.to_datetime(df['issue_date'])
df[['nav','cnav']] = df[['nav','cnav']].applymap(lambda x:x and float(x) or None)
colnum = fund_type == 'ct'\
and range(df.columns.get_loc('-1week'), len(df.columns))\
or range(df.columns.get_loc('percent'), len(df.columns))
df.iloc[:,colnum] = df.iloc[:,colnum].applymap(lambda x:x and float(x)/100 or None)
return df | 55dd84c8f8830d6c60411de858a9aec1f14a30be | 3,656,650 |
from typing import List
from typing import Any
from typing import TypeVar

T = TypeVar("T")
def _conform_list(li: List[Any]) -> List[T]:
"""
Ensures that every element in *li* can conform to one type
:param li: list to conform
:return: conformed list
"""
conform_type = li[0].__class__
for i in li:
if isinstance(i, StrictType):
conform_type = i.__class__
break
base_type = (
conform_type.__base__ if conform_type.__base__ != object else None
) # do not let base_type be 'object'
if not all(type(i) == conform_type or type(i) == base_type for i in li):
raise Exception(f"{li} can not be conformed to the {conform_type}")
return [i if isinstance(i, conform_type) else conform_type(i) for i in li] | 29131a9f5979318e0fc50408b67938ffbd56fa5a | 3,656,651 |
def _255_to_tanh(x):
"""
range [0, 255] to range [-1, 1]
:param x:
:return:
"""
return (x - 127.5) / 127.5 | a60a67ee489093292fc58136a8f01387482fb162 | 3,656,652 |
import torch
def train_one_epoch(train_loader, model, criterion, optimizer, epoch, opt, num_train_samples, no_acc_eval=False):
""" model training
:param train_loader: train dataset loader
:param model: model
:param criterion: loss criterion
:param optimizer:
:param epoch: current epoch
:param num_train_samples: total number of samples in train_loader
:param no_acc_eval (bool): accuray eval in model training
:return:
"""
info = {}
losses = AverageMeter('Loss ', ':6.4g')
top1 = AverageMeter('Acc@1 ', ':6.2f')
top5 = AverageMeter('Acc@5 ', ':6.2f')
# switch to train mode
model.train()
lr_scheduler = global_utils.LearningRateScheduler(mode=opt.lr_mode,
lr=opt.lr,
num_training_instances=num_train_samples,
target_lr=opt.target_lr,
stop_epoch=opt.epochs,
warmup_epoch=opt.warmup,
stage_list=opt.lr_stage_list,
stage_decay=opt.lr_stage_decay)
lr_scheduler.update_lr(batch_size=epoch * num_train_samples)
optimizer.zero_grad()
batches_per_allreduce_count = 0
for i, (input_, target) in enumerate(train_loader):
if not opt.independent_training:
lr_scheduler.update_lr(batch_size=input_.shape[0] * opt.world_size)
else:
lr_scheduler.update_lr(batch_size=input_.shape[0])
current_lr = lr_scheduler.get_lr()
for param_group in optimizer.param_groups:
param_group['lr'] = current_lr * opt.batches_per_allreduce
bool_label_smoothing = False
bool_mixup = False
if not opt.dist_mode == 'cpu':
input_ = input_.cuda(opt.gpu, non_blocking=True)
target = target.cuda(opt.gpu, non_blocking=True)
transformed_target = target
with torch.no_grad():
if hasattr(opt, 'label_smoothing') and opt.label_smoothing:
bool_label_smoothing = True
if hasattr(opt, 'mixup') and opt.mixup:
bool_mixup = True
if bool_label_smoothing and not bool_mixup:
transformed_target = one_hot(target, num_classes=opt.num_classes, smoothing_eps=0.1)
if not bool_label_smoothing and bool_mixup:
transformed_target = one_hot(target, num_classes=opt.num_classes)
input_, transformed_target = mixup(input_, transformed_target)
if bool_label_smoothing and bool_mixup:
transformed_target = one_hot(target, num_classes=opt.num_classes, smoothing_eps=0.1)
input_, transformed_target = mixup(input_, transformed_target)
# compute output
output = model(input_)
model_saved = model.module if hasattr(model, 'module') else model
logit_loss = criterion(output, transformed_target)
ts_feature_loss, ts_logit_loss = model_saved.compute_ts_distill_loss()
loss = logit_loss + opt.teacher_feature_weight * ts_feature_loss + opt.teacher_logit_weight * ts_logit_loss
# measure accuracy and record loss
input_size = int(input_.size(0))
if not no_acc_eval:
# pylint: disable=unbalanced-tuple-unpacking
acc1, acc5 = accuracy(output.data, target, topk=(1, 5))
top1.update(float(acc1[0]), input_size)
top5.update(float(acc5[0]), input_size)
else:
acc1 = [0]
acc5 = [0]
losses.update(float(loss), input_size)
if opt.apex:
if opt.dist_mode == 'horovod':
if batches_per_allreduce_count >= opt.batches_per_allreduce:
optimizer.zero_grad()
batches_per_allreduce_count = 0
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
batches_per_allreduce_count += 1
if opt.grad_clip is not None:
torch.nn.utils.clip_grad_value_(model_saved.parameters(), opt.grad_clip)
if batches_per_allreduce_count >= opt.batches_per_allreduce:
optimizer.synchronize()
with optimizer.skip_synchronize():
optimizer.step()
else:
# if batches_per_allreduce_count >= opt.batches_per_allreduce:
optimizer.zero_grad()
batches_per_allreduce_count = 0
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
if opt.grad_clip is not None:
torch.nn.utils.clip_grad_value_(model_saved.parameters(), opt.grad_clip)
# if batches_per_allreduce_count >= opt.batches_per_allreduce:
optimizer.step()
else:
if batches_per_allreduce_count >= opt.batches_per_allreduce:
optimizer.zero_grad()
batches_per_allreduce_count = 0
loss.backward()
batches_per_allreduce_count += 1
if opt.grad_clip is not None:
torch.nn.utils.clip_grad_value_(model_saved.parameters(), opt.grad_clip)
if batches_per_allreduce_count >= opt.batches_per_allreduce:
optimizer.step()
if i % opt.print_freq == 0:
print(
f'<rank {opt.rank}> Train epoch={epoch}, i={i}, loss={float(loss):4g}, \
logit_loss={float(logit_loss):4g}, ts_feature_loss={float(ts_feature_loss):4g}, \
ts_logit_loss={float(ts_logit_loss):4g}, \
acc1={float(acc1[0]):4g}%, acc5={float(acc5[0]):4g}%, lr={current_lr:4g}')
top1_acc_avg = top1.avg
top5_acc_avg = top5.avg
losses_acc_avg = losses.avg
# if distributed, sync
if opt.dist_mode == 'horovod' and (not opt.independent_training):
sync_tensor = torch.tensor([top1.sum, top1.count, top5.sum, top5.count,
losses.sum, losses.count], dtype=torch.float32)
hvd.allreduce(sync_tensor, name='sync_tensor_topk_acc')
top1_acc_avg = (sync_tensor[0] / sync_tensor[1]).item()
top5_acc_avg = (sync_tensor[2] / sync_tensor[3]).item()
losses_acc_avg = (sync_tensor[4] / sync_tensor[5]).item()
elif opt.dist_mode == 'apex' and opt.distributed:
sync_tensor = torch.tensor([top1.sum, top1.count, top5.sum, top5.count,
losses.sum, losses.count], dtype=torch.float32).cuda()
dist.all_reduce(sync_tensor, op=dist.ReduceOp.SUM)
top1_acc_avg = (sync_tensor[0] / sync_tensor[1]).item()
top5_acc_avg = (sync_tensor[2] / sync_tensor[3]).item()
losses_acc_avg = (sync_tensor[4] / sync_tensor[5]).item()
else:
pass
info['losses_acc'] = losses_acc_avg
info['top1_acc'] = top1_acc_avg
info['top5_acc'] = top5_acc_avg
return info | 5b5efd1292322090abcb795fc633638f478f0afa | 3,656,654 |
import datetime
def Write(Variable, f):
"""Function to Convert None Strings to Strings and Format to write to file with ,"""
if isinstance(Variable, str) == False:
if isinstance(Variable, datetime.datetime) == True:
return f.write(f"{Variable.strftime('%Y-%m-%d')},")
else:
Variable = round(Variable, 2)
return f.write(f"{str(Variable)},")
elif isinstance(Variable, str) == True:
return f.write(f"{(Variable)},") | 9963c4117c7cc3f19d91331ed6c36e5733cffb56 | 3,656,655 |
def graphs_infos():
"""
Build and return a JSON file containing some information on all the graphs.
The json file is built with the following format:
[
For each graph in the database :
{
'graph_id': the id of the graph,
'name': the name of the graph,
'iso': the string 'true' or 'false' depending if the graph belongs to J or not
}
]
:return: a JSON file containing some information on all the graphs.
"""
return jsonify(gdb.get_graph_infos()) | ab6fee49188ad422e1e3a5e2763510ae791a840b | 3,656,656 |
def collect_compare(left, right):
"""
returns a tuple of four lists describing the file paths that have
been (in order) added, removed, altered, or left the same
"""
return collect_compare_into(left, right, [], [], [], []) | 2a29d7b896fb037a8784e7c82794d9b67eb2924a | 3,656,657 |
def _get_smallest_vectors(supercell, primitive, symprec):
"""
shortest_vectors:
Shortest vectors from an atom in primitive cell to an atom in
supercell in the fractional coordinates. If an atom in supercell
is on the border centered at an atom in primitive and there are
multiple vectors that have the same distance and different
directions, several shortest vectors are stored. The
multiplicity is stored in another array, "multiplicity".
[atom_super, atom_primitive, multiple-vectors, 3]
multiplicity:
Number of multiple shortest vectors (third index of "shortest_vectors")
[atom_super, atom_primitive]
"""
p2s_map = primitive.get_primitive_to_supercell_map()
size_super = supercell.get_number_of_atoms()
size_prim = primitive.get_number_of_atoms()
shortest_vectors = np.zeros((size_super, size_prim, 27, 3), dtype='double')
multiplicity = np.zeros((size_super, size_prim), dtype='intc')
reduced_bases = get_reduced_bases(supercell.get_cell(), symprec)
reduced_bases_inv = np.linalg.inv(reduced_bases)
primitive_lattice = primitive.get_cell()
primitive_lattice_inv = np.linalg.inv(primitive_lattice)
# matrix that converts fractional positions in the reduced bases into
# fractional positions in the primitive lattice
supercell_to_primitive_frac = reduced_bases.dot(primitive_lattice_inv)
# all positions are reduced into the cell formed by the reduced bases
supercell_fracs = np.dot(supercell.get_positions(), reduced_bases_inv)
supercell_fracs -= np.rint(supercell_fracs)
for s_index, s_pos in enumerate(supercell_fracs): # run in supercell
for j, p_index in enumerate(p2s_map): # run in primitive
p_pos = supercell_fracs[p_index]
# find smallest vectors equivalent under the supercell lattice
vectors = _get_equivalent_smallest_vectors_simple(s_pos - p_pos,
reduced_bases,
symprec)
# return primitive-cell-fractional vectors rather than supercell-fractional
vectors = [np.dot(v, supercell_to_primitive_frac) for v in vectors]
multiplicity[s_index][j] = len(vectors)
for k, elem in enumerate(vectors):
shortest_vectors[s_index][j][k] = elem
return shortest_vectors, multiplicity | 352d4e7ba9552fa4fe5abdb9eb45c4555dff603d | 3,656,658 |
def root():
"""Root endpoint that only checks if the server is running."""
return 'Server is running...' | ea9ecd1c736e9379795f361462ed54f464a4008b | 3,656,659 |
def clone_model(model, **new_values):
"""Clones the entity, adding or overriding constructor attributes.
The cloned entity will have exactly the same property values as the
original entity, except where overridden. By default, it will have no
parent entity or key name, unless supplied.
Args:
model: datastore_services.Model. Model to clone.
**new_values: dict(str: *). Keyword arguments to override when
invoking the cloned entity's constructor.
Returns:
datastore_services.Model. A cloned, and possibly modified, copy of self.
Subclasses of BaseModel will return a clone with the same type.
"""
# Reference implementation: https://stackoverflow.com/a/2712401/4859885.
cls = model.__class__
model_id = new_values.pop('id', model.id)
props = {k: v.__get__(model, cls) for k, v in cls._properties.items()} # pylint: disable=protected-access
props.update(new_values)
return cls(id=model_id, **props) | ed668632c8917ad685b86fb5c71146be7c9b3b96 | 3,656,660 |
def learn_laterals(frcs, bu_msg, perturb_factor, use_adjaceny_graph=False):
"""Given the sparse representation of each training example,
learn perturbation laterals. See train_image for parameters and returns.
"""
if use_adjaceny_graph:
graph = make_adjacency_graph(frcs, bu_msg)
graph = adjust_edge_perturb_radii(frcs, graph, perturb_factor=perturb_factor)
else:
graph = nx.Graph()
graph.add_nodes_from(range(frcs.shape[0]))
graph = add_underconstraint_edges(frcs, graph, perturb_factor=perturb_factor)
graph = adjust_edge_perturb_radii(frcs, graph, perturb_factor=perturb_factor)
edge_factors = np.array(
[(edge_source, edge_target, edge_attrs['perturb_radius'])
for edge_source, edge_target, edge_attrs in graph.edges_iter(data=True)])
return graph, edge_factors | 68333bca0fc3231470268ece6478b372767a6648 | 3,656,661 |
def get_info(ingest_ldd_src_dir):
"""Get LDD version and namespace id."""
# look in src directory for ingest LDD
ingest_ldd = find_primary_ingest_ldd(ingest_ldd_src_dir)
# get ingest ldd version
tree = ETree.parse(ingest_ldd[0])
root = tree.getroot()
ldd_version = root.findall(f'.//{{{PDS_NS}}}ldd_version_id')[0].text
ns_id = root.findall(f'.//{{{PDS_NS}}}namespace_id')[0].text
return ingest_ldd, ns_id, ldd_version | 92c4d6f8f18c4204d2a8483584b6f1409d9ee243 | 3,656,662 |
def generate_tfidf(corpus_df, dictionary):
"""Generates TFIDF matrix for the given corpus.
Parameters
----------
corpus_df : pd.DataFrame
The corpus dataframe.
dictionary : gensim.corpora.dictionary.Dictionary
Dictionary defining the vocabulary of the TFIDF.
Returns
-------
X : np.ndarray
TFIDF matrix with documents as rows and vocabulary as the columns.
"""
tfidf_model = TfidfModel(
corpus_df.bag_of_words.apply(lambda x: dictionary.doc2bow(x)))
model = tfidf_model[
corpus_df.bag_of_words.apply(lambda x: dictionary.doc2bow(x))]
X = corpus2csc(model, len(dictionary)).T
return X | 6c5cd6b569010c69b446223a099cfd745d51ce6c | 3,656,663 |
from typing import Tuple
from typing import Optional
import numpy as np
import torch
def compute_mask_indices(
shape: Tuple[int, int],
padding_mask: Optional[torch.Tensor],
mask_prob: float,
mask_length: int,
mask_type: str = "static",
mask_other: float = 0.0,
min_masks: int = 0,
no_overlap: bool = False,
min_space: int = 0,
) -> np.ndarray:
"""
Computes random mask spans for a given shape
Args:
shape: the shape for which to compute masks.
should be of size 2 where first element is batch size and 2nd is timesteps
padding_mask: optional padding mask of the same size as shape, which will prevent masking padded elements
mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by
number of timesteps divided by length of mask span to mask approximately this percentage of all elements.
however due to overlaps, the actual number will be smaller (unless no_overlap is True)
mask_type: how to compute mask lengths
static = fixed size
uniform = sample from uniform distribution [mask_other, mask_length*2]
normal = sample from normal distribution with mean mask_length and stdev mask_other. mask is min 1 element
poisson = sample from poisson distribution with lambda = mask length
min_masks: minimum number of masked spans
no_overlap: if true, will switch to an alternative recursive algorithm that prevents spans from overlapping
min_space: only used if no_overlap is True, this is how many elements to keep unmasked between spans
"""
bsz, all_sz = shape
mask = np.full((bsz, all_sz), False)
all_num_mask = int(
# add a random number for probabilistic rounding
mask_prob * all_sz / float(mask_length)
+ np.random.rand()
)
all_num_mask = max(min_masks, all_num_mask)
mask_idcs = []
for i in range(bsz):
if padding_mask is not None:
sz = all_sz - padding_mask[i].long().sum().item()
num_mask = int(
# add a random number for probabilistic rounding
mask_prob * sz / float(mask_length)
+ np.random.rand()
)
num_mask = max(min_masks, num_mask)
else:
sz = all_sz
num_mask = all_num_mask
if mask_type == "static":
lengths = np.full(num_mask, mask_length)
elif mask_type == "uniform":
lengths = np.random.randint(mask_other, mask_length * 2 + 1, size=num_mask)
elif mask_type == "normal":
lengths = np.random.normal(mask_length, mask_other, size=num_mask)
lengths = [max(1, int(round(x))) for x in lengths]
elif mask_type == "poisson":
lengths = np.random.poisson(mask_length, size=num_mask)
lengths = [int(round(x)) for x in lengths]
else:
raise Exception("unknown mask selection " + mask_type)
if sum(lengths) == 0:
lengths[0] = min(mask_length, sz - 1)
if no_overlap:
mask_idc = []
def arrange(s, e, length, keep_length):
span_start = np.random.randint(s, e - length)
mask_idc.extend(span_start + i for i in range(length))
new_parts = []
if span_start - s - min_space >= keep_length:
new_parts.append((s, span_start - min_space + 1))
if e - span_start - keep_length - min_space > keep_length:
new_parts.append((span_start + length + min_space, e))
return new_parts
parts = [(0, sz)]
min_length = min(lengths)
for length in sorted(lengths, reverse=True):
lens = np.fromiter(
(e - s if e - s >= length + min_space else 0 for s, e in parts),
int,
)
l_sum = np.sum(lens)
if l_sum == 0:
break
probs = lens / np.sum(lens)
c = np.random.choice(len(parts), p=probs)
s, e = parts.pop(c)
parts.extend(arrange(s, e, length, min_length))
mask_idc = np.asarray(mask_idc)
else:
min_len = min(lengths)
if sz - min_len <= num_mask:
min_len = sz - num_mask - 1
mask_idc = np.random.choice(sz - min_len, num_mask, replace=False)
mask_idc = np.asarray(
[
mask_idc[j] + offset
for j in range(len(mask_idc))
for offset in range(lengths[j])
]
)
mask_idcs.append(np.unique(mask_idc[mask_idc < sz]))
min_len = min([len(m) for m in mask_idcs])
for i, mask_idc in enumerate(mask_idcs):
if len(mask_idc) > min_len:
mask_idc = np.random.choice(mask_idc, min_len, replace=False)
mask[i, mask_idc] = True
return mask | 8ecd84ca805112312d43bd8ba3f4c0aa3918800d | 3,656,665 |
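A quick sketch of calling it, assuming numpy and torch are importable; the shape and probabilities are illustrative:
mask = compute_mask_indices(
    shape=(2, 50),        # batch of 2 sequences, 50 timesteps each
    padding_mask=None,
    mask_prob=0.5,
    mask_length=5,
)
print(mask.shape)          # (2, 50), boolean
print(mask.sum(axis=1))    # number of masked timesteps per sequence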
from typing import Optional
from typing import List
from typing import Dict
from typing import Any
def fetch_data(
property: Property,
start_date: dt.date,
*,
end_date: Optional[dt.date] = None,
dimensions: Optional[List[Dimension]] = None,
) -> List[Dict[str, Any]]:
"""Query Google Search Console API for data.
Args:
property (Property): Property to request data for.
start_date (dt.date): Earliest day to request information for.
end_date (Optional[dt.date]): Latest day to request information for. Defaults to
``None``. Will be set to ``start_date`` if ``None``.
dimensions (Optional[List[Dimension]], optional): Dimensions to request from
API. Defaults to ``None``. Will be set to ``["page", "device"]`` if
``None``.
Returns:
List[Dict[str, Any]]: Response from API.
"""
if end_date is None:
end_date = start_date
if dimensions is None:
dimensions = ["page", "device"]
results = []
start_row = 0
ROW_LIMIT = 25000
while True:
request = {
"startDate": start_date.isoformat(),
"endDate": end_date.isoformat(),
"dimensions": dimensions,
"rowLimit": ROW_LIMIT,
"startRow": start_row,
"dataState": "all",
}
response = (
searchconsole_service.searchanalytics()
.query(siteUrl=property.url, body=request)
.execute()
)
start_row += ROW_LIMIT
result = response.get("rows", [])
results.extend(result)
if len(result) == 0:
break
return results | cb871f6e269005db9a338c4bf75949b8ba9ea04a | 3,656,667 |
def inport(port_type, disconnected_value):
"""Marks this field as an inport"""
assert port_type in port_types, \
"Got %r, expected one of %s" % (port_type, port_types)
tag = "inport:%s:%s" % (port_type, disconnected_value)
return tag | a9335d99b65a4944ef58f06b90f8978e7478ec13 | 3,656,669 |
def _empty_aggregate(*args: npt.ArrayLike, **kwargs) -> npt.ArrayLike:
"""Return unchaged array."""
return args[0] | c7f6ebc345517b10a3b65c5ac0f0bf060cdf7634 | 3,656,671 |
def kfpartial(fun, *args, **kwargs):
""" Allows to create partial functions with arbitrary arguments/keywords """
return partial(keywords_first(fun), *args, **kwargs) | 7f7dbbdf484e36c2734e47b448f081812cb8a326 | 3,656,672 |
def power_state_update(system_id, state):
"""Report to the region about a node's power state.
:param system_id: The system ID for the node.
:param state: Typically "on", "off", or "error".
"""
client = getRegionClient()
return client(
UpdateNodePowerState,
system_id=system_id,
power_state=state) | b05730fe9e45b3ee81adb7e8047b0b87e3bf7556 | 3,656,673 |
from typing import Any
def build_post307_request(*, json: Any = None, content: Any = None, **kwargs: Any) -> HttpRequest:
"""Post redirected with 307, resulting in a 200 after redirect.
See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
into your code flow.
:keyword json: Pass in a JSON-serializable object (usually a dictionary). See the template in
our example to find the input shape. Simple boolean value true.
:paramtype json: any
:keyword content: Pass in binary content you want in the body of the request (typically bytes,
a byte iterator, or stream input). Simple boolean value true.
:paramtype content: any
:return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
`send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
incorporate this response into your code flow.
:rtype: ~azure.core.rest.HttpRequest
Example:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
json = True # Optional. Default value is True.
"""
content_type = kwargs.pop("content_type", None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", "/http/redirect/307")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="POST", url=url, headers=header_parameters, json=json, content=content, **kwargs) | 2c26cfed95a33fe700b83d7e1fa4eb93ef312721 | 3,656,674 |
def rm_ssp_storage(ssp_wrap, lus, del_unused_images=True):
"""Remove some number of LogicalUnits from a SharedStoragePool.
The changes are flushed back to the REST server.
:param ssp_wrap: SSP EntryWrapper representing the SharedStoragePool to
modify.
:param lus: Iterable of LU ElementWrappers or LUEnt EntryWrappers
representing the LogicalUnits to delete.
:param del_unused_images: If True, and a removed Disk LU was the last one
linked to its backing Image LU, the backing Image
LU is also removed.
:return: The (possibly) modified SSP wrapper.
"""
if _rm_lus(ssp_wrap.logical_units, lus,
del_unused_images=del_unused_images):
# Flush changes
ssp_wrap = ssp_wrap.update()
return ssp_wrap | 0c61becd8f9e23ac269ef0546abb0857facd89de | 3,656,675 |
def urp_detail_view(request, pk):
"""Renders the URP detail page
"""
urp = get_object_or_404(URP, pk=pk)
ctx = {
'urp': urp,
}
# if user is logged in as a student, check if user has already applied
if request.user.is_authenticated:
if request.user.uapuser.is_student:
ctx['applied'] = Application.objects.filter(applicant=request.user, urp=urp).exists()
else:
ctx['applied'] = True
return render(request, 'post/urp_detail.html', context=ctx) | 15e7e86cf2e47bccda52682bdf205e43d8a03f5f | 3,656,676 |
import functools
def squeeze_excite(input_name, squeeze_factor):
"""Returns a squeeze-excite block."""
ops = []
append = functools.partial(append_op, ops)
append(op_name="se/pool0",
op_type=OpType.AVG_POOL,
input_kwargs={"window_shape": 0},
input_names=[input_name])
append(op_name="se/dense1",
op_type=OpType.DENSE,
op_kwargs={"features": f"S:-1%{squeeze_factor}"})
append(op_name="se/swish2",
op_type=OpType.SWISH)
append(op_name="se/dense3",
op_type=OpType.DENSE,
op_kwargs={"features": f"S:-1*{squeeze_factor}"})
append(op_name="se/sigmoid4",
op_type=OpType.SIGMOID)
append(op_name="se/mul5",
op_type=OpType.MUL,
input_names=[input_name, ops[-1].name])
return ops | 907acc7f31db9ab4d70f976320fdd779b66b7160 | 3,656,677 |
def get_code_v2(fl = r'C:\Users\bogdan\code_seurat\WholeGenome_MERFISH\Coordinates_code_1000region.csv'):
"""
Given a .csv file with header this returns 2 dictionaries: tad_to_PR,PR_to_tad
"""
lst = [(ln[:-1].split(',')[0].replace('__','_'),['R'+R for R in ln[:-1].split(',')[3].split('--')])
for ln in open(fl,'r')][1:]
tad_to_PR = dict(lst)
PR_to_tad = {Rs_to_Rnm(Rs):nm for nm,Rs in lst}
return tad_to_PR,PR_to_tad | f5a9e1bbd1f404819a700ee43cff826333ce736c | 3,656,678 |
from funcs.modeling_funcs import modeling_settings, generate_observation_ensemble
def run_source_lsq(vars, vs_list=vs_list):
"""
Run the Source model for the given parameter samples and return the least-squares objective.
The function is called by AdaptiveLejaPCE.
"""
print('Read Parameters')
parameters = pd.read_csv('../data/Parameters-PCE.csv', index_col='Index')
# Define objective functions
# Use annual or monthly loads
def timeseries_sum(df, temp_scale = 'annual'):
"""
Obtain the sum of timeseries of different temporal scale.
temp_scale: str, default is 'Y', monthly using 'M'
"""
assert temp_scale in ['monthly', 'annual'], 'The temporal scale given is not supported.'
if temp_scale == 'monthly':
sum_126001A = df.resample('M').sum()
else:
month_126001A = df.resample('M').sum()
sum_126001A = pd.DataFrame(index = np.arange(df.index[0].year, df.index[-1].year),
columns=df.columns)
for i in range(sum_126001A.shape[0]):
sum_126001A.iloc[i, :] = month_126001A.iloc[i*12: (i+1)*12, :].sum()
return sum_126001A
# End timeseries_sum()
# import observation if the output.txt requires the use of obs.
date_range = pd.to_datetime(['2017/07/01', '2018/06/30'])
observed_din = pd.read_csv(f'{file_settings()[1]}126001A.csv', index_col='Date')
observed_din.index = pd.to_datetime(observed_din.index)
observed_din = observed_din.loc[date_range[0]:date_range[1], :].filter(items=[observed_din.columns[0]]).apply(lambda x: 1000 * x)
# loop over the vars and try to use parallel
parameter_df = pd.DataFrame(index=np.arange(vars.shape[1]), columns=parameters.Name_short)
for i in range(vars.shape[1]):
parameter_df.iloc[i] = vars[:, i]
# set the time period of the results
retrieve_time = [pd.Timestamp('2017-07-01'), pd.Timestamp('2018-06-30')]
# define the modeling period and the recording variables
_, _, criteria, start_date, end_date = modeling_settings()
din = generate_observation_ensemble(vs_list,
criteria, start_date, end_date, parameter_df, retrieve_time)
# obtain the sum at a given temporal scale
# din_pbias = sp.objectivefunctions.pbias(observed_din[observed_din.columns[0]], din[column_names[0]])
din_126001A = timeseries_sum(din, temp_scale = 'annual')
obs_din = timeseries_sum(observed_din, temp_scale = 'annual')
din_126001A = pd.DataFrame(din_126001A,dtype='float')
obs_din = pd.DataFrame(obs_din,dtype='float')
resid = (obs_din - din_126001A).values
lsq = np.sum(resid ** 2, axis=0)
lsq = lsq.reshape(lsq.shape[0], 1)
print(f'Finish {lsq.shape[0]} run')
return lsq | e43679a0808108560714e32def9399ce45a6bd8e | 3,656,679 |
def finnegans_wake_unicode_chars():
"""Data fixture that returns a string of all unicode characters in Finnegan's Wake."""
return '¤·àáãéìóôþŒŠŸˆ–—‘’‚“”‡…‹' | 78205c9181545544a61ef1eab6c2f51d212dac13 | 3,656,680 |
def kit(): # simpler version
"""Open communication with the dev-kit once for all tests."""
return usp.Devkit() | 3001cbfeaf212e9a09e512c102eae6bffa263375 | 3,656,682 |
import numpy as np
def givens_rotation(A):
"""Perform QR decomposition of matrix A using Givens rotation."""
(num_rows, num_cols) = np.shape(A)
# Initialize orthogonal matrix Q and upper triangular matrix R.
Q = np.identity(num_rows)
R = np.copy(A)
# Iterate over lower triangular matrix.
(rows, cols) = np.tril_indices(num_rows, -1, num_cols)
for (row, col) in zip(rows, cols):
# Compute Givens rotation matrix and
# zero-out lower triangular matrix entries.
if R[row, col] != 0:
(c, s) = _givens_rotation_matrix_entries(R[col, col], R[row, col])
G = np.identity(num_rows)
G[[col, row], [col, row]] = c
G[row, col] = s
G[col, row] = -s
R = np.dot(G, R)
Q = np.dot(Q, G.T)
return (Q, R) | 207cadc90c7c4aab76c7422d314b5470ce17251a | 3,656,683 |
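A short verification sketch. The helper `_givens_rotation_matrix_entries` is not shown above, so a plausible stand-in is included here purely as an assumption; it returns the cosine/sine pair that zeroes the second component.
from math import hypot
import numpy as np

def _givens_rotation_matrix_entries(a, b):
    # assumed helper: (c, s) such that the rotation zeroes out b
    r = hypot(a, b)
    return a / r, -b / r

A = np.random.rand(4, 3)
Q, R = givens_rotation(A)
print(np.allclose(Q @ R, A))                  # True: the factorization reproduces A
print(np.allclose(Q.T @ Q, np.identity(4)))   # True: Q is orthogonal
print(np.allclose(np.tril(R, -1), 0))         # True: R is upper triangular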
from typing import Union
from pathlib import Path
from typing import Optional
import json
def lex_from_str(
*,
in_str: Union[str, Path],
grammar: str = "standard",
ir_file: Optional[Union[str, Path]] = None,
) -> JSONDict:
"""Run grammar of choice on input string.
Parameters
----------
in_str : Union[str, Path]
The string to be parsed.
grammar : str
Grammar to be used. Defaults to "standard".
ir_file : Optional[Union[str, Path]]
File to write intermediate representation to (JSON format).
None by default, which means file is not written out.
Returns
-------
The contents of the input string as a dictionary.
Raises
------
:exc:`ParselglossyError`
"""
try:
lexer = dispatch_grammar(grammar)
except KeyError:
raise ParselglossyError(f"Grammar {grammar} not available.")
ir = parse_string_to_dict(lexer, in_str)
if ir_file is not None:
ir_file = path_resolver(ir_file)
with ir_file.open("w") as out:
json.dump(ir, out, cls=ComplexEncoder, indent=4)
return ir | 5416bd56426012c56050a0dba2835385fa4177e5 | 3,656,684 |
def e() -> ProcessBuilder:
"""
Euler's number (e)
:return: The numerical value of Euler's number.
"""
return process('e', ) | f984b5de5a0b95109c9ec2fe5a2b30c880226b28 | 3,656,685 |
def get_or_create_anonymous_cart_from_token(token,
cart_queryset=Cart.objects.all()):
"""Returns open anonymous cart with given token or creates new.
:type cart_queryset: saleor.cart.models.CartQueryset
:type token: string
:rtype: Cart
"""
return cart_queryset.open().filter(token=token, user=None).get_or_create(
defaults={'user': None})[0] | 8ffb1f64b77c97b260502f1d4c689e3a4edc4f36 | 3,656,686 |
from typing import Any
def accept_data(x: Any) -> Any:
"""Accept any types of data and return it as convenient type.
Args:
x: Any type of data.
Returns:
Any: Accepted data.
"""
if isinstance(x, str):
return x
elif isinstance(x, list):
return x
elif isinstance(x, dict):
return x
elif isinstance(x, tuple):
return x
elif isinstance(x, set):
return x
elif isinstance(x, float):
return x
elif isinstance(x, int):
return x
elif isinstance(x, bool):
return x
elif isinstance(x, type(None)):
return x
else:
return x | 9862995eafb7015fc446466e2dbb7774be39f54b | 3,656,688 |
def custom_model_template(model_type: str, target: str, result0: str, result1: str) -> str:
"""Template for feature behaviour reason generated from DICE
Returns:
str: behaviour
"""
if model_type == 'classifier':
tipo = 'category'
elif model_type == 'regressor':
tipo = 'continuous'
behaviour = get_behaviour(tipo = tipo, result0 = result0, result1 = result1)
phrase = generic_type_template(tipo = tipo, name = target, behaviour = behaviour, result0 = result0, result1 = result1)
result = color.BLUE + f" the output of the model {phrase}." + color.END
return result | bbd43a462f6d9d65984dbd242c7fe8a5d2be5e39 | 3,656,689 |
def merge_dict_list(merged, x):
""" merge x into merged recursively.
x is either a dict or a list
"""
if type(x) is list:
return merged + x
for key in x.keys():
if key not in merged.keys():
merged[key] = x[key]
elif x[key] is not None:
merged[key] = merge_dict_list(merged[key], x[key])
return merged | 00685be39a0b1447c81ecd8de777ebab38aa9bfe | 3,656,690 |
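An illustrative call mixing nested dicts and lists (note that `merged` is modified in place):
a = {"tags": ["x"], "meta": {"author": "a"}}
b = {"tags": ["y"], "meta": {"year": 2020}, "extra": 1}
print(merge_dict_list(a, b))
# {'tags': ['x', 'y'], 'meta': {'author': 'a', 'year': 2020}, 'extra': 1}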
def is_ref(variant, exclude_alleles=None):
"""Returns true if variant is a reference record.
Variant protos can encode sites that aren't actually mutations in the
sample. For example, the record ref='A', alt='.' indicates that there is
no mutation present (i.e., alt is the missing value).
Args:
variant: nucleus.genomics.v1.Variant.
exclude_alleles: list(str). The alleles in this list will be ignored.
Returns:
True if there are no actual alternate alleles.
"""
relevant_alts = _non_excluded_alts(variant.alternate_bases, exclude_alleles)
return not relevant_alts | 2c762bbf070f375b546f0902e3567ca5542cc774 | 3,656,691 |
def gomc_sim_completed_properly(job, control_filename_str):
"""General check to see if the gomc simulation was completed properly."""
job_run_properly_bool = False
output_log_file = "out_{}.dat".format(control_filename_str)
if job.isfile(output_log_file):
# with open(f"workspace/{job.id}/{output_log_file}", "r") as fp:
with open(f"{output_log_file}", "r") as fp:
out_gomc = fp.readlines()
for i, line in enumerate(out_gomc):
if "Move" in line:
split_move_line = line.split()
if (
split_move_line[0] == "Move"
and split_move_line[1] == "Type"
and split_move_line[2] == "Mol."
and split_move_line[3] == "Kind"
):
job_run_properly_bool = True
else:
job_run_properly_bool = False
return job_run_properly_bool | 20635ba94b5176298216ad5807e6428a5fb957c2 | 3,656,692 |
from typing import Union
from typing import Optional
def rv_precision(
wavelength: Union[Quantity, ndarray],
flux: Union[Quantity, ndarray],
mask: Optional[ndarray] = None,
**kwargs,
) -> Quantity:
"""Calculate the theoretical RV precision achievable on a spectrum.
Parameters
----------
wavelength: array-like or Quantity
Wavelength of spectrum.
flux: array-like or Quantity
Flux of spectrum.
mask: array-like, Quantity or None
Masking function array to apply to the pixel weights.
kwargs:
Kwargs for sqrt_sum_wis
Returns
-------
RVrms: astropy.Quantity
Radial velocity precision of spectra in m/s.
"""
return c / sqrt_sum_wis(wavelength, flux, mask=mask, **kwargs) | 91d6a741d992bd915549becd371d29b6634b92ef | 3,656,693 |
def changenonetoNone(s):
"""Convert str 'None' to Nonetype
"""
if s=='None':
return None
else:
return s | 9f6af1580d8b47d2a7852e433f7ba8bbd5c7044d | 3,656,694 |
import numpy as np
def quaternion_2_rotation_matrix(q):
"""
Convert a quaternion to a rotation matrix.
:param q: quaternion, ordered as (w, x, y, z)
:return: the rotation matrix
"""
rotation_matrix = np.array([[np.square(q[0]) + np.square(q[1]) - np.square(q[2]) - np.square(q[3]),
2 * (q[1] * q[2] - q[0] * q[3]), 2 * (q[1] * q[3] + q[0] * q[2])],
[2 * (q[1] * q[2] + q[0] * q[3]),
np.square(q[0]) - np.square(q[1]) + np.square(q[2]) - np.square(q[3]),
2 * (q[2] * q[3] - q[0] * q[1])],
[2 * (q[1] * q[3] - q[0] * q[2]), 2 * (q[2] * q[3] + q[0] * q[1]),
np.square(q[0]) - np.square(q[1]) - np.square(q[2]) + np.square(q[3])]],
dtype=np.float32)
return rotation_matrix | f2e420a1e0b6838fb2ce5f9288842e1ae39134c9 | 3,656,695 |
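A sanity check: the identity quaternion (w, x, y, z) = (1, 0, 0, 0) maps to the identity matrix, and a 90-degree rotation about z sends the x axis onto the y axis.
import numpy as np

print(np.allclose(quaternion_2_rotation_matrix([1.0, 0.0, 0.0, 0.0]), np.eye(3)))    # True

half = np.pi / 4                                   # half of a 90-degree angle
q_z90 = [np.cos(half), 0.0, 0.0, np.sin(half)]
print(np.round(quaternion_2_rotation_matrix(q_z90) @ np.array([1.0, 0.0, 0.0]), 6))  # ~[0. 1. 0.]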
def sum(mat, axis, target=None):
"""
Sum the matrix along the given dimension, where 0 represents the leading
dimension and 1 represents the non-leading dimension. If a target is
not provided, a new vector is created for storing the result.
"""
m = _eigenmat.get_leading_dimension(mat.p_mat)
n = _eigenmat.get_nonleading_dimension(mat.p_mat)
if axis == 0:
# sum along leading dimension
if not target:
target = empty((1, n))
elif axis == 1:
# sum along non-leading dimension
if not target:
target = empty((m, 1))
err_code = _eigenmat.sum_by_axis(mat.p_mat, target.p_mat, ct.c_int(axis))
if err_code:
raise generate_exception(err_code)
return target | 426ba7b2673a52663e04d3c6f07fb2f4e001244b | 3,656,696 |
from datetime import datetime
def convert_created_time_to_datetime(datestring):
"""
Args:
datestring (str): a string object either as a date or
a unix timestamp
Returns:
a pandas datetime object
"""
if len(datestring) == 30:
return pd.to_datetime(datestring)
else:
return pd.to_datetime(datetime.fromtimestamp(int(datestring[:10]))) | 2559d079b5b7174d192e3a5d9178701ae7080d3b | 3,656,697 |
def identify_word_classes(tokens, word_classes):
"""
Match word classes to the token list
:param list tokens: List of tokens
:param dict word_classes: Dictionary of word lists to find and tag with the
respective dictionary key
    :return: Word classes matched in the token list
    :rtype: set
"""
if word_classes is None:
word_classes = []
classes = set()
for key in word_classes:
for token in tokens:
if token.lower() in word_classes[key]:
classes.add(key)
return classes | ca7aa602d19ac196321af19c42a60df415c7d115 | 3,656,698 |
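A self-contained example of how this might be called (illustration mine; the word lists are made up):
word_classes = {
    "animal": ["dog", "cat", "horse"],
    "colour": ["red", "blue", "green"],
}
tokens = ["The", "dog", "chased", "the", "red", "ball"]
print(identify_word_classes(tokens, word_classes))  # {'animal', 'colour'}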
from typing import List
from typing import Tuple
def find_connecting_stops(routes) -> List[Tuple[Stop, List[Route]]]:
"""
Find all stops that connect more than one route.
Return [Stop, [Route]]
"""
stops = {}
for route in sorted(routes, key=Route.name):
for stop in route.stops():
id_ = stop.id()
if id_ not in stops:
stops[id_] = (stop, [])
last(stops[id_]).append(route)
return list(filter(lambda p: length(last(p)) > 1, stops.values())) | 599e9e5d3fc0a6d0de84a58f1549da9423f35af3 | 3,656,699 |
import vapoursynth as vs
def freeze_loop(src, start, end, loopStart, loopEnd=None):
    """ Freezes a range of frames from start to end using the frames
    between loopStart and loopEnd.
    If no end frame is provided for the range or the loop,
    the start frame will be used instead.
    """
core = vs.get_core()
if loopEnd is None:
loopEnd = loopStart
if start < 0 or start > src.num_frames - 1:
raise ValueError('start frame out of bounds: {}.'.format(start))
if loopStart < 0 or loopStart > src.num_frames - 1:
raise ValueError('loop start frame out of bounds: {}.'.format(loopStart))
if end < start or end > src.num_frames - 1:
raise ValueError('end frame out of bounds: {}.'.format(end))
if loopEnd < loopStart or loopEnd > src.num_frames - 1:
raise ValueError('loop end out of bounds: {}.'.format(loopEnd))
loop = core.std.Loop(src[loopStart:loopEnd + 1], 0)
span = end - start + 1
if start != 0:
final = src[:start] + loop[:span]
else:
final = loop[:span]
if end < src.num_frames - 1:
final = final + src[end + 1:]
if src.num_frames != final.num_frames:
raise ValueError(
            'input / output framecount mismatch (got: {}; expected: {}).'.format(
final.num_frames, src.num_frames))
return final | 67284a264ada601dbd01c30c1bf32f48ad9eb9d8 | 3,656,700 |
def timevalue(cflo, prate, base_date=0, utility=None):
"""
Computes the equivalent net value of a generic cashflow at time `base_date`
using the periodic interest rate `prate`. If `base_date` is 0, `timevalue`
computes the net present value of the
cashflow. If `base_date` is the index of the last element of `cflo`,
this function computes the equivalent future value.
Args:
cflo (pandas.Series, list of pandas.Series): Generic cashflow.
prate (pandas.Series): Periodic interest rate.
base_date (int, tuple): Time.
utility (function): Utility function.
Returns:
Float or list of floats.
**Examples.**
>>> cflo = cashflow([-732.54] + [100]*8, start='2000Q1', freq='Q')
>>> prate = interest_rate([2]*9, start='2000Q1', freq='Q')
>>> timevalue(cflo, prate) # doctest: +ELLIPSIS
0.00...
>>> prate = interest_rate([12]*5, start='2000Q1', freq='Q')
>>> cflo = cashflow([-200]+[100]*4, start='2000Q1', freq='Q')
>>> timevalue(cflo, prate) # doctest: +ELLIPSIS
103.73...
>>> timevalue(cflo, prate, 4) # doctest: +ELLIPSIS
163.22...
>>> prate = interest_rate([12]*5, start='2000Q1', freq='Q')
>>> cflo = cashflow([-200] + [100]*4, start='2000Q1', freq='Q')
>>> timevalue(cflo=cflo, prate=prate) # doctest: +ELLIPSIS
103.73...
>>> timevalue(cflo=[cflo, cflo], prate=prate) # doctest: +ELLIPSIS
0 103.734935
1 103.734935
dtype: float64
"""
if isinstance(cflo, pd.Series):
cflo = [cflo]
if not isinstance(prate, pd.Series):
raise TypeError("`prate` must be a pandas.Series")
verify_period_range(cflo + [prate])
retval = pd.Series([0] * len(cflo), dtype=np.float64)
factor = to_discount_factor(prate=prate, base_date=base_date)
for index, xcflo in enumerate(cflo):
netval = 0
for time, _ in enumerate(xcflo):
netval += xcflo[time] * factor[time]
retval[index] = netval
if len(retval) == 1:
return retval[0]
return retval | 704f6988d1995a8602314df08d1dcfbed549f1ed | 3,656,701 |
def munge(examples, multiplier, prob, loc_var, data_t, seed=0):
""" Generates a dataset from the original one
:param examples: Training examples
:type examples: 2d numpy array
:param multiplier: size multiplier
:type multiplier: int k
:param prob: probability of swapping values
:type prob: flt (0 to 1)
:param loc_var: local variance parameter
:type loc_var: flt
:param data_t: Identifies whether or not the attribute is continuous or nominal
:type data_t: Numpy array of strs
"""
np.random.seed(seed)
new_dataset = None
continuous = [True if x == FeatureType.CONTINUOUS else False for x in data_t]
nominal = np.logical_not(continuous)
data_c = examples[:, continuous].astype(float)
# Scales data linearly from 0 to 1
norm_data_c = normalize(data_c - np.min(data_c, axis=0), axis=0, norm='max')
data_n = examples[:, nominal]
indicies = nn(norm_data_c, data_n)
for i in range(multiplier):
T_prime = np.copy(examples)
# Runs through all the examples in the dataset
for j in range(examples.shape[0]):
index = indicies[j, 1] if indicies[j, 0] == j else indicies[j, 0]
pt1 = T_prime[j, :]
pt2 = T_prime[index, :]
# Runs through all features for an example and its nn
for k in range(len(data_t)):
# Swaps the two fields with probability prob
if np.random.ranf() < prob:
if data_t[k] == FeatureType.CONTINUOUS:
std = abs(float(pt1[k]) - float(pt2[k])) / loc_var
temp = float(pt1[k])
pt1[k] = np.random.normal(float(pt2[k]), std)
pt2[k] = np.random.normal(temp, std)
else:
temp = pt1[k]
pt1[k] = pt2[k]
pt2[k] = temp
# Combines the dataset to the final one
if new_dataset is None:
new_dataset = np.copy(T_prime)
else:
new_dataset = np.vstack((new_dataset, T_prime))
return new_dataset | 339d5cafedb8abd6094cde81004c5056a3830d26 | 3,656,702 |
def is_interested_source_code_file(afile):
"""
If a file is the source code file that we are interested.
"""
tokens = afile.split(".")
if len(tokens) > 1 and tokens[-1] in ("c", "cpp", "pl", "tmpl", "py", "s", "S"):
# we care about C/C++/perl/template/python/assembly source code files
return True
return False | 9bd77dc3b530262cc2bf8a32c0d050ea30077030 | 3,656,703 |
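For illustration only (not part of the original module):
for path in ["src/main.c", "scripts/build.py", "README.md", "kernel/entry.S"]:
    print(path, is_interested_source_code_file(path))
# src/main.c True, scripts/build.py True, README.md False, kernel/entry.S True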
def recursively_extract(node, exfun, maxdepth=2):
"""
Transform a html ul/ol tree into a python list tree.
Converts a html node containing ordered and unordered lists and list items
into an object of lists with tree-like structure. Leaves are retrieved by
applying `exfun` function to the html nodes not containing any ul/ol list.
Args:
node: BeautifulSoup HTML node to traverse
exfun: function to apply to every string node found
maxdepth: maximal depth of lists to go in the node
Returns:
A tree-like python object composed of lists.
Examples:
>>> node_content = \
'''
<ol>
<li>Hase</li>
<li>Nase<ol><li>Eins</li><li>Zwei</li></ol></li>
</ol>'''
>>> node = BeautifulSoup(node_content, "lxml")
>>> recursively_extract(node, lambda x: x)
[<li>Hase</li>, [<li>Eins</li>, <li>Zwei</li>]]
>>> recursively_extract(node, lambda x: x.get_text())
['Hase', ['Eins', 'Zwei']]
"""
if node.name in ['ol', 'ul']:
lilist = node
else:
lilist = node.ol or node.ul
if lilist and maxdepth:
# apply 'recursively_extract' to every 'li' node found under this node
return [recursively_extract(li, exfun, maxdepth=(maxdepth - 1))
for li in lilist.find_all('li', recursive=False)]
# if this node doesn't contain 'ol' or 'ul' node, return the transformed
# leaf (using the 'exfun' function)
return exfun(node) | cc5732a786579172dda31958ad2bd468a4feef81 | 3,656,705 |
import math
def group_v2_deconv_decoder(latent_tensor,
output_shape,
hy_ncut=1,
group_feats_size=gin.REQUIRED,
lie_alg_init_scale=gin.REQUIRED,
lie_alg_init_type=gin.REQUIRED,
n_act_points=gin.REQUIRED,
is_training=True):
"""Convolutional decoder used in beta-VAE paper for the chairs data.
Based on row 3 of Table 1 on page 13 of "beta-VAE: Learning Basic Visual
Concepts with a Constrained Variational Framework"
(https://openreview.net/forum?id=Sy2fzU9gl)
Here we add an extra linear mapping for group features extraction.
Args:
latent_tensor: Input tensor of shape (batch_size,) to connect decoder to.
output_shape: Shape of the data.
group_feats_size: The dimension of group features.
is_training: Whether or not the graph is built for training (UNUSED).
Returns:
Output tensor of shape (batch_size, 64, 64, num_channels) with the [0,1]
pixel intensities.
group_feats: Group features.
"""
# del is_training
lie_alg_basis_ls = []
latent_dim = latent_tensor.get_shape().as_list()[-1]
latents_in_cut_ls = split_latents(latent_tensor, hy_ncut=hy_ncut) # [x0, x1]
mat_dim = int(math.sqrt(group_feats_size))
for i in range(latent_dim):
init = tf.initializers.random_normal(0, lie_alg_init_scale)
lie_alg_tmp = tf.get_variable('lie_alg_' + str(i),
shape=[1, mat_dim, mat_dim],
initializer=init)
if lie_alg_init_type == 'oth':
lie_alg_tmp = tf.matrix_band_part(lie_alg_tmp, 0, -1)
lie_alg_tmp = lie_alg_tmp - tf.transpose(lie_alg_tmp,
perm=[0, 2, 1])
lie_alg_basis_ls.append(lie_alg_tmp)
lie_alg_basis = tf.concat(lie_alg_basis_ls,
axis=0)[tf.newaxis,
...] # [1, lat_dim, mat_dim, mat_dim]
lie_alg = 0
lie_group = tf.eye(mat_dim, dtype=lie_alg_basis_ls[0].dtype)[tf.newaxis, ...]
for i, lie_alg_basis_i in enumerate(lie_alg_basis_ls):
lie_alg_tmp = lie_alg_basis_i * latent_tensor[:, i][..., tf.newaxis, tf.newaxis]
lie_alg = lie_alg + lie_alg_tmp
lie_group_tmp = tf.linalg.expm(
lie_alg_tmp) # [b, mat_dim, mat_dim]
lie_group = tf.matmul(lie_group_tmp, lie_group)
# if not is_training:
# lie_alg_mul = latent_tensor[
# ..., tf.newaxis, tf.
# newaxis] * lie_alg_basis # [b, lat_dim, mat_dim, mat_dim]
# lie_alg = tf.reduce_sum(lie_alg_mul, axis=1) # [b, mat_dim, mat_dim]
# lie_group = tf.linalg.expm(lie_alg) # [b, mat_dim, mat_dim]
# else:
# lie_group = tf.eye(
# mat_dim,
# dtype=latents_in_cut_ls[0].dtype)[tf.newaxis, ...]
# lie_alg = 0
# for latents_in_cut_i in latents_in_cut_ls:
# lie_alg_mul_tmp = latents_in_cut_i[
# ..., tf.newaxis, tf.newaxis] * lie_alg_basis # [b, lat_dim, mat_dim, mat_dim]
# lie_alg_tmp = tf.reduce_sum(
# lie_alg_mul_tmp,
# axis=1) # [b, mat_dim, mat_dim]
# lie_alg = lie_alg + lie_alg_tmp
# lie_group_tmp = tf.linalg.expm(
# lie_alg_tmp) # [b, mat_dim, mat_dim]
# lie_group = tf.matmul(lie_group,
# lie_group_tmp)
transed_act_points_tensor = tf.reshape(lie_group, [-1, mat_dim * mat_dim])
# lie_alg_mul = latent_tensor[
# ..., tf.newaxis, tf.
# newaxis] * lie_alg_basis # [b, lat_dim, mat_dim, mat_dim]
# lie_alg = tf.reduce_sum(lie_alg_mul, axis=1) # [b, mat_dim, mat_dim]
# lie_group = tf.linalg.expm(lie_alg) # [b, mat_dim, mat_dim]
# act_init = tf.initializers.random_normal(0, 0.01)
# act_points = tf.get_variable('act_points',
# shape=[1, mat_dim, n_act_points],
# initializer=act_init)
# transed_act_points = tf.matmul(lie_group, act_points)
# transed_act_points_tensor = tf.reshape(transed_act_points,
# [-1, mat_dim * n_act_points])
d1 = tf.layers.dense(transed_act_points_tensor, 256, activation=tf.nn.relu)
d2 = tf.layers.dense(d1, 1024, activation=tf.nn.relu)
d2_reshaped = tf.reshape(d2, shape=[-1, 4, 4, 64])
d3 = tf.layers.conv2d_transpose(
inputs=d2_reshaped,
filters=64,
kernel_size=4,
strides=2,
activation=tf.nn.relu,
padding="same",
)
d4 = tf.layers.conv2d_transpose(
inputs=d3,
filters=32,
kernel_size=4,
strides=2,
activation=tf.nn.relu,
padding="same",
)
d5 = tf.layers.conv2d_transpose(
inputs=d4,
filters=32,
kernel_size=4,
strides=2,
activation=tf.nn.relu,
padding="same",
)
d6 = tf.layers.conv2d_transpose(
inputs=d5,
filters=output_shape[2],
kernel_size=4,
strides=2,
padding="same",
)
return tf.reshape(d6, [-1] + output_shape), lie_group, lie_alg_basis | c098852a7d3e85be944494de74810e021d7fd106 | 3,656,706 |
def UncertaintyLossNet():
"""Creates Uncertainty weighted loss model https://arxiv.org/abs/1705.07115
"""
l1 = layers.Input(shape=())
l2 = layers.Input(shape=())
loss = UncertaintyWeightedLoss()([l1, l2])
model = Model(inputs=[l1, l2], outputs=loss)
return model | 5a6553edc321a6e307848e261692541cedea4ebb | 3,656,708 |
from typing import Iterable
import logging
from pathlib import Path
def inject_signals(
frame_files: Iterable[str],
channels: [str],
ifos: [str],
prior_file: str,
n_samples: int,
outdir: str,
fmin: float = 20,
waveform_duration: float = 8,
snr_range: Iterable[float] = [25, 50],
):
"""Injects simulated BBH signals into a frame, or set of corresponding
frames from different interferometers. Frames should have the same
start/stop time and the same sample rate
Args:
frame_files: list of paths to frames to be injected
channels: channel names of the strain data in each frame
ifos: list of interferometers corresponding to frames, e.g., H1, L1
prior_file: prior file for bilby to sample from
        n_samples: number of signals to inject
outdir: output directory to which injected frames will be written
fmin: Minimum frequency for highpass filter
waveform_duration: length of injected waveforms
snr_range: desired signal SNR range
Returns:
Paths to the injected frames and the parameter file
"""
strains = [
TimeSeries.read(frame, ch) for frame, ch in zip(frame_files, channels)
]
logging.info("Read strain from frame files")
span = set([strain.span for strain in strains])
if len(span) != 1:
raise ValueError(
"Frame files {} and {} have different durations".format(
*frame_files
)
)
frame_start, frame_stop = next(iter(span))
frame_duration = frame_stop - frame_start
sample_rate = set([int(strain.sample_rate.value) for strain in strains])
if len(sample_rate) != 1:
raise ValueError(
"Frame files {} and {} have different sample rates".format(
*frame_files
)
)
sample_rate = next(iter(sample_rate))
fftlength = int(max(2, np.ceil(2048 / sample_rate)))
# set the non-overlapping times of the signals in the frames randomly
# leaves buffer at either end of the series so edge effects aren't an issue
signal_times = sorted(
np.random.choice(
np.arange(
waveform_duration,
frame_duration - waveform_duration,
waveform_duration,
),
size=n_samples,
replace=False,
)
)
# log and print out some simulation parameters
logging.info("Simulation parameters")
logging.info("Number of samples : {}".format(n_samples))
logging.info("Sample rate [Hz] : {}".format(sample_rate))
logging.info("High pass filter [Hz] : {}".format(fmin))
logging.info("Prior file : {}".format(prior_file))
# define a Bilby waveform generator
waveform_generator = bilby.gw.WaveformGenerator(
duration=waveform_duration,
sampling_frequency=sample_rate,
frequency_domain_source_model=lal_binary_black_hole,
parameter_conversion=convert_to_lal_binary_black_hole_parameters,
waveform_arguments={
"waveform_approximant": "IMRPhenomPv2",
"reference_frequency": 50,
"minimum_frequency": 20,
},
)
# sample GW parameters from prior distribution
priors = bilby.gw.prior.BBHPriorDict(prior_file)
sample_params = priors.sample(n_samples)
sample_params["geocent_time"] = signal_times
signals_list = []
snr_list = []
for strain, channel, ifo in zip(strains, channels, ifos):
# calculate the PSD
strain_psd = strain.psd(fftlength)
# generate GW waveforms
raw_signals = generate_gw(
sample_params,
waveform_generator=waveform_generator,
)
signals, snr = project_raw_gw(
raw_signals,
sample_params,
waveform_generator,
ifo,
get_snr=True,
noise_psd=strain_psd,
)
signals_list.append(signals)
snr_list.append(snr)
old_snr = np.sqrt(np.sum(np.square(snr_list), axis=0))
new_snr = np.random.uniform(snr_range[0], snr_range[1], len(snr_list[0]))
signals_list = [
signals * (new_snr / old_snr)[:, None] for signals in signals_list
]
sample_params["luminosity_distance"] = (
sample_params["luminosity_distance"] * old_snr / new_snr
)
snr_list = [snr * new_snr / old_snr for snr in snr_list]
outdir = Path(outdir)
frame_out_paths = [outdir / f.name for f in map(Path, frame_files)]
for strain, signals, frame_path in zip(
strains, signals_list, frame_out_paths
):
for i in range(n_samples):
idx1 = int(
(signal_times[i] - waveform_duration / 2.0) * sample_rate
)
idx2 = idx1 + waveform_duration * sample_rate
strain[idx1:idx2] += signals[i]
strain.write(frame_path)
# Write params and similar to output file
param_file = outdir / f"param_file_{frame_start}-{frame_stop}.h5"
with h5py.File(param_file, "w") as f:
# write signals attributes, snr, and signal parameters
params_gr = f.create_group("signal_params")
for k, v in sample_params.items():
params_gr.create_dataset(k, data=v)
# Save signal times as actual GPS times
f.create_dataset("GPS-start", data=signal_times + frame_start)
for i, ifo in enumerate(ifos):
ifo_gr = f.create_group(ifo)
ifo_gr.create_dataset("signal", data=signals_list[i])
ifo_gr.create_dataset("snr", data=snr_list[i])
# write frame attributes
f.attrs.update(
{
"size": n_samples,
"frame_start": frame_start,
"frame_stop": frame_stop,
"sample_rate": sample_rate,
"psd_fftlength": fftlength,
}
)
# Update signal attributes
f.attrs["waveform_duration"] = waveform_duration
f.attrs["flag"] = "GW"
return frame_out_paths, param_file | 204aca5dee78e885191907890fc064503ff61f57 | 3,656,709 |
async def lyric(id: int, endpoint: NeteaseEndpoint = Depends(requestClient)):
"""
## Name: `lyric`
    > Lyrics
    ---
    ### Required:
    - ***int*** **`id`**
        - Description: Song ID
"""
return await endpoint.lyric(id=id) | 331c0bced7bbd2523426522286a85f3cc6a3a29f | 3,656,710 |
def get_body(m):
"""extract the plain text body. return the body"""
if m.is_multipart():
body = m.get_body(preferencelist=('plain',)).get_payload(decode=True)
else:
body = m.get_payload(decode=True)
if isinstance(body, bytes):
return body.decode()
else:
return body | 7980c1471a0a09c793cb8124066a97caac21ae0d | 3,656,711 |
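A minimal sketch (my example, not from the source) showing get_body applied to a standard-library email message:
from email.message import EmailMessage
msg = EmailMessage()
msg["Subject"] = "hello"
msg.set_content("plain text body")
print(get_body(msg))  # prints the decoded plain-text body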
def density(mass, volume):
"""
Calculate density.
"""
    return mass / volume | 53b1f76ba66695a9cd72be9186bcc374ee11f53b | 3,656,713
from typing import Union
from typing import Callable
import torch
def get_augmenter(augmenter_type: str,
image_size: ImageSizeType,
dataset_mean: DatasetStatType,
dataset_std: DatasetStatType,
padding: PaddingInputType = 1. / 8.,
pad_if_needed: bool = False,
subset_size: int = 2) -> Union[Module, Callable]:
"""
Args:
augmenter_type: augmenter type
image_size: (height, width) image size
dataset_mean: dataset mean value in CHW
dataset_std: dataset standard deviation in CHW
padding: percent of image size to pad on each border of the image. If a sequence of length 4 is provided,
it is used to pad left, top, right, bottom borders respectively. If a sequence of length 2 is provided, it is
used to pad left/right, top/bottom borders, respectively.
pad_if_needed: bool flag for RandomCrop "pad_if_needed" option
subset_size: number of augmentations used in subset
Returns: nn.Module for Kornia augmentation or Callable for torchvision transform
"""
if not isinstance(padding, tuple):
assert isinstance(padding, float)
padding = (padding, padding, padding, padding)
assert len(padding) == 2 or len(padding) == 4
if len(padding) == 2:
# padding of length 2 is used to pad left/right, top/bottom borders, respectively
# padding of length 4 is used to pad left, top, right, bottom borders respectively
padding = (padding[0], padding[1], padding[0], padding[1])
# image_size is of shape (h,w); padding values is [left, top, right, bottom] borders
padding = (
int(image_size[1] * padding[0]),
int(image_size[0] * padding[1]),
int(image_size[1] * padding[2]),
int(image_size[0] * padding[3])
)
augmenter_type = augmenter_type.strip().lower()
if augmenter_type == "simple":
return nn.Sequential(
K.RandomCrop(size=image_size, padding=padding, pad_if_needed=pad_if_needed,
padding_mode='reflect'),
K.RandomHorizontalFlip(p=0.5),
K.Normalize(mean=torch.tensor(dataset_mean, dtype=torch.float32),
std=torch.tensor(dataset_std, dtype=torch.float32)),
)
elif augmenter_type == "fixed":
return nn.Sequential(
K.RandomHorizontalFlip(p=0.5),
# K.RandomVerticalFlip(p=0.2),
K.RandomResizedCrop(size=image_size, scale=(0.8, 1.0), ratio=(1., 1.)),
RandomAugmentation(
p=0.5,
augmentation=F.GaussianBlur2d(
kernel_size=(3, 3),
sigma=(1.5, 1.5),
border_type='constant'
)
),
K.ColorJitter(contrast=(0.75, 1.5)),
# additive Gaussian noise
K.RandomErasing(p=0.1),
# Multiply
K.RandomAffine(
degrees=(-25., 25.),
translate=(0.2, 0.2),
scale=(0.8, 1.2),
shear=(-8., 8.)
),
K.Normalize(mean=torch.tensor(dataset_mean, dtype=torch.float32),
std=torch.tensor(dataset_std, dtype=torch.float32)),
)
elif augmenter_type in ["validation", "test"]:
return nn.Sequential(
K.Normalize(mean=torch.tensor(dataset_mean, dtype=torch.float32),
std=torch.tensor(dataset_std, dtype=torch.float32)),
)
elif augmenter_type == "randaugment":
return nn.Sequential(
K.RandomCrop(size=image_size, padding=padding, pad_if_needed=pad_if_needed,
padding_mode='reflect'),
K.RandomHorizontalFlip(p=0.5),
RandAugmentNS(n=subset_size, m=10),
K.Normalize(mean=torch.tensor(dataset_mean, dtype=torch.float32),
std=torch.tensor(dataset_std, dtype=torch.float32)),
)
else:
raise NotImplementedError(f"\"{augmenter_type}\" is not a supported augmenter type") | 7b065d9bd7c9bc2cf3c0aa2fdf105c714df24705 | 3,656,715 |
def query(limit=None, username=None, ids=None, user=None):
"""# Retrieve Workspaces
Receive a generator of Workspace objects previously created in the Stark Bank API.
If no filters are passed and the user is an Organization, all of the Organization Workspaces
will be retrieved.
## Parameters (optional):
- limit [integer, default None]: maximum number of objects to be retrieved. Unlimited if None. ex: 35
- username [string, default None]: query by the simplified name that defines the workspace URL. This name is always unique across all Stark Bank Workspaces. Ex: "starkbankworkspace"
- ids [list of strings, default None]: list of ids to filter retrieved objects. ex: ["5656565656565656", "4545454545454545"]
- user [Organization/Project object, default None]: Organization or Project object. Not necessary if starkbank.user was set before function call
## Return:
- generator of Workspace objects with updated attributes
"""
return rest.get_stream(resource=_resource, limit=limit, username=username, ids=ids, user=user) | bc22336c7c76d549144e43b6d6c46793b1feedf9 | 3,656,716 |
def _add_output_tensor_nodes(net, preprocess_tensors, output_collection_name='inferece_op'):
"""
Adds output nodes for all preprocess_tensors.
:param preprocess_tensors: a dictionary containing the all predictions;
:param output_collection_name: Name of collection to add output tensors to.
:return: A tensor dict containing the added output tensor nodes.
"""
outputs = {}
outputs['roi_scores'] = tf.identity(net.all_rois_scores, name='rois_scores')
outputs['rois'] = tf.identity(net.all_rois, name='rois')
for output_key in outputs.keys():
tf.add_to_collection(output_collection_name, outputs[output_key])
return outputs | cdbb2b69a795bcc74925cce138e9d73bc4737276 | 3,656,717 |
def f_prob(times, lats, lons, members):
"""Probabilistic forecast containing also a member dimension."""
data = np.random.rand(len(members), len(times), len(lats), len(lons))
return xr.DataArray(
data,
coords=[members, times, lats, lons],
dims=["member", "time", "lat", "lon"],
attrs={"source": "test"},
) | 43fe73abb5667b0d29f36a4ee73e8d8ec1943ad0 | 3,656,718 |
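A small usage sketch (assumptions mine: the module has numpy as np, pandas as pd and xarray as xr imported, since the snippet itself relies on np and xr):
import numpy as np
import pandas as pd
times = pd.date_range("2000-01-01", periods=3)
lats = np.arange(2)
lons = np.arange(4)
members = np.arange(5)
fcst = f_prob(times, lats, lons, members)
print(fcst.dims, fcst.shape)  # ('member', 'time', 'lat', 'lon') (5, 3, 2, 4)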
def dunning_total_by_corpus(m_corpus, f_corpus):
"""
Goes through two corpora, e.g. corpus of male authors and corpus of female authors
runs dunning_individual on all words that are in BOTH corpora
returns sorted dictionary of words and their dunning scores
shows top 10 and lowest 10 words
:param m_corpus: Corpus object
:param f_corpus: Corpus object
:return: list of tuples (common word, (dunning value, m_corpus_count, f_corpus_count))
>>> from gender_analysis.analysis.dunning import dunning_total_by_corpus
>>> from gender_analysis.corpus import Corpus
>>> from gender_analysis.common import TEST_DATA_PATH
>>> path = TEST_DATA_PATH / 'sample_novels' / 'texts'
>>> csv_path = TEST_DATA_PATH / 'sample_novels' / 'sample_novels.csv'
>>> c = Corpus(path, csv_path=csv_path)
>>> m_corpus = c.filter_by_gender('male')
>>> f_corpus = c.filter_by_gender('female')
>>> result = dunning_total_by_corpus(m_corpus, f_corpus)
>>> print(result[0])
('she', (-12374.391057010947, 29382, 45907))
"""
wordcounter_male = m_corpus.get_wordcount_counter()
wordcounter_female = f_corpus.get_wordcount_counter()
totalmale_words = 0
totalfemale_words = 0
for male_word in wordcounter_male:
totalmale_words += wordcounter_male[male_word]
for female_word in wordcounter_female:
totalfemale_words += wordcounter_female[female_word]
dunning_result = {}
for word in wordcounter_male:
wordcount_male = wordcounter_male[word]
if word in wordcounter_female:
wordcount_female = wordcounter_female[word]
dunning_word = dunn_individual_word(totalmale_words, totalfemale_words,
wordcount_male, wordcount_female)
dunning_result[word] = (dunning_word, wordcount_male, wordcount_female)
dunning_result = sorted(dunning_result.items(), key=itemgetter(1))
return dunning_result | 324b0bb5e5f83451ca47cefed908cdd6dbc47c33 | 3,656,719 |
from typing import Optional
from typing import Callable
def get_int(prompt: Optional[str] = None,
min_value: Optional[int] = None,
max_value: Optional[int] = None,
condition: Optional[Callable[[int], bool]] = None,
default: Optional[int] = None) -> int:
"""Gets an int from the command line.
:param prompt: Input prompt.
:param min_value: Minimum value of the parsed int.
:param max_value: Maximum value of the parsed int.
:param condition: Condition the int must match.
:param default: Default value used if no characters are typed.
:return: Input int.
"""
input_int = None
input_str = None
while input_int is None:
try:
input_str = input(_prompt_from_message(prompt, default=default)).strip()
if default is not None and len(input_str) == 0:
input_str = default
input_int = int(input_str)
if (min_value is not None and input_int < min_value) or \
(max_value is not None and input_int > max_value) or \
(condition is not None and not condition(input_int)):
input_int = None
raise ValueError()
except ValueError:
_print_invalid_value(input_str)
return input_int | c6ea07b495330c74bd36523cf12dd3e208926ea5 | 3,656,723 |
def make_stream_callback(observer, raw, frame_size, start, stop):
"""
Builds a callback function for stream plying. The observer is an object
which implements methods 'observer.set_playing_region(b,e)' and
'observer.set_playing_end(e)'. raw is the wave data in a str object.
frame_size is the number of bytes times number of channels per frame.
start and stop indicate which slice of raw would be played.
"""
start_ref = [ start ]
def callback(in_data, frame_count, time_info, status):
start = start_ref[0]
last = min(stop, start + frame_count*frame_size)
data = raw[start:last]
start_ref[0] = last
if last == stop: observer.set_playing_end(last)
else: observer.set_playing_region(start, last)
return (data, pyaudio.paContinue)
return callback | c29f7998f848c51af57e42c92a62f80c7a0c2e70 | 3,656,724 |
import torch
def predictCNN(segments, artifacts, device:torch.device = torch.device("cpu")):
"""
Perform model predictions on unseen data
:param segments: list of segments (paragraphs)
:param artifacts: run artifacts to evaluate
:param device: torch device
:return category predictions
"""
# Retrieve artifacts
params = artifacts["params"]
label_encoder = artifacts["label_encoder"]
tokenizer = artifacts["tokenizer"]
model = artifacts["model"]
# Prepare dataset into model readable format
preprocessed_segments = [preprocess.cleanText(segment, lower=params.lower, stem=params.stem) for segment in segments]
X = np.array(tokenizer.texts_to_sequences(preprocessed_segments), dtype="object")
y_blank = np.zeros((len(X), len(label_encoder)))
dataset = CNNDataset(X=X, y=y_blank, max_filter_size=int(params.max_filter_size))
dataloader = dataset.create_dataloader(batch_size=int(params.batch_size))
# Get model predictions
trainer = Trainer(model=model, device=device)
_, y_prob = trainer.predict_step(dataloader)
y_pred = [np.where(prob >= float(params.threshold), 1, 0) for prob in y_prob]
categories = label_encoder.decode(y_pred)
predictions = [{"input_text": segments[i], "preprocessed_text": preprocessed_segments[i], "predicted_tags": categories[i]} for i in range(len(categories))]
return predictions | 27ebdccaecd675104c670c1839daf634c142c640 | 3,656,725 |
import re
def transform_url(url):
"""Normalizes url to '[email protected]:{username}/{repo}' and also
returns username and repository's name."""
username, repo = re.search(r'[/:](?P<username>[A-Za-z0-9-]+)/(?P<repo>[^/]*)', url).groups()
if url.startswith('git@'):
return url, username, repo
    return 'git@github.com:{username}/{repo}'.format(**locals()), username, repo | 8d6e7d903d7c68d2f4fb3927bd7a02128cc09caf | 3,656,726
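Illustrative calls (mine, not from the source):
print(transform_url("https://github.com/octocat/Hello-World"))
# ('git@github.com:octocat/Hello-World', 'octocat', 'Hello-World')
print(transform_url("git@github.com:octocat/Hello-World.git"))
# ('git@github.com:octocat/Hello-World.git', 'octocat', 'Hello-World.git')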
from typing import Optional
def prettyprint(data: dict, command: str, modifier: Optional[str] = '') -> str:
"""
Prettyprint the JSON data we get back from the API
"""
output = ''
# A few commands need a little special treatment
if command == 'job':
command = 'jobs'
if 'data' in data and 'jobs' in data['data']:
output = prettyprint_jobs(data, command)
elif 'data' in data and 'files' in data['data']:
output = prettyprint_firmware(data, command)
elif 'job_id' in data:
output = prettyprint_job(data, command)
elif 'data' in data and 'groups' in data['data']:
output = prettyprint_groups(data, 'groups')
elif 'data' in data and 'version' in data['data']:
output = prettyprint_version(data, 'version')
elif 'data' in data and command == 'device':
output = prettyprint_device(data)
elif 'data' in data and command in data['data']:
output = prettyprint_command(data, command)
elif 'status' in data and data['status'] == 'error':
output = prettyprint_error(data)
else:
output = prettyprint_other(data)
if modifier != '':
output = prettyprint_modifier(output, modifier)
return output | 727a59b22b2624fec56e685cc3b84f065bbfeffd | 3,656,727 |
def kmor(X: np.array, k: int, y: float = 3, nc0: float = 0.1, max_iteration: int = 100, gamma: float = 10 ** -6):
"""K-means clustering with outlier removal
Parameters
----------
X
Your data.
k
Number of clusters.
y
Parameter for outlier detection. Increase this to make outlier removal subtle.
nc0
Maximum percentage of your data that can be assigned to outlier cluster.
max_iteration
Maximum number of iterations.
gamma
Used to check the convergence.
Returns
-------
numpy.array
Numpy array that contains the assigned cluster of each data point (0 to k, the cluster k is the outlier
cluster)
"""
n = X.shape[0]
n0 = int(nc0 * X.shape[0])
Z = X[np.random.choice(n, k)]
def calculate_dd(U, Z):
return np.linalg.norm(X - Z[U], axis=1) ** 2
def calculate_D(outliers, dd):
factor = y / (n - outliers.size)
return factor * np.sum(np.delete(dd, outliers))
def calculate_U(X):
def closest(p):
return np.argmin(np.linalg.norm(Z - p, axis=1))
return np.apply_along_axis(closest, 1, X)
outliers = np.array([])
U = calculate_U(X)
s = 0
p = 0
while True:
# Update U (Theorem 1)
dd = calculate_dd(U, Z)
D = calculate_D(outliers, dd)
dd2 = dd[dd > D]
outliers = np.arange(n)[dd > D][dd2.argsort()[::-1]]
outliers = outliers[:n0]
U = calculate_U(X)
# Update Z (Theorem 3)
        # Boolean mask over data-point indices flagging the current outliers.
        is_outlier = np.isin(np.arange(n), outliers)
def mean_group(i):
x = X[np.logical_and(U == i, ~is_outlier)]
# Empty group
if x.size == 0:
x = X[np.random.choice(n, 1)]
return x.mean(axis=0)
Z = np.array([mean_group(i) for i in range(k)])
# Update P
dd = calculate_dd(U, Z)
D = calculate_D(outliers, dd)
if outliers.size == 0:
p1 = np.sum(dd)
else:
            p1 = np.sum(np.delete(dd, outliers)) + D * outliers.size
# Exit condition
s += 1
if abs(p1 - p) < gamma or s > max_iteration:
break
p = p1
print("s:", s, "p:", p)
U[outliers] = k
return U | 5ffa55d45d615586971b1ec502981f1a7ab27cbe | 3,656,728 |
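A rough end-to-end sketch (my own, assuming numpy is imported as np): two tight Gaussian blobs plus two far-away points that are expected to end up in the outlier cluster (label k):
import numpy as np
rng = np.random.RandomState(0)
blob_a = rng.normal(loc=0.0, scale=0.5, size=(50, 2))
blob_b = rng.normal(loc=5.0, scale=0.5, size=(50, 2))
far_points = np.array([[50.0, 50.0], [-40.0, 60.0]])
data = np.vstack([blob_a, blob_b, far_points])
labels = kmor(data, k=2, y=3, nc0=0.05)
print(np.bincount(labels))  # counts per cluster; the last bin is the outlier cluster
print(labels[-2:])          # the two far points are expected to carry the outlier label 2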
def turnout_div(turnout_main, servo, gpo_provider):
"""Create a turnout set to the diverging route"""
turnout_main.set_route(True)
# Check that the route was set to the diverging route
assert(servo.get_angle() == ANGLE_DIV)
assert(gpo_provider.is_enabled())
return turnout_main | 542a747cc7f4cdc78b7ad046b0c4ce4a0a3cd33d | 3,656,730 |
def num_jewels(J: str, S: str) -> int:
"""
Time complexity: O(n + m)
Space complexity: O(n)
"""
jewels = set(J)
return sum(stone in jewels for stone in S) | f1a9632a791e3ef94699b566da61e27d9dc46b07 | 3,656,731 |
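For example (illustration mine):
print(num_jewels("aA", "aAAbbbb"))  # 3
print(num_jewels("z", "ZZ"))        # 0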
import socket
def internet(host="8.8.8.8", port=53, timeout=3):
"""
Host: 8.8.8.8 (google-public-dns-a.google.com)
OpenPort: 53/tcp
Service: domain (DNS/TCP)
"""
try:
socket.setdefaulttimeout(timeout)
socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port))
logger.info('Internet is there!!')
return True
except Exception as ex:
logger.warning('Internet is gone!!')
return False | 773f490baec40bf548ed2f13d1d1094c78b33366 | 3,656,732 |
import logging
def map_family_situation(code):
"""Maps French family situation"""
status = FamilySituation
mapping = {
"M": status.MARRIED.value,
"C": status.SINGLE.value,
"V": status.WIDOWED.value,
"D": status.DIVORCED.value,
"O": status.PACSED.value,
}
if code in mapping.keys():
return mapping[code]
else:
logging.warning("In {}, args {} not recognised".format("family_situation", code))
return code | ae5ac0c9ffadb31d25825e65fcb81d6ea9b0115f | 3,656,733 |
def transform(x, channels, img_shape, kernel_size=7, threshold=1e-4):
"""
----------
X : WRITEME
data with axis [b, 0, 1, c]
"""
for i in channels:
assert isinstance(i, int)
        assert i >= 0 and i < x.shape[3]
x[:, :, :, i] = lecun_lcn(x[:, :, :, i],
img_shape,
kernel_size,
threshold)
return x | c66725795585ea26dc9622ce42133a4a2f1445a8 | 3,656,734 |
import functools
def delete_files(files=[]):
"""This decorator deletes files before and after a function.
This is very useful for installation procedures.
"""
def my_decorator(func):
@functools.wraps(func)
def function_that_runs_func(self, *args, **kwargs):
# Inside the decorator
# Delete the files - prob don't exist yet
delete_paths(files)
# Run the function
stuff = func(self, *args, **kwargs)
# Delete the files if they do exist
delete_paths(files)
return stuff
return function_that_runs_func
return my_decorator | 09652e9dd527b6ae43cf47deb2eaf460de51552e | 3,656,735 |
def add_note(front, back, tag, model, deck, note_id=None):
"""
Add note with `front` and `back` to `deck` using `model`.
If `deck` doesn't exist, it is created.
If `model` doesn't exist, nothing is done.
If `note_id` is passed, it is used as the note_id
"""
model = mw.col.models.byName(model)
if model:
mw.col.decks.current()['mid'] = model['id']
else:
return None
# Creates or reuses deck with name passed using `deck`
did = mw.col.decks.id(deck)
deck = mw.col.decks.get(did)
note = mw.col.newNote()
note.model()['did'] = did
note.fields[0] = front
note.fields[1] = back
if note_id:
note.id = note_id
note.addTag(tag)
mw.col.addNote(note)
mw.col.save()
return note.id | e45528705dbd658dcb708259043f4a4b590e884b | 3,656,737 |
def indices_to_one_hot(data, nb_classes): #separate: embedding
"""Convert an iterable of indices to one-hot encoded labels."""
targets = np.array(data).reshape(-1)
return np.eye(nb_classes)[targets] | 36fdf0dbad51ae6d64c1a6bf783f083013686e40 | 3,656,738 |
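A quick demonstration (mine), assuming numpy is imported as np:
print(indices_to_one_hot([0, 2, 1], nb_classes=3))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]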
from rdkit.Chem import rdMolTransforms
def translateToceroZcoord(moleculeRDkit):
"""
Translate the molecule to put the first atom in the origin of the coordinates
Parameters
----------
moleculeRDkit : RDkit molecule
An RDkit molecule
Returns
-------
List
List with the shift value applied to X, Y, Z
"""
conf = moleculeRDkit.GetConformer()
# avoid first atom overlap with dummy 3
if abs(conf.GetAtomPosition(0).x-1.0)<1e-3 and abs(conf.GetAtomPosition(0).y-1.0)<1e-3 and abs(conf.GetAtomPosition(0).z-0.0)<1e-3:
shiftX = conf.GetAtomPosition(0).x - 1.0
shiftY = conf.GetAtomPosition(0).y - 1.0
shiftZ = conf.GetAtomPosition(0).z
translationMatrix = np.array( [[1, 0, 0, -shiftX],
[0, 1, 0, -shiftY],
[0, 0, 1, -shiftZ],
[0, 0, 0, 1]], dtype=np.double)
rdMolTransforms.TransformConformer(conf, translationMatrix)
else:
shiftX = 0.0
shiftY = 0.0
shiftZ = 0.0
return [shiftX, shiftY, shiftZ] | cbe17cf023791517c01b0e52c11dde65532ab6d0 | 3,656,739 |
def standardize(mri):
"""
Standardize mean and standard deviation of each channel and z_dimension slice to mean 0 and standard
deviation 1.
Note: setting the type of the input mri to np.float16 beforehand causes issues, set it afterwards.
Args:
mri (np.array): input mri, shape (dim_x, dim_y, dim_z, num_channels)
Returns:
standardized_mri (np.array): standardized version of input mri
"""
standardized_mri = np.zeros(mri.shape)
# Iterate over channels
for c in range(mri.shape[3]):
# Iterate over the `z` depth dimension
for z in range(mri.shape[2]):
# Get a slice of the mri at channel c and z-th dimension
mri_slice = mri[:, :, z, c]
# Subtract the mean from mri_slice
centered = mri_slice - np.mean(mri_slice)
# Divide by the standard deviation (only if it is different from zero)
if np.std(centered) != 0:
centered_scaled = centered / np.std(centered)
# Update the slice of standardized mri with the centered and scaled mri
standardized_mri[:, :, z, c] = centered_scaled
return standardized_mri | 9c0847d1618023d83cdec48a1c43aae6efc1116f | 3,656,740 |
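A short sanity check (my addition), assuming numpy is imported as np:
import numpy as np
mri = np.random.rand(16, 16, 4, 2)  # (dim_x, dim_y, dim_z, num_channels)
out = standardize(mri)
print(out.shape)  # (16, 16, 4, 2)
print(np.isclose(out[:, :, 0, 0].mean(), 0.0, atol=1e-8), np.isclose(out[:, :, 0, 0].std(), 1.0))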
def current_floquet_kets(eigensystem, time):
"""
Get the Floquet basis kets at a given time. These are the
|psi_j(t)> = exp(-i energy[j] t) |phi_j(t)>,
using the notation in Marcel's thesis, equation (1.13).
"""
weights = np.exp(time * eigensystem.abstract_ket_coefficients)
weights = weights.reshape((1, -1, 1))
return np.sum(weights * eigensystem.k_eigenvectors, axis=1) | 60fdb845fc026bf3a109f05945b251a224b12092 | 3,656,741 |
def summary():
""" DB summary stats """
cur = get_cur()
res = []
try:
cur.execute('select count(study_id) as num_studies from study')
res = cur.fetchone()
except:
dbh.rollback()
finally:
cur.close()
if res:
return Summary(num_studies=res['num_studies'])
else:
return [] | e0159452df1909626d523896f1c2735fb4fc3e75 | 3,656,742 |
def rotate_affine(img, rot=None):
"""Rewrite the affine of a spatial image."""
if rot is None:
return img
img = nb.as_closest_canonical(img)
affine = np.eye(4)
affine[:3] = rot @ img.affine[:3]
return img.__class__(img.dataobj, affine, img.header) | 4a06c286dcfc0832558c74f2cbce54d6e8d7a2d4 | 3,656,744 |
import math
def validate_ttl(options):
"""
Check with Vault if the ttl is valid.
:param options: Lemur option dictionary
:return: 1. Boolean if the ttl is valid or not.
2. the ttl in hours.
"""
if 'validity_end' in options and 'validity_start' in options:
ttl = math.floor(abs(options['validity_end'] - options['validity_start']).total_seconds() / 3600)
elif 'validity_years' in options:
ttl = options['validity_years'] * 365 * 24
else:
ttl = 0
headers = {'X-Vault-Token': vault_auth.get_token()}
url = '{}/roles/{}'.format(current_app.config.get('VAULT_PKI_URL'), options['authority'].name)
res, resp = vault_read_request(url, headers)
if res:
max_ttl = resp.json()['data']['max_ttl']
text_file = open("max_ttl.txt", "wt")
n = text_file.write(str(max_ttl))
text_file.close()
if int(max_ttl) < ttl:
current_app.logger.info('Certificate TTL is above max ttl - ' + max_ttl)
return True, ttl
else:
return True, ttl
else:
current_app.logger.info('Vault: Failed to get Vault max TTL')
raise Exception('Vault: ' + resp) | 83d7d323ae4b3db28f41879f630982d24515fcb1 | 3,656,745 |