content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---|
def create_pilot(username='kimpilot', first_name='Kim', last_name='Pilot', email='[email protected]', password='secret'):
"""Returns a new Pilot (User) with the given properties."""
pilot_group, _ = Group.objects.get_or_create(name='Pilots')
pilot = User.objects.create_user(username, email, password, first_name=first_name, last_name=last_name)
pilot.groups.add(pilot_group)
return pilot | 6c173a94a97d64182dcb28b0cef510c0838a545f | 5,007 |
def dict_to_datasets(data_list, components):
"""add models and backgrounds to datasets
Parameters
----------
datasets : `~gammapy.modeling.Datasets`
Datasets
components : dict
dict describing model components
"""
models = dict_to_models(components)
datasets = []
for data in data_list["datasets"]:
dataset = DATASETS.get_cls(data["type"]).from_dict(data, components, models)
datasets.append(dataset)
return datasets | e021317aae6420833d46782b3a611d17fb7156dc | 5,008 |
def of(*args: _TSource) -> Seq[_TSource]:
"""Create sequence from iterable.
Enables fluent dot chaining on the created sequence object.
"""
return Seq(args) | eb8ea24c057939cf82f445099c953a84a4b51895 | 5,009 |
def find_anagrams(word_list: list) -> dict:
"""Finds all anagrams in a word list and returns it in a dictionary
with the letters as a key.
"""
d = dict()
for word in word_list:
unique_key = single(word)
if unique_key in d:
d[unique_key].append(word)
else:
d[unique_key] = [word]
return d | 5e3514344d396d11e8a540b5faa0c31ae3ee6dab | 5,010 |
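Usage sketch for find_anagrams: the helper `single` is not defined in the snippet above, so the version below is an assumption that it builds a canonical key from the word's sorted letters.
def single(word: str) -> str:
    # hypothetical helper, assumed behaviour: canonical sorted-letter key
    return ''.join(sorted(word.lower()))

words = ["listen", "silent", "enlist", "google"]
print(find_anagrams(words))
# -> {'eilnst': ['listen', 'silent', 'enlist'], 'eggloo': ['google']}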
import itertools
def resolver(state_sets, event_map):
"""Given a set of state return the resolved state.
Args:
state_sets(list[dict[tuple[str, str], str]]): A list of dicts from
type/state_key tuples to event_id
event_map(dict[str, FrozenEvent]): Map from event_id to event
Returns:
dict[tuple[str, str], str]: The resolved state map.
"""
# First split up the un/conflicted state
unconflicted_state, conflicted_state = _seperate(state_sets)
# Also fetch all auth events that appear in only some of the state sets'
# auth chains.
auth_diff = _get_auth_chain_difference(state_sets, event_map)
# Now order the conflicted state and auth_diff by power level (falling
# back to event_id to tie break consistently).
event_id_to_level = [
(_get_power_level_for_sender(event_id, event_map), event_id)
for event_id in set(itertools.chain(
itertools.chain.from_iterable(conflicted_state.values()),
auth_diff,
))
]
event_id_to_level.sort()
events_sorted_by_power = [eid for _, eid in event_id_to_level]
# Now we reorder the list to ensure that auth dependencies of an event
# appear before the event in the list
sorted_events = []
def add_to_list(event_id):
event = event_map[event_id]
for aid, _ in event.auth_events:
if aid in events_sorted_by_power:
events_sorted_by_power.remove(aid)
add_to_list(aid)
sorted_events.append(event_id)
    # First, let's pick out all the events that (probably) require power
leftover_events = []
while events_sorted_by_power:
event_id = events_sorted_by_power.pop()
if _is_power_event(event_map[event_id]):
add_to_list(event_id)
else:
leftover_events.append(event_id)
# Now we go through the sorted events and auth each one in turn, using any
# previously successfully auth'ed events (falling back to their auth events
# if they don't exist)
overridden_state = {}
event_id_to_auth = {}
for event_id in sorted_events:
event = event_map[event_id]
auth_events = {}
for aid, _ in event.auth_events:
aev = event_map[aid]
auth_events[(aev.type, aev.state_key)] = aev
for key, eid in overridden_state.items():
auth_events[key] = event_map[eid]
try:
event_auth.check(
event, auth_events,
do_sig_check=False,
do_size_check=False
)
allowed = True
overridden_state[(event.type, event.state_key)] = event_id
except AuthError:
allowed = False
event_id_to_auth[event_id] = allowed
resolved_state = {}
# Now for each conflicted state type/state_key, pick the latest event that
# has passed auth above, falling back to the first one if none passed auth.
for key, conflicted_ids in conflicted_state.items():
sorted_conflicts = []
for eid in sorted_events:
if eid in conflicted_ids:
sorted_conflicts.append(eid)
sorted_conflicts.reverse()
for eid in sorted_conflicts:
if event_id_to_auth[eid]:
resolved_eid = eid
resolved_state[key] = resolved_eid
break
resolved_state.update(unconflicted_state)
# OK, so we've now resolved the power events. Now mainline them.
sorted_power_resolved = sorted(resolved_state.values())
mainline = []
def add_to_list_two(event_id):
ev = event_map[event_id]
for aid, _ in ev.auth_events:
if aid not in mainline and event_id_to_auth.get(aid, True):
add_to_list_two(aid)
if event_id not in mainline:
mainline.append(event_id)
while sorted_power_resolved:
ev_id = sorted_power_resolved.pop()
ev = event_map[ev_id]
if _is_power_event(ev):
add_to_list_two(ev_id)
mainline_map = {ev_id: i + 1 for i, ev_id in enumerate(mainline)}
def get_mainline_depth(event_id):
if event_id in mainline_map:
return mainline_map[event_id]
ev = event_map[event_id]
if not ev.auth_events:
return 0
depth = max(
get_mainline_depth(aid)
for aid, _ in ev.auth_events
)
return depth
leftover_events_map = {
ev_id: get_mainline_depth(ev_id)
for ev_id in leftover_events
}
leftover_events.sort(key=lambda ev_id: (leftover_events_map[ev_id], ev_id))
for event_id in leftover_events:
event = event_map[event_id]
auth_events = {}
for aid, _ in event.auth_events:
aev = event_map[aid]
auth_events[(aev.type, aev.state_key)] = aev
for key, eid in overridden_state.items():
auth_events[key] = event_map[eid]
try:
event_auth.check(
event, auth_events,
do_sig_check=False,
do_size_check=False
)
allowed = True
overridden_state[(event.type, event.state_key)] = event_id
except AuthError:
allowed = False
event_id_to_auth[event_id] = allowed
for key, conflicted_ids in conflicted_state.items():
sorted_conflicts = []
for eid in leftover_events:
if eid in conflicted_ids:
sorted_conflicts.append(eid)
sorted_conflicts.reverse()
for eid in sorted_conflicts:
if event_id_to_auth[eid]:
resolved_eid = eid
resolved_state[key] = resolved_eid
break
resolved_state.update(unconflicted_state)
return resolved_state | 90b8f78e46e13904a9c898cda417378964667ff8 | 5,011 |
def parse_study(study):
"""Parse study
Args:
study (object): object from DICOMDIR level 1 object (children of patient_record)
Returns:
        children_object : study.children
        appending_keys : study_date, study_time, study_des
"""
#study_id = study.StudyID
study_date = study.StudyDate
study_time = study.StudyTime
study_des = study.StudyDescription
return study.children, study_date, study_time, study_des | d0e85d991e4f2f13e6f2bd87c0823858ea9c83bc | 5,012 |
def list_organizational_units_for_parent_single_page(self, **kwargs):
"""
This will continue to call list_organizational_units_for_parent until there are no more pages left to retrieve.
It will return the aggregated response in the same structure as list_organizational_units_for_parent does.
:param self: organizations client
:param kwargs: these are passed onto the list_organizational_units_for_parent method call
:return: organizations_client.list_organizational_units_for_parent.response
"""
return slurp(
'list_organizational_units_for_parent',
self.list_organizational_units_for_parent,
'OrganizationalUnits',
'NextToken', 'NextToken',
**kwargs
) | 73e942d59026830aac528b9dd358f08ebe8a66b3 | 5,013 |
def daemon(target, name=None, args=None, kwargs=None, after=None):
"""
Create and start a daemon thread.
    It is the same as `start()` except that it sets the argument `daemon=True`.
"""
return start(target, name=name, args=args, kwargs=kwargs,
daemon=True, after=after) | 27d608c9cc5be1ab45abe9666e52bbbf89a1f066 | 5,014 |
import threading
def add_image_to_obj(obj, img, *args, **kwargs):
"""
"""
# skip everything if there is no image
    if img is None:
return None
# find out of the object is an artist or an album
# then add the artist or the album to the objects
objs = {}
if isinstance(obj, Artist):
objs['artist'] = obj
t = 'artist'
elif isinstance(obj, Album):
objs['album'] = obj
t = 'album'
# delete old objects in S3 if editing:
reprocess = kwargs.pop('reprocess', False)
editing = kwargs.pop('edit', False)
if editing:
prefix = f"images/{obj.__class__.__name__}/{str(obj.uri)}/"
# delete the old objects from the database and S3
if settings.USE_S3:
s3 = boto3.resource('s3')
image_mngr = getattr(obj, f"{t}_image")
images = image_mngr.all()
for item in images:
if item.file:
if settings.USE_S3:
s3.Object(settings.AWS_STORAGE_BUCKET_NAME, item.file.name)#.delete()
# else: # delete file locally... who cares...
if reprocess:
if not item.is_original:
item.delete()
else:
item.delete()
def process_image(image_obj):
width, height = get_image_dimensions(image_obj.file.file)
image_obj.width = width
image_obj.height = height
image_obj.save()
# post processing, creating duplicates, etc...
# create new thread...
t = threading.Thread(target=resize_image_async, args=[image_obj])
t.setDaemon(True)
t.start()
return image_obj
# create the object
if type(img) == str:
image_obj = Image.objects.create(reference=img, is_original=True, height=1, width=1, **objs)
elif type(img) == dict:
        image_obj = Image.objects.create(reference=img['image'], is_original=True, height=img['height'], width=img['width'], **objs)
else: # image is the file
if reprocess:
image_obj = Image.objects.filter(**{f"{t}": obj, 'is_original': True}).first()
else:
image_obj = Image.objects.create(file=img, is_original=True, height=1, width=1, **objs) # image is stored in S3
image_obj = process_image(image_obj)
return image_obj | 60b2f9eb871e5b4943b4ab68c817afdd8cf47cab | 5,015 |
from datetime import datetime
from random import randint
def rr_category_ad(context, ad_zone, ad_category, index=0):
"""
Returns a rr advert from the specified category based on index.
Usage:
{% load adzone_tags %}
{% rr_category_ad 'zone_slug' 'my_category_slug' 1 %}
"""
to_return = {'random_int': randint(1000000, 10000000)}
# Retrieve a rr ad for the category and zone
ad = AdBase.objects.get_rr_ad(ad_zone, ad_category, index)
to_return['ad'] = ad
    # Record an impression for the ad
if settings.ADZONE_LOG_AD_IMPRESSIONS and 'from_ip' in context and ad:
from_ip = context.get('from_ip')
try:
AdImpression.objects.create(
ad=ad, impression_date=datetime.now(), source_ip=from_ip)
except Exception:
pass
return to_return | db78853ebdf64267e2cca217589ac309706333a1 | 5,016 |
def decoderCNN(x, layers):
""" Construct the Decoder
x : input to decoder
layers : the number of filters per layer (in encoder)
"""
# Feature unpooling by 2H x 2W
for _ in range(len(layers) - 1, 0, -1):
n_filters = layers[_]
x = Conv2DTranspose(n_filters, (3, 3), strides=(2, 2), padding='same', use_bias=False,
kernel_initializer='he_normal')(x)
x = BatchNormalization()(x)
x = ReLU()(x)
# Last unpooling, restore number of channels
x = Conv2DTranspose(1, (3, 3), strides=(2, 2), padding='same', use_bias=False, kernel_initializer='he_normal')(x)
x = BatchNormalization()(x)
x = ReLU()(x)
return x | 9c48c5242b0793c71385f9137729393d17d2db06 | 5,017 |
import numpy as np
import torch
def binary_dice_iou_score(
y_pred: torch.Tensor,
y_true: torch.Tensor,
mode="dice",
threshold=None,
nan_score_on_empty=False,
eps=1e-7,
ignore_index=None,
) -> float:
"""
Compute IoU score between two image tensors
:param y_pred: Input image tensor of any shape
:param y_true: Target image of any shape (must match size of y_pred)
:param mode: Metric to compute (dice, iou)
:param threshold: Optional binarization threshold to apply on @y_pred
:param nan_score_on_empty: If true, return np.nan if target has no positive pixels;
If false, return 1. if both target and input are empty, and 0 otherwise.
:param eps: Small value to add to denominator for numerical stability
:param ignore_index:
:return: Float scalar
"""
assert mode in {"dice", "iou"}
# Make binary predictions
if threshold is not None:
y_pred = (y_pred > threshold).to(y_true.dtype)
if ignore_index is not None:
mask = (y_true != ignore_index).to(y_true.dtype)
y_true = y_true * mask
y_pred = y_pred * mask
intersection = torch.sum(y_pred * y_true).item()
cardinality = (torch.sum(y_pred) + torch.sum(y_true)).item()
if mode == "dice":
score = (2.0 * intersection) / (cardinality + eps)
else:
score = intersection / (cardinality - intersection + eps)
has_targets = torch.sum(y_true) > 0
has_predicted = torch.sum(y_pred) > 0
if not has_targets:
if nan_score_on_empty:
score = np.nan
else:
score = float(not has_predicted)
return score | 9d4b751dbdd9c3b7e5f2490c7f7cd8ac08868233 | 5,018 |
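A small worked check of binary_dice_iou_score, assuming PyTorch is available as imported above (NumPy is only touched in the empty-target branch).
import torch

y_true = torch.tensor([0.0, 1.0, 1.0, 0.0])
y_pred = torch.tensor([0.1, 0.9, 0.8, 0.2])
# After thresholding at 0.5 the prediction matches the target exactly,
# so both metrics come out at ~1.0.
print(binary_dice_iou_score(y_pred, y_true, mode="dice", threshold=0.5))
print(binary_dice_iou_score(y_pred, y_true, mode="iou", threshold=0.5))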
def get_uname_arch():
"""
Returns arch of the current host as the kernel would interpret it
"""
global _uname_arch # pylint: disable=global-statement
if not _uname_arch:
_uname_arch = detect_uname_arch()
return _uname_arch | b30946675f6cad155eab3f81b711618551b49f44 | 5,019 |
from typing import Tuple
def _getSTSToken() -> Tuple[str, BosClient, str]:
"""
Get the token to upload the file
:return:
"""
if not Define.hubToken:
raise Error.ArgumentError('Please provide a valid token', ModuleErrorCode, FileErrorCode, 4)
config = _invokeBackend("circuit/genSTS", {"token": Define.hubToken})
    bosClient = BosClient(
        BceClientConfiguration(
            credentials=BceCredentials(
                str(config['accessKeyId']),
                str(config['secretAccessKey'])),
            endpoint='http://bd.bcebos.com',
            security_token=str(config['sessionToken'])))
return Define.hubToken, bosClient, config['dest'] | 553844ce8530911bab70fc823bdec65b058b70a4 | 5,020 |
import pickle
def load_pickle(filename):
"""Load Pickfle file"""
filehandler = open(filename, 'rb')
return pickle.load(filehandler) | f93b13616f94c31bc2673232de14b834a8163c5f | 5,022 |
import json
from pprint import pformat
def columnize(s, header=None, width=40):
"""Dump an object and make each line the given width
The input data will run though `json.loads` in case it is a JSON object
Args:
s (str): Data to format
header (optional[str]): Header to prepend to formatted results
width (optional[int]): Max width of the resulting lines
Returns:
list[str]: List of formatted lines
"""
try:
j = json.loads(s)
except: # Assume that the value is a string
j = s
    s = pformat(j, width=width)
ls = [l.ljust(width) for l in s.splitlines()]
if header is not None:
ls.insert(0, header.ljust(width))
ls.insert(1, '-' * width)
return ls | 36343f682677f04d0b3670882539e58b48146c46 | 5,023 |
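A quick illustration of columnize output, assuming the pformat import and the width=width fix above.
for line in columnize('[1, 2, 3]', header="data", width=12):
    print(repr(line))
# 'data        '
# '------------'
# '[1, 2, 3]   '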
def create_eeg_epochs(config):
"""Create the data with each subject data in a dictionary.
Parameter
----------
subject : string of subject ID e.g. 7707
trial : HighFine, HighGross, LowFine, LowGross
Returns
----------
eeg_epoch_dataset : dataset of all the subjects with different conditions
"""
eeg_epoch_dataset = {}
for subject in config['subjects']:
data = nested_dict()
for trial in config['trials']:
epochs = eeg_epochs_dataset(subject, trial, config)
data['eeg'][trial] = epochs
eeg_epoch_dataset[subject] = data
return eeg_epoch_dataset | a33abcb056b9e94a637e58a42936b886e90a94f2 | 5,024 |
def to_newick(phylo):
"""
Returns a string representing the simplified Newick code of the input.
:param: `PhyloTree` instance.
:return: `str` instance.
"""
return phylo_to_newick_node(phylo).newick | 814610413223e37a6417ff8525262f0beb2e8091 | 5,025 |
import functools
def pipe(*functions):
"""
pipes functions one by one in the provided order
i.e. applies arg1, then arg2, then arg3, and so on
if any arg is None, just skips it
"""
return functools.reduce(
lambda f, g: lambda x: f(g(x)) if g else f(x),
functions[::-1],
lambda x: x) if functions else None | f58afedd5c7fe83edd605b12ca0e468657a78b56 | 5,026 |
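A short usage sketch of pipe; functions are applied left to right because the reduce runs over the reversed argument list, and None entries are skipped.
add_one = lambda x: x + 1
double = lambda x: x * 2
f = pipe(add_one, double, None)
print(f(3))   # -> 8, i.e. double(add_one(3))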
import torch
def initialize_graph_batch(batch_size):
""" Initialize a batch of empty graphs to begin the generation process.
Args:
batch_size (int) : Batch size.
Returns:
generated_nodes (torch.Tensor) : Empty node features tensor (batch).
generated_edges (torch.Tensor) : Empty edge features tensor (batch).
generated_n_nodes (torch.Tensor) : Number of nodes per graph in `nodes` and `edges`
(batch), currently all 0.
"""
# define tensor shapes
node_shape = ([batch_size + 1] + C.dim_nodes)
edge_shape = ([batch_size + 1] + C.dim_edges)
# initialize tensors
nodes = torch.zeros(node_shape, dtype=torch.float32, device="cuda")
edges = torch.zeros(edge_shape, dtype=torch.float32, device="cuda")
n_nodes = torch.zeros(batch_size + 1, dtype=torch.int64, device="cuda")
# add a dummy non-empty graph at top, since models cannot receive as input
# purely empty graphs
nodes[0] = torch.ones(([1] + C.dim_nodes), device="cuda")
edges[0, 0, 0, 0] = 1
n_nodes[0] = 1
return nodes, edges, n_nodes | f7ae56b3a0d728dd0fd4b40a3e45e960f65bcf31 | 5,028 |
def TimestampFromTicks(ticks):
"""Construct an object holding a timestamp value from the given ticks value
(number of seconds since the epoch).
This function is part of the `DBAPI 2.0 specification
<http://www.python.org/dev/peps/pep-0249/>`_.
:rtype: :class:`datetime.datetime`
"""
return Timestamp(*localtime(ticks)[:6]) | ccb377b793d600d1a98363e35fc6cd041517b50a | 5,029 |
from typing import Counter
def extract_object_token(data, num_tokens, obj_list=[], verbose=True):
""" Builds a set that contains the object names. Filters infrequent tokens. """
token_counter = Counter()
for img in data:
for region in img['objects']:
for name in region['names']:
if not obj_list or name in obj_list:
token_counter.update([name])
tokens = set()
# pick top N tokens
token_counter_return = {}
for token, count in token_counter.most_common():
tokens.add(token)
token_counter_return[token] = count
if len(tokens) == num_tokens:
break
if verbose:
print(('Keeping %d / %d objects'
% (len(tokens), len(token_counter))))
return tokens, token_counter_return | c35ea7a9eaa2f259c9b38b47e3c982b9ee11682b | 5,030 |
def test_lambda_expressions():
"""Lambda 表达式"""
# 这个函数返回两个参数的和:lambda a, b: a+b
# 与嵌套函数定义一样,lambda函数可以引用包含范围内的变量。
def make_increment_function(delta):
"""本例使用 lambda 表达式返回函数"""
return lambda number: number + delta
increment_function = make_increment_function(42)
assert increment_function(0) == 42
assert increment_function(1) == 43
assert increment_function(2) == 44
    # Another use of lambda is to pass a small function as an argument.
pairs = [(1, 'one'), (2, 'two'), (3, 'three'), (4, 'four')]
    # Sort the pairs by their text key.
pairs.sort(key=lambda pair: pair[1])
assert pairs == [(4, 'four'), (1, 'one'), (3, 'three'), (2, 'two')] | e727df25b2165bb0cd7c9cce47700e86d37a2a1a | 5,031 |
def _generate_with_relative_time(initial_state, condition, iterate, time_mapper) -> Observable:
"""Generates an observable sequence by iterating a state from an
initial state until the condition fails.
Example:
res = source.generate_with_relative_time(0, lambda x: True, lambda x: x + 1, lambda x: 0.5)
Args:
initial_state: Initial state.
condition: Condition to terminate generation (upon returning
false).
iterate: Iteration step function.
time_mapper: Time mapper function to control the speed of
values being produced each iteration, returning relative times, i.e.
either floats denoting seconds or instances of timedelta.
Returns:
The generated sequence.
"""
def subscribe(observer, scheduler=None):
scheduler = scheduler or timeout_scheduler
mad = MultipleAssignmentDisposable()
state = [initial_state]
has_result = [False]
result = [None]
first = [True]
time = [None]
def action(scheduler, _):
if has_result[0]:
observer.on_next(result[0])
try:
if first[0]:
first[0] = False
else:
state[0] = iterate(state[0])
has_result[0] = condition(state[0])
if has_result[0]:
result[0] = state[0]
time[0] = time_mapper(state[0])
except Exception as e:
observer.on_error(e)
return
if has_result[0]:
mad.disposable = scheduler.schedule_relative(time[0], action)
else:
observer.on_completed()
mad.disposable = scheduler.schedule_relative(0, action)
return mad
return Observable(subscribe) | d3f5549f94125065b387515299014b5701411be8 | 5,032 |
def is_prime(num):
"""判断一个数是不是素数"""
for factor in range(2, int(num ** 0.5) + 1):
if num % factor == 0:
return False
return True if num != 1 else False | c0e8435b046a87dd15278149f5e1af7258634a01 | 5,033 |
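A quick sanity check of is_prime.
print([n for n in range(1, 20) if is_prime(n)])   # -> [2, 3, 5, 7, 11, 13, 17, 19]
print(is_prime(1))                                # -> False (1 is not prime)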
def ptcorr(y1, y2, dim=-1, eps=1e-8, **kwargs):
"""
Compute the correlation between two PyTorch tensors along the specified dimension(s).
Args:
y1: first PyTorch tensor
y2: second PyTorch tensor
dim: dimension(s) along which the correlation is computed. Any valid PyTorch dim spec works here
eps: offset to the standard deviation to avoid exploding the correlation due to small division (default 1e-8)
        **kwargs: passed to the final mean operation over the standardized y1 * y2
Returns: correlation tensor
"""
y1 = (y1 - y1.mean(dim=dim, keepdim=True)) / (y1.std(dim=dim, keepdim=True) + eps)
y2 = (y2 - y2.mean(dim=dim, keepdim=True)) / (y2.std(dim=dim, keepdim=True) + eps)
return (y1 * y2).mean(dim=dim, **kwargs) | 140cad4de4452edeb5ea0fb3e50267c66df948c1 | 5,034 |
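A minimal check of ptcorr, assuming PyTorch tensors as inputs; the value approaches +/-1 rather than hitting it exactly because torch.std is unbiased.
import torch

a = torch.arange(1000, dtype=torch.float32)
b = 3 * a + 2              # perfectly (positively) correlated with a
print(ptcorr(a, b))        # -> tensor(~0.999)
print(ptcorr(a, -b))       # -> tensor(~-0.999)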
import typing
def discretize_time_difference(
times, initial_time, frequency, integer_timestamps=False
) -> typing.Sequence[int]:
"""method that discretizes sequence of datetimes (for prediction slices)
Arguments:
times {Sequence[datetime] or Sequence[float]} -- sequence of datetime objects
initial_time {datetime or float} -- last datetime instance from training set
(to offset test datetimes)
frequency {str} -- string alias representing granularity of pd.datetime object
Keyword Arguments:
integer_timestamps {bool} -- whether timestamps are integers or datetime values
Returns:
typing.Sequence[int] -- prediction intervals expressed at specific time granularity
"""
# take differences to convert to deltas
time_differences = times - initial_time
# edge case for integer timestamps
if integer_timestamps:
return time_differences.values.astype(int)
# convert to seconds representation
if type(time_differences.iloc[0]) is pd._libs.tslibs.timedeltas.Timedelta:
time_differences = time_differences.apply(lambda t: t.total_seconds())
if frequency == "YS":
return [round(x / S_PER_YEAR_0) for x in time_differences]
elif frequency == "MS" or frequency == 'M':
return [round(x / S_PER_MONTH_31) for x in time_differences]
elif frequency == "W":
return [round(x / S_PER_WEEK) for x in time_differences]
elif frequency == "D":
return [round(x / S_PER_DAY) for x in time_differences]
elif frequency == "H":
return [round(x / S_PER_HR) for x in time_differences]
else:
return [round(x / SECONDS_PER_MINUTE) for x in time_differences] | 871726102dbdedfbc92570bff4f73faf1054e986 | 5,035 |
def pathlines(u_netcdf_filename,v_netcdf_filename,w_netcdf_filename,
startx,starty,startz,startt,
t,
grid_object,
t_max,delta_t,
u_netcdf_variable='UVEL',
v_netcdf_variable='VVEL',
w_netcdf_variable='WVEL',
u_grid_loc='U',v_grid_loc='V',w_grid_loc='W',
u_bias_field=None,
v_bias_field=None,
w_bias_field=None):
"""!A three-dimensional lagrangian particle tracker. The velocity fields must be four dimensional (three spatial, one temporal) and have units of m/s.
It should work to track particles forwards or backwards in time (set delta_t <0 for backwards in time). But, be warned, backwards in time hasn't been thoroughly tested yet.
Because this is a very large amount of data, the fields are passed as netcdffile handles.
The variables are:
* ?_netcdf_filename = name of the netcdf file with ?'s data in it.
    * start? = initial value for x, y, z, or t.
* t = vector of time levels that are contained in the velocity data.
* grid_object is m.grid if you followed the standard naming conventions.
* ?_netcdf_variable = name of the "?" variable field in the netcdf file.
* t_max = length of time to track particles for, in seconds. This is always positive
* delta_t = timestep for particle tracking algorithm, in seconds. This can be positive or negative.
* ?_grid_loc = where the field "?" is located on the C-grid. Possibles options are, U, V, W, T and Zeta.
* ?_bias_field = bias to add to that velocity field omponent. If set to -mean(velocity component), then only the time varying portion of that field will be used.
"""
if u_grid_loc == 'U':
x_u = grid_object['Xp1'][:]
y_u = grid_object['Y'][:]
z_u = grid_object['Z'][:]
elif u_grid_loc == 'V':
x_u = grid_object['X'][:]
y_u = grid_object['Yp1'][:]
z_u = grid_object['Z'][:]
elif u_grid_loc == 'W':
x_u = grid_object['X'][:]
y_u = grid_object['Y'][:]
z_u = grid_object['Zl'][:]
elif u_grid_loc == 'T':
x_u = grid_object['X'][:]
y_u = grid_object['Y'][:]
z_u = grid_object['Z'][:]
elif u_grid_loc == 'Zeta':
x_u = grid_object['Xp1'][:]
y_u = grid_object['Yp1'][:]
z_u = grid_object['Z'][:]
else:
        print('u_grid_loc not set correctly. Possible options are: U,V,W,T, and Zeta')
return
if v_grid_loc == 'U':
x_v = grid_object['Xp1'][:]
y_v = grid_object['Y'][:]
z_v = grid_object['Z'][:]
elif v_grid_loc == 'V':
x_v = grid_object['X'][:]
y_v = grid_object['Yp1'][:]
z_v = grid_object['Z'][:]
elif v_grid_loc == 'W':
x_v = grid_object['X'][:]
y_v = grid_object['Y'][:]
z_v = grid_object['Zl'][:]
elif v_grid_loc == 'T':
x_v = grid_object['X'][:]
y_v = grid_object['Y'][:]
z_v = grid_object['Z'][:]
elif v_grid_loc == 'Zeta':
x_v = grid_object['Xp1'][:]
y_v = grid_object['Yp1'][:]
z_v = grid_object['Z'][:]
else:
        print('v_grid_loc not set correctly. Possible options are: U,V,W,T, and Zeta')
return
if w_grid_loc == 'U':
x_w = grid_object['Xp1'][:]
y_w = grid_object['Y'][:]
z_w = grid_object['Z'][:]
elif w_grid_loc == 'V':
x_w = grid_object['X'][:]
y_w = grid_object['Yp1'][:]
z_w = grid_object['Z'][:]
elif w_grid_loc == 'W':
x_w = grid_object['X'][:]
y_w = grid_object['Y'][:]
z_w = grid_object['Zl'][:]
elif w_grid_loc == 'T':
x_w = grid_object['X'][:]
y_w = grid_object['Y'][:]
z_w = grid_object['Z'][:]
elif w_grid_loc == 'Zeta':
x_w = grid_object['Xp1'][:]
y_w = grid_object['Yp1'][:]
z_w = grid_object['Z'][:]
else:
        print('w_grid_loc not set correctly. Possible options are: U,V,W,T, and Zeta')
return
len_x_u = len(x_u)
len_y_u = len(y_u)
len_z_u = len(z_u)
len_x_v = len(x_v)
len_y_v = len(y_v)
len_z_v = len(z_v)
len_x_w = len(x_w)
len_y_w = len(y_w)
len_z_w = len(z_w)
len_t = len(t)
if u_bias_field is None:
u_bias_field = np.zeros_like(grid_object['wet_mask_U'][:])
if v_bias_field is None:
v_bias_field = np.zeros_like(grid_object['wet_mask_V'][:])
if w_bias_field is None:
w_bias_field = np.zeros_like(grid_object['wet_mask_W'][:])
x_stream = np.ones((int(np.fabs(t_max/delta_t))+2))*startx
y_stream = np.ones((int(np.fabs(t_max/delta_t))+2))*starty
z_stream = np.ones((int(np.fabs(t_max/delta_t))+2))*startz
t_stream = np.ones((int(np.fabs(t_max/delta_t))+2))*startt
t_RK = startt #set the initial time to be the given start time
z_RK = startz
y_RK = starty
x_RK = startx
i=0
u_netcdf_filehandle = netCDF4.Dataset(u_netcdf_filename)
v_netcdf_filehandle = netCDF4.Dataset(v_netcdf_filename)
w_netcdf_filehandle = netCDF4.Dataset(w_netcdf_filename)
t_index = np.searchsorted(t,t_RK)
t_index_new = np.searchsorted(t,t_RK) # this is later used to test if new data needs to be read in.
if t_index == 0:
raise ValueError('Given time value is outside the given velocity fields - too small')
elif t_index == len_t:
raise ValueError('Given time value is outside the given velocity fields - too big')
# load fields in ready for the first run through the loop
# u
u_field,x_index_u,y_index_u,z_index_u = indices_and_field(x_u,y_u,z_u,
x_RK,y_RK,z_RK,t_index,
len_x_u,len_y_u,len_z_u,len_t,
u_netcdf_filehandle,u_netcdf_variable,u_bias_field)
u_field,x_index_u_new,y_index_u_new,z_index_u_new = indices_and_field(x_u,y_u,z_u,
x_RK,y_RK,z_RK,t_index,
len_x_u,len_y_u,len_z_u,len_t,
u_netcdf_filehandle,u_netcdf_variable,u_bias_field)
# v
v_field,x_index_v,y_index_v,z_index_v = indices_and_field(x_v,y_v,z_v,
x_RK,y_RK,z_RK,t_index,
len_x_v,len_y_v,len_z_v,len_t,
v_netcdf_filehandle,v_netcdf_variable,v_bias_field)
v_field,x_index_v_new,y_index_v_new,z_index_v_new = indices_and_field(x_v,y_v,z_v,
x_RK,y_RK,z_RK,t_index,
len_x_v,len_y_v,len_z_v,len_t,
v_netcdf_filehandle,v_netcdf_variable,v_bias_field)
# w
w_field,x_index_w,y_index_w,z_index_w = indices_and_field(x_w,y_w,z_w,
x_RK,y_RK,z_RK,t_index,
len_x_w,len_y_w,len_z_w,len_t,
w_netcdf_filehandle,w_netcdf_variable,w_bias_field)
w_field,x_index_w_new,y_index_w_new,z_index_w_new = indices_and_field(x_w,y_w,z_w,
x_RK,y_RK,z_RK,t_index,
len_x_w,len_y_w,len_z_w,len_t,
w_netcdf_filehandle,w_netcdf_variable,w_bias_field)
# Prepare for spherical polar grids
deg_per_m = np.array([1,1])
# Runge-Kutta fourth order method to estimate next position.
while i < np.fabs(t_max/delta_t):
#t_RK < t_max + startt:
if grid_object['grid_type']=='polar':
            # use degrees per metre and convert all the velocities to degrees / second
            # calculate degrees per metre at current location - used to convert the m/s velocities into degrees/s
deg_per_m = np.array([1./(1852.*60.),np.cos(starty*np.pi/180.)/(1852.*60.)])
# Compute indices at location given
if (y_index_u_new==y_index_u and
x_index_u_new==x_index_u and
z_index_u_new==z_index_u and
y_index_v_new==y_index_v and
x_index_v_new==x_index_v and
z_index_v_new==z_index_v and
y_index_w_new==y_index_w and
x_index_w_new==x_index_w and
z_index_w_new==z_index_w and
t_index_new == t_index):
# the particle hasn't moved out of the grid cell it was in.
# So the loaded field is fine; there's no need to reload it.
pass
else:
t_index = np.searchsorted(t,t_RK)
if t_index == 0:
raise ValueError('Given time value is outside the given velocity fields - too small')
elif t_index == len_t:
raise ValueError('Given time value is outside the given velocity fields - too big')
# for u
u_field,x_index_u,y_index_u,z_index_u = indices_and_field(x_u,y_u,z_u,
x_RK,y_RK,z_RK,t_index,
len_x_u,len_y_u,len_z_u,len_t,
u_netcdf_filehandle,u_netcdf_variable,u_bias_field)
# for v
v_field,x_index_v,y_index_v,z_index_v = indices_and_field(x_v,y_v,z_v,
x_RK,y_RK,z_RK,t_index,
len_x_v,len_y_v,len_z_v,len_t,
v_netcdf_filehandle,v_netcdf_variable,v_bias_field)
# for w
w_field,x_index_w,y_index_w,z_index_w = indices_and_field(x_w,y_w,z_w,
x_RK,y_RK,z_RK,t_index,
len_x_w,len_y_w,len_z_w,len_t,
w_netcdf_filehandle,w_netcdf_variable,w_bias_field)
# Interpolate velocities to initial location
u_loc = quadralinear_interp(x_RK,y_RK,z_RK,t_RK,
u_field,
x_u,y_u,z_u,t,
len_x_u,len_y_u,len_z_u,len_t,
x_index_u,y_index_u,z_index_u,t_index)
v_loc = quadralinear_interp(x_RK,y_RK,z_RK,t_RK,
v_field,
x_v,y_v,z_v,t,len_x_v,len_y_v,len_z_v,len_t,
x_index_v,y_index_v,z_index_v,t_index)
w_loc = quadralinear_interp(x_RK,y_RK,z_RK,t_RK,
w_field,
x_w,y_w,z_w,t,len_x_w,len_y_w,len_z_w,len_t,
x_index_w,y_index_w,z_index_w,t_index)
u_loc = u_loc*deg_per_m[1]
v_loc = v_loc*deg_per_m[0]
dx1 = delta_t*u_loc
dy1 = delta_t*v_loc
dz1 = delta_t*w_loc
u_loc1 = quadralinear_interp(x_RK + 0.5*dx1,y_RK + 0.5*dy1,z_RK + 0.5*dz1,t_RK + 0.5*delta_t,
u_field,
x_u,y_u,z_u,t,len_x_u,len_y_u,len_z_u,len_t,
x_index_u,y_index_u,z_index_u,t_index)
v_loc1 = quadralinear_interp(x_RK + 0.5*dx1,y_RK + 0.5*dy1,z_RK + 0.5*dz1,t_RK + 0.5*delta_t,
v_field,
x_v,y_v,z_v,t,len_x_v,len_y_v,len_z_v,len_t,
x_index_v,y_index_v,z_index_v,t_index)
w_loc1 = quadralinear_interp(x_RK + 0.5*dx1,y_RK + 0.5*dy1,z_RK + 0.5*dz1,t_RK + 0.5*delta_t,
w_field,
x_w,y_w,z_w,t,len_x_w,len_y_w,len_z_w,len_t,
x_index_w,y_index_w,z_index_w,t_index)
u_loc1 = u_loc1*deg_per_m[1]
v_loc1 = v_loc1*deg_per_m[0]
dx2 = delta_t*u_loc1
dy2 = delta_t*v_loc1
dz2 = delta_t*w_loc1
u_loc2 = quadralinear_interp(x_RK + 0.5*dx2,y_RK + 0.5*dy2,z_RK + 0.5*dz2,t_RK + 0.5*delta_t,
u_field,
x_u,y_u,z_u,t,len_x_u,len_y_u,len_z_u,len_t,
x_index_u,y_index_u,z_index_u,t_index)
v_loc2 = quadralinear_interp(x_RK + 0.5*dx2,y_RK + 0.5*dy2,z_RK + 0.5*dz2,t_RK + 0.5*delta_t,
v_field,
x_v,y_v,z_v,t,len_x_v,len_y_v,len_z_v,len_t,
x_index_v,y_index_v,z_index_v,t_index)
w_loc2 = quadralinear_interp(x_RK + 0.5*dx2,y_RK + 0.5*dy2,z_RK + 0.5*dz2,t_RK + 0.5*delta_t,
w_field,
x_w,y_w,z_w,t,len_x_w,len_y_w,len_z_w,len_t,
x_index_w,y_index_w,z_index_w,t_index)
u_loc2 = u_loc2*deg_per_m[1]
v_loc2 = v_loc2*deg_per_m[0]
dx3 = delta_t*u_loc2
dy3 = delta_t*v_loc2
dz3 = delta_t*w_loc2
u_loc3 = quadralinear_interp(x_RK + dx3,y_RK + dy3,z_RK + dz3,t_RK + delta_t,
u_field,
x_u,y_u,z_u,t,len_x_u,len_y_u,len_z_u,len_t,
x_index_u,y_index_u,z_index_u,t_index)
v_loc3 = quadralinear_interp(x_RK + dx3,y_RK + dy3,z_RK + dz3,t_RK + delta_t,
v_field,
x_v,y_v,z_v,t,len_x_v,len_y_v,len_z_v,len_t,
x_index_v,y_index_v,z_index_v,t_index)
w_loc3 = quadralinear_interp(x_RK + dx3,y_RK + dy3,z_RK + dz3,t_RK + delta_t,
w_field,
x_w,y_w,z_w,t,len_x_w,len_y_w,len_z_w,len_t,
x_index_w,y_index_w,z_index_w,t_index)
u_loc3 = u_loc3*deg_per_m[1]
v_loc3 = v_loc3*deg_per_m[0]
dx4 = delta_t*u_loc3
dy4 = delta_t*v_loc3
dz4 = delta_t*w_loc3
#recycle the variables to keep the code clean
x_RK = x_RK + (dx1 + 2*dx2 + 2*dx3 + dx4)/6
y_RK = y_RK + (dy1 + 2*dy2 + 2*dy3 + dy4)/6
z_RK = z_RK + (dz1 + 2*dz2 + 2*dz3 + dz4)/6
t_RK += delta_t
i += 1
x_stream[i] = x_RK
y_stream[i] = y_RK
z_stream[i] = z_RK
t_stream[i] = t_RK
t_index_new = np.searchsorted(t,t_RK)
x_index_w_new = np.searchsorted(x_w,x_RK)
y_index_w_new = np.searchsorted(y_w,y_RK)
if z_RK < 0:
z_index_w_new = np.searchsorted(-z_w,-z_RK)
else:
z_index_w_new = np.searchsorted(z_w,z_RK)
x_index_v_new = np.searchsorted(x_v,x_RK)
y_index_v_new = np.searchsorted(y_v,y_RK)
if z_RK < 0:
z_index_v_new = np.searchsorted(-z_v,-z_RK)
else:
z_index_v_new = np.searchsorted(z_v,z_RK)
x_index_u_new = np.searchsorted(x_u,x_RK)
y_index_u_new = np.searchsorted(y_u,y_RK)
if z_RK < 0:
z_index_u_new = np.searchsorted(-z_u,-z_RK)
else:
z_index_u_new = np.searchsorted(z_u,z_RK)
u_netcdf_filehandle.close()
v_netcdf_filehandle.close()
w_netcdf_filehandle.close()
return x_stream,y_stream,z_stream,t_stream | 2c7da1a6de8157c690fb6ea57e30906108728711 | 5,036 |
def firing_rate(x, theta=0.5, alpha=0.12):
""" Sigmoidal firing rate function
Parameters
----------
x : float
Mean membrane potential.
theta : float
Inflection point (mean firing activity) of sigmoidal curve (default
        value 0.5)
alpha : float
Steepness of sigmoidal curve (default value 0.12)
Returns
-------
f : float
Firing rate of x.
"""
expo = np.exp((theta - x) / alpha)
f = 1 / (1 + expo)
return f | ddb4ce078f8613a088971d4ed0a4a71d746772b5 | 5,037 |
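Evaluating firing_rate at, above, and below the inflection point illustrates the parameters (NumPy is assumed to be bound as np, as the body requires).
import numpy as np

print(firing_rate(0.5))   # -> 0.5 exactly: x equals the inflection point theta
print(firing_rate(1.0))   # -> ~0.985: well above theta
print(firing_rate(0.0))   # -> ~0.015: well below theta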
def map_points(pois, sample_size=-1, kwd=None, show_bbox=False, tiles='OpenStreetMap', width='100%', height='100%'):
"""Returns a Folium Map displaying the provided points. Map center and zoom level are set automatically.
Args:
pois (GeoDataFrame): A GeoDataFrame containing the POIs to be displayed.
sample_size (int): Sample size (default: -1; show all).
kwd (string): A keyword to filter by (optional).
show_bbox (bool): Whether to show the bounding box of the GeoDataFrame (default: False).
tiles (string): The tiles to use for the map (default: `OpenStreetMap`).
width (integer or percentage): Width of the map in pixels or percentage (default: 100%).
height (integer or percentage): Height of the map in pixels or percentage (default: 100%).
Returns:
A Folium Map object displaying the given POIs.
"""
# Set the crs to WGS84
pois = to_wgs84(pois)
# Filter by keyword
if kwd is None:
pois_filtered = pois
else:
pois_filtered = filter_by_kwd(pois, kwd)
# Pick a sample
if sample_size > 0 and sample_size < len(pois_filtered.index):
pois_filtered = pois_filtered.sample(sample_size)
# Automatically center the map at the center of the bounding box enclosing the POIs.
bb = bbox(pois_filtered)
map_center = [bb.centroid.y, bb.centroid.x]
# Initialize the map
m = folium.Map(location=map_center, tiles=tiles, width=width, height=height)
# Automatically set the zoom level
m.fit_bounds(([bb.bounds[1], bb.bounds[0]], [bb.bounds[3], bb.bounds[2]]))
# Create the marker cluster
locations = list(zip(pois_filtered.geometry.y.tolist(),
pois_filtered.geometry.x.tolist(),
pois_filtered.id.tolist(),
pois_filtered.name.tolist(),
pois_filtered.kwds.tolist()))
callback = """\
function (row) {
var icon, marker;
icon = L.AwesomeMarkers.icon({
icon: 'map-marker', markerColor: 'blue'});
marker = L.marker(new L.LatLng(row[0], row[1]));
marker.setIcon(icon);
var popup = L.popup({height: '300'});
popup.setContent(row[2] + '<br/>' + row[3] + '<br/>' + row[4]);
marker.bindPopup(popup);
return marker;
};
"""
m.add_child(folium.plugins.FastMarkerCluster(locations, callback=callback))
# Add pois to a marker cluster
#coords, popups = [], []
#for idx, poi in pois.iterrows():
# coords.append([poi.geometry.y, poi.geometry.x)]
# label = str(poi['id']) + '<br>' + str(poi['name']) + '<br>' + ' '.join(poi['kwds'])
# popups.append(folium.IFrame(label, width=300, height=100))
#poi_layer = folium.FeatureGroup(name='pois')
#poi_layer.add_child(MarkerCluster(locations=coords, popups=popups))
#m.add_child(poi_layer)
# folium.GeoJson(pois, tooltip=folium.features.GeoJsonTooltip(fields=['id', 'name', 'kwds'],
# aliases=['ID:', 'Name:', 'Keywords:'])).add_to(m)
if show_bbox:
folium.GeoJson(bb).add_to(m)
folium.LatLngPopup().add_to(m)
return m | cb8e2a32b62ca364e54c90b94d2d4c3da74fc12a | 5,038 |
def read_data_file():
"""
    Read and return the contents of the file named by `datafilename`.
    """
    with open(datafilename, 'r') as datafile:
        old = datafile.read()
    return old
def squared_loss(y_hat, y):
"""均方损失。"""
return (y_hat - y.reshape(y_hat.shape))**2 / 2 | 4f796ed753de6ed77de50578271a4eca04fc1ffb | 5,040 |
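A minimal sketch of squared_loss with NumPy arrays (any array type supporting reshape works).
import numpy as np

y_hat = np.array([2.5, 0.0, 2.0])
y = np.array([3.0, -0.5, 2.0])
print(squared_loss(y_hat, y))   # -> [0.125 0.125 0.   ]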
import re
import unidecode
def normalize_string(string, lowercase=True, convert_arabic_numerals=True):
"""
Normalize the given string for matching.
Example::
>>> normalize_string("tétéà 14ème-XIV, foobar")
'tetea XIVeme xiv, foobar'
>>> normalize_string("tétéà 14ème-XIV, foobar", False)
'tetea 14eme xiv, foobar'
:param string: The string to normalize.
:param lowercase: Whether to convert string to lowercase or not. Defaults
to ``True``.
:param convert_arabic_numerals: Whether to convert arabic numerals to roman
ones. Defaults to ``True``.
:return: The normalized string.
"""
# ASCIIfy the string
string = unidecode.unidecode(string)
# Replace any non-alphanumeric character by space
# Keep some basic punctuation to keep syntaxic units
string = re.sub(r"[^a-zA-Z0-9,;:]", " ", string)
# Convert to lowercase
if lowercase:
string = string.lower()
# Convert arabic numbers to roman numbers
if convert_arabic_numerals:
string = convert_arabic_to_roman_in_text(string)
# Collapse multiple spaces, replace tabulations and newlines by space
string = re.sub(r"\s+", " ", string)
# Trim whitespaces
string = string.strip()
return string | b6772b47f4cc049e09d37c97710a4f37e5a50a7c | 5,041 |
from typing import Sequence
def find_sub_expression(
expression: Sequence[SnailfishElement],
) -> Sequence[SnailfishElement]:
"""Finds the outermost closed sub-expression in a subsequence."""
num_open_braces = 1
pos = 0
while num_open_braces > 0:
pos += 1
if expression[pos] == "[":
num_open_braces += 1
elif expression[pos] == "]":
num_open_braces -= 1
return expression[: pos + 1] | 11d91c38c66fc8c9ce1e58297fcfbb290c18b968 | 5,042 |
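A usage sketch of find_sub_expression with a hand-tokenised expression; the initial open-brace count of 1 accounts for the leading '['.
tokens = ["[", 1, "[", 2, 3, "]", "]", 4, "]"]
print(find_sub_expression(tokens))   # -> ['[', 1, '[', 2, 3, ']', ']']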
import importlib
def get_rec_attr(obj, attrstr):
"""Get attributes and do so recursively if needed"""
if attrstr is None:
return None
if "." in attrstr:
attrs = attrstr.split('.', maxsplit=1)
if hasattr(obj, attrs[0]):
obj = get_rec_attr(getattr(obj, attrs[0]), attrs[1])
else:
try:
obj = get_rec_attr(importlib.import_module(obj.__name__ + "." + attrs[0]), attrs[1])
except ImportError:
raise
else:
if hasattr(obj, attrstr):
obj = getattr(obj, attrstr)
return obj | a6831d48c79b8c58542032385a5c56373fd45321 | 5,044 |
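A small example of get_rec_attr resolving a dotted attribute path.
import types

ns = types.SimpleNamespace(inner=types.SimpleNamespace(value=42))
print(get_rec_attr(ns, "inner.value"))   # -> 42
print(get_rec_attr(ns, None))            # -> None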
def _get_message_mapping(types: dict) -> dict:
"""
Return a mapping with the type as key, and the index number.
:param types: a dictionary of types with the type name, and the message type
:type types: dict
:return: message mapping
:rtype: dict
"""
message_mapping = {}
entry_index = 2 # based on the links found, they normally start with 2?
for _type, message in types.items():
message_mapping[_type] = entry_index
entry_index += 1
return message_mapping | a098e0386aa92c41d4d404154b0b2a87ce9365ce | 5,045 |
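An illustration of _get_message_mapping with a hypothetical types dict; indices start at 2, as noted in the comment above.
types = {"temperature": "sensor", "humidity": "sensor", "motion": "binary_sensor"}
print(_get_message_mapping(types))
# -> {'temperature': 2, 'humidity': 3, 'motion': 4}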
def cd(path):
"""
Change location to the provided path.
:param path: wlst directory to which to change location
:return: cmo object reference of the new location
:raises: PyWLSTException: if a WLST error occurs
"""
_method_name = 'cd'
_logger.finest('WLSDPLY-00001', path, class_name=_class_name, method_name=_method_name)
try:
result = wlst.cd(path)
except (wlst.WLSTException, offlineWLSTException), e:
raise exception_helper.create_pywlst_exception('WLSDPLY-00002', path, _get_exception_mode(e),
_format_exception(e), error=e)
_logger.finest('WLSDPLY-00003', path, result, class_name=_class_name, method_name=_method_name)
return result | fbb8d9ac0a9a4c393d06d0c15bfd15154b0a5c0a | 5,047 |
def plt_roc_curve(y_true, y_pred, classes, writer, total_iters):
"""
    :param y_true: one-hot ground-truth labels, e.g. [[1,0,0,0,0], [0,1,0,0,0], ...]
    :param y_pred: predicted per-class probabilities, e.g. [[0.34, 0.2, 0.1, ...], ...]
    :param classes: list of class names
    :return: list of per-class AUC values
"""
fpr = {}
tpr = {}
roc_auc = {}
roc_auc_res = []
n_classes = len(classes)
for i in range(n_classes):
fpr[classes[i]], tpr[classes[i]], _ = roc_curve(y_true[:, i], y_pred[:, i])
roc_auc[classes[i]] = auc(fpr[classes[i]], tpr[classes[i]])
roc_auc_res.append(roc_auc[classes[i]])
fig = plt.figure()
lw = 2
plt.plot(fpr[classes[i]], tpr[classes[i]], color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[classes[i]])
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic beat {}'.format(classes[i]))
plt.legend(loc="lower right")
writer.add_figure('test/roc_curve_beat_{}'.format(classes[i]), fig, total_iters)
plt.close()
fig.clf()
fig.clear()
return roc_auc_res | 5e02d83f5a7cd4e8c8abbc3afe89c25271e3944e | 5,048 |
def get_deps(sentence_idx: int, graph: DependencyGraph):
"""Get the indices of the dependants of the word at index sentence_idx
from the provided DependencyGraph"""
return list(chain(*graph.nodes[sentence_idx]['deps'].values())) | 9eb00fc5719cc1fddb22ea457cc6b49a385eb51d | 5,049 |
import functools
def incr(func):
"""
Increment counter
"""
@functools.wraps(func)
def wrapper(self):
# Strip off the "test_" from the function name
name = func.__name__[5:]
def _incr(counter, num):
salt.utils.process.appendproctitle("test_{}".format(name))
for _ in range(0, num):
counter.value += 1
attrname = "incr_" + name
setattr(self, attrname, _incr)
self.addCleanup(delattr, self, attrname)
return wrapper | 036ae9eac9d223a34871737a7294fad027c2d3c9 | 5,050 |
import operator
def index(a: protocols.SupportsIndex) -> int:
"""
Return _a_ converted to an integer. Equivalent to a.__index__().
Example:
>>> class Index:
... def __index__(self) -> int:
... return 0
>>> [1][Index()]
1
Args:
a:
"""
return operator.index(a) | 52f2fbdb8d65b12cb53761647b2c13d3cb368272 | 5,051 |
def col_to_num(col_str):
""" Convert base26 column string to number. """
expn = 0
col_num = 0
for char in reversed(col_str):
col_num += (ord(char) - ord('A') + 1) * (26 ** expn)
expn += 1
return col_num | d6bb00d3ef77c48338df635a254cf3ca5503bb73 | 5,052 |
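Expected conversions for the base-26 column helper.
assert col_to_num('A') == 1
assert col_to_num('Z') == 26
assert col_to_num('AA') == 27
assert col_to_num('AZ') == 52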
def process_chunk(chunk, verbose=False):
"""Return a tuple of chunk kind, task-create links, task-create times, task-leave times and the chunk's graph"""
# Make function for looking up event attributes
get_attr = attr_getter(chunk.attr)
# Unpack events from chunk
(_, (first_event, *events, last_event)), = chunk.items()
if verbose and len(events) > 0:
print(chunk)
# Make the graph representing this chunk
g = ig.Graph(directed=True)
prior_node = g.add_vertex(event=first_event)
# Used to save taskgroup-enter event to match to taskgroup-leave event
taskgroup_enter_event = None
# Match master-enter event to corresponding master-leave
master_enter_event = first_event if get_attr(first_event, 'region_type') == 'master' else None
if chunk.kind == 'parallel':
parallel_id = get_attr(first_event, 'unique_id')
prior_node["parallel_sequence_id"] = (parallel_id, get_attr(first_event, 'endpoint'))
task_create_nodes = deque()
task_links = deque()
task_crt_ts = deque()
task_leave_ts = deque()
if type(first_event) is Enter and get_attr(first_event, 'region_type') in ['initial_task']:
task_crt_ts.append((get_attr(first_event, 'unique_id'), first_event.time))
k = 1
for event in chain(events, (last_event,)):
if get_attr(event, 'region_type') in ['implicit_task']:
if type(event) is Enter:
task_links.append((get_attr(event, 'encountering_task_id'), get_attr(event, 'unique_id')))
task_crt_ts.append((get_attr(event, 'unique_id'), event.time))
elif type(event) is Leave:
task_leave_ts.append((get_attr(event, 'unique_id'), event.time))
continue
# The node representing this event
node = g.add_vertex(event=event)
# Add task-leave time
if type(event) is Leave and get_attr(event, 'region_type') == 'explicit_task':
task_leave_ts.append((get_attr(event, 'unique_id'), event.time))
# Add task links and task crt ts
if (type(event) is Enter and get_attr(event, 'region_type') == 'implicit_task') \
or (type(event) is ThreadTaskCreate):
task_links.append((get_attr(event, 'encountering_task_id'), get_attr(event, 'unique_id')))
task_crt_ts.append((get_attr(event, 'unique_id'), event.time))
# Match taskgroup-enter/-leave events
if get_attr(event, 'region_type') in ['taskgroup']:
if type(event) is Enter:
taskgroup_enter_event = event
elif type(event) is Leave:
if taskgroup_enter_event is None:
raise ValueError("taskgroup-enter event was None")
node['taskgroup_enter_event'] = taskgroup_enter_event
taskgroup_enter_event = None
# Match master-enter/-leave events
if get_attr(event, 'region_type') in ['master']:
if type(event) is Enter:
master_enter_event = event
elif type(event) is Leave:
if master_enter_event is None:
raise ValueError("master-enter event was None")
node['master_enter_event'] = master_enter_event
master_enter_event = None
# Label nodes in a parallel chunk by their position for easier merging
if (chunk.kind == 'parallel'
and type(event) is not ThreadTaskCreate
and get_attr(event, 'region_type') != 'master'):
node["parallel_sequence_id"] = (parallel_id, k)
k += 1
if get_attr(event, 'region_type') == 'parallel':
# Label nested parallel regions for easier merging...
if event is not last_event:
node["parallel_sequence_id"] = (get_attr(event, 'unique_id'), get_attr(event, 'endpoint'))
# ... but distinguish from a parallel chunk's terminating parallel-end event
else:
node["parallel_sequence_id"] = (parallel_id, get_attr(event, 'endpoint'))
# Add edge except for (single begin -> single end) and (parallel N begin -> parallel N end)
if events_bridge_region(prior_node['event'], node['event'], ['single_executor', 'single_other', 'master'], get_attr) \
or (events_bridge_region(prior_node['event'], node['event'], ['parallel'], get_attr)
and get_attr(node['event'], 'unique_id') == get_attr(prior_node['event'], 'unique_id')):
pass
else:
g.add_edge(prior_node, node)
# For task_create add dummy nodes for easier merging
if type(event) is ThreadTaskCreate:
node['task_cluster_id'] = (get_attr(event, 'unique_id'), 'enter')
dummy_node = g.add_vertex(event=event, task_cluster_id=(get_attr(event, 'unique_id'), 'leave'))
task_create_nodes.append(dummy_node)
continue
elif len(task_create_nodes) > 0:
task_create_nodes = deque()
prior_node = node
if chunk.kind == 'explicit_task' and len(events) == 0:
g.delete_edges([0])
# Require at least 1 edge between start and end nodes if there are no internal nodes, except for empty explicit
# task chunks
if chunk.kind != "explicit_task" and len(events) == 0 and g.ecount() == 0:
g.add_edge(g.vs[0], g.vs[1])
return chunk.kind, task_links, task_crt_ts, task_leave_ts, g | f2430377bc592b2a317b6db627cc39c185f64177 | 5,053 |
def delazi_wgs84(lat1, lon1, lat2, lon2):
"""delazi_wgs84(double lat1, double lon1, double lat2, double lon2)"""
return _Math.delazi_wgs84(lat1, lon1, lat2, lon2) | 3ced7e7dc3fd8dd7ced621a536c43bfb9062d89d | 5,054 |
def clipped_zoom(x: np.ndarray, zoom_factor: float) -> np.ndarray:
"""
Helper function for zoom blur.
Parameters
----------
x
Instance to be perturbed.
zoom_factor
Zoom strength.
Returns
-------
Cropped and zoomed instance.
"""
h = x.shape[0]
ch = int(np.ceil(h / float(zoom_factor))) # ceil crop height(= crop width)
top = (h - ch) // 2
x = zoom(x[top:top + ch, top:top + ch], (zoom_factor, zoom_factor, 1), order=1)
trim_top = (x.shape[0] - h) // 2 # trim off any extra pixels
return x[trim_top:trim_top + h, trim_top:trim_top + h] | befbe10493bd4acc63c609432c1c00ac3eeab652 | 5,055 |
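A shape check for clipped_zoom; the `zoom` and `np` names used in the body are assumed to come from scipy.ndimage and NumPy.
import numpy as np
from scipy.ndimage import zoom  # assumed source of the `zoom` used above

img = np.random.rand(32, 32, 3).astype(np.float32)
out = clipped_zoom(img, zoom_factor=1.5)
print(out.shape)   # -> (32, 32, 3): the zoomed centre crop keeps the original size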
def NextFchunk(ea):
"""
Get next function chunk
@param ea: any address
@return: the starting address of the next function chunk or BADADDR
@note: This function enumerates all chunks of all functions in the database
"""
func = idaapi.get_next_fchunk(ea)
if func:
return func.startEA
else:
return BADADDR | b70136da02d6b689fdc3ce7e946aeff87841cb46 | 5,056 |
def share_to_group(request, repo, group, permission):
"""Share repo to group with given permission.
"""
repo_id = repo.id
group_id = group.id
from_user = request.user.username
if is_org_context(request):
org_id = request.user.org.org_id
group_repo_ids = seafile_api.get_org_group_repoids(org_id, group.id)
else:
group_repo_ids = seafile_api.get_group_repoids(group.id)
if repo.id in group_repo_ids:
return False
try:
if is_org_context(request):
org_id = request.user.org.org_id
seafile_api.add_org_group_repo(repo_id, org_id, group_id,
from_user, permission)
else:
seafile_api.set_group_repo(repo_id, group_id, from_user,
permission)
return True
except Exception as e:
logger.error(e)
return False | 20cdd294692a71e44a635f5fb9dd8ab3a77f95c4 | 5,057 |
def change_config(python, backend, cheatsheet, asciiart):
"""
Show/update configuration (Python, Backend, Cheatsheet, ASCIIART).
"""
asciiart_file = "suppress_asciiart"
cheatsheet_file = "suppress_cheatsheet"
python_file = 'PYTHON_MAJOR_MINOR_VERSION'
backend_file = 'BACKEND'
if asciiart is not None:
if asciiart:
delete_cache(asciiart_file)
console.print('[bright_blue]Enable ASCIIART![/]')
else:
touch_cache_file(asciiart_file)
console.print('[bright_blue]Disable ASCIIART![/]')
if cheatsheet is not None:
if cheatsheet:
delete_cache(cheatsheet_file)
console.print('[bright_blue]Enable Cheatsheet[/]')
        else:
touch_cache_file(cheatsheet_file)
console.print('[bright_blue]Disable Cheatsheet[/]')
if python is not None:
write_to_cache_file(python_file, python)
console.print(f'[bright_blue]Python default value set to: {python}[/]')
if backend is not None:
write_to_cache_file(backend_file, backend)
console.print(f'[bright_blue]Backend default value set to: {backend}[/]')
def get_status(file: str):
return "disabled" if check_if_cache_exists(file) else "enabled"
console.print()
console.print("[bright_blue]Current configuration:[/]")
console.print()
console.print(f"[bright_blue]* Python: {read_from_cache_file(python_file)}[/]")
console.print(f"[bright_blue]* Backend: {read_from_cache_file(backend_file)}[/]")
console.print(f"[bright_blue]* ASCIIART: {get_status(asciiart_file)}[/]")
console.print(f"[bright_blue]* Cheatsheet: {get_status(cheatsheet_file)}[/]")
console.print() | b76f40546981c66f8b068c12fa1c1701b532ee7f | 5,058 |
from subprocess import getoutput
def get_memcached_usage(socket=None):
"""
Returns memcached statistics.
:param socket: Path to memcached's socket file.
"""
cmd = 'echo \'stats\' | nc -U {0}'.format(socket)
output = getoutput(cmd)
curr_items = None
bytes_ = None
rows = output.split('\n')[:-1]
for row in rows:
row = row.split()
if row[1] == 'curr_items':
curr_items = int(row[2])
if row[1] == 'bytes':
bytes_ = int(row[2])
return (bytes_, curr_items) | fcabd77bbf0186498753a4630c50ed7fd900cf96 | 5,059 |
def dataset_config():
"""Return a DatasetConfig for testing."""
return hubs.DatasetConfig(factory=Dataset, flag=True) | 15d8c33e5706c07c03589adb945bdaee3b1dd18a | 5,060 |
import itertools
def combinations():
"""Produce all the combinations for different items."""
combined = itertools.combinations('ABC', r=2)
combined = [''.join(possibility) for possibility in combined]
return combined | 501060cf9c7de9b4b4453940e017ad30cec2f84f | 5,061 |
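For reference, the helper produces every unordered pair of the three items.
print(combinations())   # -> ['AB', 'AC', 'BC']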
def geo_exps_MD(n_nodes, radius, l_0, l_1, K=40, thinRatio=1,
gammas=10, max_iter=100, nSamp=50, Niid=1, seed=0):
"""Solves the Connected Subgraph Detection problem and calculates AUC using
Mirror Descent Optimisation for a random geometric graph.
Parameters
----------
n_nodes : int
Number of nodes for the random graph.
radius : float
Distance threshold value.
l_0 : float
Base rate.
l_1 : float
Anomalous rate.
K : int
Anomaly size.
thinRatio : float
Ratio of max semi axis length to min semi axis length. Determines if graph is an ellipsoid or a sphere.
gammas : int or np.array
Conductance rates.
max_iter : int
Number of iterations.
nSamp : int
Number of samples.
Niid : int
Number of iid runs.
seed : int
Random seed.
Returns
-------
scores_noise : np.array
List of shape (nSamp, gammas_n) with AUC scores of optimisation.
"""
graph = Geo_graph_3d(n_nodes=n_nodes, radius=radius, seed=seed)
A, pts = graph.Adj, graph.pos_array
if type(gammas) == int:
gammas = np.logspace(-3, np.log10(2), gammas)
gammas_n = gammas.shape[0]
yy, S = genMeasurements(pts, K, l_0, l_1, nSamp, thinRatio)
s = S[0]
scores_noise = np.zeros((Niid, nSamp, gammas_n), dtype='float32')
for niid in range(Niid):
print('No of iid run: {}'.format(niid+1))
scores = np.zeros((nSamp, gammas_n))
with trange(nSamp, ncols=100) as tqdm:
for ns in tqdm:
ys = yy[:,ns]
c = ys / np.linalg.norm(ys) * np.sqrt(ys.shape[0])
C = c.reshape(-1,1) @ c.reshape(1,-1)
for gind in range(gammas_n):
tqdm.set_description('MD || Run = {} gamma = {:2f}'.format(niid+1, gammas[gind]))
M = runOpt_md(A=A, C=C, gamma=gammas[gind], s=s, max_iter=max_iter)
scores[ns, gind] = np.trace(ys.reshape(-1,1) @ ys.reshape(1,-1) @ M)
tqdm.set_postfix(Loss='{:8f}'.format(np.trace(C.T @ M)))
scores_noise[niid] = scores
return scores_noise.mean(0) | 9e3831975915b6dffecbb1142dbd01cd26a255ca | 5,063 |
from typing import Tuple
def validate_sig_integrity(signer_info: cms.SignedData,
cert: x509.Certificate,
expected_content_type: str,
actual_digest: bytes) -> Tuple[bool, bool]:
"""
Validate the integrity of a signature for a particular signerInfo object
inside a CMS signed data container.
.. warning::
This function does not do any trust checks, and is considered
"dangerous" API because it is easy to misuse.
:param signer_info:
A :class:`cms.SignerInfo` object.
:param cert:
The signer's certificate.
.. note::
This function will not attempt to extract certificates from
the signed data.
:param expected_content_type:
The expected value for the content type attribute (as a Python string,
see :class:`cms.ContentType`).
:param actual_digest:
The actual digest to be matched to the message digest attribute.
:return:
A tuple of two booleans. The first indicates whether the provided
digest matches the value in the signed attributes.
The second indicates whether the signature of the digest is valid.
"""
signature_algorithm: cms.SignedDigestAlgorithm = \
signer_info['signature_algorithm']
digest_algorithm_obj = signer_info['digest_algorithm']
md_algorithm = digest_algorithm_obj['algorithm'].native
signature = signer_info['signature'].native
# signed_attrs comes with some context-specific tagging.
# We need to re-tag it with a universal SET OF tag.
signed_attrs = signer_info['signed_attrs'].untag()
if not signed_attrs:
embedded_digest = None
prehashed = True
signed_data = actual_digest
else:
prehashed = False
# check the CMSAlgorithmProtection attr, if present
try:
cms_algid_protection, = find_cms_attribute(
signed_attrs, 'cms_algorithm_protection'
)
signed_digest_algorithm = \
cms_algid_protection['digest_algorithm'].native
if signed_digest_algorithm != digest_algorithm_obj.native:
raise SignatureValidationError(
"Digest algorithm does not match CMS algorithm protection "
"attribute."
)
signed_sig_algorithm = \
cms_algid_protection['signature_algorithm'].native
if signed_sig_algorithm is None:
raise SignatureValidationError(
"CMS algorithm protection attribute not valid for signed "
"data"
)
elif signed_sig_algorithm != signature_algorithm.native:
raise SignatureValidationError(
"Signature mechanism does not match CMS algorithm "
"protection attribute."
)
except KeyError:
pass
except SignatureValidationError:
raise
except ValueError:
raise SignatureValidationError(
'Multiple CMS protection attributes present'
)
try:
content_type, = find_cms_attribute(signed_attrs, 'content_type')
content_type = content_type.native
if content_type != expected_content_type:
raise SignatureValidationError(
f'Content type {content_type} did not match expected value '
f'{expected_content_type}'
)
except SignatureValidationError:
raise
except (KeyError, ValueError):
raise SignatureValidationError(
'Content type not found in signature, or multiple content-type '
'attributes present.'
)
try:
embedded_digest, = find_cms_attribute(
signed_attrs, 'message_digest'
)
embedded_digest = embedded_digest.native
except (KeyError, ValueError):
raise SignatureValidationError(
'Message digest not found in signature, or multiple message '
'digest attributes present.'
)
signed_data = signed_attrs.dump()
try:
_validate_raw(
signature, signed_data, cert, signature_algorithm, md_algorithm,
prehashed=prehashed
)
valid = True
except InvalidSignature:
valid = False
intact = (
actual_digest == embedded_digest
if embedded_digest is not None else valid
)
return intact, valid | 36e64173d8612c9ca3e95cb0566222140c56c17d | 5,064 |
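# A minimal usage sketch for validate_sig_integrity, assuming `signed_data` is an
# already-parsed cms.SignedData structure and `payload` holds the signed bytes.
# Taking the signer certificate straight from the embedded certificate set is for
# illustration only; real code must resolve and trust-check the certificate itself.
import hashlib

signer_info = signed_data['signer_infos'][0]
md_name = signer_info['digest_algorithm']['algorithm'].native
payload_digest = getattr(hashlib, md_name)(payload).digest()
signer_cert = signed_data['certificates'][0].chosen
intact, valid = validate_sig_integrity(
    signer_info, signer_cert,
    expected_content_type='data', actual_digest=payload_digest
)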
def linemod_dpt(path):
"""
read a depth image
    @return float32 image of distance in [m], decoded from a uint16 depth map in [mm]"""
    with open(path, "rb") as dpt:
        rows = np.frombuffer(dpt.read(4), dtype=np.int32)[0]
        cols = np.frombuffer(dpt.read(4), dtype=np.int32)[0]
        return (np.fromfile(dpt, dtype=np.uint16).reshape((rows, cols)) / 1000.).astype(np.float32) | e2538520ba3bd82ada339b816c4d1a067bbd4000 | 5,065
from typing import Iterable
from typing import Optional
def findNode(nodes: Iterable[AstNode], name: str) -> Optional[SExpr]:
"""
Finds a node with given name in a list of nodes
"""
for node in nodes:
if isinstance(node, Atom):
continue
if len(node.items) == 0:
continue
nameNode = node.items[0]
if isinstance(nameNode, Atom) and nameNode.value == name:
return node
return None | 5b3f53e98269e6d00cb2dc11dd75d81dfed98f30 | 5,066 |
def search(keywords=None, servicetype=None, waveband=None):
"""
execute a simple query to the RegTAP registry.
Parameters
----------
keywords : list of str
keyword terms to match to registry records.
Use this parameter to find resources related to a
particular topic.
servicetype : str
the service type to restrict results to.
Allowed values include
'conesearch',
'sia' ,
'ssa',
'slap',
'tap'
waveband : str
the name of a desired waveband; resources returned
will be restricted to those that indicate as having
data in that waveband. Allowed values include
'radio',
'millimeter',
'infrared',
'optical',
'uv',
'euv',
'x-ray'
'gamma-ray'
Returns
-------
RegistryResults
a container holding a table of matching resource (e.g. services)
See Also
--------
RegistryResults
"""
if not any((keywords, servicetype, waveband)):
raise dalq.DALQueryError(
"No search parameters passed to registry search")
joins = set(["rr.interface"])
joins = set(["rr.resource"])
wheres = list()
if keywords:
joins.add("rr.res_subject")
joins.add("rr.resource")
wheres.extend(["({})".format(" AND ".join("""
(
1=ivo_nocasematch(res_subject, '%{0}%') OR
1=ivo_hasword(res_description, '{0}') OR
1=ivo_hasword(res_title, '{0}')
)""".format(tap.escape(keyword)) for keyword in keywords
))])
if servicetype:
servicetype = _service_type_map.get(servicetype, servicetype)
joins.add("rr.interface")
wheres.append("standard_id LIKE 'ivo://ivoa.net/std/{}%'".format(
tap.escape(servicetype)))
wheres.append("intf_type = 'vs:paramhttp'")
else:
wheres.append("""(
standard_id LIKE 'ivo://ivoa.net/std/conesearch%' OR
standard_id LIKE 'ivo://ivoa.net/std/sia%' OR
standard_id LIKE 'ivo://ivoa.net/std/ssa%' OR
standard_id LIKE 'ivo://ivoa.net/std/slap%' OR
standard_id LIKE 'ivo://ivoa.net/std/tap%'
)""")
if waveband:
joins.add("rr.resource")
wheres.append("1 = ivo_hashlist_has('{}', waveband)".format(
tap.escape(waveband)))
query = """SELECT DISTINCT rr.interface.*, rr.capability.*, rr.resource.*
FROM rr.capability
{}
{}
""".format(
''.join("NATURAL JOIN {} ".format(j) for j in joins),
("WHERE " if wheres else "") + " AND ".join(wheres)
)
service = tap.TAPService(REGISTRY_BASEURL)
query = tap.TAPQuery(service.baseurl, query, maxrec=service.hardlimit)
query.RESULTS_CLASS = RegistryResults
return query.execute() | 855e00f2a001995de40beddb6334bdd8ddb8be77 | 5,067 |
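# A minimal usage sketch, assuming network access to the RegTAP registry endpoint;
# the keyword, service type and waveband values are illustrative only.
results = search(keywords=["quasar"], servicetype="sia", waveband="optical")
print(len(results), "matching resources found")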
def all_are_independent_from_all(program, xs, ys):
"""
    Returns true iff all xs are statistically independent from all ys, where the xs are from the current iteration
and the ys are from the previous iteration.
"""
for x in xs:
if not is_independent_from_all(program, x, ys):
return False
return True | 50f091530e322b741465b222da70080463e4f142 | 5,069 |
def is_str_str_dict(x):
"""Tests if something is a str:str dictionary"""
return isinstance(x, dict) and all(
isinstance(k, str) and isinstance(v, str) for k, v in x.items()
) | ce6230714c0526764f2cc67e4dedf598acd28169 | 5,071 |
def _ensureListLike(item):
"""
Return the item if it is a list or tuple, otherwise add it to a list and
return that.
"""
return item if (isinstance(item, list) or isinstance(item, tuple)) \
else [item] | 1c602a1fcf8dd6a5b4583264e63e38747f5b0d50 | 5,072 |
import io
def get_file_from_gitlab(gitpkg, path, ref="master"):
"""Retrieves a file from a Gitlab repository, returns a (StringIO) file."""
return io.StringIO(gitpkg.files.get(file_path=path, ref=ref).decode()) | 7eccad01a538bdd99651b0792aff150f73e82cdd | 5,073 |
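# A short usage sketch with python-gitlab; the server URL, token and project path
# below are placeholders rather than values taken from the snippet above.
import gitlab

gl = gitlab.Gitlab("https://gitlab.example.com", private_token="<token>")
project = gl.projects.get("group/project")
readme = get_file_from_gitlab(project, "README.md")
print(readme.read())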
def tsne(x, no_dims=2, initial_dims=50, perplexity=30.0, max_iter=1000):
"""Runs t-SNE on the dataset in the NxD array x
to reduce its dimensionality to no_dims dimensions.
The syntaxis of the function is Y = tsne.tsne(x, no_dims, perplexity),
where x is an NxD NumPy array.
"""
# Check inputs
if isinstance(no_dims, float):
print("Error: array x should have type float.")
return -1
if round(no_dims) != no_dims:
print("Error: number of dimensions should be an integer.")
return -1
    # Initialize parameters and variables
x = pca(x, initial_dims).real
(n, d) = x.shape
initial_momentum = 0.5
final_momentum = 0.8
eta = 500
min_gain = 0.01
y = np.random.randn(n, no_dims)
dy = np.zeros((n, no_dims))
iy = np.zeros((n, no_dims))
gains = np.ones((n, no_dims))
    # Symmetrize and normalize the pairwise affinities
P = seach_prob(x, 1e-5, perplexity)
P = P + np.transpose(P)
P = P / np.sum(P)
# early exaggeration
P = P * 4
P = np.maximum(P, 1e-12)
# Run iterations
for iter in range(max_iter):
# Compute pairwise affinities
sum_y = np.sum(np.square(y), 1)
num = 1 / (1 + np.add(np.add(-2 * np.dot(y, y.T), sum_y).T, sum_y))
num[range(n), range(n)] = 0
Q = num / np.sum(num)
Q = np.maximum(Q, 1e-12)
# Compute gradient
PQ = P - Q
for i in range(n):
dy[i,:] = np.sum(np.tile(PQ[:,i] * num[:,i], (no_dims, 1)).T * (y[i,:] - y), 0)
# Perform the update
if iter < 20:
momentum = initial_momentum
else:
momentum = final_momentum
gains = (gains + 0.2) * ((dy > 0) != (iy > 0)) + (gains * 0.8) * ((dy > 0) == (iy > 0))
gains[gains < min_gain] = min_gain
iy = momentum * iy - eta * (gains * dy)
y = y + iy
y = y - np.tile(np.mean(y, 0), (n, 1))
# Compute current value of cost function
if (iter + 1) % 100 == 0:
if iter > 100:
C = np.sum(P * np.log(P / Q))
else:
C = np.sum( P/4 * np.log( P/4 / Q))
print("Iteration ", (iter + 1), ": error is ", C)
# Stop lying about P-values
if iter == 100:
P = P / 4
print("finished training!")
return y | 348c83048190830dd10982e9fa1426db06e983fc | 5,074 |
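# A small usage sketch: embed 200 random 50-dimensional points into 2-D.
# The iteration count is reduced here purely to keep the illustration quick.
X = np.random.randn(200, 50)
Y = tsne(X, no_dims=2, initial_dims=50, perplexity=30.0, max_iter=300)
print(Y.shape)  # (200, 2)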
def add_corp():
"""
    Page for adding investment-advisor information; lets the user add an advisor manually.
:by zhoushaobo
:return:
"""
if request.method == 'GET':
fof_list = cache.get(str(current_user.id))
return render_template("add_corp.html", fof_list=fof_list)
if request.method == 'POST':
name = request.form['name']
alias = request.form['alias']
register_capital = request.form['register_capital']
status = request.form['status']
site = request.form['site']
desc = request.form['description']
corp = Invest_corp(name=name, alias=alias, review_status=int(status), address=site, description=desc,
registered_capital=register_capital)
db.session.add(corp)
db.session.commit()
return redirect(url_for('f_app.invest_corp')) | 9d70ac010bdf5a3102635eaf1acf75f43689f82e | 5,075 |
def nll_loss(output: Tensor, target: Tensor):
"""
Negative log likelihood loss function.
## Parameters
output: `Tensor` - model's prediction
target: `Target` - training sample targets
## Example usage
```python
from beacon.tensor import Tensor
from beacon.functional import functions as F
output = Tensor([[0.2, 0.7, 0.1], [0.4, 0.45, 0.15]], requires_grad=True)
target = Tensor([[0, 1, 0], [1, 0, 0]], requires_grad=True)
loss = F.nll_loss(output, target)
```
"""
output, target = fn.to_tensor(output), fn.to_tensor(target)
output = fn.clip(output, 1e-7, 1 - 1e-7)
return -target * fn.log(output) | 339ef1300c42ad6923e044e7011615b934923e23 | 5,076 |
from typing import List
from typing import Optional
async def album_upload(sessionid: str = Form(...),
files: List[UploadFile] = File(...),
caption: str = Form(...),
usertags: Optional[List[Usertag]] = Form([]),
location: Optional[Location] = Form(None),
clients: ClientStorage = Depends(get_clients)
) -> Media:
"""Upload album to feed
"""
cl = clients.get(sessionid)
return await album_upload_post(
cl, files, caption=caption,
usertags=usertags,
location=location) | 303aba7ee57e61082197fe18330663c5c0c51c76 | 5,077 |
def mocked_requests_get(*args, **kwargs):
"""Mock requests.get invocations."""
class MockResponse:
"""Class to represent a mocked response."""
def __init__(self, json_data, status_code):
"""Initialize the mock response class."""
self.json_data = json_data
self.status_code = status_code
def json(self):
"""Return the json of the response."""
return self.json_data
if str(args[0]).startswith('https://api.ring.com/clients_api/session'):
return MockResponse({
"profile": {
"authentication_token": "12345678910",
"email": "[email protected]",
"features": {
"chime_dnd_enabled": False,
"chime_pro_enabled": True,
"delete_all_enabled": True,
"delete_all_settings_enabled": False,
"device_health_alerts_enabled": True,
"floodlight_cam_enabled": True,
"live_view_settings_enabled": True,
"lpd_enabled": True,
"lpd_motion_announcement_enabled": False,
"multiple_calls_enabled": True,
"multiple_delete_enabled": True,
"nw_enabled": True,
"nw_larger_area_enabled": False,
"nw_user_activated": False,
"owner_proactive_snoozing_enabled": True,
"power_cable_enabled": False,
"proactive_snoozing_enabled": False,
"reactive_snoozing_enabled": False,
"remote_logging_format_storing": False,
"remote_logging_level": 1,
"ringplus_enabled": True,
"starred_events_enabled": True,
"stickupcam_setup_enabled": True,
"subscriptions_enabled": True,
"ujet_enabled": False,
"video_search_enabled": False,
"vod_enabled": False},
"first_name": "Home",
"id": 999999,
"last_name": "Assistant"}
}, 201)
elif str(args[0])\
.startswith("https://api.ring.com/clients_api/ring_devices"):
return MockResponse({
"authorized_doorbots": [],
"chimes": [
{
"address": "123 Main St",
"alerts": {"connection": "online"},
"description": "Downstairs",
"device_id": "abcdef123",
"do_not_disturb": {"seconds_left": 0},
"features": {"ringtones_enabled": True},
"firmware_version": "1.2.3",
"id": 999999,
"kind": "chime",
"latitude": 12.000000,
"longitude": -70.12345,
"owned": True,
"owner": {
"email": "[email protected]",
"first_name": "Marcelo",
"id": 999999,
"last_name": "Assistant"},
"settings": {
"ding_audio_id": None,
"ding_audio_user_id": None,
"motion_audio_id": None,
"motion_audio_user_id": None,
"volume": 2},
"time_zone": "America/New_York"}],
"doorbots": [
{
"address": "123 Main St",
"alerts": {"connection": "online"},
"battery_life": 4081,
"description": "Front Door",
"device_id": "aacdef123",
"external_connection": False,
"features": {
"advanced_motion_enabled": False,
"motion_message_enabled": False,
"motions_enabled": True,
"people_only_enabled": False,
"shadow_correction_enabled": False,
"show_recordings": True},
"firmware_version": "1.4.26",
"id": 987652,
"kind": "lpd_v1",
"latitude": 12.000000,
"longitude": -70.12345,
"motion_snooze": None,
"owned": True,
"owner": {
"email": "[email protected]",
"first_name": "Home",
"id": 999999,
"last_name": "Assistant"},
"settings": {
"chime_settings": {
"duration": 3,
"enable": True,
"type": 0},
"doorbell_volume": 1,
"enable_vod": True,
"live_view_preset_profile": "highest",
"live_view_presets": [
"low",
"middle",
"high",
"highest"],
"motion_announcement": False,
"motion_snooze_preset_profile": "low",
"motion_snooze_presets": [
"none",
"low",
"medium",
"high"]},
"subscribed": True,
"subscribed_motions": True,
"time_zone": "America/New_York"}]
}, 200)
elif str(args[0]).startswith("https://api.ring.com/clients_api/doorbots"):
return MockResponse([{
"answered": False,
"created_at": "2017-03-05T15:03:40.000Z",
"events": [],
"favorite": False,
"id": 987654321,
"kind": "motion",
"recording": {"status": "ready"},
"snapshot_url": ""
}], 200) | 41a54452593cd23e8ea86f1fbdc0c5e92845482f | 5,078 |
def count_disordered(arr, size):
"""Counts the number of items that are out of the expected
order (monotonous increase) in the given list."""
counter = 0
state = {
"expected": next(item for item in range(size) if item in arr),
"checked": []
}
def advance_state():
state["expected"] += 1
while True:
in_arr = state["expected"] in arr
is_overflow = state["expected"] > size
not_checked = state["expected"] not in state["checked"]
if not_checked and (in_arr or is_overflow):
return
state["expected"] += 1
for val in arr:
if val == state["expected"]:
advance_state()
else:
counter += 1
state["checked"].append(val)
return counter | bb708e7d862ea55e81207cd7ee85e634675b3992 | 5,079 |
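# A worked example: in [0, 2, 1, 3] with size 4, only the element 2 appears before
# its expected turn, so exactly one item is counted as out of order.
print(count_disordered([0, 2, 1, 3], 4))  # 1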
def test_alignment():
"""Ensure A.M. cosine's peaks are aligned across joint slices."""
if skip_all:
return None if run_without_pytest else pytest.skip()
N = 1025
J = 7
Q = 16
Q_fr = 2
F = 4
# generate A.M. cosine ###################################################
f1, f2 = 8, 256
t = np.linspace(0, 1, N, 1)
a = (np.cos(2*np.pi * f1 * t) + 1) / 2
c = np.cos(2*np.pi * f2 * t)
x = a * c
# scatter ################################################################
for out_3D in (True, False):
for sampling_psi_fr in ('resample', 'exclude'):
if sampling_psi_fr == 'exclude' and out_3D:
continue # incompatible
for J_fr in (3, 5):
out_type = ('dict:array' if out_3D else
'dict:list') # for convenience
test_params = dict(out_3D=out_3D,
sampling_filters_fr=(sampling_psi_fr, 'resample'))
test_params_str = '\n'.join(f'{k}={v}' for k, v in
test_params.items())
jtfs = TimeFrequencyScattering1D(
J, N, Q, J_fr=J_fr, Q_fr=Q_fr, F=F, average=True, average_fr=True,
aligned=True, out_type=out_type, frontend=default_backend,
pad_mode='zero', pad_mode_fr='zero', **pad_kw, **test_params)
Scx = jtfs(x)
Scx = drop_batch_dim_jtfs(Scx)
Scx = jtfs_to_numpy(Scx)
# assert peaks share an index #################################
def max_row_idx(c):
coef = c['coef'] if 'list' in out_type else c
return np.argmax(np.sum(coef**2, axis=-1))
first_coef = Scx['psi_t * psi_f_up'][0]
mx_idx = max_row_idx(first_coef)
for pair in Scx:
if pair in ('S0', 'S1'): # joint only
continue
for i, c in enumerate(Scx[pair]):
mx_idx_i = max_row_idx(c)
assert abs(mx_idx_i - mx_idx) < 2, (
"{} != {} -- Scx[{}][{}]\n{}").format(
mx_idx_i, mx_idx, pair, i, test_params_str)
if J_fr == 3:
# assert not all J_pad_frs are same so test covers this case
assert_pad_difference(jtfs, test_params_str) | a0f664a153c1af5942d39d54c75bcb8a3b3b660a | 5,081 |
import base64
def request_text(photo_file, max_results=5):
"""
Request the Google service to find text in an image
:param photo_file: The filename (or path) of the image in a local directory
:param max_results: The requested maximum number of results
:return: A list of text entries found in the image
Note: The argument max_results does not modify the number of results for text detection
"""
credentials = GoogleCredentials.get_application_default()
service = discovery.build('vision', 'v1', credentials=credentials)
with open(photo_file, 'rb') as phf:
image_content = base64.b64encode(phf.read())
service_request = service.images().annotate(body={
'requests': [{'image': {'content': image_content.decode('UTF-8')},
'features': [{'type': 'TEXT_DETECTION', 'maxResults': max_results}]
}]
})
response = service_request.execute()
text_list = response['responses'][0].get('textAnnotations', None)
if text_list is None:
return []
else:
text_vec = map(lambda s: s['description'].strip().strip('\n'), text_list)
return text_vec | 3af646e81fb71f89ffab2a9f20f979cdbaaf29a6 | 5,082 |
def config(request):
"""render a ProsperConfig object for testing"""
return p_config.ProsperConfig(request.config.getini('app_cfg')) | 4222d7d2a56020883e0a196f4c531b44d2f50dd5 | 5,083 |
def flatatt(attrs):
"""
Convert a dictionary of attributes to a single string.
The returned string will contain a leading space followed by key="value",
XML-style pairs. It is assumed that the keys do not need
to be XML-escaped.
If the passed dictionary is empty, then return an empty string.
If the value passed is None writes only the attribute (eg. required)
"""
ret_arr = []
for k, v in attrs.items():
if v is None:
ret_arr.append(u' %s' % k)
else:
ret_arr.append(u' %s="%s"' % (k, conditional_escape(v)))
return u''.join(ret_arr) | 03175fd9b2d0574bd3634d43821e38177924ed0e | 5,084 |
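# A small usage example; note that conditional_escape (from django.utils.html in the
# original context) HTML-escapes the attribute values.
print(flatatt({'type': 'text', 'required': None, 'value': 'a&b'}))
# ' type="text" required value="a&amp;b"'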
import json
def json_io_dump(filename, data):
""" Dumps the the JSON data and returns it as a dictionary from filename
:arg filename <string> - Filename of json to point to
:arg data - The already formatted data to dump to JSON
"""
with open(filename, encoding='utf-8', mode='w') as json_file:
json.dump(data, json_file)
return True | e0ae7187ac29669330109ae39ebcac33c1e30ab6 | 5,085 |
import requests
import json
def get_restaurants(_lat, _lng):
"""緯度: lat 経度: lng"""
response = requests.get(URL.format(API_KEY, _lat, _lng))
result = json.loads(response.text)
lat_lng = []
for restaurant in result['results']['shop']:
lat = float(restaurant['lat'])
lng = float(restaurant['lng'])
lat_lng.append((lat, lng, restaurant['name']))
r = []
for lat, lng, name in lat_lng:
r2 = []
difference = (_lat - lat) * 3600
r2.append(int(difference * byou))
difference = (lng - _lng) * 3600
r2.append(int(difference * byou))
r2.append(name)
r.append(r2)
return r | 35884258210174cca0ffcf73dc3451dae07d5712 | 5,086 |
def geomfill_GetCircle(*args):
"""
:param TConv:
:type TConv: Convert_ParameterisationType
:param ns1:
:type ns1: gp_Vec
:param ns2:
:type ns2: gp_Vec
:param nplan:
:type nplan: gp_Vec
:param pt1:
:type pt1: gp_Pnt
:param pt2:
:type pt2: gp_Pnt
:param Rayon:
:type Rayon: float
:param Center:
:type Center: gp_Pnt
:param Poles:
:type Poles: TColgp_Array1OfPnt
:param Weigths:
:type Weigths: TColStd_Array1OfReal &
:rtype: void
:param TConv:
:type TConv: Convert_ParameterisationType
:param ns1:
:type ns1: gp_Vec
:param ns2:
:type ns2: gp_Vec
:param dn1w:
:type dn1w: gp_Vec
:param dn2w:
:type dn2w: gp_Vec
:param nplan:
:type nplan: gp_Vec
:param dnplan:
:type dnplan: gp_Vec
:param pts1:
:type pts1: gp_Pnt
:param pts2:
:type pts2: gp_Pnt
:param tang1:
:type tang1: gp_Vec
:param tang2:
:type tang2: gp_Vec
:param Rayon:
:type Rayon: float
:param DRayon:
:type DRayon: float
:param Center:
:type Center: gp_Pnt
:param DCenter:
:type DCenter: gp_Vec
:param Poles:
:type Poles: TColgp_Array1OfPnt
:param DPoles:
:type DPoles: TColgp_Array1OfVec
:param Weigths:
:type Weigths: TColStd_Array1OfReal &
:param DWeigths:
:type DWeigths: TColStd_Array1OfReal &
:rtype: bool
:param TConv:
:type TConv: Convert_ParameterisationType
:param ns1:
:type ns1: gp_Vec
:param ns2:
:type ns2: gp_Vec
:param dn1w:
:type dn1w: gp_Vec
:param dn2w:
:type dn2w: gp_Vec
:param d2n1w:
:type d2n1w: gp_Vec
:param d2n2w:
:type d2n2w: gp_Vec
:param nplan:
:type nplan: gp_Vec
:param dnplan:
:type dnplan: gp_Vec
:param d2nplan:
:type d2nplan: gp_Vec
:param pts1:
:type pts1: gp_Pnt
:param pts2:
:type pts2: gp_Pnt
:param tang1:
:type tang1: gp_Vec
:param tang2:
:type tang2: gp_Vec
:param Dtang1:
:type Dtang1: gp_Vec
:param Dtang2:
:type Dtang2: gp_Vec
:param Rayon:
:type Rayon: float
:param DRayon:
:type DRayon: float
:param D2Rayon:
:type D2Rayon: float
:param Center:
:type Center: gp_Pnt
:param DCenter:
:type DCenter: gp_Vec
:param D2Center:
:type D2Center: gp_Vec
:param Poles:
:type Poles: TColgp_Array1OfPnt
:param DPoles:
:type DPoles: TColgp_Array1OfVec
:param D2Poles:
:type D2Poles: TColgp_Array1OfVec
:param Weigths:
:type Weigths: TColStd_Array1OfReal &
:param DWeigths:
:type DWeigths: TColStd_Array1OfReal &
:param D2Weigths:
:type D2Weigths: TColStd_Array1OfReal &
:rtype: bool
"""
return _GeomFill.geomfill_GetCircle(*args) | f00ade1b203e819c6ae946c31c8c9821f2a79744 | 5,087 |
def dwconv3x3_block(in_channels,
out_channels,
stride,
padding=1,
dilation=1,
bias=False,
activation=(lambda: nn.ReLU(inplace=True)),
activate=True):
"""
3x3 depthwise version of the standard convolution block with ReLU6 activation.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int, default 1
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
bias : bool, default False
Whether the layer uses a bias vector.
activation : function or str or None, default nn.ReLU(inplace=True)
Activation function or name of activation function.
activate : bool, default True
Whether activate the convolution block.
"""
return conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
padding=padding,
dilation=dilation,
groups=out_channels,
bias=bias,
activation=activation,
activate=activate) | eb2330206510369f8d81d0fc58d2578cf212a1df | 5,088 |
import json
def predict() -> str:
"""
Creates route for model prediction for given number of inputs.
:return: predicted price
"""
try:
input_params = process_input(request.data)
print(input_params)
predictions = regressor.predict(input_params)
return json.dumps({"predicted_price": predictions.tolist()})
except (KeyError, json.JSONDecodeError, AssertionError):
return json.dumps({"error": "CHECK INPUT"}), 400
except:
return json.dumps({"error": "PREDICTION FAILED"}), 500 | ee58cbecf6d44a65f94cb3becd4d6cfe2d30ef30 | 5,089 |
def all_h2h_pairs_all_lanes(matches_df, file_name=''):
"""Produces all head to head win rates for all lane matchups -- even across different lanes
(eg. TOP_SOLO Renekton vs MID_SOLO Xerath)."""
df = pd.DataFrame()
lanes = dc.get_lanes_roles()
for lane1 in lanes:
print(lane1)
for lane2 in lanes:
print(lane1 + '_' + lane2)
temp = all_h2h_pairs_fixed_lane(matches_df, lane1, lane2)
df[lane1 + '_' + lane2 + '_wr'] = temp['win_rate']
df[lane1 + '_' + lane2 + '_gp'] = temp['games_played']
df[lane1 + '_' + lane2 + '_wins'] = temp['wins']
if file_name != '':
df.to_csv(file_name)
return df | f6ffd40985455515767c1aa6286dc9998b7bdb7d | 5,090 |
import requests
def reload_rules(testcase, rest_url):
"""
:param TestCase self: TestCase object
:param str rest_url: http://host:port
:rtype: dict
"""
resp = requests.get(rest_url + "/rest/reload").json()
print("Reload rules response: {}".format(resp))
testcase.assertEqual(resp.get("success"), True)
return resp | e747668ba8ad5f58f0307194b0008469dd3593c1 | 5,091 |
def encryptMessage(key: str, message: str) -> str:
"""Vigenère cipher encryption
Wrapper function that encrypts given message with given key using the Vigenère cipher.
Args:
key: String encryption key to encrypt with Vigenère cipher.
message: Message string to encrypt.
Returns:
Encrypted message string.
"""
return translateMessage(key, message, 'encrypt') | 428372d8443579ac691d43a5542de850b49966ce | 5,092 |
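# A usage sketch; the exact behaviour depends on translateMessage, which is not shown
# here, but with a textbook Vigenère implementation the classic test vector applies.
ciphertext = encryptMessage('LEMON', 'ATTACKATDAWN')
print(ciphertext)  # 'LXFOPVEFRNHR' for a standard Vigenère cipher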
def rae(label, pred):
"""computes the relative absolute error
(condensed using standard deviation formula)"""
#compute the root of the sum of the squared error
numerator = np.mean(np.abs(label - pred), axis=None)
#numerator = np.sum(np.abs(label - pred), axis = None)
#compute AE if we were to simply predict the average of the previous values
denominator = np.mean(np.abs(label - np.mean(label, axis=None)), axis=None)
#denominator = np.sum(np.abs(label - np.mean(label, axis = None)), axis=None)
return numerator / denominator | bff280ba243fd494347643233870524c008c7473 | 5,093 |
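# A worked example: predicting the constant 2 for labels [1, 2, 3] gives a mean
# absolute error of 2/3, the same as always predicting the label mean, so RAE = 1.
label = np.array([1.0, 2.0, 3.0])
pred = np.array([2.0, 2.0, 2.0])
print(rae(label, pred))  # 1.0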
def subset_sum(arr, target_sum, i, cache):
"""
Returns whether any subset(not contiguous) of the array has sum equal to target sum.
"""
if target_sum == 0:
return True, {}
if i < 0:
return False, {}
if target_sum in cache[i]:
return cache[i][target_sum]
# Either include this element or not!
sub_ans, sub_ans_indices = subset_sum(arr, target_sum, i - 1, cache)
if not sub_ans and target_sum >= arr[i]:
sub_ans, sub_ans_indices = subset_sum(arr, target_sum - arr[i], i - 1, cache)
sub_ans_indices = set(sub_ans_indices)
sub_ans_indices.add(i)
if not sub_ans:
sub_ans_indices = {}
cache[i][target_sum] = sub_ans, sub_ans_indices
return cache[i][target_sum] | aa90d7eb4ffa3a457a5f27733de56a82df450861 | 5,094 |
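# A usage sketch: the cache is a list holding one dict per array index, and the
# search starts from the last index.
arr = [3, 34, 4, 12, 5, 2]
cache = [{} for _ in arr]
found, indices = subset_sum(arr, 9, len(arr) - 1, cache)
print(found, sorted(indices))  # True [2, 4]  (arr[2] + arr[4] == 4 + 5 == 9)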
from typing import Type
import types
from typing import Optional
from typing import Text
def set_runtime_parameter_pb(
pb: pipeline_pb2.RuntimeParameter,
name: Text,
ptype: Type[types.Property],
default_value: Optional[types.Property] = None
) -> pipeline_pb2.RuntimeParameter:
"""Helper function to fill a RuntimeParameter proto.
Args:
pb: A RuntimeParameter proto to be filled in.
name: Name to be set at pb.name.
ptype: The Python type to be set at pb.type.
default_value: Optional. If provided, it will be pb.default_value.
Returns:
A RuntimeParameter proto filled with provided values.
"""
pb.name = name
if ptype == int:
pb.type = pipeline_pb2.RuntimeParameter.Type.INT
if default_value:
pb.default_value.int_value = default_value
elif ptype == float:
pb.type = pipeline_pb2.RuntimeParameter.Type.DOUBLE
if default_value:
pb.default_value.double_value = default_value
elif ptype == str:
pb.type = pipeline_pb2.RuntimeParameter.Type.STRING
if default_value:
pb.default_value.string_value = default_value
else:
raise ValueError("Got unsupported runtime parameter type: {}".format(ptype))
return pb | 4c6394f60774c42b0a6be8d55b57a67b8fc6b1d5 | 5,095 |
def get_loader(content_type):
"""Returns loader class for specified content type.
:type content_type: constants.ContentType
:param content_type: Content type.
:returns: Loader class for specified content type.
:raise ValueError: If no loader found for specified content type.
"""
for loader_cls in ALL_LOADERS:
content_types = loader_cls.content_types
if not isinstance(loader_cls.content_types, (list, tuple)):
content_types = [content_types]
if content_type in content_types:
return loader_cls
raise ValueError('Loader for content type "{0}" not found'
.format(content_type)) | 0d7e37ff17a48e8bed3a4abb7ce9734579fe9100 | 5,096 |
from typing import List
from typing import Dict
import torch
from torch import Tensor
import torch.nn.functional as F
def get_basis_script(max_degree: int,
use_pad_trick: bool,
spherical_harmonics: List[Tensor],
clebsch_gordon: List[List[Tensor]],
amp: bool) -> Dict[str, Tensor]:
"""
Compute pairwise bases matrices for degrees up to max_degree
:param max_degree: Maximum input or output degree
:param use_pad_trick: Pad some of the odd dimensions for a better use of Tensor Cores
:param spherical_harmonics: List of computed spherical harmonics
:param clebsch_gordon: List of computed CB-coefficients
:param amp: When true, return bases in FP16 precision
"""
basis = {}
idx = 0
# Double for loop instead of product() because of JIT script
for d_in in range(max_degree + 1):
for d_out in range(max_degree + 1):
key = f'{d_in},{d_out}'
K_Js = []
for freq_idx, J in enumerate(range(abs(d_in - d_out), d_in + d_out + 1)):
Q_J = clebsch_gordon[idx][freq_idx]
K_Js.append(torch.einsum('n f, k l f -> n l k', spherical_harmonics[J].float(), Q_J.float()))
basis[key] = torch.stack(K_Js, 2) # Stack on second dim so order is n l f k
if amp:
basis[key] = basis[key].half()
if use_pad_trick:
basis[key] = F.pad(basis[key], (0, 1)) # Pad the k dimension, that can be sliced later
idx += 1
return basis | 9afbe8973541b8b1562f2d336d13b19dae9245fc | 5,097 |
def get_iterative_process_for_minimal_sum_example():
"""Returns an iterative process for a sum example.
This iterative process contains the fewest components required to compile to
`forms.MapReduceForm`.
"""
@computations.federated_computation
def init_fn():
"""The `init` function for `tff.templates.IterativeProcess`."""
zero = computations.tf_computation(lambda: [0, 0])
return intrinsics.federated_eval(zero, placements.SERVER)
@computations.tf_computation(tf.int32)
def work(client_data):
del client_data # Unused
return 1, 1
@computations.federated_computation([
computation_types.FederatedType([tf.int32, tf.int32], placements.SERVER),
computation_types.FederatedType(tf.int32, placements.CLIENTS),
])
def next_fn(server_state, client_data):
"""The `next` function for `tff.templates.IterativeProcess`."""
del server_state # Unused
# No call to `federated_map` with prepare.
# No call to `federated_broadcast`.
client_updates = intrinsics.federated_map(work, client_data)
unsecure_update = intrinsics.federated_sum(client_updates[0])
secure_update = intrinsics.federated_secure_sum_bitwidth(
client_updates[1], 8)
new_server_state = intrinsics.federated_zip(
[unsecure_update, secure_update])
# No call to `federated_map` with an `update` function.
server_output = intrinsics.federated_value([], placements.SERVER)
return new_server_state, server_output
return iterative_process.IterativeProcess(init_fn, next_fn) | 40ea1b07f2eeccaaff3cc0657207ef445985f795 | 5,098 |
def get_r_port_p_d_t(p):
"""玄関ポーチに設置された照明設備の使用時間率
Args:
p(int): 居住人数
Returns:
ndarray: r_port_p_d_t 日付dの時刻tにおける居住人数がp人の場合の玄関ポーチに設置された照明設備の使用時間率
"""
return get_r_i_p_d_t(19, p) | abdc6f9201594ca946ff2deb25cdf8c1e1d98839 | 5,099 |
import re
def remove_comments_from_json(string):
"""
Removes comments from a JSON string, supports // and /* formats. From Stack Overflow.
@param str string: Original text.
@return: Text without comments.
@rtype: str
"""
pattern = r"((?<!\\)\".*?(?<!\\)\"|\'.*?\')|(/\*.*?\*/|//[^\r\n]*$)"
# first group captures quoted strings (double or single)
# second group captures comments (//single-line or /* multi-line */)
regex = re.compile(pattern, re.MULTILINE | re.DOTALL)
def _replacer(match):
# if the 2nd group (capturing comments) is not None,
# it means we have captured a non-quoted (real) comment string.
if match.group(2) is not None:
return "" # so we will return empty to remove the comment
else: # otherwise, we will return the 1st group
return match.group(1) # captured quoted-string
return regex.sub(_replacer, string) | d5629e680bc1890458cbd40ea3f68d9d1629a8d0 | 5,100 |
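# A small usage example: both comment styles are stripped, while "//" inside a quoted
# string is left untouched.
raw = '{"url": "http://x//y", /* block comment */ "n": 1 // trailing comment\n}'
print(remove_comments_from_json(raw))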
def visualize_depth(visual_dict, disp_or_error="disp", dataset="KITTI"):
"""visual_dict:"left_img": raw left image
"depth_error" or "disp"
"est_left_img": image reprojected from right
"depth": output depth
"photo_error": photometric error
all tensor should be normalized to [0, 1]befor input with
shape [C, H, W] with .detach()
disp_or_error: output "disp"arity when used in training or "error"
dataset: from "KITTI" "CS"
"""
for k, v, in visual_dict.items():
v = v.unsqueeze(0)
if dataset == "KITTI":
v = F.interpolate(v, [375, 1242], mode="bilinear",
align_corners=False)
elif dataset == "CS":
v = F.interpolate(v, [384, 1000], mode="bilinear",
align_corners=False)
v = v.cpu().squeeze(0).permute(1, 2, 0).numpy()
visual_dict[k] = v
left_img = visual_dict["left_img"] * 255
est_left_img = visual_dict["est_left_img"] * 255
if disp_or_error == "error":
error = visual_dict["depth_error"][..., 0]
normal_error = mpl.colors.Normalize(vmin=0,
vmax=1)
mapper_error = cm.ScalarMappable(norm=normal_error, cmap='coolwarm')
error = (mapper_error.to_rgba(error)[:, :, :3] * 255)
else:
error = visual_dict["disp"] * 255
error = cv.applyColorMap(error.astype(np.uint8),
cv.COLORMAP_OCEAN)
depth = visual_dict["depth"][..., 0]
disp = 1 / depth
vmin = np.percentile(disp, 5)
normal_disp = mpl.colors.Normalize(vmin=vmin, vmax=disp.max())
mapper_disp = cm.ScalarMappable(norm=normal_disp, cmap='magma')
depth_color = (mapper_disp.to_rgba(disp)[:, :, :3] * 255)
photo_error = visual_dict["photo_error"] * 255
photo_error = cv.applyColorMap(photo_error.astype(np.uint8), cv.COLORMAP_JET)
photo_error = cv.cvtColor(photo_error, cv.COLOR_RGB2BGR)
fused_img = (left_img + est_left_img)/2
photoerror_img = left_img + 0.5 * photo_error
photoerror_img = photoerror_img / np.max(photoerror_img)
photoerror_img *= 255
depth_img = left_img + 0.8 * depth_color
depth_img = depth_img / np.max(depth_img)
depth_img *= 255
img1 = np.vstack([left_img, est_left_img, depth_color, photo_error])
img2 = np.vstack([error, fused_img, depth_img, photoerror_img])
all_img = np.hstack([img1, img2]).astype(np.uint8)
all_img = cv.cvtColor(all_img, cv.COLOR_RGB2BGR)
return all_img | 0c1f9bc74e9ff4548e8e1c9f052e14d0c8bc8d4a | 5,101 |
def put_s3_object(bucket, key_name, local_file):
"""Upload a local file in the execution environment to S3
Parameters
----------
bucket: string, required
S3 bucket that will holds the attachment
key_name: string, required
S3 key is the destination of attachment
local_file: string, required
Location of the attachment to process
Returns
-------
boolean (True if successful, False if not successful)
"""
tracer.put_metadata('object', f's3://{bucket}/{key_name}')
try:
s3_resource.Bucket(bucket).upload_file(local_file, key_name)
result = True
tracer.put_annotation('ATTACHMENT_UPLOAD', 'SUCCESS')
except Exception as e:
logger.error(str(e))
tracer.put_annotation('ATTACHMENT_UPLOAD', 'FAILURE')
result = False
return(result) | d566b430541ec22c10e4a173bcd0a53e244ca252 | 5,102 |
import re
def parse_parionssport(url):
"""
Get ParionsSport odds from url
"""
if "parionssport" not in sb.TOKENS:
try:
token = get_parionssport_token()
sb.TOKENS["parionssport"] = token
except OpenSSL.crypto.Error:
return {}
if "paris-" in url.split("/")[-1] and "?" not in url:
sport = url.split("/")[-1].split("paris-")[-1]
return parse_sport_parionssport(sport)
regex = re.findall(r'\d+', url)
if regex:
id_league = regex[-1]
try:
return parse_parionssport_api("p" + str(id_league))
except TypeError:
return {}
return {} | 26cc749d0f951f3785ff322eeacd212bc55d1714 | 5,103 |
def get_index(channel_urls=(), prepend=True, platform=None,
use_local=False, use_cache=False, unknown=False, prefix=False):
"""
Return the index of packages available on the channels
If prepend=False, only the channels passed in as arguments are used.
If platform=None, then the current platform is used.
If prefix is supplied, then the packages installed in that prefix are added.
"""
if use_local:
channel_urls = ['local'] + list(channel_urls)
channel_urls = normalize_urls(channel_urls, platform)
if prepend:
channel_urls.extend(get_channel_urls(platform))
channel_urls = prioritize_channels(channel_urls)
index = fetch_index(channel_urls, use_cache=use_cache, unknown=unknown)
if prefix:
priorities = {c: p for c, p in itervalues(channel_urls)}
maxp = max(itervalues(priorities)) + 1 if priorities else 1
for dist, info in iteritems(install.linked_data(prefix)):
fn = info['fn']
schannel = info['schannel']
prefix = '' if schannel == 'defaults' else schannel + '::'
priority = priorities.get(schannel, maxp)
key = prefix + fn
if key in index:
# Copy the link information so the resolver knows this is installed
index[key] = index[key].copy()
index[key]['link'] = info.get('link') or True
else:
# only if the package in not in the repodata, use local
# conda-meta (with 'depends' defaulting to [])
info.setdefault('depends', [])
info['priority'] = priority
index[key] = info
return index | 176ac7147b6e32133ee07d9e302679294c9b24ce | 5,104 |
def is__invsign1(*args):
"""
is__invsign1(ea) -> bool
"""
return _ida_nalt.is__invsign1(*args) | ee13bc4cd76d134a65c4c71355d1a5449eb27fa4 | 5,105 |
def every(n_steps):
"""Returns True every n_steps, for use as *_at functions in various places."""
return lambda step: step % n_steps == 0 | 02fc6bc59fa6f223b681539baeae32c40bd9577e | 5,106 |
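# A small usage example: build a predicate that fires every 100 steps.
log_at = every(100)
print(log_at(0), log_at(100), log_at(101))  # True True False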
def read_files(mousefile,humanfile):
""" Read into anndata objects and return """
mouse = sc.read_10x_h5(mousefile)
if humanfile != None:
human = sc.read_10x_h5(humanfile)
else:
human = None
return(mouse,human) | fc6bc99160b22c0b4c8c9b48e9f99ee51ca61b62 | 5,107 |
def calc_batch_size(num_examples, batches_per_loop, batch_size):
"""Reduce the batch size if needed to cover all examples without a remainder."""
assert batch_size > 0
assert num_examples % batches_per_loop == 0
while num_examples % (batch_size * batches_per_loop) != 0:
batch_size -= 1
return batch_size | 3c394813a98a8414645f633a519001937247e8b0 | 5,108 |
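# A worked example: 100 examples over 4 batches per loop cannot use a batch size of
# 30, so the helper walks it down to 25 (4 * 25 == 100).
print(calc_batch_size(100, 4, 30))  # 25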
def upload_volume(request, *args, **kwargs):
"""
    Handle a user upload of volume data; the original data is deleted first.
"""
if not (request.user and request.user.is_authenticated()):
raise PermissionDenied()
user = request.user
assert 'pid' in kwargs
pid = kwargs['pid']
assert 'pk' in kwargs
id = kwargs['pk']
volume = Volume.objects.get(project__id=pid, id=id)
# Check whether the user is the member of this project
if not check_member_in_project(volume.project, user):
raise PermissionDenied(detail="User {} is not in project {}."
.format(user.username, volume.project.name))
if not request.FILES.get('file'):
raise ParseError(detail="There is no upload file.")
logger.info("User {} upload files to volume {}-{}.".format(
user.username, volume.project.name, volume.name))
filename = get_upload_volume_filename(volume, user)
save_upload_file_to_disk(request.FILES['file'], filename)
client = NFSLocalClient()
volume_dir = get_volume_direction_on_nfs(volume)
# Clear the dir first
client.removedir(volume_dir)
client.makedir(volume_dir)
client.copy_file_to_remote_and_untar(filename, volume_dir)
remove_file_from_disk(filename)
return JsonResponse({"detail": "success"}) | 8b2c7630473ca2f1aa309cb763fc018562115761 | 5,109 |
def has_admin_access(user):
"""Check if a user has admin access."""
return user == 'admin' | d178861bee504f6f3026c9e495d56cc8d2d7c3d3 | 5,110 |
def get_compss_type(value, depth=0):
# type: (object, int) -> int
""" Retrieve the value type mapped to COMPSs types.
:param value: Value to analyse.
:param depth: Collections depth.
:return: The Type of the value.
"""
# First check if it is a PSCO since a StorageNumpy can be detected
# as a numpy object.
if has_id(value):
# If has method getID maybe is a PSCO
try:
if get_id(value) not in [None, 'None']:
# the 'getID' + id == criteria for persistent object
return TYPE.EXTERNAL_PSCO
else:
return TYPE.OBJECT
except TypeError:
# A PSCO class has been used to check its type (when checking
# the return). Since we still don't know if it is going to be
# persistent inside, we assume that it is not. It will be checked
# later on the worker side when the task finishes.
return TYPE.OBJECT
# If it is a numpy scalar, we manage it as all objects to avoid to
# infer its type wrong. For instance isinstance(np.float64 object, float)
# returns true
if np and isinstance(value, np.generic):
return TYPE.OBJECT
if isinstance(value, (bool, str, int, PYCOMPSS_LONG, float)):
value_type = type(value)
if value_type is bool:
return TYPE.BOOLEAN
elif value_type is str:
# Char does not exist as char, only strings.
# Files will be detected as string, since it is a path.
# The difference among them is defined by the parameter
# decoration as FILE.
return TYPE.STRING
elif value_type is int:
if IS_PYTHON3:
if value < PYTHON_MAX_INT: # noqa
return TYPE.INT
else:
return TYPE.LONG
else:
return TYPE.INT
elif value_type is PYCOMPSS_LONG:
return TYPE.LONG
elif value_type is float:
return TYPE.DOUBLE
elif depth > 0 and is_basic_iterable(value):
return TYPE.COLLECTION
elif depth > 0 and is_dict(value):
return TYPE.DICT_COLLECTION
else:
# Default type
return TYPE.OBJECT | c272cb6b2cdca159de08182dfac67b00b94b0d77 | 5,111 |
def set_namedtuple_defaults(namedtuple, default=None):
"""
Set *all* of the fields for a given nametuple to a singular value.
Modifies the tuple in place, but returns it anyway.
More info:
https://stackoverflow.com/a/18348004
:param namedtuple: A constructed collections.namedtuple
:param default: The default value to set.
:return: the modified namedtuple
"""
namedtuple.__new__.__defaults__ = (default,) * len(namedtuple._fields)
return namedtuple | 1cade18cbdf5a4ae945ae246b94676572810d1e8 | 5,112 |
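# A small usage example; the defaults apply to instances created after the call.
from collections import namedtuple

Point = namedtuple("Point", ["x", "y", "z"])
set_namedtuple_defaults(Point, default=0)
print(Point())  # Point(x=0, y=0, z=0)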
def test_tuple_get_item_merge():
"""Test composite function can be merged from pattern containing TupleGetItem nodes."""
pattern_table = [
("bn_relu", make_bn_relu_pattern())
]
def before():
x = relay.var('x', shape=(1, 8))
gamma = relay.var("gamma", shape=(8,))
beta = relay.var("beta", shape=(8,))
moving_mean = relay.var("moving_mean", shape=(8,))
moving_var = relay.var("moving_var", shape=(8,))
bn_node = relay.nn.batch_norm(x, gamma, beta, moving_mean, moving_var)
tuple_get_item_node = bn_node[0]
r = relay.nn.relu(tuple_get_item_node)
return relay.Function([x, gamma, beta, moving_mean, moving_var], r)
def expected():
x = relay.var('x', shape=(1, 8))
beta = relay.var("beta", shape=(8,))
gamma = relay.var("gamma", shape=(8,))
moving_mean = relay.var("moving_mean", shape=(8,))
moving_var = relay.var("moving_var", shape=(8,))
# bn_relu function
in_1 = relay.var('x1', shape=(1, 8))
in_2 = relay.var('gamma1', shape=(8,))
in_3 = relay.var('beta1', shape=(8,))
in_4 = relay.var('moving_mean1', shape=(8,))
in_5 = relay.var('moving_var1', shape=(8,))
bn_node = relay.nn.batch_norm(in_1, in_2, in_3, in_4, in_5)
tuple_get_item_node = bn_node[0]
relu_node = relay.nn.relu(tuple_get_item_node)
bn_relu = relay.Function([in_1, in_2, in_3, in_4, in_5], relu_node)
bn_relu = bn_relu.with_attr("Composite", "bn_relu")
bn_relu = bn_relu.with_attr("PartitionedFromPattern",
"nn.batch_norm_TupleGetItem0_nn.relu_")
# merged function
r = relay.Call(bn_relu, [x, gamma, beta, moving_mean, moving_var])
return relay.Function([x, gamma, beta, moving_mean, moving_var], r)
check_result(pattern_table, before(), expected()) | 2a170843b643451268026d9f0f18901bce944597 | 5,113 |
def prepare_data_for_storage(major_version, minor_version, patch_version):
"""Prepares data to store to file.
"""
temp = Template(
u'''/*Copyright (c) 2016, Ford Motor Company\n'''
u'''All rights reserved.\n'''
u'''Redistribution and use in source and binary forms, with or without\n'''
u'''modification, are permitted provided that the following conditions are met:\n'''
u'''Redistributions of source code must retain the above copyright notice, this\n'''
u'''list of conditions and the following disclaimer.\n'''
u'''Redistributions in binary form must reproduce the above copyright notice,\n'''
u'''this list of conditions and the following\n'''
u'''disclaimer in the documentation and/or other materials provided with the\n'''
u'''distribution.\n'''
u'''Neither the name of the Ford Motor Company nor the names of its contributors\n'''
u'''may be used to endorse or promote products derived from this software\n'''
u'''without specific prior written permission.\n'''
u'''THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n'''
u'''AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n'''
u'''IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n'''
u'''ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n'''
u'''LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n'''
u'''CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n'''
u'''SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n'''
u'''INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n'''
u'''CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n'''
u'''ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n'''
u'''POSSIBILITY OF SUCH DAMAGE.\n'''
u'''*/\n'''
u'''#ifndef GENERATED_MSG_VERSION_H\n'''
u'''#define GENERATED_MSG_VERSION_H\n\n'''
u'''namespace application_manager {\n\n'''
u'''const uint16_t major_version = $m_version;\n'''
u'''const uint16_t minor_version = $min_version;\n'''
u'''const uint16_t patch_version = $p_version;\n'''
u'''} // namespace application_manager\n'''
u'''#endif // GENERATED_MSG_VERSION_H''')
data_to_file = temp.substitute(m_version = major_version, min_version = minor_version, p_version = patch_version)
return data_to_file | b3411398179472e0e4975c442bdc0dea2ecc1556 | 5,114 |