the-stack_0_11908
#import math
import numpy as np
import scipy.optimize
from . import factor
from .material import char_mat_strength
__all__ = [
"pipe_ovality",
"pipe_char_elastic_pressure",
"pipe_char_plastic_pressure",
"char_collapse_pressure_num",
"char_collapse_pressure",
"pipe_collapse_unity",
"pipe_collapse_all"
]
# from .pipe_collapse import pipe_char_elastic_pressure
# from .pipe_collapse import pipe_char_plastic_pressure
# from .pipe_collapse import pipe_ovality
# from .pipe_collapse import char_collapse_pressure
# from .pipe_collapse import pipe_collapse_unity
# def pipeCollapse(t,D,P_c,SMYS,nu=0.3,E=207.*10**9, f_o=None):
# '''DNV-OS-F101:2010 Sec.5 D401, collapse due to external pressure '''
# P_el = 2*E*(t/D)**3/(1-nu**2)
# P_p = f_y*alpha_fab*(2*t/D)
# if not f_o:
# f_o = (D_max-D_min)/D
# if f_o<0.005: f_o = 0.005
# return (P_c-P_el)*(P_c**2-P_p**2) - P_c*P_el*P_p*f_o*D/t
def pipe_ovality(D, D_max=None, D_min=None) -> "O_0":
"""Calculate pipe ovality.
Reference:
DNVGL-ST-F101 (2017-12)
sec:5.4.4.2 eq:5.14 page:96 $O_0$
"""
if D_max is None:
D_max = D
if D_min is None:
D_min = D
O_0 = (D_max - D_min) / D
if O_0 < 0.005:
O_0 = 0.005
return O_0
def pipe_char_elastic_pressure(t, D, nu=0.3, E=207.0*10**9) -> "p_el":
"""Calculate p_el.
Reference:
DNVGL-ST-F101 (2017-12)
sec:5.4.4.2 eq:5.12 page:96 $p_{el}$
"""
p_el = 2*E*(t/D)**3/(1-nu**2)
return p_el
def pipe_char_plastic_pressure(t, D, f_y, alpha_fab) -> "p_p":
"""Calculate characteristic plastic pressure p_p.
Reference:
DNVGL-ST-F101 (2017-12)
sec:5.4.4.2 eq:5.13 page:96 $p_p$
"""
p_p = f_y*alpha_fab*(2*t/D)
return p_p
def p_c_zerofunc(p_c, p_el, p_p, O_0, D, t):
return (p_c-p_el)*(p_c**2-p_p**2) - p_c*p_el*p_p*O_0*D/t
def p_c_fprime(p_c, p_el, p_p, O_0, D, t):
    return 3*p_c**2 - 2*p_c*p_el - p_p**2 - p_el*p_p*O_0*D/t
def char_collapse_pressure_num(p_el, p_p, O_0, D, t, p_c_0=1.e5) -> "p_c":
"""Calculate p_c numerically using Newton's method.
Reference:
DNVGL-ST-F101 (2017-12)
sec:5.4.4.2 eq:5.11 page:95 $p_c$
"""
p_c = scipy.optimize.newton(p_c_zerofunc, p_c_0, p_c_fprime,
args=(p_el, p_p, O_0, D, t))
return p_c
def char_collapse_pressure(p_el, p_p, O_0, D, t) -> "p_c":
"""Calculate p_c analytically using solution of cubic equation given
in DNVGL-ST-F101.
Reference:
DNVGL-ST-F101 (2017-12)
sec:13.4.7 eq:13.10 page:299 $p_c$
"""
b = -p_el
c = -(p_p**2 + p_el*p_p*O_0*D/t)
d = p_el * p_p**2
u = 1/3 * (-1/3 * b**2 + c)
v = 1/2 * (2/27 * b**3 - 1/3 * b*c + d)
phi = np.arccos(-v / np.sqrt(-u**3))
y = -2 * np.sqrt(-u) * np.cos(phi/3 + 60*np.pi/180)
p_c = y - 1/3 * b
return p_c
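# A minimal cross-check sketch (example inputs assumed, not taken from the
# standard): the analytical cubic root should drive p_c_zerofunc close to zero
# and agree with the Newton solution for a reasonable starting guess.
def _char_collapse_pressure_check(t=0.02, D=0.66, f_y=444.0e6, alpha_fab=1.0):
    p_el = pipe_char_elastic_pressure(t, D)
    p_p = pipe_char_plastic_pressure(t, D, f_y, alpha_fab)
    O_0 = pipe_ovality(D)  # defaults to the 0.005 floor
    p_c_analytic = char_collapse_pressure(p_el, p_p, O_0, D, t)
    residual = p_c_zerofunc(p_c_analytic, p_el, p_p, O_0, D, t)
    p_c_numeric = char_collapse_pressure_num(p_el, p_p, O_0, D, t, p_c_0=1.0e6)
    return p_c_analytic, p_c_numeric, residual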
def pipe_collapse_unity(p_e, p_c, gamma_m, gamma_SCLB, p_min=0
) -> "pipe_collapse_uty":
"""Calculate pipe collapse unity value.
Local buckling – system collapse (external over pressure only).
Reference:
DNVGL-ST-F101 (2017-12)
sec:5.4.4.1 eq:5.10 page:95 $p_{lt}$
"""
# if gamma_m is None:
# gamma_m = factor.gamma_m_map[limit_state]
# if gamma_SCLB is None:
# gamma_SCLB = factor.gamma_SCLB_map[SC]
pipe_collapse_uty = (p_e - p_min) * gamma_m * gamma_SCLB / p_c
return pipe_collapse_uty
def external_pressure(depth, rho_water, g=9.81) -> "p_e":
p_e = abs(depth) * rho_water * g
return p_e
def pipe_collapse_all(t, D, E, nu, SMYS, h_l, rho_water,
gamma_m, alpha_fab, alpha_U, gamma_SCLB,
material=None, T=None, f_ytemp=None,
D_max=None, D_min=None, p_min=0, g=9.81
) -> "{}":
O_0 = pipe_ovality(D, D_max, D_min)
p_el = pipe_char_elastic_pressure(t, D, nu, E)
#_alpha_U = factor.alpha_U_map(alpha_U)
f_y = char_mat_strength(SMYS, material, T, f_ytemp, alpha_U)
#_alpha_fab = factor.alpha_fab_map(alpha_fab)
p_p = pipe_char_plastic_pressure(t, D, f_y, alpha_fab)
p_c = char_collapse_pressure(p_el, p_p, O_0, D, t)
p_e = external_pressure(abs(h_l), rho_water, g)
pipe_collapse_uty = pipe_collapse_unity(p_e, p_c, gamma_m, gamma_SCLB, p_min)
return {
"O_0": O_0,
"p_el": p_el,
"p_p": p_p,
"p_c": p_c,
"p_e": p_e,
"pipe_collapse_uty": pipe_collapse_uty,
}
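# A minimal usage sketch of pipe_collapse_all with assumed example inputs; the
# safety/fabrication factors are illustrative only, and the call assumes that
# char_mat_strength() accepts the None defaults forwarded by pipe_collapse_all.
def _pipe_collapse_all_example():
    return pipe_collapse_all(
        t=0.0197, D=0.660, E=207.0e9, nu=0.3, SMYS=450.0e6,
        h_l=-410.0, rho_water=1027.0,
        gamma_m=1.15, alpha_fab=1.0, alpha_U=0.96, gamma_SCLB=1.14,
    )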
if __name__ == "__main__":
p_c_0 = 1025*9.81*1
t = 0.0212
t_corr = 0.0005
t_fab = 0.001
t_1 = t - t_corr - t_fab
D = 0.660
D_max = D
D_min = D
SMYS = 450e6
f_y = SMYS - 6e6
alpha_fab = 1.00
h_l = -410.
rho_water = 1027.
p_e = rho_water*9.81*abs(h_l)
p_el = pipe_char_elastic_pressure(t_1, D, nu=0.3, E=207.*10**9)
p_p = pipe_char_plastic_pressure(t_1, D, f_y, alpha_fab)
O_0 = pipe_ovality(D, D_max, D_min)
p_c = char_collapse_pressure_num(p_el, p_p, O_0, D, t_1, p_c_0=p_c_0)
print("p_c (numerical)=", p_c)
    # gamma_m and gamma_SCLB are assumed example safety factors (material
    # resistance factor and safety-class resistance factor for local buckling).
    gamma_m = 1.15
    gamma_SCLB = 1.14
    pipe_collapse_uty = pipe_collapse_unity(p_e, p_c, gamma_m, gamma_SCLB)
    print("pipe_collapse_uty (numerical)=", pipe_collapse_uty)
    p_c = char_collapse_pressure(p_el, p_p, O_0, D, t_1)
    print("p_c=", p_c)
    pipe_collapse_uty = pipe_collapse_unity(p_e, p_c, gamma_m, gamma_SCLB)
    print("pipe_collapse_uty=", pipe_collapse_uty)
the-stack_0_11911
from __future__ import division
import math
import torch
from torch.jit.annotations import List, Tuple
from torch import Tensor
import torchvision
# TODO: https://github.com/pytorch/pytorch/issues/26727
def zeros_like(tensor, dtype):
# type: (Tensor, int) -> Tensor
return torch.zeros_like(tensor, dtype=dtype, layout=tensor.layout,
device=tensor.device, pin_memory=tensor.is_pinned())
@torch.jit.script
class BalancedPositiveNegativeSampler(object):
"""
This class samples batches, ensuring that they contain a fixed proportion of positives
"""
def __init__(self, batch_size_per_image, positive_fraction):
# type: (int, float)
"""
Arguments:
batch_size_per_image (int): number of elements to be selected per image
            positive_fraction (float): percentage of positive elements per batch
"""
self.batch_size_per_image = batch_size_per_image
self.positive_fraction = positive_fraction
def __call__(self, matched_idxs):
# type: (List[Tensor])
"""
Arguments:
matched idxs: list of tensors containing -1, 0 or positive values.
Each tensor corresponds to a specific image.
-1 values are ignored, 0 are considered as negatives and > 0 as
positives.
Returns:
pos_idx (list[tensor])
neg_idx (list[tensor])
Returns two lists of binary masks for each image.
The first list contains the positive elements that were selected,
and the second list the negative example.
"""
pos_idx = []
neg_idx = []
for matched_idxs_per_image in matched_idxs:
positive = torch.nonzero(matched_idxs_per_image >= 1).squeeze(1)
negative = torch.nonzero(matched_idxs_per_image == 0).squeeze(1)
num_pos = int(self.batch_size_per_image * self.positive_fraction)
# protect against not enough positive examples
num_pos = min(positive.numel(), num_pos)
num_neg = self.batch_size_per_image - num_pos
# protect against not enough negative examples
num_neg = min(negative.numel(), num_neg)
# randomly select positive and negative examples
perm1 = torch.randperm(positive.numel(), device=positive.device)[:num_pos]
perm2 = torch.randperm(negative.numel(), device=negative.device)[:num_neg]
pos_idx_per_image = positive[perm1]
neg_idx_per_image = negative[perm2]
# create binary mask from indices
pos_idx_per_image_mask = zeros_like(
matched_idxs_per_image, dtype=torch.uint8
)
neg_idx_per_image_mask = zeros_like(
matched_idxs_per_image, dtype=torch.uint8
)
pos_idx_per_image_mask[pos_idx_per_image] = torch.tensor(1, dtype=torch.uint8)
neg_idx_per_image_mask[neg_idx_per_image] = torch.tensor(1, dtype=torch.uint8)
pos_idx.append(pos_idx_per_image_mask)
neg_idx.append(neg_idx_per_image_mask)
return pos_idx, neg_idx
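# A small usage sketch (toy labels, not part of the module): sample up to four
# indices per image at a 50% positive fraction and return the binary masks.
def _balanced_sampler_demo():
    sampler = BalancedPositiveNegativeSampler(batch_size_per_image=4,
                                              positive_fraction=0.5)
    matched_idxs = [torch.tensor([-1, 0, 0, 2, 5, 0, 1])]
    pos_masks, neg_masks = sampler(matched_idxs)
    return pos_masks[0], neg_masks[0]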
@torch.jit.script
def encode_boxes(reference_boxes, proposals, weights):
# type: (torch.Tensor, torch.Tensor, torch.Tensor) -> torch.Tensor
"""
Encode a set of proposals with respect to some
reference boxes
Arguments:
reference_boxes (Tensor): reference boxes
proposals (Tensor): boxes to be encoded
"""
# perform some unpacking to make it JIT-fusion friendly
wx = weights[0]
wy = weights[1]
ww = weights[2]
wh = weights[3]
proposals_x1 = proposals[:, 0].unsqueeze(1)
proposals_y1 = proposals[:, 1].unsqueeze(1)
proposals_x2 = proposals[:, 2].unsqueeze(1)
proposals_y2 = proposals[:, 3].unsqueeze(1)
reference_boxes_x1 = reference_boxes[:, 0].unsqueeze(1)
reference_boxes_y1 = reference_boxes[:, 1].unsqueeze(1)
reference_boxes_x2 = reference_boxes[:, 2].unsqueeze(1)
reference_boxes_y2 = reference_boxes[:, 3].unsqueeze(1)
# implementation starts here
ex_widths = proposals_x2 - proposals_x1
ex_heights = proposals_y2 - proposals_y1
ex_ctr_x = proposals_x1 + 0.5 * ex_widths
ex_ctr_y = proposals_y1 + 0.5 * ex_heights
gt_widths = reference_boxes_x2 - reference_boxes_x1
gt_heights = reference_boxes_y2 - reference_boxes_y1
gt_ctr_x = reference_boxes_x1 + 0.5 * gt_widths
gt_ctr_y = reference_boxes_y1 + 0.5 * gt_heights
targets_dx = wx * (gt_ctr_x - ex_ctr_x) / ex_widths
targets_dy = wy * (gt_ctr_y - ex_ctr_y) / ex_heights
targets_dw = ww * torch.log(gt_widths / ex_widths)
targets_dh = wh * torch.log(gt_heights / ex_heights)
targets = torch.cat((targets_dx, targets_dy, targets_dw, targets_dh), dim=1)
return targets
@torch.jit.script
class BoxCoder(object):
"""
This class encodes and decodes a set of bounding boxes into
the representation used for training the regressors.
"""
def __init__(self, weights, bbox_xform_clip=math.log(1000. / 16)):
# type: (Tuple[float, float, float, float], float)
"""
Arguments:
weights (4-element tuple)
bbox_xform_clip (float)
"""
self.weights = weights
self.bbox_xform_clip = bbox_xform_clip
def encode(self, reference_boxes, proposals):
# type: (List[Tensor], List[Tensor])
boxes_per_image = [len(b) for b in reference_boxes]
reference_boxes = torch.cat(reference_boxes, dim=0)
proposals = torch.cat(proposals, dim=0)
targets = self.encode_single(reference_boxes, proposals)
return targets.split(boxes_per_image, 0)
def encode_single(self, reference_boxes, proposals):
"""
Encode a set of proposals with respect to some
reference boxes
Arguments:
reference_boxes (Tensor): reference boxes
proposals (Tensor): boxes to be encoded
"""
dtype = reference_boxes.dtype
device = reference_boxes.device
weights = torch.as_tensor(self.weights, dtype=dtype, device=device)
targets = encode_boxes(reference_boxes, proposals, weights)
return targets
def decode(self, rel_codes, boxes):
# type: (Tensor, List[Tensor])
assert isinstance(boxes, (list, tuple))
assert isinstance(rel_codes, torch.Tensor)
boxes_per_image = [b.size(0) for b in boxes]
concat_boxes = torch.cat(boxes, dim=0)
box_sum = 0
for val in boxes_per_image:
box_sum += val
pred_boxes = self.decode_single(
rel_codes.reshape(box_sum, -1), concat_boxes
)
return pred_boxes.reshape(box_sum, -1, 4)
def decode_single(self, rel_codes, boxes):
"""
From a set of original boxes and encoded relative box offsets,
get the decoded boxes.
Arguments:
rel_codes (Tensor): encoded boxes
boxes (Tensor): reference boxes.
"""
boxes = boxes.to(rel_codes.dtype)
widths = boxes[:, 2] - boxes[:, 0]
heights = boxes[:, 3] - boxes[:, 1]
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
wx, wy, ww, wh = self.weights
dx = rel_codes[:, 0::4] / wx
dy = rel_codes[:, 1::4] / wy
dw = rel_codes[:, 2::4] / ww
dh = rel_codes[:, 3::4] / wh
# Prevent sending too large values into torch.exp()
dw = torch.clamp(dw, max=self.bbox_xform_clip)
dh = torch.clamp(dh, max=self.bbox_xform_clip)
pred_ctr_x = dx * widths[:, None] + ctr_x[:, None]
pred_ctr_y = dy * heights[:, None] + ctr_y[:, None]
pred_w = torch.exp(dw) * widths[:, None]
pred_h = torch.exp(dh) * heights[:, None]
pred_boxes1 = pred_ctr_x - torch.tensor(0.5, dtype=pred_ctr_x.dtype) * pred_w
pred_boxes2 = pred_ctr_y - torch.tensor(0.5, dtype=pred_ctr_y.dtype) * pred_h
pred_boxes3 = pred_ctr_x + torch.tensor(0.5, dtype=pred_ctr_x.dtype) * pred_w
pred_boxes4 = pred_ctr_y + torch.tensor(0.5, dtype=pred_ctr_y.dtype) * pred_h
pred_boxes = torch.stack((pred_boxes1, pred_boxes2, pred_boxes3, pred_boxes4), dim=2).flatten(1)
return pred_boxes
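# A small round-trip sketch (toy boxes, not part of the module): encoding the
# reference boxes against proposals and decoding should recover them.
def _box_coder_demo():
    coder = BoxCoder(weights=(1.0, 1.0, 1.0, 1.0))
    reference_boxes = [torch.tensor([[0., 0., 10., 10.], [5., 5., 20., 25.]])]
    proposals = [torch.tensor([[1., 1., 9., 12.], [4., 6., 22., 24.]])]
    rel_codes = coder.encode(reference_boxes, proposals)
    decoded = coder.decode(torch.cat(rel_codes, dim=0), proposals)
    return decoded  # shape (2, 1, 4), approximately the reference boxes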
@torch.jit.script
class Matcher(object):
"""
This class assigns to each predicted "element" (e.g., a box) a ground-truth
element. Each predicted element will have exactly zero or one matches; each
ground-truth element may be assigned to zero or more predicted elements.
Matching is based on the MxN match_quality_matrix, that characterizes how well
each (ground-truth, predicted)-pair match. For example, if the elements are
boxes, the matrix may contain box IoU overlap values.
The matcher returns a tensor of size N containing the index of the ground-truth
element m that matches to prediction n. If there is no match, a negative value
is returned.
"""
BELOW_LOW_THRESHOLD = -1
BETWEEN_THRESHOLDS = -2
__annotations__ = {
'BELOW_LOW_THRESHOLD': int,
'BETWEEN_THRESHOLDS': int,
}
def __init__(self, high_threshold, low_threshold, allow_low_quality_matches=False):
# type: (float, float, bool)
"""
Args:
high_threshold (float): quality values greater than or equal to
this value are candidate matches.
low_threshold (float): a lower quality threshold used to stratify
matches into three levels:
1) matches >= high_threshold
2) BETWEEN_THRESHOLDS matches in [low_threshold, high_threshold)
3) BELOW_LOW_THRESHOLD matches in [0, low_threshold)
allow_low_quality_matches (bool): if True, produce additional matches
for predictions that have only low-quality match candidates. See
set_low_quality_matches_ for more details.
"""
self.BELOW_LOW_THRESHOLD = -1
self.BETWEEN_THRESHOLDS = -2
assert low_threshold <= high_threshold
self.high_threshold = high_threshold
self.low_threshold = low_threshold
self.allow_low_quality_matches = allow_low_quality_matches
def __call__(self, match_quality_matrix):
"""
Args:
match_quality_matrix (Tensor[float]): an MxN tensor, containing the
pairwise quality between M ground-truth elements and N predicted elements.
Returns:
matches (Tensor[int64]): an N tensor where N[i] is a matched gt in
[0, M - 1] or a negative value indicating that prediction i could not
be matched.
"""
if match_quality_matrix.numel() == 0:
# empty targets or proposals not supported during training
if match_quality_matrix.shape[0] == 0:
raise ValueError(
"No ground-truth boxes available for one of the images "
"during training")
else:
raise ValueError(
"No proposal boxes available for one of the images "
"during training")
# match_quality_matrix is M (gt) x N (predicted)
# Max over gt elements (dim 0) to find best gt candidate for each prediction
matched_vals, matches = match_quality_matrix.max(dim=0)
if self.allow_low_quality_matches:
all_matches = matches.clone()
else:
all_matches = None
# Assign candidate matches with low quality to negative (unassigned) values
below_low_threshold = matched_vals < self.low_threshold
between_thresholds = (matched_vals >= self.low_threshold) & (
matched_vals < self.high_threshold
)
matches[below_low_threshold] = torch.tensor(self.BELOW_LOW_THRESHOLD)
matches[between_thresholds] = torch.tensor(self.BETWEEN_THRESHOLDS)
if self.allow_low_quality_matches:
assert all_matches is not None
self.set_low_quality_matches_(matches, all_matches, match_quality_matrix)
return matches
def set_low_quality_matches_(self, matches, all_matches, match_quality_matrix):
"""
Produce additional matches for predictions that have only low-quality matches.
Specifically, for each ground-truth find the set of predictions that have
maximum overlap with it (including ties); for each prediction in that set, if
it is unmatched, then match it to the ground-truth with which it has the highest
quality value.
"""
# For each gt, find the prediction with which it has highest quality
highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1)
# Find highest quality match available, even if it is low, including ties
gt_pred_pairs_of_highest_quality = torch.nonzero(
match_quality_matrix == highest_quality_foreach_gt[:, None]
)
# Example gt_pred_pairs_of_highest_quality:
# tensor([[ 0, 39796],
# [ 1, 32055],
# [ 1, 32070],
# [ 2, 39190],
# [ 2, 40255],
# [ 3, 40390],
# [ 3, 41455],
# [ 4, 45470],
# [ 5, 45325],
# [ 5, 46390]])
# Each row is a (gt index, prediction index)
# Note how gt items 1, 2, 3, and 5 each have two ties
pred_inds_to_update = gt_pred_pairs_of_highest_quality[:, 1]
matches[pred_inds_to_update] = all_matches[pred_inds_to_update]
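# A small usage sketch (toy IoU matrix, not part of the module): 2 ground-truth
# boxes vs. 3 predictions; prediction 1 only has a between-threshold match but
# is still assigned to gt 1 because allow_low_quality_matches=True.
def _matcher_demo():
    matcher = Matcher(high_threshold=0.7, low_threshold=0.3,
                      allow_low_quality_matches=True)
    match_quality_matrix = torch.tensor([[0.9, 0.2, 0.1],
                                         [0.4, 0.6, 0.05]])
    return matcher(match_quality_matrix)  # expected: tensor([0, 1, -1])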
the-stack_0_11914
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# author: Tang Zhuangkun
import time
import sys
sys.path.append("..")
import database.db_operator as db_operator
import log.custom_logger as custom_logger
class DataMinerCommonDBOperation:
    # Common, frequently used database operations (not fund- or stock-specific)
def __init__(self):
pass
def get_the_last_trading_date(self,day):
        # Get the trading date closest to the given date, i.e. the previous trading day
        # day: trading date, e.g. 2021-06-09
        # return: the closest trading date, if one exists
        #         otherwise 0000-00-00
        # Query SQL
selecting_sql = "SELECT trading_date FROM trading_days WHERE trading_date <= '%s' ORDER BY " \
"ABS(DATEDIFF(trading_date, '%s')) ASC LIMIT 1" % (day,day)
        # Execute the query
selecting_result = db_operator.DBOperator().select_one("financial_data", selecting_sql)
if selecting_result is not None:
return str(selecting_result["trading_date"])
else:
            # Write a log entry
            log_msg = "Unable to get the nearest trading date for " + day
custom_logger.CustomLogger().log_writter(log_msg, 'error')
return "0000-00-00"
if __name__ == '__main__':
time_start = time.time()
go = DataMinerCommonDBOperation()
last_trade_day = go.get_the_last_trading_date("2022-03-20")
print(last_trade_day)
time_end = time.time()
    print('Time Cost: ' + str(time_end - time_start))
the-stack_0_11915
#%%
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import phd.viz
import phd.thermo
import phd.stats
colors, palette = phd.viz.phd_style()
# %%
# Load the data sets and restrict to the carbon sources
data = pd.read_csv('../../data/ch4_growth/analyzed_foldchange.csv')
stats = pd.read_csv('../../data/ch8_growth_si/DNA_binding_energy_summary.csv')
data = data[(data['strain']=='dilution') & (data['repressors'] > 0) &
(data['fold_change'] >= 0) & (data['temp'] == 37) & (data['size']=='large')]
summary = data.groupby(['carbon', 'date', 'run_number', 'atc_ngml']).mean().reset_index()
summary = summary.groupby(['carbon', 'atc_ngml']).agg(('mean', 'sem')).reset_index()
stats = stats[(stats['temp']==37)]
# Define the constants for plotting
rep_range = np.logspace(0, 3, 100)
# %%
# Set up the figure canvas
fig, ax = plt.subplots(3, 3, figsize=(5.5, 5.5), dpi=100)
phd.viz.despine(ax.ravel())
for a in ax.ravel():
a.set_xscale('log')
a.set_yscale('log')
a.set_xlim([1, 800])
a.set_ylim([1E-2, 1.1])
for i in range(3):
ax[-1, i].set_xlabel('repressors per cell')
ax[i, 0].set_ylabel('fold-change')
for i in range(3):
ax[0, i].spines['bottom'].set_visible(False)
ax[0, i].set_xticks([])
ax[1, i].spines['bottom'].set_visible(False)
ax[1, i].set_xticks([])
ax[i, 1].spines['left'].set_visible(False)
ax[i, 1].set_yticks([])
ax[i, 2].spines['left'].set_visible(False)
ax[i, 2].set_yticks([])
titles = ['acetate', 'glycerol', 'glucose']
title_colors = [colors['dark_brown'], colors['dark_green'], colors['dark_purple']]
bgcolors = [colors['brown'], colors['green'], colors['purple']]
for i in range(3):
if i > 0:
# apply offset transform to all y ticklabels.
dx = -13 / fig.dpi
dy = 0
offset = matplotlib.transforms.ScaledTranslation(dx, dy, fig.dpi_scale_trans)
for label in ax[i, 0].yaxis.get_majorticklabels():
label.set_transform(label.get_transform() + offset)
# Plot the predictions
for i, pred in enumerate(titles):
# Get the binding energy values for the prediction strain
low, high = stats[(stats['carbon']==pred) &
(stats['parameter']=='epRA')][
['hpd_min', 'hpd_max']].values[0]
# Compute the theory
theo_min = phd.thermo.SimpleRepression(R=rep_range, ep_r=low, ka=139, ki=0.53,
ep_ai=1000, effector_conc=0).fold_change()
theo_max = phd.thermo.SimpleRepression(R=rep_range, ep_r=high, ka=139, ki=0.53,
ep_ai=1000, effector_conc=0).fold_change()
for j, fit in enumerate(titles):
ax[i, j].fill_between(rep_range, theo_min, theo_max, color=title_colors[i],
alpha=0.25)
# Plot the data
for i, carb in enumerate(titles):
for j in range(3):
if i == j:
fill = 'white'
edge = bgcolors[i]
else:
fill = bgcolors[i]
edge = colors['grey']
# Isolate the data.
d = summary[summary['carbon']==carb]
ax[j, i].errorbar(d['repressors']['mean'], d['fold_change']['mean'],
xerr=d['repressors']['sem'], yerr=d['fold_change']['sem'],
fmt='o', ms=5, markerfacecolor=fill,
markeredgewidth=0.5, linestyle='none', capsize=1,
lw=0.75, markeredgecolor=edge, color=bgcolors[i])
plt.subplots_adjust(wspace=0.05, hspace=0.05)
plt.savefig('../figs/figS10_plots.svg', bbox_inches='tight')
# %%
the-stack_0_11916
"""Locale support module.
The module provides low-level access to the C lib's locale APIs and adds high
level number formatting APIs as well as a locale aliasing engine to complement
these.
The aliasing engine includes support for many commonly used locale names and
maps them to values suitable for passing to the C lib's setlocale() function. It
also includes default encodings for all supported locale names.
"""
import sys
import encodings
import encodings.aliases
import re
import _collections_abc
from builtins import str as _builtin_str
import functools
# Try importing the _locale module.
#
# If this fails, fall back on a basic 'C' locale emulation.
# Yuck: LC_MESSAGES is non-standard: can't tell whether it exists before
# trying the import. So __all__ is also fiddled at the end of the file.
__all__ = ["getlocale", "getdefaultlocale", "getpreferredencoding", "Error",
"setlocale", "resetlocale", "localeconv", "strcoll", "strxfrm",
"str", "atof", "atoi", "format", "format_string", "currency",
"normalize", "LC_CTYPE", "LC_COLLATE", "LC_TIME", "LC_MONETARY",
"LC_NUMERIC", "LC_ALL", "CHAR_MAX"]
def _strcoll(a,b):
""" strcoll(string,string) -> int.
Compares two strings according to the locale.
"""
return (a > b) - (a < b)
def _strxfrm(s):
""" strxfrm(string) -> string.
Returns a string that behaves for cmp locale-aware.
"""
return s
try:
from _locale import *
except ImportError:
# Locale emulation
CHAR_MAX = 127
LC_ALL = 6
LC_COLLATE = 3
LC_CTYPE = 0
LC_MESSAGES = 5
LC_MONETARY = 4
LC_NUMERIC = 1
LC_TIME = 2
Error = ValueError
def localeconv():
""" localeconv() -> dict.
Returns numeric and monetary locale-specific parameters.
"""
# 'C' locale default values
return {'grouping': [127],
'currency_symbol': '',
'n_sign_posn': 127,
'p_cs_precedes': 127,
'n_cs_precedes': 127,
'mon_grouping': [],
'n_sep_by_space': 127,
'decimal_point': '.',
'negative_sign': '',
'positive_sign': '',
'p_sep_by_space': 127,
'int_curr_symbol': '',
'p_sign_posn': 127,
'thousands_sep': '',
'mon_thousands_sep': '',
'frac_digits': 127,
'mon_decimal_point': '',
'int_frac_digits': 127}
def setlocale(category, value=None):
""" setlocale(integer,string=None) -> string.
Activates/queries locale processing.
"""
if value not in (None, '', 'C'):
raise Error('_locale emulation only supports "C" locale')
return 'C'
# These may or may not exist in _locale, so be sure to set them.
if 'strxfrm' not in globals():
strxfrm = _strxfrm
if 'strcoll' not in globals():
strcoll = _strcoll
_localeconv = localeconv
# With this dict, you can override some items of localeconv's return value.
# This is useful for testing purposes.
_override_localeconv = {}
@functools.wraps(_localeconv)
def localeconv():
d = _localeconv()
if _override_localeconv:
d.update(_override_localeconv)
return d
### Number formatting APIs
# Author: Martin von Loewis
# improved by Georg Brandl
# Iterate over grouping intervals
def _grouping_intervals(grouping):
last_interval = None
for interval in grouping:
# if grouping is -1, we are done
if interval == CHAR_MAX:
return
# 0: re-use last group ad infinitum
if interval == 0:
if last_interval is None:
raise ValueError("invalid grouping")
while True:
yield last_interval
yield interval
last_interval = interval
#perform the grouping from right to left
def _group(s, monetary=False):
conv = localeconv()
thousands_sep = conv[monetary and 'mon_thousands_sep' or 'thousands_sep']
grouping = conv[monetary and 'mon_grouping' or 'grouping']
if not grouping:
return (s, 0)
if s[-1] == ' ':
stripped = s.rstrip()
right_spaces = s[len(stripped):]
s = stripped
else:
right_spaces = ''
left_spaces = ''
groups = []
for interval in _grouping_intervals(grouping):
if not s or s[-1] not in "0123456789":
# only non-digit characters remain (sign, spaces)
left_spaces = s
s = ''
break
groups.append(s[-interval:])
s = s[:-interval]
if s:
groups.append(s)
groups.reverse()
return (
left_spaces + thousands_sep.join(groups) + right_spaces,
len(thousands_sep) * (len(groups) - 1)
)
# Strip a given amount of excess padding from the given string
def _strip_padding(s, amount):
lpos = 0
while amount and s[lpos] == ' ':
lpos += 1
amount -= 1
rpos = len(s) - 1
while amount and s[rpos] == ' ':
rpos -= 1
amount -= 1
return s[lpos:rpos+1]
_percent_re = re.compile(r'%(?:\((?P<key>.*?)\))?'
r'(?P<modifiers>[-#0-9 +*.hlL]*?)[eEfFgGdiouxXcrs%]')
def _format(percent, value, grouping=False, monetary=False, *additional):
if additional:
formatted = percent % ((value,) + additional)
else:
formatted = percent % value
# floats and decimal ints need special action!
if percent[-1] in 'eEfFgG':
seps = 0
parts = formatted.split('.')
if grouping:
parts[0], seps = _group(parts[0], monetary=monetary)
decimal_point = localeconv()[monetary and 'mon_decimal_point'
or 'decimal_point']
formatted = decimal_point.join(parts)
if seps:
formatted = _strip_padding(formatted, seps)
elif percent[-1] in 'diu':
seps = 0
if grouping:
formatted, seps = _group(formatted, monetary=monetary)
if seps:
formatted = _strip_padding(formatted, seps)
return formatted
def format_string(f, val, grouping=False, monetary=False):
"""Formats a string in the same way that the % formatting would use,
but takes the current locale into account.
Grouping is applied if the third parameter is true.
Conversion uses monetary thousands separator and grouping strings if
forth parameter monetary is true."""
percents = list(_percent_re.finditer(f))
new_f = _percent_re.sub('%s', f)
if isinstance(val, _collections_abc.Mapping):
new_val = []
for perc in percents:
if perc.group()[-1]=='%':
new_val.append('%')
else:
new_val.append(_format(perc.group(), val, grouping, monetary))
else:
if not isinstance(val, tuple):
val = (val,)
new_val = []
i = 0
for perc in percents:
if perc.group()[-1]=='%':
new_val.append('%')
else:
starcount = perc.group('modifiers').count('*')
new_val.append(_format(perc.group(),
val[i],
grouping,
monetary,
*val[i+1:i+1+starcount]))
i += (1 + starcount)
val = tuple(new_val)
return new_f % val
def format(percent, value, grouping=False, monetary=False, *additional):
"""Deprecated, use format_string instead."""
import warnings
warnings.warn(
"This method will be removed in a future version of Python. "
"Use 'locale.format_string()' instead.",
DeprecationWarning, stacklevel=2
)
match = _percent_re.match(percent)
if not match or len(match.group())!= len(percent):
raise ValueError(("format() must be given exactly one %%char "
"format specifier, %s not valid") % repr(percent))
return _format(percent, value, grouping, monetary, *additional)
def currency(val, symbol=True, grouping=False, international=False):
"""Formats val according to the currency settings
in the current locale."""
conv = localeconv()
# check for illegal values
digits = conv[international and 'int_frac_digits' or 'frac_digits']
if digits == 127:
raise ValueError("Currency formatting is not possible using "
"the 'C' locale.")
s = _format('%%.%if' % digits, abs(val), grouping, monetary=True)
# '<' and '>' are markers if the sign must be inserted between symbol and value
s = '<' + s + '>'
if symbol:
smb = conv[international and 'int_curr_symbol' or 'currency_symbol']
precedes = conv[val<0 and 'n_cs_precedes' or 'p_cs_precedes']
separated = conv[val<0 and 'n_sep_by_space' or 'p_sep_by_space']
if precedes:
s = smb + (separated and ' ' or '') + s
else:
if international and smb[-1] == ' ':
smb = smb[:-1]
s = s + (separated and ' ' or '') + smb
sign_pos = conv[val<0 and 'n_sign_posn' or 'p_sign_posn']
sign = conv[val<0 and 'negative_sign' or 'positive_sign']
if sign_pos == 0:
s = '(' + s + ')'
elif sign_pos == 1:
s = sign + s
elif sign_pos == 2:
s = s + sign
elif sign_pos == 3:
s = s.replace('<', sign)
elif sign_pos == 4:
s = s.replace('>', sign)
else:
# the default if nothing specified;
# this should be the most fitting sign position
s = sign + s
return s.replace('<', '').replace('>', '')
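# A short usage sketch (assumes an 'en_US.UTF-8' locale is installed; the
# exact output depends on the platform's locale database):
def _currency_example():
    setlocale(LC_MONETARY, 'en_US.UTF-8')
    return currency(1234.56, grouping=True)   # typically '$1,234.56'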
def str(val):
"""Convert float to string, taking the locale into account."""
return _format("%.12g", val)
def delocalize(string):
"Parses a string as a normalized number according to the locale settings."
conv = localeconv()
#First, get rid of the grouping
ts = conv['thousands_sep']
if ts:
string = string.replace(ts, '')
#next, replace the decimal point with a dot
dd = conv['decimal_point']
if dd:
string = string.replace(dd, '.')
return string
def atof(string, func=float):
"Parses a string as a float according to the locale settings."
return func(delocalize(string))
def atoi(string):
"Converts a string to an integer according to the locale settings."
return int(delocalize(string))
def _test():
setlocale(LC_ALL, "")
#do grouping
s1 = format_string("%d", 123456789,1)
print(s1, "is", atoi(s1))
#standard formatting
s1 = str(3.14)
print(s1, "is", atof(s1))
### Locale name aliasing engine
# Author: Marc-Andre Lemburg, [email protected]
# Various tweaks by Fredrik Lundh <[email protected]>
# store away the low-level version of setlocale (it's
# overridden below)
_setlocale = setlocale
def _replace_encoding(code, encoding):
if '.' in code:
langname = code[:code.index('.')]
else:
langname = code
# Convert the encoding to a C lib compatible encoding string
norm_encoding = encodings.normalize_encoding(encoding)
#print('norm encoding: %r' % norm_encoding)
norm_encoding = encodings.aliases.aliases.get(norm_encoding.lower(),
norm_encoding)
#print('aliased encoding: %r' % norm_encoding)
encoding = norm_encoding
norm_encoding = norm_encoding.lower()
if norm_encoding in locale_encoding_alias:
encoding = locale_encoding_alias[norm_encoding]
else:
norm_encoding = norm_encoding.replace('_', '')
norm_encoding = norm_encoding.replace('-', '')
if norm_encoding in locale_encoding_alias:
encoding = locale_encoding_alias[norm_encoding]
#print('found encoding %r' % encoding)
return langname + '.' + encoding
def _append_modifier(code, modifier):
if modifier == 'euro':
if '.' not in code:
return code + '.ISO8859-15'
_, _, encoding = code.partition('.')
if encoding in ('ISO8859-15', 'UTF-8'):
return code
if encoding == 'ISO8859-1':
return _replace_encoding(code, 'ISO8859-15')
return code + '@' + modifier
def normalize(localename):
""" Returns a normalized locale code for the given locale
name.
The returned locale code is formatted for use with
setlocale().
If normalization fails, the original name is returned
unchanged.
If the given encoding is not known, the function defaults to
the default encoding for the locale code just like setlocale()
does.
"""
# Normalize the locale name and extract the encoding and modifier
code = localename.lower()
if ':' in code:
# ':' is sometimes used as encoding delimiter.
code = code.replace(':', '.')
if '@' in code:
code, modifier = code.split('@', 1)
else:
modifier = ''
if '.' in code:
langname, encoding = code.split('.')[:2]
else:
langname = code
encoding = ''
# First lookup: fullname (possibly with encoding and modifier)
lang_enc = langname
if encoding:
norm_encoding = encoding.replace('-', '')
norm_encoding = norm_encoding.replace('_', '')
lang_enc += '.' + norm_encoding
lookup_name = lang_enc
if modifier:
lookup_name += '@' + modifier
code = locale_alias.get(lookup_name, None)
if code is not None:
return code
#print('first lookup failed')
if modifier:
# Second try: fullname without modifier (possibly with encoding)
code = locale_alias.get(lang_enc, None)
if code is not None:
#print('lookup without modifier succeeded')
if '@' not in code:
return _append_modifier(code, modifier)
if code.split('@', 1)[1].lower() == modifier:
return code
#print('second lookup failed')
if encoding:
# Third try: langname (without encoding, possibly with modifier)
lookup_name = langname
if modifier:
lookup_name += '@' + modifier
code = locale_alias.get(lookup_name, None)
if code is not None:
#print('lookup without encoding succeeded')
if '@' not in code:
return _replace_encoding(code, encoding)
code, modifier = code.split('@', 1)
return _replace_encoding(code, encoding) + '@' + modifier
if modifier:
# Fourth try: langname (without encoding and modifier)
code = locale_alias.get(langname, None)
if code is not None:
#print('lookup without modifier and encoding succeeded')
if '@' not in code:
code = _replace_encoding(code, encoding)
return _append_modifier(code, modifier)
code, defmod = code.split('@', 1)
if defmod.lower() == modifier:
return _replace_encoding(code, encoding) + '@' + defmod
return localename
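# A short usage sketch; the expected results follow from the alias tables
# defined further below (they are only consulted when normalize() is called):
def _normalize_example():
    return (normalize('de_DE'),        # 'de_DE.ISO8859-1'
            normalize('de_DE.utf8'),   # 'de_DE.UTF-8'
            normalize('de_DE@euro'))   # 'de_DE.ISO8859-15'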
def _parse_localename(localename):
""" Parses the locale code for localename and returns the
result as tuple (language code, encoding).
The localename is normalized and passed through the locale
alias engine. A ValueError is raised in case the locale name
cannot be parsed.
The language code corresponds to RFC 1766. code and encoding
can be None in case the values cannot be determined or are
unknown to this implementation.
"""
code = normalize(localename)
if '@' in code:
# Deal with locale modifiers
code, modifier = code.split('@', 1)
if modifier == 'euro' and '.' not in code:
# Assume Latin-9 for @euro locales. This is bogus,
# since some systems may use other encodings for these
# locales. Also, we ignore other modifiers.
return code, 'iso-8859-15'
if '.' in code:
return tuple(code.split('.')[:2])
elif code == 'C':
return None, None
elif code == 'UTF-8':
# On macOS "LC_CTYPE=UTF-8" is a valid locale setting
# for getting UTF-8 handling for text.
return None, 'UTF-8'
raise ValueError('unknown locale: %s' % localename)
def _build_localename(localetuple):
""" Builds a locale code from the given tuple (language code,
encoding).
No aliasing or normalizing takes place.
"""
try:
language, encoding = localetuple
if language is None:
language = 'C'
if encoding is None:
return language
else:
return language + '.' + encoding
except (TypeError, ValueError):
raise TypeError('Locale must be None, a string, or an iterable of '
'two strings -- language code, encoding.') from None
def getdefaultlocale(envvars=('LC_ALL', 'LC_CTYPE', 'LANG', 'LANGUAGE')):
""" Tries to determine the default locale settings and returns
them as tuple (language code, encoding).
According to POSIX, a program which has not called
setlocale(LC_ALL, "") runs using the portable 'C' locale.
Calling setlocale(LC_ALL, "") lets it use the default locale as
defined by the LANG variable. Since we don't want to interfere
with the current locale setting we thus emulate the behavior
in the way described above.
To maintain compatibility with other platforms, not only the
LANG variable is tested, but a list of variables given as
envvars parameter. The first found to be defined will be
used. envvars defaults to the search path used in GNU gettext;
it must always contain the variable name 'LANG'.
Except for the code 'C', the language code corresponds to RFC
1766. code and encoding can be None in case the values cannot
be determined.
"""
try:
# check if it's supported by the _locale module
import _locale
code, encoding = _locale._getdefaultlocale()
except (ImportError, AttributeError):
pass
else:
# make sure the code/encoding values are valid
if sys.platform == "win32" and code and code[:2] == "0x":
# map windows language identifier to language name
code = windows_locale.get(int(code, 0))
# ...add other platform-specific processing here, if
# necessary...
return code, encoding
# fall back on POSIX behaviour
import os
lookup = os.environ.get
for variable in envvars:
localename = lookup(variable,None)
if localename:
if variable == 'LANGUAGE':
localename = localename.split(':')[0]
break
else:
localename = 'C'
return _parse_localename(localename)
def getlocale(category=LC_CTYPE):
""" Returns the current setting for the given locale category as
tuple (language code, encoding).
category may be one of the LC_* value except LC_ALL. It
defaults to LC_CTYPE.
Except for the code 'C', the language code corresponds to RFC
1766. code and encoding can be None in case the values cannot
be determined.
"""
localename = _setlocale(category)
if category == LC_ALL and ';' in localename:
raise TypeError('category LC_ALL is not supported')
return _parse_localename(localename)
def setlocale(category, locale=None):
""" Set the locale for the given category. The locale can be
a string, an iterable of two strings (language code and encoding),
or None.
Iterables are converted to strings using the locale aliasing
engine. Locale strings are passed directly to the C lib.
category may be given as one of the LC_* values.
"""
if locale and not isinstance(locale, _builtin_str):
# convert to string
locale = normalize(_build_localename(locale))
return _setlocale(category, locale)
def resetlocale(category=LC_ALL):
""" Sets the locale for category to the default setting.
The default setting is determined by calling
getdefaultlocale(). category defaults to LC_ALL.
"""
_setlocale(category, _build_localename(getdefaultlocale()))
if sys.platform.startswith("win"):
# On Win32, this will return the ANSI code page
def getpreferredencoding(do_setlocale = True):
"""Return the charset that the user is likely using."""
if sys.flags.utf8_mode:
return 'UTF-8'
import _bootlocale
return _bootlocale.getpreferredencoding(False)
else:
# On Unix, if CODESET is available, use that.
try:
CODESET
except NameError:
if hasattr(sys, 'getandroidapilevel'):
# On Android langinfo.h and CODESET are missing, and UTF-8 is
# always used in mbstowcs() and wcstombs().
def getpreferredencoding(do_setlocale = True):
return 'UTF-8'
else:
# Fall back to parsing environment variables :-(
def getpreferredencoding(do_setlocale = True):
"""Return the charset that the user is likely using,
by looking at environment variables."""
if sys.flags.utf8_mode:
return 'UTF-8'
res = getdefaultlocale()[1]
if res is None:
# LANG not set, default conservatively to ASCII
res = 'ascii'
return res
else:
def getpreferredencoding(do_setlocale = True):
"""Return the charset that the user is likely using,
according to the system configuration."""
if sys.flags.utf8_mode:
return 'UTF-8'
import _bootlocale
if do_setlocale:
oldloc = setlocale(LC_CTYPE)
try:
setlocale(LC_CTYPE, "")
except Error:
pass
result = _bootlocale.getpreferredencoding(False)
if do_setlocale:
setlocale(LC_CTYPE, oldloc)
return result
### Database
#
# The following data was extracted from the locale.alias file which
# comes with X11 and then hand edited removing the explicit encoding
# definitions and adding some more aliases. The file is usually
# available as /usr/lib/X11/locale/locale.alias.
#
#
# The local_encoding_alias table maps lowercase encoding alias names
# to C locale encoding names (case-sensitive). Note that normalize()
# first looks up the encoding in the encodings.aliases dictionary and
# then applies this mapping to find the correct C lib name for the
# encoding.
#
locale_encoding_alias = {
# Mappings for non-standard encoding names used in locale names
'437': 'C',
'c': 'C',
'en': 'ISO8859-1',
'jis': 'JIS7',
'jis7': 'JIS7',
'ajec': 'eucJP',
'koi8c': 'KOI8-C',
'microsoftcp1251': 'CP1251',
'microsoftcp1255': 'CP1255',
'microsoftcp1256': 'CP1256',
'88591': 'ISO8859-1',
'88592': 'ISO8859-2',
'88595': 'ISO8859-5',
'885915': 'ISO8859-15',
# Mappings from Python codec names to C lib encoding names
'ascii': 'ISO8859-1',
'latin_1': 'ISO8859-1',
'iso8859_1': 'ISO8859-1',
'iso8859_10': 'ISO8859-10',
'iso8859_11': 'ISO8859-11',
'iso8859_13': 'ISO8859-13',
'iso8859_14': 'ISO8859-14',
'iso8859_15': 'ISO8859-15',
'iso8859_16': 'ISO8859-16',
'iso8859_2': 'ISO8859-2',
'iso8859_3': 'ISO8859-3',
'iso8859_4': 'ISO8859-4',
'iso8859_5': 'ISO8859-5',
'iso8859_6': 'ISO8859-6',
'iso8859_7': 'ISO8859-7',
'iso8859_8': 'ISO8859-8',
'iso8859_9': 'ISO8859-9',
'iso2022_jp': 'JIS7',
'shift_jis': 'SJIS',
'tactis': 'TACTIS',
'euc_jp': 'eucJP',
'euc_kr': 'eucKR',
'utf_8': 'UTF-8',
'koi8_r': 'KOI8-R',
'koi8_t': 'KOI8-T',
'koi8_u': 'KOI8-U',
'kz1048': 'RK1048',
'cp1251': 'CP1251',
'cp1255': 'CP1255',
'cp1256': 'CP1256',
# XXX This list is still incomplete. If you know more
# mappings, please file a bug report. Thanks.
}
for k, v in sorted(locale_encoding_alias.items()):
k = k.replace('_', '')
locale_encoding_alias.setdefault(k, v)
#
# The locale_alias table maps lowercase alias names to C locale names
# (case-sensitive). Encodings are always separated from the locale
# name using a dot ('.'); they should only be given in case the
# language name is needed to interpret the given encoding alias
# correctly (CJK codes often have this need).
#
# Note that the normalize() function which uses this tables
# removes '_' and '-' characters from the encoding part of the
# locale name before doing the lookup. This saves a lot of
# space in the table.
#
# MAL 2004-12-10:
# Updated alias mapping to most recent locale.alias file
# from X.org distribution using makelocalealias.py.
#
# These are the differences compared to the old mapping (Python 2.4
# and older):
#
# updated 'bg' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251'
# updated 'bg_bg' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251'
# updated 'bulgarian' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251'
# updated 'cz' -> 'cz_CZ.ISO8859-2' to 'cs_CZ.ISO8859-2'
# updated 'cz_cz' -> 'cz_CZ.ISO8859-2' to 'cs_CZ.ISO8859-2'
# updated 'czech' -> 'cs_CS.ISO8859-2' to 'cs_CZ.ISO8859-2'
# updated 'dutch' -> 'nl_BE.ISO8859-1' to 'nl_NL.ISO8859-1'
# updated 'et' -> 'et_EE.ISO8859-4' to 'et_EE.ISO8859-15'
# updated 'et_ee' -> 'et_EE.ISO8859-4' to 'et_EE.ISO8859-15'
# updated 'fi' -> 'fi_FI.ISO8859-1' to 'fi_FI.ISO8859-15'
# updated 'fi_fi' -> 'fi_FI.ISO8859-1' to 'fi_FI.ISO8859-15'
# updated 'iw' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8'
# updated 'iw_il' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8'
# updated 'japanese' -> 'ja_JP.SJIS' to 'ja_JP.eucJP'
# updated 'lt' -> 'lt_LT.ISO8859-4' to 'lt_LT.ISO8859-13'
# updated 'lv' -> 'lv_LV.ISO8859-4' to 'lv_LV.ISO8859-13'
# updated 'sl' -> 'sl_CS.ISO8859-2' to 'sl_SI.ISO8859-2'
# updated 'slovene' -> 'sl_CS.ISO8859-2' to 'sl_SI.ISO8859-2'
# updated 'th_th' -> 'th_TH.TACTIS' to 'th_TH.ISO8859-11'
# updated 'zh_cn' -> 'zh_CN.eucCN' to 'zh_CN.gb2312'
# updated 'zh_cn.big5' -> 'zh_TW.eucTW' to 'zh_TW.big5'
# updated 'zh_tw' -> 'zh_TW.eucTW' to 'zh_TW.big5'
#
# MAL 2008-05-30:
# Updated alias mapping to most recent locale.alias file
# from X.org distribution using makelocalealias.py.
#
# These are the differences compared to the old mapping (Python 2.5
# and older):
#
# updated 'cs_cs.iso88592' -> 'cs_CZ.ISO8859-2' to 'cs_CS.ISO8859-2'
# updated 'serbocroatian' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sh' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sh_hr.iso88592' -> 'sh_HR.ISO8859-2' to 'hr_HR.ISO8859-2'
# updated 'sh_sp' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sh_yu' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sp' -> 'sp_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sp_yu' -> 'sp_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr_sp' -> 'sr_SP.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sr_yu' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr_yu.cp1251@cyrillic' -> 'sr_YU.CP1251' to 'sr_CS.CP1251'
# updated 'sr_yu.iso88592' -> 'sr_YU.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sr_yu.iso88595' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr_yu.iso88595@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr_yu.microsoftcp1251@cyrillic' -> 'sr_YU.CP1251' to 'sr_CS.CP1251'
# updated 'sr_yu.utf8@cyrillic' -> 'sr_YU.UTF-8' to 'sr_CS.UTF-8'
# updated 'sr_yu@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
#
# AP 2010-04-12:
# Updated alias mapping to most recent locale.alias file
# from X.org distribution using makelocalealias.py.
#
# These are the differences compared to the old mapping (Python 2.6.5
# and older):
#
# updated 'ru' -> 'ru_RU.ISO8859-5' to 'ru_RU.UTF-8'
# updated 'ru_ru' -> 'ru_RU.ISO8859-5' to 'ru_RU.UTF-8'
# updated 'serbocroatian' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin'
# updated 'sh' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin'
# updated 'sh_yu' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin'
# updated 'sr' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8'
# updated 'sr@cyrillic' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8'
# updated 'sr@latn' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin'
# updated 'sr_cs.utf8@latn' -> 'sr_CS.UTF-8' to 'sr_RS.UTF-8@latin'
# updated 'sr_cs@latn' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin'
# updated 'sr_yu' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8@latin'
# updated 'sr_yu.utf8@cyrillic' -> 'sr_CS.UTF-8' to 'sr_RS.UTF-8'
# updated 'sr_yu@cyrillic' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8'
#
# SS 2013-12-20:
# Updated alias mapping to most recent locale.alias file
# from X.org distribution using makelocalealias.py.
#
# These are the differences compared to the old mapping (Python 3.3.3
# and older):
#
# updated 'a3' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C'
# updated 'a3_az' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C'
# updated 'a3_az.koi8c' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C'
# updated 'cs_cs.iso88592' -> 'cs_CS.ISO8859-2' to 'cs_CZ.ISO8859-2'
# updated 'hebrew' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8'
# updated 'hebrew.iso88598' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8'
# updated 'sd' -> 'sd_IN@devanagari.UTF-8' to 'sd_IN.UTF-8'
# updated 'sr@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin'
# updated 'sr_cs' -> 'sr_RS.UTF-8' to 'sr_CS.UTF-8'
# updated 'sr_cs.utf8@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin'
# updated 'sr_cs@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin'
#
# SS 2014-10-01:
# Updated alias mapping with glibc 2.19 supported locales.
#
# SS 2018-05-05:
# Updated alias mapping with glibc 2.27 supported locales.
#
# These are the differences compared to the old mapping (Python 3.6.5
# and older):
#
# updated 'ca_es@valencia' -> 'ca_ES.ISO8859-15@valencia' to 'ca_ES.UTF-8@valencia'
# updated 'kk_kz' -> 'kk_KZ.RK1048' to 'kk_KZ.ptcp154'
# updated 'russian' -> 'ru_RU.ISO8859-5' to 'ru_RU.KOI8-R'
locale_alias = {
'a3': 'az_AZ.KOI8-C',
'a3_az': 'az_AZ.KOI8-C',
'a3_az.koic': 'az_AZ.KOI8-C',
'aa_dj': 'aa_DJ.ISO8859-1',
'aa_er': 'aa_ER.UTF-8',
'aa_et': 'aa_ET.UTF-8',
'af': 'af_ZA.ISO8859-1',
'af_za': 'af_ZA.ISO8859-1',
'agr_pe': 'agr_PE.UTF-8',
'ak_gh': 'ak_GH.UTF-8',
'am': 'am_ET.UTF-8',
'am_et': 'am_ET.UTF-8',
'american': 'en_US.ISO8859-1',
'an_es': 'an_ES.ISO8859-15',
'anp_in': 'anp_IN.UTF-8',
'ar': 'ar_AA.ISO8859-6',
'ar_aa': 'ar_AA.ISO8859-6',
'ar_ae': 'ar_AE.ISO8859-6',
'ar_bh': 'ar_BH.ISO8859-6',
'ar_dz': 'ar_DZ.ISO8859-6',
'ar_eg': 'ar_EG.ISO8859-6',
'ar_in': 'ar_IN.UTF-8',
'ar_iq': 'ar_IQ.ISO8859-6',
'ar_jo': 'ar_JO.ISO8859-6',
'ar_kw': 'ar_KW.ISO8859-6',
'ar_lb': 'ar_LB.ISO8859-6',
'ar_ly': 'ar_LY.ISO8859-6',
'ar_ma': 'ar_MA.ISO8859-6',
'ar_om': 'ar_OM.ISO8859-6',
'ar_qa': 'ar_QA.ISO8859-6',
'ar_sa': 'ar_SA.ISO8859-6',
'ar_sd': 'ar_SD.ISO8859-6',
'ar_ss': 'ar_SS.UTF-8',
'ar_sy': 'ar_SY.ISO8859-6',
'ar_tn': 'ar_TN.ISO8859-6',
'ar_ye': 'ar_YE.ISO8859-6',
'arabic': 'ar_AA.ISO8859-6',
'as': 'as_IN.UTF-8',
'as_in': 'as_IN.UTF-8',
'ast_es': 'ast_ES.ISO8859-15',
'ayc_pe': 'ayc_PE.UTF-8',
'az': 'az_AZ.ISO8859-9E',
'az_az': 'az_AZ.ISO8859-9E',
'az_az.iso88599e': 'az_AZ.ISO8859-9E',
'az_ir': 'az_IR.UTF-8',
'be': 'be_BY.CP1251',
'be@latin': 'be_BY.UTF-8@latin',
'be_bg.utf8': 'bg_BG.UTF-8',
'be_by': 'be_BY.CP1251',
'be_by@latin': 'be_BY.UTF-8@latin',
'bem_zm': 'bem_ZM.UTF-8',
'ber_dz': 'ber_DZ.UTF-8',
'ber_ma': 'ber_MA.UTF-8',
'bg': 'bg_BG.CP1251',
'bg_bg': 'bg_BG.CP1251',
'bhb_in.utf8': 'bhb_IN.UTF-8',
'bho_in': 'bho_IN.UTF-8',
'bho_np': 'bho_NP.UTF-8',
'bi_vu': 'bi_VU.UTF-8',
'bn_bd': 'bn_BD.UTF-8',
'bn_in': 'bn_IN.UTF-8',
'bo_cn': 'bo_CN.UTF-8',
'bo_in': 'bo_IN.UTF-8',
'bokmal': 'nb_NO.ISO8859-1',
'bokm\xe5l': 'nb_NO.ISO8859-1',
'br': 'br_FR.ISO8859-1',
'br_fr': 'br_FR.ISO8859-1',
'brx_in': 'brx_IN.UTF-8',
'bs': 'bs_BA.ISO8859-2',
'bs_ba': 'bs_BA.ISO8859-2',
'bulgarian': 'bg_BG.CP1251',
'byn_er': 'byn_ER.UTF-8',
'c': 'C',
'c-french': 'fr_CA.ISO8859-1',
'c.ascii': 'C',
'c.en': 'C',
'c.iso88591': 'en_US.ISO8859-1',
'c.utf8': 'en_US.UTF-8',
'c_c': 'C',
'c_c.c': 'C',
'ca': 'ca_ES.ISO8859-1',
'ca_ad': 'ca_AD.ISO8859-1',
'ca_es': 'ca_ES.ISO8859-1',
'ca_es@valencia': 'ca_ES.UTF-8@valencia',
'ca_fr': 'ca_FR.ISO8859-1',
'ca_it': 'ca_IT.ISO8859-1',
'catalan': 'ca_ES.ISO8859-1',
'ce_ru': 'ce_RU.UTF-8',
'cextend': 'en_US.ISO8859-1',
'chinese-s': 'zh_CN.eucCN',
'chinese-t': 'zh_TW.eucTW',
'chr_us': 'chr_US.UTF-8',
'ckb_iq': 'ckb_IQ.UTF-8',
'cmn_tw': 'cmn_TW.UTF-8',
'crh_ua': 'crh_UA.UTF-8',
'croatian': 'hr_HR.ISO8859-2',
'cs': 'cs_CZ.ISO8859-2',
'cs_cs': 'cs_CZ.ISO8859-2',
'cs_cz': 'cs_CZ.ISO8859-2',
'csb_pl': 'csb_PL.UTF-8',
'cv_ru': 'cv_RU.UTF-8',
'cy': 'cy_GB.ISO8859-1',
'cy_gb': 'cy_GB.ISO8859-1',
'cz': 'cs_CZ.ISO8859-2',
'cz_cz': 'cs_CZ.ISO8859-2',
'czech': 'cs_CZ.ISO8859-2',
'da': 'da_DK.ISO8859-1',
'da_dk': 'da_DK.ISO8859-1',
'danish': 'da_DK.ISO8859-1',
'dansk': 'da_DK.ISO8859-1',
'de': 'de_DE.ISO8859-1',
'de_at': 'de_AT.ISO8859-1',
'de_be': 'de_BE.ISO8859-1',
'de_ch': 'de_CH.ISO8859-1',
'de_de': 'de_DE.ISO8859-1',
'de_it': 'de_IT.ISO8859-1',
'de_li.utf8': 'de_LI.UTF-8',
'de_lu': 'de_LU.ISO8859-1',
'deutsch': 'de_DE.ISO8859-1',
'doi_in': 'doi_IN.UTF-8',
'dutch': 'nl_NL.ISO8859-1',
'dutch.iso88591': 'nl_BE.ISO8859-1',
'dv_mv': 'dv_MV.UTF-8',
'dz_bt': 'dz_BT.UTF-8',
'ee': 'ee_EE.ISO8859-4',
'ee_ee': 'ee_EE.ISO8859-4',
'eesti': 'et_EE.ISO8859-1',
'el': 'el_GR.ISO8859-7',
'el_cy': 'el_CY.ISO8859-7',
'el_gr': 'el_GR.ISO8859-7',
'el_gr@euro': 'el_GR.ISO8859-15',
'en': 'en_US.ISO8859-1',
'en_ag': 'en_AG.UTF-8',
'en_au': 'en_AU.ISO8859-1',
'en_be': 'en_BE.ISO8859-1',
'en_bw': 'en_BW.ISO8859-1',
'en_ca': 'en_CA.ISO8859-1',
'en_dk': 'en_DK.ISO8859-1',
'en_dl.utf8': 'en_DL.UTF-8',
'en_gb': 'en_GB.ISO8859-1',
'en_hk': 'en_HK.ISO8859-1',
'en_ie': 'en_IE.ISO8859-1',
'en_il': 'en_IL.UTF-8',
'en_in': 'en_IN.ISO8859-1',
'en_ng': 'en_NG.UTF-8',
'en_nz': 'en_NZ.ISO8859-1',
'en_ph': 'en_PH.ISO8859-1',
'en_sc.utf8': 'en_SC.UTF-8',
'en_sg': 'en_SG.ISO8859-1',
'en_uk': 'en_GB.ISO8859-1',
'en_us': 'en_US.ISO8859-1',
'en_us@euro@euro': 'en_US.ISO8859-15',
'en_za': 'en_ZA.ISO8859-1',
'en_zm': 'en_ZM.UTF-8',
'en_zw': 'en_ZW.ISO8859-1',
'en_zw.utf8': 'en_ZS.UTF-8',
'eng_gb': 'en_GB.ISO8859-1',
'english': 'en_EN.ISO8859-1',
'english.iso88591': 'en_US.ISO8859-1',
'english_uk': 'en_GB.ISO8859-1',
'english_united-states': 'en_US.ISO8859-1',
'english_united-states.437': 'C',
'english_us': 'en_US.ISO8859-1',
'eo': 'eo_XX.ISO8859-3',
'eo.utf8': 'eo.UTF-8',
'eo_eo': 'eo_EO.ISO8859-3',
'eo_us.utf8': 'eo_US.UTF-8',
'eo_xx': 'eo_XX.ISO8859-3',
'es': 'es_ES.ISO8859-1',
'es_ar': 'es_AR.ISO8859-1',
'es_bo': 'es_BO.ISO8859-1',
'es_cl': 'es_CL.ISO8859-1',
'es_co': 'es_CO.ISO8859-1',
'es_cr': 'es_CR.ISO8859-1',
'es_cu': 'es_CU.UTF-8',
'es_do': 'es_DO.ISO8859-1',
'es_ec': 'es_EC.ISO8859-1',
'es_es': 'es_ES.ISO8859-1',
'es_gt': 'es_GT.ISO8859-1',
'es_hn': 'es_HN.ISO8859-1',
'es_mx': 'es_MX.ISO8859-1',
'es_ni': 'es_NI.ISO8859-1',
'es_pa': 'es_PA.ISO8859-1',
'es_pe': 'es_PE.ISO8859-1',
'es_pr': 'es_PR.ISO8859-1',
'es_py': 'es_PY.ISO8859-1',
'es_sv': 'es_SV.ISO8859-1',
'es_us': 'es_US.ISO8859-1',
'es_uy': 'es_UY.ISO8859-1',
'es_ve': 'es_VE.ISO8859-1',
'estonian': 'et_EE.ISO8859-1',
'et': 'et_EE.ISO8859-15',
'et_ee': 'et_EE.ISO8859-15',
'eu': 'eu_ES.ISO8859-1',
'eu_es': 'eu_ES.ISO8859-1',
'eu_fr': 'eu_FR.ISO8859-1',
'fa': 'fa_IR.UTF-8',
'fa_ir': 'fa_IR.UTF-8',
'fa_ir.isiri3342': 'fa_IR.ISIRI-3342',
'ff_sn': 'ff_SN.UTF-8',
'fi': 'fi_FI.ISO8859-15',
'fi_fi': 'fi_FI.ISO8859-15',
'fil_ph': 'fil_PH.UTF-8',
'finnish': 'fi_FI.ISO8859-1',
'fo': 'fo_FO.ISO8859-1',
'fo_fo': 'fo_FO.ISO8859-1',
'fr': 'fr_FR.ISO8859-1',
'fr_be': 'fr_BE.ISO8859-1',
'fr_ca': 'fr_CA.ISO8859-1',
'fr_ch': 'fr_CH.ISO8859-1',
'fr_fr': 'fr_FR.ISO8859-1',
'fr_lu': 'fr_LU.ISO8859-1',
'fran\xe7ais': 'fr_FR.ISO8859-1',
'fre_fr': 'fr_FR.ISO8859-1',
'french': 'fr_FR.ISO8859-1',
'french.iso88591': 'fr_CH.ISO8859-1',
'french_france': 'fr_FR.ISO8859-1',
'fur_it': 'fur_IT.UTF-8',
'fy_de': 'fy_DE.UTF-8',
'fy_nl': 'fy_NL.UTF-8',
'ga': 'ga_IE.ISO8859-1',
'ga_ie': 'ga_IE.ISO8859-1',
'galego': 'gl_ES.ISO8859-1',
'galician': 'gl_ES.ISO8859-1',
'gd': 'gd_GB.ISO8859-1',
'gd_gb': 'gd_GB.ISO8859-1',
'ger_de': 'de_DE.ISO8859-1',
'german': 'de_DE.ISO8859-1',
'german.iso88591': 'de_CH.ISO8859-1',
'german_germany': 'de_DE.ISO8859-1',
'gez_er': 'gez_ER.UTF-8',
'gez_et': 'gez_ET.UTF-8',
'gl': 'gl_ES.ISO8859-1',
'gl_es': 'gl_ES.ISO8859-1',
'greek': 'el_GR.ISO8859-7',
'gu_in': 'gu_IN.UTF-8',
'gv': 'gv_GB.ISO8859-1',
'gv_gb': 'gv_GB.ISO8859-1',
'ha_ng': 'ha_NG.UTF-8',
'hak_tw': 'hak_TW.UTF-8',
'he': 'he_IL.ISO8859-8',
'he_il': 'he_IL.ISO8859-8',
'hebrew': 'he_IL.ISO8859-8',
'hi': 'hi_IN.ISCII-DEV',
'hi_in': 'hi_IN.ISCII-DEV',
'hi_in.isciidev': 'hi_IN.ISCII-DEV',
'hif_fj': 'hif_FJ.UTF-8',
'hne': 'hne_IN.UTF-8',
'hne_in': 'hne_IN.UTF-8',
'hr': 'hr_HR.ISO8859-2',
'hr_hr': 'hr_HR.ISO8859-2',
'hrvatski': 'hr_HR.ISO8859-2',
'hsb_de': 'hsb_DE.ISO8859-2',
'ht_ht': 'ht_HT.UTF-8',
'hu': 'hu_HU.ISO8859-2',
'hu_hu': 'hu_HU.ISO8859-2',
'hungarian': 'hu_HU.ISO8859-2',
'hy_am': 'hy_AM.UTF-8',
'hy_am.armscii8': 'hy_AM.ARMSCII_8',
'ia': 'ia.UTF-8',
'ia_fr': 'ia_FR.UTF-8',
'icelandic': 'is_IS.ISO8859-1',
'id': 'id_ID.ISO8859-1',
'id_id': 'id_ID.ISO8859-1',
'ig_ng': 'ig_NG.UTF-8',
'ik_ca': 'ik_CA.UTF-8',
'in': 'id_ID.ISO8859-1',
'in_id': 'id_ID.ISO8859-1',
'is': 'is_IS.ISO8859-1',
'is_is': 'is_IS.ISO8859-1',
'iso-8859-1': 'en_US.ISO8859-1',
'iso-8859-15': 'en_US.ISO8859-15',
'iso8859-1': 'en_US.ISO8859-1',
'iso8859-15': 'en_US.ISO8859-15',
'iso_8859_1': 'en_US.ISO8859-1',
'iso_8859_15': 'en_US.ISO8859-15',
'it': 'it_IT.ISO8859-1',
'it_ch': 'it_CH.ISO8859-1',
'it_it': 'it_IT.ISO8859-1',
'italian': 'it_IT.ISO8859-1',
'iu': 'iu_CA.NUNACOM-8',
'iu_ca': 'iu_CA.NUNACOM-8',
'iu_ca.nunacom8': 'iu_CA.NUNACOM-8',
'iw': 'he_IL.ISO8859-8',
'iw_il': 'he_IL.ISO8859-8',
'iw_il.utf8': 'iw_IL.UTF-8',
'ja': 'ja_JP.eucJP',
'ja_jp': 'ja_JP.eucJP',
'ja_jp.euc': 'ja_JP.eucJP',
'ja_jp.mscode': 'ja_JP.SJIS',
'ja_jp.pck': 'ja_JP.SJIS',
'japan': 'ja_JP.eucJP',
'japanese': 'ja_JP.eucJP',
'japanese-euc': 'ja_JP.eucJP',
'japanese.euc': 'ja_JP.eucJP',
'jp_jp': 'ja_JP.eucJP',
'ka': 'ka_GE.GEORGIAN-ACADEMY',
'ka_ge': 'ka_GE.GEORGIAN-ACADEMY',
'ka_ge.georgianacademy': 'ka_GE.GEORGIAN-ACADEMY',
'ka_ge.georgianps': 'ka_GE.GEORGIAN-PS',
'ka_ge.georgianrs': 'ka_GE.GEORGIAN-ACADEMY',
'kab_dz': 'kab_DZ.UTF-8',
'kk_kz': 'kk_KZ.ptcp154',
'kl': 'kl_GL.ISO8859-1',
'kl_gl': 'kl_GL.ISO8859-1',
'km_kh': 'km_KH.UTF-8',
'kn': 'kn_IN.UTF-8',
'kn_in': 'kn_IN.UTF-8',
'ko': 'ko_KR.eucKR',
'ko_kr': 'ko_KR.eucKR',
'ko_kr.euc': 'ko_KR.eucKR',
'kok_in': 'kok_IN.UTF-8',
'korean': 'ko_KR.eucKR',
'korean.euc': 'ko_KR.eucKR',
'ks': 'ks_IN.UTF-8',
'ks_in': 'ks_IN.UTF-8',
    'ks_in@devanagari.utf8': 'ks_IN.UTF-8@devanagari',
'ku_tr': 'ku_TR.ISO8859-9',
'kw': 'kw_GB.ISO8859-1',
'kw_gb': 'kw_GB.ISO8859-1',
'ky': 'ky_KG.UTF-8',
'ky_kg': 'ky_KG.UTF-8',
'lb_lu': 'lb_LU.UTF-8',
'lg_ug': 'lg_UG.ISO8859-10',
'li_be': 'li_BE.UTF-8',
'li_nl': 'li_NL.UTF-8',
'lij_it': 'lij_IT.UTF-8',
'lithuanian': 'lt_LT.ISO8859-13',
'ln_cd': 'ln_CD.UTF-8',
'lo': 'lo_LA.MULELAO-1',
'lo_la': 'lo_LA.MULELAO-1',
'lo_la.cp1133': 'lo_LA.IBM-CP1133',
'lo_la.ibmcp1133': 'lo_LA.IBM-CP1133',
'lo_la.mulelao1': 'lo_LA.MULELAO-1',
'lt': 'lt_LT.ISO8859-13',
'lt_lt': 'lt_LT.ISO8859-13',
'lv': 'lv_LV.ISO8859-13',
'lv_lv': 'lv_LV.ISO8859-13',
'lzh_tw': 'lzh_TW.UTF-8',
'mag_in': 'mag_IN.UTF-8',
'mai': 'mai_IN.UTF-8',
'mai_in': 'mai_IN.UTF-8',
'mai_np': 'mai_NP.UTF-8',
'mfe_mu': 'mfe_MU.UTF-8',
'mg_mg': 'mg_MG.ISO8859-15',
'mhr_ru': 'mhr_RU.UTF-8',
'mi': 'mi_NZ.ISO8859-1',
'mi_nz': 'mi_NZ.ISO8859-1',
'miq_ni': 'miq_NI.UTF-8',
'mjw_in': 'mjw_IN.UTF-8',
'mk': 'mk_MK.ISO8859-5',
'mk_mk': 'mk_MK.ISO8859-5',
'ml': 'ml_IN.UTF-8',
'ml_in': 'ml_IN.UTF-8',
'mn_mn': 'mn_MN.UTF-8',
'mni_in': 'mni_IN.UTF-8',
'mr': 'mr_IN.UTF-8',
'mr_in': 'mr_IN.UTF-8',
'ms': 'ms_MY.ISO8859-1',
'ms_my': 'ms_MY.ISO8859-1',
'mt': 'mt_MT.ISO8859-3',
'mt_mt': 'mt_MT.ISO8859-3',
'my_mm': 'my_MM.UTF-8',
'nan_tw': 'nan_TW.UTF-8',
'nb': 'nb_NO.ISO8859-1',
'nb_no': 'nb_NO.ISO8859-1',
'nds_de': 'nds_DE.UTF-8',
'nds_nl': 'nds_NL.UTF-8',
'ne_np': 'ne_NP.UTF-8',
'nhn_mx': 'nhn_MX.UTF-8',
'niu_nu': 'niu_NU.UTF-8',
'niu_nz': 'niu_NZ.UTF-8',
'nl': 'nl_NL.ISO8859-1',
'nl_aw': 'nl_AW.UTF-8',
'nl_be': 'nl_BE.ISO8859-1',
'nl_nl': 'nl_NL.ISO8859-1',
'nn': 'nn_NO.ISO8859-1',
'nn_no': 'nn_NO.ISO8859-1',
'no': 'no_NO.ISO8859-1',
'no@nynorsk': 'ny_NO.ISO8859-1',
'no_no': 'no_NO.ISO8859-1',
'no_no.iso88591@bokmal': 'no_NO.ISO8859-1',
'no_no.iso88591@nynorsk': 'no_NO.ISO8859-1',
'norwegian': 'no_NO.ISO8859-1',
'nr': 'nr_ZA.ISO8859-1',
'nr_za': 'nr_ZA.ISO8859-1',
'nso': 'nso_ZA.ISO8859-15',
'nso_za': 'nso_ZA.ISO8859-15',
'ny': 'ny_NO.ISO8859-1',
'ny_no': 'ny_NO.ISO8859-1',
'nynorsk': 'nn_NO.ISO8859-1',
'oc': 'oc_FR.ISO8859-1',
'oc_fr': 'oc_FR.ISO8859-1',
'om_et': 'om_ET.UTF-8',
'om_ke': 'om_KE.ISO8859-1',
'or': 'or_IN.UTF-8',
'or_in': 'or_IN.UTF-8',
'os_ru': 'os_RU.UTF-8',
'pa': 'pa_IN.UTF-8',
'pa_in': 'pa_IN.UTF-8',
'pa_pk': 'pa_PK.UTF-8',
'pap_an': 'pap_AN.UTF-8',
'pap_aw': 'pap_AW.UTF-8',
'pap_cw': 'pap_CW.UTF-8',
'pd': 'pd_US.ISO8859-1',
'pd_de': 'pd_DE.ISO8859-1',
'pd_us': 'pd_US.ISO8859-1',
'ph': 'ph_PH.ISO8859-1',
'ph_ph': 'ph_PH.ISO8859-1',
'pl': 'pl_PL.ISO8859-2',
'pl_pl': 'pl_PL.ISO8859-2',
'polish': 'pl_PL.ISO8859-2',
'portuguese': 'pt_PT.ISO8859-1',
'portuguese_brazil': 'pt_BR.ISO8859-1',
'posix': 'C',
'posix-utf2': 'C',
'pp': 'pp_AN.ISO8859-1',
'pp_an': 'pp_AN.ISO8859-1',
'ps_af': 'ps_AF.UTF-8',
'pt': 'pt_PT.ISO8859-1',
'pt_br': 'pt_BR.ISO8859-1',
'pt_pt': 'pt_PT.ISO8859-1',
'quz_pe': 'quz_PE.UTF-8',
'raj_in': 'raj_IN.UTF-8',
'ro': 'ro_RO.ISO8859-2',
'ro_ro': 'ro_RO.ISO8859-2',
'romanian': 'ro_RO.ISO8859-2',
'ru': 'ru_RU.UTF-8',
'ru_ru': 'ru_RU.UTF-8',
'ru_ua': 'ru_UA.KOI8-U',
'rumanian': 'ro_RO.ISO8859-2',
'russian': 'ru_RU.KOI8-R',
'rw': 'rw_RW.ISO8859-1',
'rw_rw': 'rw_RW.ISO8859-1',
'sa_in': 'sa_IN.UTF-8',
'sat_in': 'sat_IN.UTF-8',
'sc_it': 'sc_IT.UTF-8',
'sd': 'sd_IN.UTF-8',
'sd_in': 'sd_IN.UTF-8',
'[email protected]': 'sd_IN.UTF-8@devanagari',
'sd_pk': 'sd_PK.UTF-8',
'se_no': 'se_NO.UTF-8',
'serbocroatian': 'sr_RS.UTF-8@latin',
'sgs_lt': 'sgs_LT.UTF-8',
'sh': 'sr_RS.UTF-8@latin',
'sh_ba.iso88592@bosnia': 'sr_CS.ISO8859-2',
'sh_hr': 'sh_HR.ISO8859-2',
'sh_hr.iso88592': 'hr_HR.ISO8859-2',
'sh_sp': 'sr_CS.ISO8859-2',
'sh_yu': 'sr_RS.UTF-8@latin',
'shn_mm': 'shn_MM.UTF-8',
'shs_ca': 'shs_CA.UTF-8',
'si': 'si_LK.UTF-8',
'si_lk': 'si_LK.UTF-8',
'sid_et': 'sid_ET.UTF-8',
'sinhala': 'si_LK.UTF-8',
'sk': 'sk_SK.ISO8859-2',
'sk_sk': 'sk_SK.ISO8859-2',
'sl': 'sl_SI.ISO8859-2',
'sl_cs': 'sl_CS.ISO8859-2',
'sl_si': 'sl_SI.ISO8859-2',
'slovak': 'sk_SK.ISO8859-2',
'slovene': 'sl_SI.ISO8859-2',
'slovenian': 'sl_SI.ISO8859-2',
'sm_ws': 'sm_WS.UTF-8',
'so_dj': 'so_DJ.ISO8859-1',
'so_et': 'so_ET.UTF-8',
'so_ke': 'so_KE.ISO8859-1',
'so_so': 'so_SO.ISO8859-1',
'sp': 'sr_CS.ISO8859-5',
'sp_yu': 'sr_CS.ISO8859-5',
'spanish': 'es_ES.ISO8859-1',
'spanish_spain': 'es_ES.ISO8859-1',
'sq': 'sq_AL.ISO8859-2',
'sq_al': 'sq_AL.ISO8859-2',
'sq_mk': 'sq_MK.UTF-8',
'sr': 'sr_RS.UTF-8',
'sr@cyrillic': 'sr_RS.UTF-8',
'sr@latn': 'sr_CS.UTF-8@latin',
'sr_cs': 'sr_CS.UTF-8',
'sr_cs.iso88592@latn': 'sr_CS.ISO8859-2',
'sr_cs@latn': 'sr_CS.UTF-8@latin',
'sr_me': 'sr_ME.UTF-8',
'sr_rs': 'sr_RS.UTF-8',
'sr_rs@latn': 'sr_RS.UTF-8@latin',
'sr_sp': 'sr_CS.ISO8859-2',
'sr_yu': 'sr_RS.UTF-8@latin',
'sr_yu.cp1251@cyrillic': 'sr_CS.CP1251',
'sr_yu.iso88592': 'sr_CS.ISO8859-2',
'sr_yu.iso88595': 'sr_CS.ISO8859-5',
'sr_yu.iso88595@cyrillic': 'sr_CS.ISO8859-5',
'sr_yu.microsoftcp1251@cyrillic': 'sr_CS.CP1251',
'sr_yu.utf8': 'sr_RS.UTF-8',
'sr_yu.utf8@cyrillic': 'sr_RS.UTF-8',
'sr_yu@cyrillic': 'sr_RS.UTF-8',
'ss': 'ss_ZA.ISO8859-1',
'ss_za': 'ss_ZA.ISO8859-1',
'st': 'st_ZA.ISO8859-1',
'st_za': 'st_ZA.ISO8859-1',
'sv': 'sv_SE.ISO8859-1',
'sv_fi': 'sv_FI.ISO8859-1',
'sv_se': 'sv_SE.ISO8859-1',
'sw_ke': 'sw_KE.UTF-8',
'sw_tz': 'sw_TZ.UTF-8',
'swedish': 'sv_SE.ISO8859-1',
'szl_pl': 'szl_PL.UTF-8',
'ta': 'ta_IN.TSCII-0',
'ta_in': 'ta_IN.TSCII-0',
'ta_in.tscii': 'ta_IN.TSCII-0',
'ta_in.tscii0': 'ta_IN.TSCII-0',
'ta_lk': 'ta_LK.UTF-8',
'tcy_in.utf8': 'tcy_IN.UTF-8',
'te': 'te_IN.UTF-8',
'te_in': 'te_IN.UTF-8',
'tg': 'tg_TJ.KOI8-C',
'tg_tj': 'tg_TJ.KOI8-C',
'th': 'th_TH.ISO8859-11',
'th_th': 'th_TH.ISO8859-11',
'th_th.tactis': 'th_TH.TIS620',
'th_th.tis620': 'th_TH.TIS620',
'thai': 'th_TH.ISO8859-11',
'the_np': 'the_NP.UTF-8',
'ti_er': 'ti_ER.UTF-8',
'ti_et': 'ti_ET.UTF-8',
'tig_er': 'tig_ER.UTF-8',
'tk_tm': 'tk_TM.UTF-8',
'tl': 'tl_PH.ISO8859-1',
'tl_ph': 'tl_PH.ISO8859-1',
'tn': 'tn_ZA.ISO8859-15',
'tn_za': 'tn_ZA.ISO8859-15',
'to_to': 'to_TO.UTF-8',
'tpi_pg': 'tpi_PG.UTF-8',
'tr': 'tr_TR.ISO8859-9',
'tr_cy': 'tr_CY.ISO8859-9',
'tr_tr': 'tr_TR.ISO8859-9',
'ts': 'ts_ZA.ISO8859-1',
'ts_za': 'ts_ZA.ISO8859-1',
'tt': 'tt_RU.TATAR-CYR',
'tt_ru': 'tt_RU.TATAR-CYR',
'tt_ru.tatarcyr': 'tt_RU.TATAR-CYR',
'tt_ru@iqtelif': 'tt_RU.UTF-8@iqtelif',
'turkish': 'tr_TR.ISO8859-9',
'ug_cn': 'ug_CN.UTF-8',
'uk': 'uk_UA.KOI8-U',
'uk_ua': 'uk_UA.KOI8-U',
'univ': 'en_US.utf',
'universal': 'en_US.utf',
'universal.utf8@ucs4': 'en_US.UTF-8',
'unm_us': 'unm_US.UTF-8',
'ur': 'ur_PK.CP1256',
'ur_in': 'ur_IN.UTF-8',
'ur_pk': 'ur_PK.CP1256',
'uz': 'uz_UZ.UTF-8',
'uz_uz': 'uz_UZ.UTF-8',
'uz_uz@cyrillic': 'uz_UZ.UTF-8',
've': 've_ZA.UTF-8',
've_za': 've_ZA.UTF-8',
'vi': 'vi_VN.TCVN',
'vi_vn': 'vi_VN.TCVN',
'vi_vn.tcvn': 'vi_VN.TCVN',
'vi_vn.tcvn5712': 'vi_VN.TCVN',
'vi_vn.viscii': 'vi_VN.VISCII',
'vi_vn.viscii111': 'vi_VN.VISCII',
'wa': 'wa_BE.ISO8859-1',
'wa_be': 'wa_BE.ISO8859-1',
'wae_ch': 'wae_CH.UTF-8',
'wal_et': 'wal_ET.UTF-8',
'wo_sn': 'wo_SN.UTF-8',
'xh': 'xh_ZA.ISO8859-1',
'xh_za': 'xh_ZA.ISO8859-1',
'yi': 'yi_US.CP1255',
'yi_us': 'yi_US.CP1255',
'yo_ng': 'yo_NG.UTF-8',
'yue_hk': 'yue_HK.UTF-8',
'yuw_pg': 'yuw_PG.UTF-8',
'zh': 'zh_CN.eucCN',
'zh_cn': 'zh_CN.gb2312',
'zh_cn.big5': 'zh_TW.big5',
'zh_cn.euc': 'zh_CN.eucCN',
'zh_hk': 'zh_HK.big5hkscs',
'zh_hk.big5hk': 'zh_HK.big5hkscs',
'zh_sg': 'zh_SG.GB2312',
'zh_sg.gbk': 'zh_SG.GBK',
'zh_tw': 'zh_TW.big5',
'zh_tw.euc': 'zh_TW.eucTW',
'zh_tw.euctw': 'zh_TW.eucTW',
'zu': 'zu_ZA.ISO8859-1',
'zu_za': 'zu_ZA.ISO8859-1',
}
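# Illustrative helper (an assumption, not part of the original module): the alias table above
# maps a lowercased, possibly incomplete locale name to a full "language_COUNTRY.ENCODING"
# string. normalize() performs the complete lookup (handling encodings and modifiers); a bare
# dictionary access shows the idea.
def _example_alias_lookup(name):
    """Return e.g. 'pt_BR.ISO8859-1' for 'pt_br'; fall back to the input if unknown."""
    return locale_alias.get(name.lower(), name)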
#
# This maps Windows language identifiers to locale strings.
#
# This list has been updated from
# http://msdn.microsoft.com/library/default.asp?url=/library/en-us/intl/nls_238z.asp
# to include every locale up to Windows Vista.
#
# NOTE: this mapping is incomplete. If your language is missing, please
# submit a bug report to the Python bug tracker at http://bugs.python.org/
# Make sure you include the missing language identifier and the suggested
# locale code.
#
windows_locale = {
0x0436: "af_ZA", # Afrikaans
0x041c: "sq_AL", # Albanian
0x0484: "gsw_FR",# Alsatian - France
0x045e: "am_ET", # Amharic - Ethiopia
0x0401: "ar_SA", # Arabic - Saudi Arabia
0x0801: "ar_IQ", # Arabic - Iraq
0x0c01: "ar_EG", # Arabic - Egypt
0x1001: "ar_LY", # Arabic - Libya
0x1401: "ar_DZ", # Arabic - Algeria
0x1801: "ar_MA", # Arabic - Morocco
0x1c01: "ar_TN", # Arabic - Tunisia
0x2001: "ar_OM", # Arabic - Oman
0x2401: "ar_YE", # Arabic - Yemen
0x2801: "ar_SY", # Arabic - Syria
0x2c01: "ar_JO", # Arabic - Jordan
0x3001: "ar_LB", # Arabic - Lebanon
0x3401: "ar_KW", # Arabic - Kuwait
0x3801: "ar_AE", # Arabic - United Arab Emirates
0x3c01: "ar_BH", # Arabic - Bahrain
0x4001: "ar_QA", # Arabic - Qatar
0x042b: "hy_AM", # Armenian
0x044d: "as_IN", # Assamese - India
0x042c: "az_AZ", # Azeri - Latin
0x082c: "az_AZ", # Azeri - Cyrillic
0x046d: "ba_RU", # Bashkir
    0x042d: "eu_ES", # Basque - Spain
0x0423: "be_BY", # Belarusian
    0x0445: "bn_IN", # Bengali
0x201a: "bs_BA", # Bosnian - Cyrillic
0x141a: "bs_BA", # Bosnian - Latin
0x047e: "br_FR", # Breton - France
0x0402: "bg_BG", # Bulgarian
# 0x0455: "my_MM", # Burmese - Not supported
0x0403: "ca_ES", # Catalan
0x0004: "zh_CHS",# Chinese - Simplified
0x0404: "zh_TW", # Chinese - Taiwan
0x0804: "zh_CN", # Chinese - PRC
0x0c04: "zh_HK", # Chinese - Hong Kong S.A.R.
0x1004: "zh_SG", # Chinese - Singapore
0x1404: "zh_MO", # Chinese - Macao S.A.R.
0x7c04: "zh_CHT",# Chinese - Traditional
0x0483: "co_FR", # Corsican - France
0x041a: "hr_HR", # Croatian
0x101a: "hr_BA", # Croatian - Bosnia
0x0405: "cs_CZ", # Czech
0x0406: "da_DK", # Danish
0x048c: "gbz_AF",# Dari - Afghanistan
0x0465: "div_MV",# Divehi - Maldives
0x0413: "nl_NL", # Dutch - The Netherlands
0x0813: "nl_BE", # Dutch - Belgium
0x0409: "en_US", # English - United States
0x0809: "en_GB", # English - United Kingdom
0x0c09: "en_AU", # English - Australia
0x1009: "en_CA", # English - Canada
0x1409: "en_NZ", # English - New Zealand
0x1809: "en_IE", # English - Ireland
0x1c09: "en_ZA", # English - South Africa
0x2009: "en_JA", # English - Jamaica
0x2409: "en_CB", # English - Caribbean
0x2809: "en_BZ", # English - Belize
0x2c09: "en_TT", # English - Trinidad
0x3009: "en_ZW", # English - Zimbabwe
0x3409: "en_PH", # English - Philippines
0x4009: "en_IN", # English - India
0x4409: "en_MY", # English - Malaysia
0x4809: "en_IN", # English - Singapore
0x0425: "et_EE", # Estonian
0x0438: "fo_FO", # Faroese
0x0464: "fil_PH",# Filipino
0x040b: "fi_FI", # Finnish
0x040c: "fr_FR", # French - France
0x080c: "fr_BE", # French - Belgium
0x0c0c: "fr_CA", # French - Canada
0x100c: "fr_CH", # French - Switzerland
0x140c: "fr_LU", # French - Luxembourg
0x180c: "fr_MC", # French - Monaco
0x0462: "fy_NL", # Frisian - Netherlands
0x0456: "gl_ES", # Galician
0x0437: "ka_GE", # Georgian
0x0407: "de_DE", # German - Germany
0x0807: "de_CH", # German - Switzerland
0x0c07: "de_AT", # German - Austria
0x1007: "de_LU", # German - Luxembourg
0x1407: "de_LI", # German - Liechtenstein
0x0408: "el_GR", # Greek
0x046f: "kl_GL", # Greenlandic - Greenland
0x0447: "gu_IN", # Gujarati
0x0468: "ha_NG", # Hausa - Latin
0x040d: "he_IL", # Hebrew
0x0439: "hi_IN", # Hindi
0x040e: "hu_HU", # Hungarian
0x040f: "is_IS", # Icelandic
0x0421: "id_ID", # Indonesian
0x045d: "iu_CA", # Inuktitut - Syllabics
0x085d: "iu_CA", # Inuktitut - Latin
0x083c: "ga_IE", # Irish - Ireland
0x0410: "it_IT", # Italian - Italy
0x0810: "it_CH", # Italian - Switzerland
0x0411: "ja_JP", # Japanese
0x044b: "kn_IN", # Kannada - India
0x043f: "kk_KZ", # Kazakh
0x0453: "kh_KH", # Khmer - Cambodia
0x0486: "qut_GT",# K'iche - Guatemala
0x0487: "rw_RW", # Kinyarwanda - Rwanda
0x0457: "kok_IN",# Konkani
0x0412: "ko_KR", # Korean
0x0440: "ky_KG", # Kyrgyz
0x0454: "lo_LA", # Lao - Lao PDR
0x0426: "lv_LV", # Latvian
0x0427: "lt_LT", # Lithuanian
0x082e: "dsb_DE",# Lower Sorbian - Germany
0x046e: "lb_LU", # Luxembourgish
0x042f: "mk_MK", # FYROM Macedonian
0x043e: "ms_MY", # Malay - Malaysia
0x083e: "ms_BN", # Malay - Brunei Darussalam
0x044c: "ml_IN", # Malayalam - India
0x043a: "mt_MT", # Maltese
0x0481: "mi_NZ", # Maori
0x047a: "arn_CL",# Mapudungun
0x044e: "mr_IN", # Marathi
0x047c: "moh_CA",# Mohawk - Canada
0x0450: "mn_MN", # Mongolian - Cyrillic
0x0850: "mn_CN", # Mongolian - PRC
0x0461: "ne_NP", # Nepali
0x0414: "nb_NO", # Norwegian - Bokmal
0x0814: "nn_NO", # Norwegian - Nynorsk
0x0482: "oc_FR", # Occitan - France
0x0448: "or_IN", # Oriya - India
0x0463: "ps_AF", # Pashto - Afghanistan
0x0429: "fa_IR", # Persian
0x0415: "pl_PL", # Polish
0x0416: "pt_BR", # Portuguese - Brazil
0x0816: "pt_PT", # Portuguese - Portugal
0x0446: "pa_IN", # Punjabi
0x046b: "quz_BO",# Quechua (Bolivia)
0x086b: "quz_EC",# Quechua (Ecuador)
0x0c6b: "quz_PE",# Quechua (Peru)
0x0418: "ro_RO", # Romanian - Romania
0x0417: "rm_CH", # Romansh
0x0419: "ru_RU", # Russian
0x243b: "smn_FI",# Sami Finland
0x103b: "smj_NO",# Sami Norway
0x143b: "smj_SE",# Sami Sweden
0x043b: "se_NO", # Sami Northern Norway
0x083b: "se_SE", # Sami Northern Sweden
0x0c3b: "se_FI", # Sami Northern Finland
0x203b: "sms_FI",# Sami Skolt
0x183b: "sma_NO",# Sami Southern Norway
0x1c3b: "sma_SE",# Sami Southern Sweden
0x044f: "sa_IN", # Sanskrit
0x0c1a: "sr_SP", # Serbian - Cyrillic
0x1c1a: "sr_BA", # Serbian - Bosnia Cyrillic
0x081a: "sr_SP", # Serbian - Latin
0x181a: "sr_BA", # Serbian - Bosnia Latin
0x045b: "si_LK", # Sinhala - Sri Lanka
0x046c: "ns_ZA", # Northern Sotho
0x0432: "tn_ZA", # Setswana - Southern Africa
0x041b: "sk_SK", # Slovak
0x0424: "sl_SI", # Slovenian
0x040a: "es_ES", # Spanish - Spain
0x080a: "es_MX", # Spanish - Mexico
0x0c0a: "es_ES", # Spanish - Spain (Modern)
0x100a: "es_GT", # Spanish - Guatemala
0x140a: "es_CR", # Spanish - Costa Rica
0x180a: "es_PA", # Spanish - Panama
0x1c0a: "es_DO", # Spanish - Dominican Republic
0x200a: "es_VE", # Spanish - Venezuela
0x240a: "es_CO", # Spanish - Colombia
0x280a: "es_PE", # Spanish - Peru
0x2c0a: "es_AR", # Spanish - Argentina
0x300a: "es_EC", # Spanish - Ecuador
0x340a: "es_CL", # Spanish - Chile
0x380a: "es_UR", # Spanish - Uruguay
0x3c0a: "es_PY", # Spanish - Paraguay
0x400a: "es_BO", # Spanish - Bolivia
0x440a: "es_SV", # Spanish - El Salvador
0x480a: "es_HN", # Spanish - Honduras
0x4c0a: "es_NI", # Spanish - Nicaragua
0x500a: "es_PR", # Spanish - Puerto Rico
0x540a: "es_US", # Spanish - United States
# 0x0430: "", # Sutu - Not supported
0x0441: "sw_KE", # Swahili
0x041d: "sv_SE", # Swedish - Sweden
0x081d: "sv_FI", # Swedish - Finland
0x045a: "syr_SY",# Syriac
0x0428: "tg_TJ", # Tajik - Cyrillic
0x085f: "tmz_DZ",# Tamazight - Latin
0x0449: "ta_IN", # Tamil
0x0444: "tt_RU", # Tatar
0x044a: "te_IN", # Telugu
0x041e: "th_TH", # Thai
0x0851: "bo_BT", # Tibetan - Bhutan
0x0451: "bo_CN", # Tibetan - PRC
0x041f: "tr_TR", # Turkish
0x0442: "tk_TM", # Turkmen - Cyrillic
0x0480: "ug_CN", # Uighur - Arabic
0x0422: "uk_UA", # Ukrainian
0x042e: "wen_DE",# Upper Sorbian - Germany
0x0420: "ur_PK", # Urdu
0x0820: "ur_IN", # Urdu - India
0x0443: "uz_UZ", # Uzbek - Latin
0x0843: "uz_UZ", # Uzbek - Cyrillic
0x042a: "vi_VN", # Vietnamese
0x0452: "cy_GB", # Welsh
0x0488: "wo_SN", # Wolof - Senegal
0x0434: "xh_ZA", # Xhosa - South Africa
0x0485: "sah_RU",# Yakut - Cyrillic
0x0478: "ii_CN", # Yi - PRC
0x046a: "yo_NG", # Yoruba - Nigeria
0x0435: "zu_ZA", # Zulu
}
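# Illustrative helper (an assumption, not part of the original module): the table above maps a
# Windows LCID to a POSIX-style locale name; getdefaultlocale() consults it on Windows, and a
# direct lookup looks like this.
def _example_lcid_lookup(lcid):
    """Return e.g. 'de_DE' for LCID 0x0407, or None for an unknown identifier."""
    return windows_locale.get(lcid)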
def _print_locale():
""" Test function.
"""
categories = {}
def _init_categories(categories=categories):
for k,v in globals().items():
if k[:3] == 'LC_':
categories[k] = v
_init_categories()
del categories['LC_ALL']
print('Locale defaults as determined by getdefaultlocale():')
print('-'*72)
lang, enc = getdefaultlocale()
print('Language: ', lang or '(undefined)')
print('Encoding: ', enc or '(undefined)')
print()
print('Locale settings on startup:')
print('-'*72)
for name,category in categories.items():
print(name, '...')
lang, enc = getlocale(category)
print(' Language: ', lang or '(undefined)')
print(' Encoding: ', enc or '(undefined)')
print()
print()
print('Locale settings after calling resetlocale():')
print('-'*72)
resetlocale()
for name,category in categories.items():
print(name, '...')
lang, enc = getlocale(category)
print(' Language: ', lang or '(undefined)')
print(' Encoding: ', enc or '(undefined)')
print()
try:
setlocale(LC_ALL, "")
except:
print('NOTE:')
print('setlocale(LC_ALL, "") does not support the default locale')
print('given in the OS environment variables.')
else:
print()
print('Locale settings after calling setlocale(LC_ALL, ""):')
print('-'*72)
for name,category in categories.items():
print(name, '...')
lang, enc = getlocale(category)
print(' Language: ', lang or '(undefined)')
print(' Encoding: ', enc or '(undefined)')
print()
###
try:
LC_MESSAGES
except NameError:
pass
else:
__all__.append("LC_MESSAGES")
if __name__=='__main__':
print('Locale aliasing:')
print()
_print_locale()
print()
print('Number formatting:')
print()
_test()
|
the-stack_0_11917 | """
Augmenter that apply operation (word level) to textual input based on contextual word embeddings.
"""
import string
import os
import re
import logging
from nlpaug.augmenter.word import WordAugmenter
import nlpaug.model.lang_models as nml
from nlpaug.util import Action, Doc
CONTEXT_WORD_EMBS_MODELS = {}
def init_context_word_embs_model(model_path, model_type, device, force_reload=False, batch_size=32,
top_k=None, silence=True, use_custom_api=False):
global CONTEXT_WORD_EMBS_MODELS
model_name = '_'.join([os.path.basename(model_path), model_type, str(device)])
if model_name in CONTEXT_WORD_EMBS_MODELS and not force_reload:
CONTEXT_WORD_EMBS_MODELS[model_name].top_k = top_k
CONTEXT_WORD_EMBS_MODELS[model_name].batch_size = batch_size
CONTEXT_WORD_EMBS_MODELS[model_name].silence = silence
return CONTEXT_WORD_EMBS_MODELS[model_name]
if use_custom_api:
if model_type == 'distilbert':
model = nml.DistilBert(model_path, device=device, top_k=top_k, silence=silence, batch_size=batch_size)
elif model_type == 'roberta':
model = nml.Roberta(model_path, device=device, top_k=top_k, silence=silence, batch_size=batch_size)
elif model_type == 'bert':
model = nml.Bert(model_path, device=device, top_k=top_k, silence=silence, batch_size=batch_size)
else:
            raise ValueError('Model type value is unexpected. Only support distilbert, bert and roberta models.')
else:
if model_type in ['distilbert', 'bert', 'roberta', 'bart']:
model = nml.FmTransformers(model_path, model_type=model_type, device=device, batch_size=batch_size,
top_k=top_k, silence=silence)
else:
            raise ValueError('Model type value is unexpected. Only support distilbert, bert, roberta and bart models.')
CONTEXT_WORD_EMBS_MODELS[model_name] = model
return model
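# Note on the cache above (illustrative summary, not new behaviour): loaded models are keyed by
# model basename, model type and device, so constructing several augmenters with the same
# settings reuses one loaded model and only refreshes top_k, batch_size and silence.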
class ContextualWordEmbsAug(WordAugmenter):
# https://arxiv.org/pdf/1805.06201.pdf, https://arxiv.org/pdf/2003.02245.pdf
"""
    Augmenter that leverages contextual word embeddings to find the top n similar words for augmentation.
    :param str model_path: Model name or model path. It uses transformers to load the model. Tested with
        'bert-base-uncased', 'bert-base-cased', 'distilbert-base-uncased', 'roberta-base', 'distilroberta-base',
        'facebook/bart-base', 'squeezebert/squeezebert-uncased'.
    :param str model_type: Type of model. For a BERT model, use 'bert'. For a RoBERTa/LongFormer model, use 'roberta'.
        For a BART model, use 'bart'. If no value is provided, the type will be determined from the model name.
    :param str action: Either 'insert' or 'substitute'. If value is 'insert', a new word will be injected at a random
        position according to the contextual word embeddings calculation. If value is 'substitute', a word will be
        replaced according to the contextual word embeddings calculation.
    :param int top_k: Controls the candidate pool. The top k scoring tokens will be used for augmentation. The larger
        k is, the more tokens can be used. Default value is 100. If value is None, all possible tokens are used.
    :param float aug_p: Percentage of words that will be augmented.
    :param int aug_min: Minimum number of words that will be augmented.
    :param int aug_max: Maximum number of words that will be augmented. If None is passed, the number of augmentations
        is calculated via aug_p. If the result calculated from aug_p is smaller than aug_max, the result from aug_p is
        used. Otherwise, aug_max is used.
    :param list stopwords: List of words which will be skipped from the augment operation. Do NOT include the UNKNOWN
        word. The UNKNOWN word of BERT is [UNK]. The UNKNOWN word of RoBERTa and BART is <unk>.
    :param str stopwords_regex: Regular expression for matching words which will be skipped from the augment operation.
    :param str device: Default value is CPU. If value is 'cpu', it uses CPU for processing. If value is 'cuda', it uses
        GPU for processing. Possible values include 'cuda' and 'cpu' (other options may also work).
    :param int batch_size: Batch size.
    :param bool force_reload: Force reload the contextual word embeddings model into memory when initializing the
        class. Default value is False; keep it as False if performance is a consideration.
    :param bool silence: Default is True. The transformers library prints warning messages when loading a pre-trained
        model. Set True to disable the expected warning message.
:param str name: Name of this augmenter
>>> import nlpaug.augmenter.word as naw
>>> aug = naw.ContextualWordEmbsAug()
"""
def __init__(self, model_path='bert-base-uncased', model_type='', action="substitute", top_k=100,
name='ContextualWordEmbs_Aug', aug_min=1, aug_max=10, aug_p=0.3, stopwords=None,
batch_size=32, device='cpu', force_reload=False, stopwords_regex=None,
verbose=0, silence=True, use_custom_api=True):
super().__init__(
action=action, name=name, aug_p=aug_p, aug_min=aug_min, aug_max=aug_max, tokenizer=None,
device=device, stopwords=stopwords, verbose=verbose, stopwords_regex=stopwords_regex,
include_detail=False)
self.model_path = model_path
self.model_type = model_type if model_type != '' else self.check_model_type()
self.silence = silence
# TODO: Slow when switching to HuggingFace pipeline. #https://github.com/makcedward/nlpaug/issues/248
self.use_custom_api = use_custom_api
self.model = self.get_model(
model_path=model_path, model_type=self.model_type, device=device, force_reload=force_reload,
batch_size=batch_size, top_k=top_k, silence=silence, use_custom_api=use_custom_api)
# Override stopwords
# if stopwords and self.model_type in ['xlnet', 'roberta']:
# stopwords = [self.stopwords]
# lower case all stopwords
if stopwords and 'uncased' in model_path:
self.stopwords = [s.lower() for s in self.stopwords]
self.stopword_reg = None
self.reserve_word_reg = None
self._build_stop_words(stopwords)
self.device = self.model.device
"""
        TODO: Reserving 2 spaces (e.g. [CLS], [SEP]) is not enough as it hits CUDA errors in batch processing mode.
        Therefore, force reserving 5 times the reserved spaces (i.e. 5)
"""
self.max_num_token = self.model.get_max_num_token()
def _build_stop_words(self, stopwords):
if stopwords:
            prefix_reg = r'(?<=\s|\W)'
            suffix_reg = r'(?=\s|\W)'
stopword_reg = '('+')|('.join([prefix_reg + re.escape(s) + suffix_reg for s in stopwords])+')'
self.stopword_reg = re.compile(stopword_reg)
unknown_token = self.model.get_unknown_token() or self.model.UNKNOWN_TOKEN
reserve_word_reg = '(' + prefix_reg + re.escape(unknown_token) + suffix_reg + ')'
self.reserve_word_reg = re.compile(reserve_word_reg)
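    # Note on _build_stop_words above (illustrative example, assuming the parent-class helper):
    # with stopwords=['fox'] and a BERT model, replace_stopword_by_reserved_word() turns
    # 'the fox ran' into 'the [UNK] ran' before tokenization, and substitute_back_reserved_stopwords()
    # swaps the reserved [UNK] tokens back after augmentation.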
def check_model_type(self):
# if 'xlnet' in self.model_path.lower():
# return 'xlnet'
if 'longformer' in self.model_path.lower():
return 'roberta'
elif 'roberta' in self.model_path.lower():
return 'roberta'
elif 'distilbert' in self.model_path.lower():
return 'bert'
elif 'squeezebert' in self.model_path.lower():
return 'bert'
elif 'bert' in self.model_path.lower():
return 'bert'
elif 'bart' in self.model_path.lower():
return 'bart'
# 'google/electra-small-discriminator',
# 'google/reformer-enwik8',
# 'funnel-transformer/small-base',
# 'google/tapas-base',
# 'microsoft/deberta-base'
return ''
def is_stop_words(self, token):
# Will execute before any tokenization. No need to handle prefix processing
if self.stopwords:
unknown_token = self.model.get_unknown_token() or self.model.UNKNOWN_TOKEN
if token == unknown_token:
return True
return token.lower() in self.stopwords
else:
return False
def skip_aug(self, token_idxes, tokens):
results = []
for token_idx in token_idxes:
token = tokens[token_idx]
# Do not augment subword
if self.model_type in ['bert', 'electra'] \
and token.startswith(self.model.get_subword_prefix()):
continue
# Do not augment tokens if len is less than aug_min
if (self.model.get_subword_prefix() in token and len(token) < self.aug_min+1) \
or (self.model.get_subword_prefix() not in token and len(token) < self.aug_min):
continue
if self.model_type in ['xlnet', 'roberta', 'bart']:
                # xlnet may tokenize words incorrectly. For example, 'fox' may be tokenized as ['_', 'fox']
if token == self.model.get_subword_prefix():
continue
# subword
if not token.startswith(self.model.get_subword_prefix()):
continue
results.append(token_idx)
return results
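    # Note on skip_aug above (illustrative): subword pieces are never chosen as augmentation
    # targets. With a BERT-style tokenizer 'playing' may become ['play', '##ing'] and the
    # '##ing' piece is skipped; with RoBERTa/XLNet-style tokenizers only tokens carrying the
    # leading 'Ġ'/'▁' word-start prefix are kept.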
def split_text(self, data):
        # Expect to have a warning for "Token indices sequence length is longer than the specified maximum sequence length for this model"
# Handle stopwords first #https://github.com/makcedward/nlpaug/issues/247
if self.stopwords:
unknown_token = self.model.get_unknown_token() or self.model.UNKNOWN_TOKEN
preprocessed_data, reserved_stopwords = self.replace_stopword_by_reserved_word(data, self.stopword_reg, unknown_token)
else:
preprocessed_data, reserved_stopwords = data, None
orig_log_level = logging.getLogger('transformers.' + 'tokenization_utils_base').getEffectiveLevel()
logging.getLogger('transformers.' + 'tokenization_utils_base').setLevel(logging.ERROR)
tokens = self.model.get_tokenizer().tokenize(preprocessed_data)
logging.getLogger('transformers.' + 'tokenization_utils_base').setLevel(orig_log_level)
if self.model.get_model().config.max_position_embeddings == -1: # e.g. No max length restriction for XLNet
return (preprocessed_data, None, tokens, None), reserved_stopwords # (Head text, tail text, head token, tail token), reserved_stopwords
ids = self.model.get_tokenizer().convert_tokens_to_ids(tokens[:self.max_num_token])
head_text = self.model.get_tokenizer().decode(ids).strip()
# head_text = self.model.get_tokenizer().convert_tokens_to_string(tokens[:self.max_num_token]).strip()
tail_text = None
if len(tokens) >= self.max_num_token:
# tail_text = self.model.get_tokenizer().convert_tokens_to_string(tokens[self.max_num_token:]).strip()
ids = self.model.get_tokenizer().convert_tokens_to_ids(tokens[self.max_num_token:])
tail_text = self.model.get_tokenizer().decode(ids).strip()
return (head_text, tail_text, tokens[:self.max_num_token], tokens[self.max_num_token:]), reserved_stopwords
def insert(self, data):
if not data:
return data
if isinstance(data, list):
all_data = data
else:
if data.strip() == '':
return data
all_data = [data]
# If length of input is larger than max allowed input, only augment heading part
split_results = [] # head_text, tail_text, head_tokens, tail_tokens
reserved_stopwords = []
for d in all_data:
split_result, reserved_stopword = self.split_text(d)
split_results.append(split_result)
reserved_stopwords.append(reserved_stopword)
change_seq = 0
# Pick target word for augmentation
for i, (split_result, reserved_stopword_tokens) in enumerate(zip(split_results, reserved_stopwords)):
head_text, tail_text, head_tokens, tail_tokens = split_result
if self.model_type in ['xlnet', 'roberta', 'bart']:
                # xlnet and roberta tokens include a prefix (e.g. '▁' or 'Ġ')
cleaned_head_tokens = [t.replace(self.model.get_subword_prefix(), '') for t in head_tokens]
else:
cleaned_head_tokens = head_tokens
head_doc = Doc(head_text, head_tokens)
aug_idxes = self._get_aug_idxes(head_tokens)
aug_idxes.sort(reverse=True)
if reserved_stopword_tokens:
head_doc, change_seq = self.substitute_back_reserved_stopwords(
head_doc, reserved_stopword_tokens, change_seq)
split_results[i] += (cleaned_head_tokens, head_doc, aug_idxes, )
# Pad aug_idxes
max_aug_size = max([len(split_result[6]) for split_result in split_results])
for split_result in split_results:
aug_idxes = split_result[6]
for _ in range(max_aug_size - len(aug_idxes)):
aug_idxes.append(-1)
token_placeholder = self.model.get_mask_token()
if self.model_type in ['xlnet', 'roberta', 'bart']:
            token_placeholder = self.model.get_subword_prefix() + token_placeholder  # Adding prefix for xlnet/roberta/bart style tokenizers
# Augment same index of aug by batch
for i in range(max_aug_size):
masked_texts = []
            aug_input_poses = []  # store which inputs were augmented; no record if padding
change_seq += 1
for j, split_result in enumerate(split_results):
head_doc, aug_idx = split_result[5], split_result[6][i]
# -1 if it is padding
if aug_idx == -1:
continue
head_doc.add_token(aug_idx, token=token_placeholder, action=Action.INSERT,
change_seq=self.parent_change_seq+change_seq)
aug_input_poses.append(j)
                # some tokenizers handle special characters (e.g. "don't" can merge after decode)
if self.model_type in ['bert', 'electra']:
ids = self.model.get_tokenizer().convert_tokens_to_ids(head_doc.get_augmented_tokens())
masked_text = self.model.get_tokenizer().decode(ids).strip()
elif self.model_type in ['xlnet', 'roberta', 'bart']:
masked_text = self.model.get_tokenizer().convert_tokens_to_string(head_doc.get_augmented_tokens()).strip()
masked_texts.append(masked_text)
if not len(masked_texts):
continue
outputs = self.model.predict(masked_texts, target_words=None, n=2)
# Update doc
for aug_input_pos, output, masked_text in zip(aug_input_poses, outputs, masked_texts):
split_result = split_results[aug_input_pos]
head_doc = split_result[5]
aug_idx = split_result[6][i] # augment position in text
# TODO: Alternative method better than dropout
candidate = ''
if len(output) == 0:
# TODO: no result?
pass
elif len(output) == 1:
candidate = output[0]
elif len(output) > 1:
candidate = self.sample(output, 1)[0]
# # In XLNet, it can be the first word of sentence which does not come with space. E.g. Zombine (ID:29110)
# if self.model_type in ['xlnet']:
# if candidate != '' and not candidate.startswith(self.model.get_subword_prefix()):
# candidate = self.model.get_subword_prefix() + candidate
# if self.model_type in ['roberta', 'bart']:
# if candidate != '' and not candidate.startswith(self.model.get_subword_prefix()) and candidate.strip() != candidate:
# candidate = self.model.get_subword_prefix() + candidate.strip()
# no candidate
if candidate == '':
head_doc.add_change_log(aug_idx, new_token='', action=Action.DELETE, change_seq=self.parent_change_seq+change_seq)
continue
head_doc.update_change_log(aug_idx, token=candidate)
# Early stop if number of token exceed max number
if head_doc.size() > self.max_num_token:
for j in range(i+1, max_aug_size):
split_results[aug_input_pos][6][j] = -1
augmented_texts = []
for split_result, reserved_stopword_tokens in zip(split_results, reserved_stopwords):
tail_text, head_doc = split_result[1], split_result[5]
head_tokens = head_doc.get_augmented_tokens()
# if self.model_type in ['xlnet', 'roberta']:
# # xlent and roberta tokens include prefix (e.g. ▁ or Ġ')
# head_tokens = [self.model.get_subword_prefix() + t if self.model.get_subword_prefix() not in t and i != 0 else t for i, t in enumerate(head_tokens)]
ids = self.model.get_tokenizer().convert_tokens_to_ids(head_tokens)
augmented_text = self.model.get_tokenizer().decode(ids)
if tail_text:
augmented_text += ' ' + tail_text
augmented_texts.append(augmented_text)
if isinstance(data, list):
return augmented_texts
else:
return augmented_texts[0]
def substitute(self, data):
if not data:
return data
if isinstance(data, list):
all_data = data
else:
if data.strip() == '':
return data
all_data = [data]
# If length of input is larger than max allowed input, only augment heading part
split_results = [] # head_text, tail_text, head_tokens, tail_tokens
reserved_stopwords = []
for d in all_data:
split_result, reserved_stopword = self.split_text(d)
split_results.append(split_result)
reserved_stopwords.append(reserved_stopword)
change_seq = 0
# Pick target word for augmentation
for i, (split_result, reserved_stopword_tokens) in enumerate(zip(split_results, reserved_stopwords)):
head_text, tail_text, head_tokens, tail_tokens = split_result
if self.model_type in ['xlnet', 'roberta', 'bart']:
                # xlnet and roberta tokens include a prefix (e.g. '▁' or 'Ġ')
cleaned_head_tokens = [t.replace(self.model.get_subword_prefix(), '') for t in head_tokens]
else:
cleaned_head_tokens = head_tokens
head_doc = Doc(head_text, head_tokens)
aug_idxes = self._get_aug_idxes(head_tokens)
aug_idxes.sort(reverse=True)
if reserved_stopword_tokens:
head_doc, change_seq = self.substitute_back_reserved_stopwords(
head_doc, reserved_stopword_tokens, change_seq)
head_tokens = head_doc.get_augmented_tokens()
split_results[i] += (cleaned_head_tokens, head_doc, aug_idxes, )
# Pad aug_idxes
max_aug_size = max([len(split_result[6]) for split_result in split_results])
for split_result in split_results:
aug_idxes = split_result[6]
for _ in range(max_aug_size - len(aug_idxes)):
aug_idxes.append(-1)
token_placeholder = self.model.get_mask_token()
if self.model_type in ['xlnet', 'roberta', 'bart']:
            token_placeholder = self.model.get_subword_prefix() + token_placeholder  # Adding prefix for xlnet/roberta/bart style tokenizers
# Augment same index of aug by batch
for i in range(max_aug_size):
original_tokens = []
masked_texts = []
            aug_input_poses = []  # store which inputs were augmented; no record if padding
change_seq += 1
for j, split_result in enumerate(split_results):
head_doc, aug_idx = split_result[5], split_result[6][i]
# -1 if it is padding
if aug_idx == -1:
continue
original_tokens.append(head_doc.get_token(aug_idx).get_latest_token().token)
head_doc.add_change_log(aug_idx, new_token=token_placeholder, action=Action.SUBSTITUTE,
change_seq=self.parent_change_seq+change_seq)
# remove continuous sub-word
to_remove_idxes = []
for k in range(aug_idx+1, head_doc.size()):
subword_token = head_doc.get_token(k).orig_token.token
if subword_token in string.punctuation:
break
if self.model_type in ['bert', 'electra'] and self.model.get_subword_prefix() in subword_token:
to_remove_idxes.append(k)
elif self.model_type in ['xlnet', 'roberta', 'bart'] and self.model.get_subword_prefix() not in subword_token:
to_remove_idxes.append(k)
else:
break
for k in reversed(to_remove_idxes):
head_doc.add_change_log(k, new_token='', action=Action.SUBSTITUTE,
change_seq=self.parent_change_seq+change_seq)
aug_input_poses.append(j)
                # some tokenizers handle special characters (e.g. "don't" can merge after decode)
if self.model_type in ['bert', 'electra']:
ids = self.model.get_tokenizer().convert_tokens_to_ids(head_doc.get_augmented_tokens())
masked_text = self.model.get_tokenizer().decode(ids).strip()
elif self.model_type in ['xlnet', 'roberta', 'bart']:
masked_text = self.model.get_tokenizer().convert_tokens_to_string(head_doc.get_augmented_tokens()).strip()
masked_texts.append(masked_text)
if not len(masked_texts):
continue
outputs = self.model.predict(masked_texts, target_words=original_tokens, n=2)
# Update doc
for original_token, aug_input_pos, output, masked_text in zip(original_tokens, aug_input_poses, outputs, masked_texts):
split_result = split_results[aug_input_pos]
head_doc = split_result[5]
aug_idx = split_result[6][i] # augment position in text
# TODO: Alternative method better than dropout
candidate = ''
if len(output) == 0:
# TODO: no result?
pass
elif len(output) == 1:
candidate = output[0]
elif len(output) > 1:
candidate = self.sample(output, 1)[0]
# # In XLNet, it can be the first word of sentence which does not come with space. E.g. Zombine (ID:29110)
# if self.model_type in ['xlnet']:
# if candidate != '' and not candidate.startswith(self.model.get_subword_prefix()):
# candidate = self.model.get_subword_prefix() + candidate
# if self.model_type in ['roberta', 'bart']:
# if candidate != '' and not candidate.startswith(self.model.get_subword_prefix()) and candidate.strip() != candidate:
# candidate = self.model.get_subword_prefix() + candidate.strip()
# Fallback to original token if no candidate is appropriate
if candidate == '':
candidate = original_token
head_doc.update_change_log(aug_idx, token=candidate, action=Action.SUBSTITUTE,
change_seq=self.parent_change_seq+change_seq)
# Early stop if number of token exceed max number
if head_doc.size() > self.max_num_token:
for j in range(i+1, max_aug_size):
split_results[aug_input_pos][6][j] = -1
augmented_texts = []
for split_result in split_results:
tail_text, head_doc = split_result[1], split_result[5]
head_tokens = head_doc.get_augmented_tokens()
# if self.model_type in ['xlnet', 'roberta']:
# # xlent and roberta tokens include prefix (e.g. ▁ or Ġ')
# head_tokens = [self.model.get_subword_prefix() + t if self.model.get_subword_prefix() not in t and i != 0 else t for i, t in enumerate(head_tokens)]
ids = self.model.get_tokenizer().convert_tokens_to_ids(head_tokens)
augmented_text = self.model.get_tokenizer().decode(ids)
if tail_text is not None:
augmented_text += ' ' + tail_text
augmented_texts.append(augmented_text)
if isinstance(data, list):
return augmented_texts
else:
return augmented_texts[0]
@classmethod
def get_model(cls, model_path, model_type, device='cuda', force_reload=False, batch_size=32,
top_k=None, silence=True, use_custom_api=False):
return init_context_word_embs_model(model_path, model_type, device, force_reload, batch_size, top_k,
silence, use_custom_api)
def substitute_back_reserved_stopwords(self, doc, reserved_stopword_tokens, change_seq):
unknown_token = self.model.get_unknown_token() or self.model.UNKNOWN_TOKEN
reserved_pos = len(reserved_stopword_tokens) - 1
for token_i, token in enumerate(doc.get_augmented_tokens()):
if token == unknown_token:
change_seq += 1
doc.update_change_log(token_i, token=reserved_stopword_tokens[reserved_pos],
action=Action.SUBSTITUTE,
change_seq=self.parent_change_seq+change_seq)
reserved_pos -= 1
return doc, change_seq
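if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): constructing the augmenter downloads the
    # pretrained model on first use, so this assumes network access and an installed
    # `transformers` dependency.
    sample_text = 'The quick brown fox jumps over the lazy dog.'
    aug = ContextualWordEmbsAug(model_path='bert-base-uncased', action='substitute')
    print(aug.augment(sample_text))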
|
the-stack_0_11919 | #!/usr/bin/env python2
import rospy
from std_msgs.msg import Bool
from dbw_mkz_msgs.msg import ThrottleCmd, SteeringCmd, BrakeCmd, SteeringReport
from geometry_msgs.msg import TwistStamped
from twist_controller import Controller
'''
You can build this node only after you have built (or partially built) the `waypoint_updater` node.
You will subscribe to `/twist_cmd` message which provides the proposed linear and angular velocities.
You can subscribe to any other message that you find important or refer to the document for list
of messages subscribed to by the reference implementation of this node.
One thing to keep in mind while building this node and the `twist_controller` class is the status
of `dbw_enabled`. While in the simulator, it's enabled all the time; in the real car, that will
not be the case. This may cause your PID controller to accumulate error because the car could
temporarily be driven by a human instead of your controller.
We have provided two launch files with this node. Vehicle specific values (like vehicle_mass,
wheel_base) etc should not be altered in these files.
We have also provided some reference implementations for PID controller and other utility classes.
You are free to use them or build your own.
Once you have the proposed throttle, brake, and steer values, publish it on the various publishers
that we have created in the `__init__` function.
'''
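# Interface note (an assumption inferred from the calls below, not the packaged
# twist_controller): the imported Controller is expected to accept the `args` dict built
# in DBWNode.__init__ and to expose
#     control(current_vel, angular_vel, linear_vel, dbw_enabled) -> (throttle, brake, steer)
# with throttle in [0, 1], brake as a torque in N*m and steer in radians.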
class DBWNode(object):
def __init__(self):
rospy.init_node('dbw_node')
args = {'min_speed': 0.1,
'vehicle_mass': rospy.get_param('~vehicle_mass', 1736.35),
'fuel_capacity': rospy.get_param('~fuel_capacity', 13.5),
'brake_deadband': rospy.get_param('~brake_deadband', .1),
'decel_limit': rospy.get_param('~decel_limit', -5),
'accel_limit': rospy.get_param('~accel_limit', 1.),
'wheel_radius': rospy.get_param('~wheel_radius', 0.2413),
'wheel_base': rospy.get_param('~wheel_base', 2.8498),
'steer_ratio': rospy.get_param('~steer_ratio', 14.8),
'max_lat_accel': rospy.get_param('~max_lat_accel', 3.),
'max_steer_angle': rospy.get_param('~max_steer_angle', 8.)}
self.steer_pub = rospy.Publisher('/vehicle/steering_cmd',
SteeringCmd, queue_size=1)
self.throttle_pub = rospy.Publisher('/vehicle/throttle_cmd',
ThrottleCmd, queue_size=1)
self.brake_pub = rospy.Publisher('/vehicle/brake_cmd',
BrakeCmd, queue_size=1)
# Create `Controller` object
self.controller = Controller(args)
# Subscribe to all the topics you need to
rospy.Subscriber('/current_velocity', TwistStamped, self.velocity_cb)
rospy.Subscriber('/twist_cmd', TwistStamped, self.twist_cb)
rospy.Subscriber('/vehicle/dbw_enabled', Bool, self.dbw_cb)
self.current_vel = None
self.curr_ang_vel = None
self.angular_vel = None
self.linear_vel = None
self.dbw = True
self.twist = None
self.loop()
def loop(self):
rate = rospy.Rate(50) # 50Hz
# print('RosPy:', rospy.is_shutdown())
while not rospy.is_shutdown():
# Get predicted throttle, brake, and steering using `twist_controller`
# You should only publish the control commands if dbw is enabled
# print('Status (VAL):', self.current_vel, self.angular_vel, self.linear_vel)
if None not in (self.current_vel, self.angular_vel, self.linear_vel):
throttle, brake, steer = self.controller.control(self.current_vel, self.angular_vel,
self.linear_vel, self.dbw)
# print('Prediction (TBS):', throttle, brake, steer)
if self.dbw:
self.publish(throttle, brake, steer)
rate.sleep()
def publish(self, throttle, brake, steer):
tcmd = ThrottleCmd()
tcmd.enable = True
tcmd.pedal_cmd_type = ThrottleCmd.CMD_PERCENT
tcmd.pedal_cmd = throttle
self.throttle_pub.publish(tcmd)
scmd = SteeringCmd()
scmd.enable = True
scmd.steering_wheel_angle_cmd = steer
self.steer_pub.publish(scmd)
bcmd = BrakeCmd()
bcmd.enable = True
bcmd.pedal_cmd_type = BrakeCmd.CMD_TORQUE
bcmd.pedal_cmd = brake
self.brake_pub.publish(bcmd)
def velocity_cb(self, msg):
self.current_vel = msg.twist.linear.x
def twist_cb(self, msg):
self.linear_vel = msg.twist.linear.x
self.angular_vel = msg.twist.angular.z
def dbw_cb(self, msg):
        self.dbw = msg.data  # std_msgs/Bool message; use its data field for the boolean flag
if __name__ == '__main__':
DBWNode()
|
the-stack_0_11922 | from tensorflow.keras.models import load_model
from tensorflow import get_logger
import logging
import pickle
import numpy as np
logger = get_logger()
logger.setLevel(logging.CRITICAL)
CATEGORIES = ['Abyssian', 'American_bulldog', 'American_pit_bull', 'Basset_hound', 'Beagle', 'Bengal', 'Birdman', 'Bombay', 'Boxer', 'British_Shorthair', 'Chihuahua', 'Egyptian_Mau', 'English_Cocker_Spaniel', 'English_Setter', 'German_Shorthaired', 'Great_Pyrenees', 'Havanese', 'Japanese_Chin',
'Keeshond', 'Leonberger', 'Maine_Coon', 'Miniature_Pinscher', 'Newfoundland', 'Persian', 'Pomeranian', 'Pug', 'Ragdoll', 'Russian_Blue', 'Saint_Bernard', 'Samoyed', 'Scottish_Terrier', 'Shiba_Inu', 'Siamese', 'Sphynx', 'Staffordshire_Bull_Terrier', 'Wheaten_Terrier', 'Yorkshire_Terrier']
# Load the model
model = load_model('98.1581807136535620200213231827_model.h5')
# Get the label
label = pickle.load(open("y.pickle", "rb"))[2]
# Get an image from the normalized dataset
img = pickle.load(open("x.pickle", "rb"))[2]
# Keras models are optimized to make predictions on a batch, or collection, of examples.
# Add the image to a batch that has a single member.
img = (np.expand_dims(img, 0))
# Make the prediction
predictions_single = model.predict(img)
# Prediction for the single image in the batch:
predicao = np.argmax(predictions_single[0])
print(f"Prediction: {predicao} -> {CATEGORIES[int(predicao)]}")
print(f"Label: {label} -> {CATEGORIES[label]}")
|
the-stack_0_11923 | import re
import requests
import subprocess
import time
from time import sleep
from log import log, log_add, now, RED, WHT, GRN, YEL
# Named Constants
# A local host that should always be up (ideally, the gateway router)
Local_Host = "192.168.1.1"
# An internet host that should always be up (Google DNS,for example)
Internet_Host = "8.8.8.8"
smallest_outage_to_report = 30 # seconds
# Twitter Account of your Internet Provider.
#Tweet_To = "@ask_spectrum"
My_City = "SomeCity"
# replace 'K' sequence by your API_KEY of ThingTweet
Api_Key = 'KKKKKKKKKKKKKKKK'
# replace 'W' sequence by your WriteAPIKey (from your thingSpeak channel settings)
Write_Api_Key = 'WWWWWWWWWWWWWWWW'
Thingspeak_Host = "api.thingspeak.com"
Tweet_Path = "apps/thingtweet/1/statuses/update"
Thingspeak_Path = "/update"
Report_File = "netmon.log"
# Delete the following line if you put your keys above
from my_api_keys import Tweet_To, My_City, Api_Key, Write_Api_Key
def send_tweet(message):
payload = {'api_key': Api_Key, 'status': message}
try:
r = requests.post(f"https://{Thingspeak_Host}/{Tweet_Path}", params=payload)
except Exception as e:
log(f"Couldn't send tweet \"{message}\". Continuing. ({e})")
if r.status_code != 200:
log("Tweet fail {repr(r)}.")
def send_down_tweet(duration):
send_tweet(f"{Tweet_To}, internet was down: {str(duration)} s. "
f"I'm in {My_City}. #DownTimeDetected")
def send_start_tweet():
send_tweet(f"Downtime monitor started {now()}.")
def send_thingspeak(duration):
args = {'field1': str(duration), 'key': Write_Api_Key}
headers = {
"Content-type": "application/x-www-form-urlencoded",
"Accept": "text/plain"}
try:
r = requests.post(f"https://{Thingspeak_Host}/{Thingspeak_Path}",
params=args, headers=headers)
except Exception as e:
log(f"Couldn't post to thingspeak \"{duration}\". Continuing. ({e})")
def host_down(host):
"""Return (True, '') if host is down, (False, latency) if up.
"""
latency = ''
try:
pipe = subprocess.PIPE
r = subprocess.run(f"ping -c 1 {host}",
shell=True, stdout=pipe, stderr=pipe)
except subprocess.CalledProcessError as err:
log(f'{RED}FATAL ERROR. Exiting: {err}')
exit(1)
else:
output = r.stdout.decode('utf-8')
if len(r.stderr) > 0:
err = r.stderr.decode('utf-8')
log(f"Surprising error: {YEL}{err}")
ms_match = re.search("time=([0-9.]+) ms", output)
if ms_match:
latency = ms_match.group(1)
down = r.returncode > 0
return (down, latency)
def check_down(local, internet):
"""Returns (True, '') if internet is down, (False, latency) if up,
    (None, None) if it's not possible to check because the local net is down.
"""
inet_down, inet_latency = host_down(internet)
if inet_down:
# 2nd chance check in case it was just one lost packet
inet_down, inet_latency = host_down(internet)
if not inet_down:
log("hiccup")
return (inet_down, inet_latency)
# Internet seems down, but are we even locally connected?
local_down, local_latency = host_down(local)
if local_down:
# Locally disconnected, can't tell anything
return (None, None)
return (inet_down, inet_latency)
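# Illustrative behaviour of check_down() (assumed typical output, not a test): with both
# hosts reachable it returns something like (False, '12.3'); with the internet host down
# but the gateway up it returns (True, ''); with the gateway unreachable it returns
# (None, None), meaning no conclusion can be drawn.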
# MAIN LOOP
if __name__ == "__main__":
send_start_tweet()
log(f"{WHT} -- DownTime Monitor --\n")
attempt_num = 0
was_offline = False
start_of_outage = 0
outage_count = 0
long_outage_count = 0
while True:
attempt_num += 1
latency = ''
log(f"{YEL}#{attempt_num}, {long_outage_count}/{outage_count} "
"short/long outage(s). ", end="")
try:
is_down, latency = check_down(Local_Host, Internet_Host)
sleep(5)
except Exception as e:
log(f"{RED}Fail:{e}")
sleep(5)
continue
except KeyboardInterrupt:
log(f"{WHT}Goodbye!")
exit(1)
if is_down is None:
log_add(f"{now()}: Disconnect on local network.")
continue
elif is_down:
log_add(f"{WHT}Internet is {RED}down...")
else:
log_add(f"{WHT}Internet is up. (latency {GRN}{latency}{WHT})")
# Internet went down after previous check
if is_down and not was_offline:
start_of_outage = time.time()
was_offline = True
continue
# Internet came up after previous check
if not is_down and was_offline:
# 2 digits after decimal makes tweet slightly less likely
# to be duplicate. Twitter blocks duplicate tweets.
downtime = round(time.time() - start_of_outage, 2)
outage_count += 1
was_offline = False
send_thingspeak(downtime)
if (downtime > smallest_outage_to_report):
long_outage_count += 1
dt_str = log(f"Outage above {smallest_outage_to_report} s: {downtime} s\n")
with open(Report_File, "a") as TxtFile:
TxtFile.write(dt_str)
send_down_tweet(downtime)
|
the-stack_0_11925 | # befor eimporting anything!
import sys
import os
sys.path.insert(1, os.path.realpath('/work/dev-box/blender-2.79-linux-glibc219-x86_64/2.79/python/lib/python3.5/site-packages/'))
from enum import Enum
class LogColor:
INFO = '\033[94m'
WARNING = '\033[93m'
ERROR = '\033[91m\033[1m'
ENDC = '\033[0m'
class LogLevel(Enum):
INFO = 1
WARNING = 2
ERROR = 3
def log(output,level=LogLevel.INFO):
if level == LogLevel.INFO:
sys.stderr.write(LogColor.INFO)
elif level == LogLevel.WARNING:
sys.stderr.write(LogColor.WARNING)
elif level == LogLevel.ERROR:
sys.stderr.write(LogColor.ERROR)
sys.stderr.write(str(output))
sys.stderr.write(LogColor.ENDC)
sys.stderr.write("\n")
sys.stderr.flush()
import bpy
import bmesh
import math
import numpy as np
import binvox_rw
import import_off
import_off.register()
from bpy_extras.io_utils import axis_conversion
from bpy.props import EnumProperty
sphere_base_mesh = None
cube_base_mesh = None
circle_base_mesh = None
def initialize(width=512, height=448):
bpy.ops.mesh.primitive_ico_sphere_add()
global sphere_base_mesh
sphere_base_mesh = bpy.context.scene.objects.active.data.copy()
for face in sphere_base_mesh.polygons:
face.use_smooth = True
bpy.ops.mesh.primitive_cube_add()
global cube_base_mesh
cube_base_mesh = bpy.context.scene.objects.active.data.copy()
bpy.ops.mesh.primitive_circle_add(vertices=1024, radius=1, fill_type='NGON')
global circle_base_mesh
circle_base_mesh = bpy.context.scene.objects.active.data.copy()
# Delete the scene, except for the camera and the lamp
for obj in bpy.data.objects:
if str(obj.name) in ['Camera']:
continue
obj.select = True
bpy.ops.object.delete()
scene = bpy.context.scene
# set the camera and its constraint
cam = scene.objects['Camera']
cam.location = (0, 3.0, 1.0)
cam.data.lens = 35
cam.data.sensor_width = 32
cam.data.sensor_height = 32
cam_constraint = cam.constraints.new(type='TRACK_TO')
cam_constraint.track_axis = 'TRACK_NEGATIVE_Z'
cam_constraint.up_axis = 'UP_Y'
def parent_obj_to_camera(b_camera):
origin = (0, 0, 0)
b_empty = bpy.data.objects.new('Empty', None)
b_empty.location = origin
b_camera.parent = b_empty # setup parenting
scn = bpy.context.scene
scn.objects.link(b_empty)
scn.objects.active = b_empty
return b_empty
camera_target = parent_obj_to_camera(cam)
cam_constraint.target = camera_target
locations = [
(-0.98382, 0.445997, 0.526505),
(-0.421806, -0.870784, 0.524944),
(0.075576, -0.960128, 0.816464),
(0.493553, -0.57716, 0.928208),
(0.787275, -0.256822, 0.635172),
(1.01032, 0.148764, 0.335078)
]
for i in range(len(locations)):
lamp_data = bpy.data.lamps.new(name='Point Lamp ' + str(i), type='POINT')
lamp_data.shadow_method = 'RAY_SHADOW'
lamp_data.shadow_ray_sample_method = 'CONSTANT_QMC'
lamp_data.use_shadow = True
lamp_data.shadow_soft_size = 1e6
lamp_data.distance = 2
lamp_data.energy = 0.1
lamp_data.use_diffuse = True
lamp_data.use_specular = True
lamp_data.falloff_type = 'CONSTANT'
lamp_object = bpy.data.objects.new(name='Spot Lamp ' + str(i), object_data=lamp_data)
scene.objects.link(lamp_object)
lamp_object.location[0] = locations[i][0]
lamp_object.location[1] = locations[i][1]
lamp_object.location[2] = locations[i][2]
lamp_object.rotation_euler[0] = 0
lamp_object.rotation_euler[1] = 0
lamp_object.rotation_euler[2] = 0
lamp_object.parent = camera_target
try:
if (2, 78, 0) <= bpy.app.version:
# https://blender.stackexchange.com/questions/5281/blender-sets-compute-device-cuda-but-doesnt-use-it-for-actual-render-on-ec2
bpy.context.user_preferences.addons['cycles'].preferences.compute_device_type = 'CUDA'
bpy.context.user_preferences.addons['cycles'].preferences.devices[0].use = True
else:
bpy.context.user_preferences.system.compute_device_type = 'CUDA'
except TypeError:
pass
scene.render.use_file_extension = False
scene.render.resolution_x = width
scene.render.resolution_y = height
scene.render.resolution_percentage = 100
scene.render.use_antialiasing = True
scene.render.use_shadows = True
world = bpy.context.scene.world
world.zenith_color = [1.0, 1.0, 1.0]
world.horizon_color = [1.0, 1.0, 1.0]
scene.render.alpha_mode = 'SKY'
world.light_settings.use_environment_light = True
world.light_settings.environment_color = 'PLAIN'
world.light_settings.environment_energy = 0.5
return camera_target
def make_material(name, diffuse, alpha, shadow=False):
material = bpy.data.materials.new(name)
material.diffuse_color = diffuse
material.diffuse_shader = 'LAMBERT'
material.diffuse_intensity = 1
material.specular_color = (1, 1, 1)
material.specular_shader = 'COOKTORR'
material.specular_intensity = 2
material.alpha = alpha
material.use_transparency = True
material.ambient = 1.0
material.use_cast_shadows = shadow
material.use_shadows = shadow
return material
def shadow_plane(material, offset = (0, 0, 0), scale = 1):
global circle_base_mesh
ob = bpy.data.objects.new("BRC_Shadow_Plane", circle_base_mesh)
ob.location = offset
ob.scale = (scale, scale, scale)
bpy.context.scene.objects.link(ob)
mat = material
mat.use_shadows = True
mat.use_transparent_shadows = True
mat.use_only_shadow = True
mat.use_raytrace = True
mat.ambient = 0
ob.data.materials.append(mat)
ob.active_material_index = 0
ob.active_material = mat
def _load_mesh(name, vertices, faces):
# vertices should be list of lists
# faces should be list of lists
edges = []
mesh = bpy.data.meshes.new(name=name)
mesh.from_pydata(vertices, edges, faces)
# mesh.vertices.add(len(verts))
# mesh.vertices.foreach_set("co", unpack_list(verts))
# mesh.faces.add(len(facets))
# mesh.faces.foreach_set("vertices", unpack_face_list(facets))
mesh.validate()
mesh.update()
scene = bpy.context.scene
obj = bpy.data.objects.new(mesh.name, mesh)
scene.objects.link(obj)
scene.objects.active = obj
obj.select = True
axis_forward = EnumProperty(
name="Forward",
items=(('X', "X Forward", ""),
('Y', "Y Forward", ""),
('Z', "Z Forward", ""),
('-X', "-X Forward", ""),
('-Y', "-Y Forward", ""),
('-Z', "-Z Forward", ""),
),
default='Y',
)
axis_up = EnumProperty(
name="Up",
items=(('X', "X Up", ""),
('Y', "Y Up", ""),
('Z', "Z Up", ""),
('-X', "-X Up", ""),
('-Y', "-Y Up", ""),
('-Z', "-Z Up", ""),
),
default='Z',
)
global_matrix = axis_conversion(from_forward=axis_forward, from_up=axis_up).to_4x4()
obj.matrix_world = global_matrix
scene.update()
return mesh
def load_mesh(name, vertices, faces, material, offset=(0, 0, 0), scale=1, axes='xyz'):
_load_mesh(name, vertices, faces)
assert len(offset) == 3
assert scale > 0
assert len(axes) == 3
x_index = axes.find('x')
y_index = axes.find('y')
z_index = axes.find('z')
assert x_index >= 0 and x_index < 3
assert y_index >= 0 and y_index < 3
assert z_index >= 0 and z_index < 3
assert x_index != y_index and x_index != z_index and y_index != z_index
for obj in bpy.context.scene.objects:
# obj.name contains the group name of a group of faces, see http://paulbourke.net/dataformats/obj/
# every mesh is of type 'MESH', this works not only for ShapeNet but also for 'simple'
# obj files
if obj.type == 'MESH' and not 'BRC' in obj.name:
# change color
# this is based on https://stackoverflow.com/questions/4644650/blender-how-do-i-add-a-color-to-an-object
# but needed changing a lot of attributes according to documentation
obj.data.materials.append(material)
for vertex in obj.data.vertices:
# make a copy, otherwise axes switching does not work
vertex_copy = (vertex.co[0], vertex.co[1], vertex.co[2])
vertex.co[0] = vertex_copy[x_index]
vertex.co[1] = vertex_copy[y_index]
vertex.co[2] = vertex_copy[z_index]
vertex.co[0] = vertex.co[0] * scale + offset[0]
vertex.co[1] = vertex.co[1] * scale + offset[1]
vertex.co[2] = vertex.co[2] * scale + offset[2]
obj.name = 'BRC_' + obj.name
def load_off(off_file, material, offset=(0, 0, 0), scale=1, axes='xyz'):
bpy.ops.import_mesh.off(filepath=off_file)
assert len(offset) == 3
assert scale > 0
assert len(axes) == 3
x_index = axes.find('x')
y_index = axes.find('y')
z_index = axes.find('z')
assert x_index >= 0 and x_index < 3
assert y_index >= 0 and y_index < 3
assert z_index >= 0 and z_index < 3
assert x_index != y_index and x_index != z_index and y_index != z_index
for obj in bpy.context.scene.objects:
# obj.name contains the group name of a group of faces, see http://paulbourke.net/dataformats/obj/
# every mesh is of type 'MESH', this works not only for ShapeNet but also for 'simple'
# obj files
if obj.type == 'MESH' and not 'BRC' in obj.name:
# change color
# this is based on https://stackoverflow.com/questions/4644650/blender-how-do-i-add-a-color-to-an-object
# but needed changing a lot of attributes according to documentation
obj.data.materials.append(material)
for vertex in obj.data.vertices:
# make a copy, otherwise axes switching does not work
vertex_copy = (vertex.co[0], vertex.co[1], vertex.co[2])
vertex.co[0] = vertex_copy[x_index]
vertex.co[1] = vertex_copy[y_index]
vertex.co[2] = vertex_copy[z_index]
vertex.co[0] = vertex.co[0] * scale + offset[0]
vertex.co[1] = vertex.co[1] * scale + offset[1]
vertex.co[2] = vertex.co[2] * scale + offset[2]
obj.name = 'BRC_' + obj.name
def load_txt(txt_file, radius, material, offset=(0, 0, 0), scale=1, axes='xyz'):
global sphere_base_mesh
assert len(offset) == 3
assert scale > 0
assert len(axes) == 3
x_index = axes.find('x')
y_index = axes.find('y')
z_index = axes.find('z')
assert x_index >= 0 and x_index < 3
assert y_index >= 0 and y_index < 3
assert z_index >= 0 and z_index < 3
assert x_index != y_index and x_index != z_index and y_index != z_index
voxel_file = open(txt_file, 'r')
voxel_lines = voxel_file.readlines()
voxel_file.close()
mesh = bmesh.new()
for line in voxel_lines:
vals = line.split(' ')
if not line.startswith('#') and line.strip() != '' and len(vals) >= 3:
location = (
float(vals[x_index]) * scale + offset[0],
float(vals[y_index]) * scale + offset[1],
float(vals[z_index]) * scale + offset[2]
)
m = sphere_base_mesh.copy()
for vertex in m.vertices:
vertex.co[0] = vertex.co[0] * radius + location[0]
vertex.co[1] = vertex.co[1] * radius + location[1]
vertex.co[2] = vertex.co[2] * radius + location[2]
mesh.from_mesh(m)
mesh2 = bpy.data.meshes.new('Mesh')
mesh.to_mesh(mesh2)
obj = bpy.data.objects.new('BRC_Point_Cloud', mesh2)
obj.data.materials.append(material)
obj.active_material_index = 0
obj.active_material = material
bpy.context.scene.objects.link(obj)
def load_binvox(binvox_file, radius, material, offset, scale, axes):
global cube_base_mesh
assert len(offset) == 3
assert len(scale) == 3
assert len(axes) == 3
x_index = axes.find("x")
y_index = axes.find("y")
z_index = axes.find("z")
assert x_index >= 0 and x_index < 3
assert y_index >= 0 and y_index < 3
assert z_index >= 0 and z_index < 3
assert x_index != y_index and x_index != z_index and y_index != z_index
with open(binvox_file, 'rb') as f:
model = binvox_rw.read_as_3d_array(f)
points = np.where(model.data)
locations = np.zeros((points[0].shape[0], 3), dtype=float)
locations[:, 0] = (points[x_index][:] + 0.5) / model.data.shape[x_index]
locations[:, 1] = (points[y_index][:] + 0.5) / model.data.shape[y_index]
locations[:, 2] = (points[z_index][:] + 0.5) / model.data.shape[z_index]
locations[:, 0] -= 0.5
locations[:, 1] -= 0.5
locations[:, 2] -= 0.5
locations[:, 0] = locations[:, 0] * scale[0] + offset[0]
locations[:, 1] = locations[:, 1] * scale[1] + offset[1]
locations[:, 2] = locations[:, 2] * scale[2] + offset[2]
mesh = bmesh.new()
for i in range(locations.shape[0]):
m = cube_base_mesh.copy()
for vertex in m.vertices:
vertex.co[0] = vertex.co[0] * radius + locations[i, 0]
vertex.co[1] = vertex.co[1] * radius + locations[i, 1]
vertex.co[2] = vertex.co[2] * radius + locations[i, 2]
mesh.from_mesh(m)
mesh2 = bpy.data.meshes.new('Mesh')
mesh.to_mesh(mesh2)
obj = bpy.data.objects.new('BRC_Occupancy', mesh2)
obj.data.materials.append(material)
obj.active_material_index = 0
obj.active_material = material
bpy.context.scene.objects.link(obj)
def load_volume(volume, radius, material, offset, scale, axes):
global cube_base_mesh
assert len(offset) == 3
assert len(scale) == 3
assert len(axes) == 3
x_index = axes.find("x")
y_index = axes.find("y")
z_index = axes.find("z")
assert x_index >= 0 and x_index < 3
assert y_index >= 0 and y_index < 3
assert z_index >= 0 and z_index < 3
assert x_index != y_index and x_index != z_index and y_index != z_index
points = np.where(volume > 0)
locations = np.zeros((points[0].shape[0], 3), dtype=float)
locations[:, 0] = (points[x_index][:] + 0.5) / volume.shape[x_index]
locations[:, 1] = (points[y_index][:] + 0.5) / volume.shape[y_index]
locations[:, 2] = (points[z_index][:] + 0.5) / volume.shape[z_index]
locations[:, 0] -= 0.5
locations[:, 1] -= 0.5
locations[:, 2] -= 0.5
locations[:, 0] = locations[:, 0] * scale[0] + offset[0]
locations[:, 1] = locations[:, 1] * scale[1] + offset[1]
locations[:, 2] = locations[:, 2] * scale[2] + offset[2]
mesh = bmesh.new()
for i in range(locations.shape[0]):
m = cube_base_mesh.copy()
for vertex in m.vertices:
vertex.co[0] = vertex.co[0] * radius + locations[i, 0]
vertex.co[1] = vertex.co[1] * radius + locations[i, 1]
vertex.co[2] = vertex.co[2] * radius + locations[i, 2]
mesh.from_mesh(m)
mesh2 = bpy.data.meshes.new('Mesh')
mesh.to_mesh(mesh2)
obj = bpy.data.objects.new('BRC_Occupancy', mesh2)
obj.data.materials.append(material)
obj.active_material_index = 0
obj.active_material = material
bpy.context.scene.objects.link(obj)
def render(camera_target, output_file, rotation, distance):
bpy.context.scene.render.filepath = output_file
camera_target.rotation_euler[0] = math.radians(rotation[0])
camera_target.rotation_euler[1] = math.radians(rotation[1])
camera_target.rotation_euler[2] = math.radians(rotation[2])
cam = bpy.context.scene.objects['Camera']
cam.location = (0, 3.0 * distance, 1.0 * distance)
bpy.ops.render.render(animation=False, write_still=True)
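# Illustrative call sequence for the loaders above (paths, materials and the
# camera_target object are assumptions set up elsewhere in this script):
#     load_off('model.off', mesh_material, offset=(0, 0, 0), scale=1, axes='xzy')
#     load_txt('points.txt', 0.01, point_material)
#     load_binvox('voxels.binvox', 0.5, voxel_material, (0, 0, 0), (1, 1, 1), 'xzy')
#     render(camera_target, 'render.png', rotation=(5, 0, 45), distance=1.5)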
|
the-stack_0_11926 | # Authors: Alexandre Gramfort <[email protected]>
# Matti Hamalainen <[email protected]>
# Martin Luessi <[email protected]>
#
# License: BSD (3-clause)
from .externals.six import string_types
import os
import copy
from math import ceil
import numpy as np
from scipy import linalg, sparse
from scipy.sparse import csr_matrix, coo_matrix
import warnings
from .filter import resample
from .fiff.evoked import _get_peak
from .parallel import parallel_func
from .surface import (read_surface, _get_ico_surface, read_morph_map,
_compute_nearest)
from .utils import (get_subjects_dir, _check_subject,
_check_pandas_index_arguments, _check_pandas_installed,
logger, verbose)
from .viz import plot_source_estimates
from .fixes import in1d
from .externals.six.moves import zip
def _read_stc(filename):
""" Aux Function
"""
fid = open(filename, 'rb')
stc = dict()
fid.seek(0, 2) # go to end of file
file_length = fid.tell()
fid.seek(0, 0) # go to beginning of file
# read tmin in ms
stc['tmin'] = float(np.fromfile(fid, dtype=">f4", count=1))
stc['tmin'] /= 1000.0
# read sampling rate in ms
stc['tstep'] = float(np.fromfile(fid, dtype=">f4", count=1))
stc['tstep'] /= 1000.0
# read number of vertices/sources
vertices_n = int(np.fromfile(fid, dtype=">u4", count=1))
# read the source vector
stc['vertices'] = np.fromfile(fid, dtype=">u4", count=vertices_n)
# read the number of timepts
data_n = int(np.fromfile(fid, dtype=">u4", count=1))
if (vertices_n and # vertices_n can be 0 (empty stc)
((file_length / 4 - 4 - vertices_n) % (data_n * vertices_n)) != 0):
raise ValueError('incorrect stc file size')
# read the data matrix
stc['data'] = np.fromfile(fid, dtype=">f4", count=vertices_n * data_n)
stc['data'] = stc['data'].reshape([data_n, vertices_n]).T
# close the file
fid.close()
return stc
def _write_stc(filename, tmin, tstep, vertices, data):
"""Write an STC file
Parameters
----------
filename : string
The name of the STC file.
tmin : float
The first time point of the data in seconds.
tstep : float
Time between frames in seconds.
vertices : array of integers
Vertex indices (0 based).
data : 2D array
The data matrix (nvert * ntime).
"""
fid = open(filename, 'wb')
# write start time in ms
fid.write(np.array(1000 * tmin, dtype='>f4').tostring())
# write sampling rate in ms
fid.write(np.array(1000 * tstep, dtype='>f4').tostring())
# write number of vertices
fid.write(np.array(vertices.shape[0], dtype='>u4').tostring())
# write the vertex indices
fid.write(np.array(vertices, dtype='>u4').tostring())
# write the number of timepts
fid.write(np.array(data.shape[1], dtype='>u4').tostring())
#
# write the data
#
fid.write(np.array(data.T, dtype='>f4').tostring())
# close the file
fid.close()
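# Minimal round-trip sketch for the two STC helpers above (illustrative only,
# not part of the module's API; the values below are made up).
def _example_stc_roundtrip():
    import tempfile
    vertices = np.array([0, 5, 12], dtype=np.uint32)
    data = np.arange(12, dtype=np.float32).reshape(3, 4)
    with tempfile.NamedTemporaryFile(suffix='-lh.stc', delete=False) as f:
        fname = f.name
    _write_stc(fname, tmin=0.1, tstep=0.001, vertices=vertices, data=data)
    stc = _read_stc(fname)
    # tmin/tstep are stored in ms on disk but returned in seconds
    assert np.allclose(stc['data'], data)
    assert np.array_equal(stc['vertices'], vertices)
    return stc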
def _read_3(fid):
""" Read 3 byte integer from file
"""
data = np.fromfile(fid, dtype=np.uint8, count=3).astype(np.int32)
out = np.left_shift(data[0], 16) + np.left_shift(data[1], 8) + data[2]
return out
def _read_w(filename):
"""Read a w file and return as dict
w files contain activations or source reconstructions for a single time
point.
Parameters
----------
filename : string
The name of the w file.
Returns
-------
data: dict
The w structure. It has the following keys:
vertices vertex indices (0 based)
data The data matrix (nvert long)
"""
with open(filename, 'rb', buffering=0) as fid: # buffering=0 for np bug
# skip first 2 bytes
fid.read(2)
# read number of vertices/sources (3 byte integer)
vertices_n = int(_read_3(fid))
vertices = np.zeros((vertices_n), dtype=np.int32)
data = np.zeros((vertices_n), dtype=np.float32)
# read the vertices and data
for i in range(vertices_n):
vertices[i] = _read_3(fid)
data[i] = np.fromfile(fid, dtype='>f4', count=1)[0]
w = dict()
w['vertices'] = vertices
w['data'] = data
return w
def _write_3(fid, val):
""" Write 3 byte integer to file
"""
f_bytes = np.zeros((3), dtype=np.uint8)
f_bytes[0] = (val >> 16) & 255
f_bytes[1] = (val >> 8) & 255
f_bytes[2] = val & 255
fid.write(f_bytes.tostring())
def _write_w(filename, vertices, data):
"""Read a w file
w files contain activations or source reconstructions for a single time
point.
Parameters
----------
filename: string
The name of the w file.
vertices: array of int
Vertex indices (0 based).
data: 1D array
The data array (nvert).
"""
assert(len(vertices) == len(data))
fid = open(filename, 'wb')
# write 2 zero bytes
fid.write(np.zeros((2), dtype=np.uint8).tostring())
# write number of vertices/sources (3 byte integer)
vertices_n = len(vertices)
_write_3(fid, vertices_n)
# write the vertices and data
for i in range(vertices_n):
_write_3(fid, vertices[i])
#XXX: without float() endianness is wrong, not sure why
fid.write(np.array(float(data[i]), dtype='>f4').tostring())
# close the file
fid.close()
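# Minimal round-trip sketch for the single-time-point .w helpers above
# (illustrative only, not part of the module's API).
def _example_w_roundtrip():
    import tempfile
    vertices = np.array([2, 7, 9], dtype=np.int32)
    data = np.array([0.5, -1.0, 2.0], dtype=np.float32)
    with tempfile.NamedTemporaryFile(suffix='-lh.w', delete=False) as f:
        fname = f.name
    _write_w(fname, vertices, data)
    w = _read_w(fname)
    assert np.array_equal(w['vertices'], vertices)
    assert np.allclose(w['data'], data)
    return w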
def read_source_estimate(fname, subject=None):
"""Read a soure estimate object
Parameters
----------
fname : str
Path to (a) source-estimate file(s).
subject : str | None
Name of the subject the source estimate(s) is (are) from.
It is good practice to set this attribute to avoid combining
incompatible labels and SourceEstimates (e.g., ones from other
subjects). Note that due to file specification limitations, the
subject name isn't saved to or loaded from files written to disk.
Returns
-------
stc : SourceEstimate | VolSourceEstimate
        The source estimate object loaded from file.
Notes
-----
- for volume source estimates, ``fname`` should provide the path to a
      single file named '*-vl.stc' or '*-vol.stc'
- for surface source estimates, ``fname`` should either provide the
path to the file corresponding to a single hemisphere ('*-lh.stc',
'*-rh.stc') or only specify the asterisk part in these patterns. In any
case, the function expects files for both hemisphere with names
following this pattern.
- for single time point .w files, ``fname`` should follow the same
pattern as for surface estimates, except that files are named
'*-lh.w' and '*-rh.w'.
"""
fname_arg = fname
# make sure corresponding file(s) can be found
ftype = None
if os.path.exists(fname):
if fname.endswith('-vl.stc') or fname.endswith('-vol.stc') or \
fname.endswith('-vl.w') or fname.endswith('-vol.w'):
ftype = 'volume'
elif fname.endswith('.stc'):
ftype = 'surface'
if fname.endswith(('-lh.stc', '-rh.stc')):
fname = fname[:-7]
else:
err = ("Invalid .stc filename: %r; needs to end with "
"hemisphere tag ('...-lh.stc' or '...-rh.stc')"
% fname)
raise IOError(err)
elif fname.endswith('.w'):
ftype = 'w'
if fname.endswith(('-lh.w', '-rh.w')):
fname = fname[:-5]
else:
err = ("Invalid .w filename: %r; needs to end with "
"hemisphere tag ('...-lh.w' or '...-rh.w')"
% fname)
raise IOError(err)
    if ftype != 'volume':
stc_exist = [os.path.exists(f)
for f in [fname + '-rh.stc', fname + '-lh.stc']]
w_exist = [os.path.exists(f)
for f in [fname + '-rh.w', fname + '-lh.w']]
        if all(stc_exist) and (ftype != 'w'):
ftype = 'surface'
elif all(w_exist):
ftype = 'w'
elif any(stc_exist) or any(w_exist):
raise IOError("Hemisphere missing for %r" % fname_arg)
else:
raise IOError("SourceEstimate File(s) not found for: %r"
% fname_arg)
# read the files
if ftype == 'volume': # volume source space
if fname.endswith('.stc'):
kwargs = _read_stc(fname)
elif fname.endswith('.w'):
kwargs = _read_w(fname)
kwargs['data'] = kwargs['data'][:, np.newaxis]
kwargs['tmin'] = 0.0
kwargs['tstep'] = 0.0
else:
raise IOError('Volume source estimate must end with .stc or .w')
elif ftype == 'surface': # stc file with surface source spaces
lh = _read_stc(fname + '-lh.stc')
rh = _read_stc(fname + '-rh.stc')
assert lh['tmin'] == rh['tmin']
assert lh['tstep'] == rh['tstep']
kwargs = lh.copy()
kwargs['data'] = np.r_[lh['data'], rh['data']]
kwargs['vertices'] = [lh['vertices'], rh['vertices']]
elif ftype == 'w': # w file with surface source spaces
lh = _read_w(fname + '-lh.w')
rh = _read_w(fname + '-rh.w')
kwargs = lh.copy()
kwargs['data'] = np.atleast_2d(np.r_[lh['data'], rh['data']]).T
kwargs['vertices'] = [lh['vertices'], rh['vertices']]
# w files only have a single time point
kwargs['tmin'] = 0.0
kwargs['tstep'] = 1.0
if ftype != 'volume':
# Make sure the vertices are ordered
vertices = kwargs['vertices']
if any([np.any(np.diff(v.astype(int)) <= 0) for v in vertices]):
sidx = [np.argsort(verts) for verts in vertices]
vertices = [verts[idx] for verts, idx in zip(vertices, sidx)]
data = kwargs['data'][np.r_[sidx[0], len(sidx[0]) + sidx[1]]]
kwargs['vertices'] = vertices
kwargs['data'] = data
kwargs['subject'] = subject
if ftype == 'volume':
stc = VolSourceEstimate(**kwargs)
else:
stc = SourceEstimate(**kwargs)
return stc
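# Usage sketch for read_source_estimate (file names are illustrative). Both
# calls below load the same surface estimate, since the hemisphere tag can be
# inferred and both '-lh.stc' and '-rh.stc' files are read:
#     stc = read_source_estimate('mydata-lh.stc', subject='sample')
#     stc = read_source_estimate('mydata', subject='sample')
# A volume estimate comes from a single file ending in '-vl.stc' or '-vol.stc':
#     stc_vol = read_source_estimate('mydata-vl.stc')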
def _make_stc(data, vertices, tmin=None, tstep=None, subject=None):
"""Helper function to generate either a surface or volume source estimate
"""
if isinstance(vertices, list) and len(vertices) == 2:
# make a surface source estimate
stc = SourceEstimate(data, vertices=vertices, tmin=tmin, tstep=tstep,
subject=subject)
elif isinstance(vertices, np.ndarray) or isinstance(vertices, list)\
and len(vertices) == 1:
stc = VolSourceEstimate(data, vertices=vertices, tmin=tmin,
tstep=tstep, subject=subject)
else:
raise ValueError('vertices has to be either a list with one or two '
'arrays or an array')
return stc
def _verify_source_estimate_compat(a, b):
"""Make sure two SourceEstimates are compatible for arith. operations"""
compat = False
if len(a.vertno) == len(b.vertno):
if all([np.array_equal(av, vv) for av, vv in zip(a.vertno, b.vertno)]):
compat = True
if not compat:
raise ValueError('Cannot combine SourceEstimates that do not have the '
'same vertices. Consider using stc.expand().')
if a.subject != b.subject:
raise ValueError('source estimates do not have the same subject '
'names, "%s" and "%s"' % (a.name, b.name))
class _BaseSourceEstimate(object):
"""Abstract base class for source estimates
Parameters
----------
data : array of shape (n_dipoles, n_times) | 2-tuple (kernel, sens_data)
The data in source space. The data can either be a single array or
a tuple with two arrays: "kernel" shape (n_vertices, n_sensors) and
"sens_data" shape (n_sensors, n_times). In this case, the source
space data corresponds to "numpy.dot(kernel, sens_data)".
vertices : array | list of two arrays
Vertex numbers corresponding to the data.
tmin : scalar
Time point of the first sample in data.
tstep : scalar
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
vertices : array or list of arrays of shape (n_dipoles,)
The indices of the dipoles in the different source spaces. Can
be an array if there is only one source space (e.g., for volumes).
data : array of shape (n_dipoles, n_times)
The data in source space.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, n_times).
"""
@verbose
def __init__(self, data, vertices=None, tmin=None, tstep=None,
subject=None, verbose=None):
kernel, sens_data = None, None
if isinstance(data, tuple):
if len(data) != 2:
raise ValueError('If data is a tuple it has to be length 2')
kernel, sens_data = data
data = None
if kernel.shape[1] != sens_data.shape[0]:
raise ValueError('kernel and sens_data have invalid '
'dimensions')
if isinstance(vertices, list):
if not (len(vertices) == 2 or len(vertices) == 1) or \
not all([isinstance(v, np.ndarray) for v in vertices]):
raise ValueError('Vertices, if a list, must contain one or '
'two numpy arrays')
if any([np.any(np.diff(v.astype(int)) <= 0) for v in vertices]):
raise ValueError('Vertices must be ordered in increasing '
'order.')
n_src = sum([len(v) for v in vertices])
if len(vertices) == 1:
vertices = vertices[0]
elif isinstance(vertices, np.ndarray):
n_src = len(vertices)
else:
raise ValueError('Vertices must be a list or numpy array')
# safeguard the user against doing something silly
if data is not None and data.shape[0] != n_src:
raise ValueError('Number of vertices (%i) and stc.shape[0] (%i) '
'must match' % (n_src, data.shape[0]))
self._data = data
self.tmin = tmin
self.tstep = tstep
self.vertno = vertices
self.verbose = verbose
self._kernel = kernel
self._sens_data = sens_data
self._kernel_removed = False
self.times = None
self._update_times()
self.subject = _check_subject(None, subject, False)
def _remove_kernel_sens_data_(self):
"""Remove kernel and sensor space data and compute self._data
"""
if self._kernel is not None or self._sens_data is not None:
self._kernel_removed = True
self._data = np.dot(self._kernel, self._sens_data)
self._kernel = None
self._sens_data = None
def crop(self, tmin=None, tmax=None):
"""Restrict SourceEstimate to a time interval
Parameters
----------
tmin : float or None
The first time point in seconds. If None the first present is used.
tmax : float or None
The last time point in seconds. If None the last present is used.
"""
mask = np.ones(len(self.times), dtype=np.bool)
if tmax is not None:
mask = mask & (self.times <= tmax)
if tmin is not None:
mask = mask & (self.times >= tmin)
self.tmin = tmin
if self._kernel is not None and self._sens_data is not None:
self._sens_data = self._sens_data[:, mask]
else:
self._data = self._data[:, mask]
self._update_times()
return self # return self for chaining methods
@verbose
def resample(self, sfreq, npad=100, window='boxcar', n_jobs=1,
verbose=None):
"""Resample data
Parameters
----------
sfreq : float
New sample rate to use.
npad : int
Amount to pad the start and end of the data.
window : string or tuple
Window to use in resampling. See scipy.signal.resample.
n_jobs : int
Number of jobs to run in parallel.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
Notes
-----
For some data, it may be more accurate to use npad=0 to reduce
artifacts. This is dataset dependent -- check your data!
Note that the sample rate of the original data is inferred from tstep.
"""
# resampling in sensor instead of source space gives a somewhat
# different result, so we don't allow it
self._remove_kernel_sens_data_()
o_sfreq = 1.0 / self.tstep
self._data = resample(self._data, sfreq, o_sfreq, npad, n_jobs=n_jobs)
# adjust indirectly affected variables
self.tstep = 1.0 / sfreq
self._update_times()
@property
def data(self):
if self._data is None:
# compute the solution the first time the data is accessed and
# remove the kernel and sensor data
self._remove_kernel_sens_data_()
return self._data
@property
def shape(self):
if self._data is not None:
return self._data.shape
return (self._kernel.shape[0], self._sens_data.shape[1])
def _update_times(self):
"""Update the times attribute after changing tmin, tmax, or tstep"""
self.times = self.tmin + (self.tstep * np.arange(self.shape[1]))
def __add__(self, a):
stc = copy.deepcopy(self)
stc += a
return stc
def __iadd__(self, a):
self._remove_kernel_sens_data_()
if isinstance(a, _BaseSourceEstimate):
_verify_source_estimate_compat(self, a)
self._data += a.data
else:
self._data += a
return self
def mean(self):
"""Make a summary stc file with mean power between tmin and tmax.
Returns
-------
stc : instance of SourceEstimate
The modified stc (note: method operates inplace).
"""
data = self.data
tmax = self.tmin + self.tstep * data.shape[1]
tmin = (self.tmin + tmax) / 2.
tstep = tmax - self.tmin
mean_stc = SourceEstimate(self.data.mean(axis=1)[:, np.newaxis],
vertices=self.vertno, tmin=tmin,
tstep=tstep, subject=self.subject)
return mean_stc
def __sub__(self, a):
stc = copy.deepcopy(self)
stc -= a
return stc
def __isub__(self, a):
self._remove_kernel_sens_data_()
if isinstance(a, _BaseSourceEstimate):
_verify_source_estimate_compat(self, a)
self._data -= a.data
else:
self._data -= a
return self
def __truediv__(self, a):
return self.__div__(a)
def __div__(self, a):
stc = copy.deepcopy(self)
stc /= a
return stc
def __itruediv__(self, a):
return self.__idiv__(a)
def __idiv__(self, a):
self._remove_kernel_sens_data_()
if isinstance(a, _BaseSourceEstimate):
_verify_source_estimate_compat(self, a)
self._data /= a.data
else:
self._data /= a
return self
def __mul__(self, a):
stc = copy.deepcopy(self)
stc *= a
return stc
def __imul__(self, a):
self._remove_kernel_sens_data_()
if isinstance(a, _BaseSourceEstimate):
_verify_source_estimate_compat(self, a)
self._data *= a.data
else:
self._data *= a
return self
def __pow__(self, a):
stc = copy.deepcopy(self)
stc **= a
return stc
def __ipow__(self, a):
self._remove_kernel_sens_data_()
self._data **= a
return self
def __radd__(self, a):
return self + a
def __rsub__(self, a):
return self - a
def __rmul__(self, a):
return self * a
def __rdiv__(self, a):
return self / a
def __neg__(self):
stc = copy.deepcopy(self)
stc._remove_kernel_sens_data_()
stc._data *= -1
return stc
def __pos__(self):
return self
def sqrt(self):
"""Return copy of SourceEstimate with sqrt(data)."""
return self ** (0.5)
def copy(self):
"""Return copy of SourceEstimate instance"""
return copy.deepcopy(self)
def bin(self, width, tstart=None, tstop=None, func=np.mean):
"""Returns a SourceEstimate object with data summarized over time bins
Time bins of ``width`` seconds. This method is intended for
visualization only. No filter is applied to the data before binning,
making the method inappropriate as a tool for downsampling data.
Parameters
----------
width : scalar
Width of the individual bins in seconds.
func : callable
Function that is applied to summarize the data. Needs to accept a
numpy.array as first input and an ``axis`` keyword argument.
tstart : scalar | None
Time point where the first bin starts. The default is the first
time point of the stc.
tstop : scalar | None
Last possible time point contained in a bin (if the last bin would
be shorter than width it is dropped). The default is the last time
point of the stc.
Returns
-------
stc : instance of SourceEstimate
The binned SourceEstimate.
"""
if tstart is None:
tstart = self.tmin
if tstop is None:
tstop = self.times[-1]
times = np.arange(tstart, tstop + self.tstep, width)
nv, _ = self.shape
nt = len(times) - 1
data = np.empty((nv, nt), dtype=self.data.dtype)
for i in range(nt):
idx = (self.times >= times[i]) & (self.times < times[i + 1])
data[:, i] = func(self.data[:, idx], axis=1)
tmin = times[0] + width / 2.
stc = _make_stc(data, vertices=self.vertno,
tmin=tmin, tstep=width, subject=self.subject)
return stc
def transform_data(self, func, idx=None, tmin_idx=None, tmax_idx=None):
"""Get data after a linear (time) transform has been applied
        The transform is applied to each source time course independently.
Parameters
----------
func : callable
The transform to be applied, including parameters (see, e.g.,
mne.fixes.partial). The first parameter of the function is the
input data. The first return value is the transformed data,
remaining outputs are ignored. The first dimension of the
transformed data has to be the same as the first dimension of the
input data.
idx : array | None
            Indices of source time courses for which to compute transform.
If None, all time courses are used.
tmin_idx : int | None
Index of first time point to include. If None, the index of the
first time point is used.
tmax_idx : int | None
Index of the first time point not to include. If None, time points
up to (and including) the last time point are included.
Returns
-------
data_t : ndarray
The transformed data.
.. note::
Applying transforms can be significantly faster if the
SourceEstimate object was created using "(kernel, sens_data)", for
the "data" parameter as the transform is applied in sensor space.
Inverse methods, e.g., "apply_inverse_epochs", or "lcmv_epochs" do
this automatically (if possible).
"""
if idx is None:
# use all time courses by default
idx = slice(None, None)
if self._kernel is None and self._sens_data is None:
if self._kernel_removed:
warnings.warn('Performance can be improved by not accessing '
'the data attribute before calling this method.')
# transform source space data directly
data_t = func(self.data[idx, tmin_idx:tmax_idx])
if isinstance(data_t, tuple):
# use only first return value
data_t = data_t[0]
else:
# apply transform in sensor space
sens_data_t = func(self._sens_data[:, tmin_idx:tmax_idx])
if isinstance(sens_data_t, tuple):
# use only first return value
sens_data_t = sens_data_t[0]
# apply inverse
data_shape = sens_data_t.shape
if len(data_shape) > 2:
# flatten the last dimensions
sens_data_t = sens_data_t.reshape(data_shape[0],
np.prod(data_shape[1:]))
data_t = np.dot(self._kernel[idx, :], sens_data_t)
# restore original shape if necessary
if len(data_shape) > 2:
data_t = data_t.reshape(data_t.shape[0], *data_shape[1:])
return data_t
def transform(self, func, idx=None, tmin=None, tmax=None, copy=False):
"""Apply linear transform
The transform is applied to each source time course independently.
Parameters
----------
func : callable
The transform to be applied, including parameters (see, e.g.,
mne.fixes.partial). The first parameter of the function is the
input data. The first two dimensions of the transformed data
should be (i) vertices and (ii) time. Transforms which yield 3D
output (e.g. time-frequency transforms) are valid, so long as the
first two dimensions are vertices and time. In this case, the
copy parameter (see below) must be True and a list of
SourceEstimates, rather than a single instance of SourceEstimate,
will be returned, one for each index of the 3rd dimension of the
transformed data. In the case of transforms yielding 2D output
(e.g. filtering), the user has the option of modifying the input
inplace (copy = False) or returning a new instance of
SourceEstimate (copy = True) with the transformed data.
idx : array | None
Indices of source time courses for which to compute transform.
If None, all time courses are used.
tmin : float | int | None
First time point to include (ms). If None, self.tmin is used.
tmax : float | int | None
Last time point to include (ms). If None, self.tmax is used.
copy : bool
If True, return a new instance of SourceEstimate instead of
modifying the input inplace.
Returns
-------
stcs : instance of SourceEstimate | list
The transformed stc or, in the case of transforms which yield
N-dimensional output (where N > 2), a list of stcs. For a list,
copy must be True.
Notes
-----
Applying transforms can be significantly faster if the
SourceEstimate object was created using "(kernel, sens_data)", for
the "data" parameter as the transform is applied in sensor space.
Inverse methods, e.g., "apply_inverse_epochs", or "lcmv_epochs" do
this automatically (if possible).
"""
# min and max data indices to include
times = np.round(1000 * self.times)
if tmin is None:
tmin_idx = None
else:
tmin = float(tmin)
tmin_idx = np.where(times >= tmin)[0][0]
if tmax is None:
tmax_idx = None
else:
tmax = float(tmax)
tmax_idx = np.where(times <= tmax)[0][-1]
data_t = self.transform_data(func, idx=idx, tmin_idx=tmin_idx,
tmax_idx=tmax_idx)
# account for change in n_vertices
if idx is not None:
idx_lh = idx[idx < len(self.lh_vertno)]
idx_rh = idx[idx >= len(self.lh_vertno)] - len(self.lh_vertno)
verts_lh = self.lh_vertno[idx_lh]
verts_rh = self.rh_vertno[idx_rh]
else:
verts_lh = self.lh_vertno
verts_rh = self.rh_vertno
verts = [verts_lh, verts_rh]
tmin_idx = 0 if tmin_idx is None else tmin_idx
tmax_idx = -1 if tmax_idx is None else tmax_idx
tmin = self.times[tmin_idx]
times = np.arange(self.times[tmin_idx],
self.times[tmax_idx] + self.tstep / 2, self.tstep)
if data_t.ndim > 2:
# return list of stcs if transformed data has dimensionality > 2
if copy:
stcs = [SourceEstimate(data_t[:, :, a], verts, tmin,
self.tstep, self.subject)
for a in range(data_t.shape[-1])]
else:
raise ValueError('copy must be True if transformed data has '
'more than 2 dimensions')
else:
# return new or overwritten stc
stcs = self if not copy else self.copy()
stcs._data, stcs.vertno = data_t, verts
stcs.tmin, stcs.times = tmin, times
return stcs
def as_data_frame(self, index=None, scale_time=1e3, copy=True):
"""Represent source estimates as Pandas DataFrame
Export source estimates in tabular structure with vertices as columns
and two additional info columns 'subject' and 'time'.
This function is useful to visualize and analyse source time courses
with external statistical software such as statsmodels or R.
Parameters
----------
index : tuple of str | None
Column to be used as index for the data. Valid string options
are 'subject' and 'time'. If None, both info
columns will be included in the table as categorial data.
If stc.subject is None, only time will be included.
scale_time : float
Scaling to be applied to time units.
copy : bool
            If True, data will be copied; otherwise data may be modified in place.
Returns
-------
df : instance of DataFrame
Source estimates exported into tabular data structure.
"""
pd = _check_pandas_installed()
default_index = ['subject', 'time']
if index is not None:
_check_pandas_index_arguments(index, default_index)
else:
index = default_index
if self.subject is None:
index.remove('subject')
data = self.data.T
shape = data.shape
mindex = list()
mindex.append(('time', self.times * scale_time))
mindex.append(('subject', np.repeat(self.subject, shape[0])))
if copy:
data = data.copy()
assert all(len(mdx) == len(mindex[0]) for mdx in mindex)
if isinstance(self.vertno, list):
# surface source estimates
v_names = [i for e in [['%s %i' % ('LH' if ii < 1 else 'RH', vert)
for vert in vertno]
for ii, vertno in enumerate(self.vertno)] for i in e]
else:
# volume source estimates
v_names = ['VOL %d' % vert for vert in self.vertno]
df = pd.DataFrame(data, columns=v_names)
[df.insert(i, k, v) for i, (k, v) in enumerate(mindex)]
if index is not None:
if 'time' in index:
df['time'] = df['time'].astype(np.int64)
with warnings.catch_warnings(record=True):
df.set_index(index, inplace=True)
return df
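# Sketch of the lazy "(kernel, sens_data)" construction described in the
# _BaseSourceEstimate docstring above (shapes and values are illustrative):
#     kernel = np.random.randn(7498, 306)       # (n_vertices, n_sensors)
#     sens_data = np.random.randn(306, 100)     # (n_sensors, n_times)
#     vertices = [np.arange(3749), np.arange(3749)]
#     stc = SourceEstimate((kernel, sens_data), vertices, tmin=0., tstep=1e-3)
#     # np.dot(kernel, sens_data) is only computed when stc.data is first
#     # accessed (or when an in-place arithmetic operation requires it).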
class SourceEstimate(_BaseSourceEstimate):
"""Container for surface source estimates
Parameters
----------
data : array of shape (n_dipoles, n_times) | 2-tuple (kernel, sens_data)
The data in source space. The data can either be a single array or
a tuple with two arrays: "kernel" shape (n_vertices, n_sensors) and
"sens_data" shape (n_sensors, n_times). In this case, the source
space data corresponds to "numpy.dot(kernel, sens_data)".
vertices : list of two arrays
Vertex numbers corresponding to the data.
tmin : scalar
Time point of the first sample in data.
tstep : scalar
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
vertno : list of two arrays of shape (n_dipoles,)
The indices of the dipoles in the left and right source space.
data : array of shape (n_dipoles, n_times)
The data in source space.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, n_times).
"""
@verbose
def __init__(self, data, vertices=None, tmin=None, tstep=None,
subject=None, verbose=None):
if not (isinstance(vertices, list) and len(vertices) == 2):
raise ValueError('Vertices, if a list, must contain two '
'numpy arrays')
_BaseSourceEstimate.__init__(self, data, vertices=vertices, tmin=tmin,
tstep=tstep, subject=subject,
verbose=verbose)
@verbose
def save(self, fname, ftype='stc', verbose=None):
"""Save the source estimates to a file
Parameters
----------
fname : string
The stem of the file name. The file names used for surface source
spaces are obtained by adding "-lh.stc" and "-rh.stc" (or "-lh.w"
and "-rh.w") to the stem provided, for the left and the right
hemisphere, respectively.
ftype : string
File format to use. Allowed values are "stc" (default) and "w".
The "w" format only supports a single time point.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
"""
if ftype not in ['stc', 'w']:
raise ValueError('ftype must be "stc" or "w", not "%s"' % ftype)
lh_data = self.data[:len(self.lh_vertno)]
rh_data = self.data[-len(self.rh_vertno):]
if ftype == 'stc':
logger.info('Writing STC to disk...')
_write_stc(fname + '-lh.stc', tmin=self.tmin, tstep=self.tstep,
vertices=self.lh_vertno, data=lh_data)
_write_stc(fname + '-rh.stc', tmin=self.tmin, tstep=self.tstep,
vertices=self.rh_vertno, data=rh_data)
elif ftype == 'w':
if self.shape[1] != 1:
raise ValueError('w files can only contain a single time '
'point')
logger.info('Writing STC to disk (w format)...')
_write_w(fname + '-lh.w', vertices=self.lh_vertno,
data=lh_data[:, 0])
_write_w(fname + '-rh.w', vertices=self.rh_vertno,
data=rh_data[:, 0])
logger.info('[done]')
def __repr__(self):
if isinstance(self.vertno, list):
nv = sum([len(v) for v in self.vertno])
else:
nv = self.vertno.size
s = "%d vertices" % nv
if self.subject is not None:
s += ", subject : %s" % self.subject
s += ", tmin : %s (ms)" % (1e3 * self.tmin)
s += ", tmax : %s (ms)" % (1e3 * self.times[-1])
s += ", tstep : %s (ms)" % (1e3 * self.tstep)
s += ", data size : %s x %s" % self.shape
return "<SourceEstimate | %s>" % s
@property
def lh_data(self):
return self.data[:len(self.lh_vertno)]
@property
def rh_data(self):
return self.data[len(self.lh_vertno):]
@property
def lh_vertno(self):
return self.vertno[0]
@property
def rh_vertno(self):
return self.vertno[1]
def _hemilabel_stc(self, label):
if label.hemi == 'lh':
stc_vertices = self.vertno[0]
else:
stc_vertices = self.vertno[1]
# find index of the Label's vertices
idx = np.nonzero(in1d(stc_vertices, label.vertices))[0]
# find output vertices
vertices = stc_vertices[idx]
# find data
if label.hemi == 'rh':
values = self.data[idx + len(self.vertno[0])]
else:
values = self.data[idx]
return vertices, values
def in_label(self, label):
"""Returns a SourceEstimate object restricted to a label
        The returned SourceEstimate contains the time courses of
        activation of all sources inside the label.
Parameters
----------
label : Label | BiHemiLabel
The label (as created for example by mne.read_label). If the label
does not match any sources in the SourceEstimate, a ValueError is
raised.
"""
# make sure label and stc are compatible
if label.subject is not None and self.subject is not None \
and label.subject != self.subject:
raise RuntimeError('label and stc must have same subject names, '
'currently "%s" and "%s"' % (label.subject,
self.subject))
if label.hemi == 'both':
lh_vert, lh_val = self._hemilabel_stc(label.lh)
rh_vert, rh_val = self._hemilabel_stc(label.rh)
vertices = [lh_vert, rh_vert]
values = np.vstack((lh_val, rh_val))
elif label.hemi == 'lh':
lh_vert, values = self._hemilabel_stc(label)
vertices = [lh_vert, np.array([])]
elif label.hemi == 'rh':
rh_vert, values = self._hemilabel_stc(label)
vertices = [np.array([]), rh_vert]
else:
raise TypeError("Expected Label or BiHemiLabel; got %r" % label)
if sum([len(v) for v in vertices]) == 0:
raise ValueError('No vertices match the label in the stc file')
label_stc = SourceEstimate(values, vertices=vertices,
tmin=self.tmin, tstep=self.tstep,
subject=self.subject)
return label_stc
def expand(self, vertno):
"""Expand SourceEstimate to include more vertices
This will add rows to stc.data (zero-filled) and modify stc.vertno
to include all vertices in stc.vertno and the input vertno.
Parameters
----------
vertno : list of array
New vertices to add. Can also contain old values.
Returns
-------
stc : instance of SourceEstimate
The modified stc (note: method operates inplace).
"""
if not isinstance(vertno, list):
raise TypeError('vertno must be a list')
if not len(self.vertno) == len(vertno):
raise ValueError('vertno must have the same length as stc.vertno')
# can no longer use kernel and sensor data
self._remove_kernel_sens_data_()
inserters = list()
offsets = [0]
for vi, (v_old, v_new) in enumerate(zip(self.vertno, vertno)):
v_new = np.setdiff1d(v_new, v_old)
inds = np.searchsorted(v_old, v_new)
# newer numpy might overwrite inds after np.insert, copy here
inserters += [inds.copy()]
offsets += [len(v_old)]
self.vertno[vi] = np.insert(v_old, inds, v_new)
inds = [ii + offset for ii, offset in zip(inserters, offsets[:-1])]
inds = np.concatenate(inds)
new_data = np.zeros((len(inds), self._data.shape[1]))
self._data = np.insert(self._data, inds, new_data, axis=0)
return self
@verbose
def extract_label_time_course(self, labels, src, mode='mean_flip',
allow_empty=False, verbose=None):
"""Extract label time courses for lists of labels
This function will extract one time course for each label. The way the
time courses are extracted depends on the mode parameter.
Valid values for mode are:
'mean': Average within each label.
'mean_flip': Average within each label with sign flip depending on
source orientation.
'pca_flip': Apply an SVD to the time courses within each label and use
the scaled and sign-flipped first right-singular vector as the label
time course. The scaling is performed such that the power of the label
time course is the same as the average per-vertex time course power
within the label. The sign of the resulting time course is adjusted by
multiplying it with "sign(dot(u, flip))" where u is the first
        left-singular vector, and flip is a sign-flip vector based on the
vertex normals. This procedure assures that the phase does not
randomly change by 180 degrees from one stc to the next.
See also mne.extract_label_time_course to extract time courses for a
list of SourceEstimate more efficiently.
Parameters
----------
labels : Label | list of Label
The labels for which to extract the time courses.
src : list
Source spaces for left and right hemisphere.
mode : str
Extraction mode, see explanation above.
allow_empty : bool
Instead of emitting an error, return all-zero time course for
labels that do not have any vertices in the source estimate.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
label_tc : array, shape=(len(labels), n_times)
Extracted time course for each label.
"""
label_tc = extract_label_time_course(self, labels, src, mode=mode,
return_generator=False,
allow_empty=allow_empty,
verbose=verbose)
return label_tc
def center_of_mass(self, subject=None, hemi=None, restrict_vertices=False,
subjects_dir=None):
"""Return the vertex on a given surface that is at the center of mass
of the activity in stc. Note that all activity must occur in a single
        hemisphere, otherwise an error is raised. The "mass" of each point in
space for computing the spatial center of mass is computed by summing
across time, and vice-versa for each point in time in computing the
temporal center of mass. This is useful for quantifying spatio-temporal
cluster locations, especially when combined with the function
mne.source_space.vertex_to_mni().
Parameters
----------
subject : string | None
The subject the stc is defined for.
hemi : int, or None
Calculate the center of mass for the left (0) or right (1)
hemisphere. If None, one of the hemispheres must be all zeroes,
and the center of mass will be calculated for the other
hemisphere (useful for getting COM for clusters).
restrict_vertices : bool, or array of int
If True, returned vertex will be one from stc. Otherwise, it could
be any vertex from surf. If an array of int, the returned vertex
            will come from that array. For the most accurate estimates, do not
restrict vertices.
subjects_dir : str, or None
Path to the SUBJECTS_DIR. If None, the path is obtained by using
the environment variable SUBJECTS_DIR.
Returns
-------
vertex : int
Vertex of the spatial center of mass for the inferred hemisphere,
with each vertex weighted by the sum of the stc across time. For a
boolean stc, then, this would be weighted purely by the duration
each vertex was active.
hemi : int
Hemisphere the vertex was taken from.
t : float
Time of the temporal center of mass (weighted by the sum across
source vertices).
References:
Used in Larson and Lee, "The cortical dynamics underlying effective
switching of auditory spatial attention", NeuroImage 2012.
"""
subject = _check_subject(self.subject, subject)
values = np.sum(self.data, axis=1) # sum across time
vert_inds = [np.arange(len(self.vertno[0])),
np.arange(len(self.vertno[1])) + len(self.vertno[0])]
if hemi is None:
hemi = np.where(np.array([np.sum(values[vi])
for vi in vert_inds]))[0]
if not len(hemi) == 1:
raise ValueError('Could not infer hemisphere')
hemi = hemi[0]
        if hemi not in [0, 1]:
raise ValueError('hemi must be 0 or 1')
subjects_dir = get_subjects_dir(subjects_dir)
values = values[vert_inds[hemi]]
hemis = ['lh', 'rh']
surf = os.path.join(subjects_dir, subject, 'surf',
hemis[hemi] + '.sphere')
if isinstance(surf, string_types): # read in surface
surf = read_surface(surf)
if restrict_vertices is False:
restrict_vertices = np.arange(surf[0].shape[0])
elif restrict_vertices is True:
restrict_vertices = self.vertno[hemi]
if np.any(self.data < 0):
raise ValueError('Cannot compute COM with negative values')
pos = surf[0][self.vertno[hemi], :].T
c_o_m = np.sum(pos * values, axis=1) / np.sum(values)
# Find the vertex closest to the COM
vertex = np.argmin(np.sqrt(np.mean((surf[0][restrict_vertices, :] -
c_o_m) ** 2, axis=1)))
vertex = restrict_vertices[vertex]
# do time center of mass by using the values across space
masses = np.sum(self.data, axis=0).astype(float)
t_ind = np.sum(masses * np.arange(self.shape[1])) / np.sum(masses)
t = self.tmin + self.tstep * t_ind
return vertex, hemi, t
def plot(self, subject=None, surface='inflated', hemi='lh',
colormap='hot', time_label='time=%0.2f ms',
smoothing_steps=10, fmin=5., fmid=10., fmax=15.,
transparent=True, alpha=1.0, time_viewer=False,
config_opts={}, subjects_dir=None, figure=None,
views='lat', colorbar=True):
"""Plot SourceEstimates with PySurfer
Note: PySurfer currently needs the SUBJECTS_DIR environment variable,
which will automatically be set by this function. Plotting multiple
SourceEstimates with different values for subjects_dir will cause
PySurfer to use the wrong FreeSurfer surfaces when using methods of
the returned Brain object. It is therefore recommended to set the
SUBJECTS_DIR environment variable or always use the same value for
subjects_dir (within the same Python session).
Parameters
----------
stc : SourceEstimates
The source estimates to plot.
subject : str | None
The subject name corresponding to FreeSurfer environment
variable SUBJECT. If None stc.subject will be used. If that
is None, the environment will be used.
surface : str
The type of surface (inflated, white etc.).
hemi : str, 'lh' | 'rh' | 'split' | 'both'
The hemisphere to display. Using 'both' or 'split' requires
PySurfer version 0.4 or above.
colormap : str
The type of colormap to use.
time_label : str
How to print info about the time instant visualized.
smoothing_steps : int
The amount of smoothing.
fmin : float
The minimum value to display.
fmid : float
The middle value on the colormap.
fmax : float
The maximum value for the colormap.
transparent : bool
If True, use a linear transparency between fmin and fmid.
alpha : float
Alpha value to apply globally to the overlay.
time_viewer : bool
Display time viewer GUI.
config_opts : dict
Keyword arguments for Brain initialization.
See pysurfer.viz.Brain.
subjects_dir : str
The path to the FreeSurfer subjects reconstructions.
It corresponds to FreeSurfer environment variable SUBJECTS_DIR.
figure : instance of mayavi.core.scene.Scene | None
If None, the last figure will be cleaned and a new figure will
be created.
views : str | list
View to use. See surfer.Brain().
colorbar : bool
If True, display colorbar on scene.
Returns
-------
brain : Brain
            An instance of surfer.viz.Brain from PySurfer.
"""
brain = plot_source_estimates(self, subject, surface=surface,
hemi=hemi, colormap=colormap, time_label=time_label,
smoothing_steps=smoothing_steps, fmin=fmin, fmid=fmid,
fmax=fmax, transparent=transparent, alpha=alpha,
time_viewer=time_viewer, config_opts=config_opts,
subjects_dir=subjects_dir, figure=figure, views=views,
colorbar=colorbar)
return brain
@verbose
def morph(self, subject_to, grade=5, smooth=None,
subjects_dir=None, buffer_size=64, n_jobs=1, subject_from=None,
verbose=None):
"""Morph a source estimate from one subject to another
Parameters
----------
subject_to : string
Name of the subject on which to morph as named in the SUBJECTS_DIR
stc_from : SourceEstimate
Source estimates for subject "from" to morph
grade : int, list (of two arrays), or None
Resolution of the icosahedral mesh (typically 5). If None, all
vertices will be used (potentially filling the surface). If a list,
            then values will be morphed to the set of vertices specified in
            grade[0] and grade[1]. Note that specifying the vertices (e.g.,
grade=[np.arange(10242), np.arange(10242)] for fsaverage on a
standard grade 5 source space) can be substantially faster than
computing vertex locations. Note that if subject='fsaverage'
and 'grade=5', this set of vertices will automatically be used
(instead of computed) for speed, since this is a common morph.
smooth : int or None
Number of iterations for the smoothing of the surface data.
If None, smooth is automatically defined to fill the surface
with non-zero values.
subjects_dir : string, or None
Path to SUBJECTS_DIR if it is not set in the environment.
buffer_size : int
Morph data in chunks of `buffer_size` time instants.
Saves memory when morphing long time intervals.
n_jobs : int
Number of jobs to run in parallel.
subject_from : string
Name of the original subject as named in the SUBJECTS_DIR.
If None, self.subject will be used.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
stc_to : SourceEstimate
Source estimate for the destination subject.
"""
subject_from = _check_subject(self.subject, subject_from)
return morph_data(subject_from, subject_to, self, grade, smooth,
subjects_dir, buffer_size, n_jobs, verbose)
def morph_precomputed(self, subject_to, vertices_to, morph_mat,
subject_from=None):
"""Morph source estimate between subjects using a precomputed matrix
Parameters
----------
subject_to : string
Name of the subject on which to morph as named in the SUBJECTS_DIR.
vertices_to : list of array of int
The vertices on the destination subject's brain.
morph_mat : sparse matrix
The morphing matrix, usually from compute_morph_matrix.
subject_from : string | None
Name of the original subject as named in the SUBJECTS_DIR.
If None, self.subject will be used.
Returns
-------
stc_to : SourceEstimate
Source estimate for the destination subject.
"""
subject_from = _check_subject(self.subject, subject_from)
return morph_data_precomputed(subject_from, subject_to, self,
vertices_to, morph_mat)
def get_peak(self, hemi=None, tmin=None, tmax=None, mode='abs',
vert_as_index=False, time_as_index=False):
"""Get location and latency of peak amplitude
        Parameters
        ----------
        hemi : {'lh', 'rh', None}
The hemi to be considered. If None, the entire source space is
considered.
tmin : float | None
The minimum point in time to be considered for peak getting.
tmax : float | None
The maximum point in time to be considered for peak getting.
mode : {'pos', 'neg', 'abs'}
How to deal with the sign of the data. If 'pos' only positive
values will be considered. If 'neg' only negative values will
be considered. If 'abs' absolute values will be considered.
Defaults to 'abs'.
vert_as_index : bool
            Whether to return the vertex index instead of its ID.
Defaults to False.
time_as_index : bool
Whether to return the time index instead of the latency.
Defaults to False.
Returns
-------
pos : int
The vertex exhibiting the maximum response, either ID or index.
latency : float | int
The time point of the maximum response, either latency in seconds
or index.
"""
data = {'lh': self.lh_data, 'rh': self.rh_data, None: self.data}[hemi]
vertno = {'lh': self.lh_vertno, 'rh': self.rh_vertno,
None: np.concatenate(self.vertno)}[hemi]
vert_idx, time_idx = _get_peak(data, self.times, tmin, tmax, mode)
return (vert_idx if vert_as_index else vertno[vert_idx],
time_idx if time_as_index else self.times[time_idx])
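# Usage sketch combining the SourceEstimate methods above (the label file and
# subject names are illustrative assumptions):
#     label = mne.read_label('aud-lh.label')
#     stc_aud = stc.in_label(label)
#     vertex, latency = stc_aud.get_peak(hemi='lh')
#     stc_fsavg = stc.morph('fsaverage', grade=5, smooth=5)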
class VolSourceEstimate(_BaseSourceEstimate):
"""Container for volume source estimates
Parameters
----------
data : array of shape (n_dipoles, n_times) | 2-tuple (kernel, sens_data)
The data in source space. The data can either be a single array or
a tuple with two arrays: "kernel" shape (n_vertices, n_sensors) and
"sens_data" shape (n_sensors, n_times). In this case, the source
space data corresponds to "numpy.dot(kernel, sens_data)".
vertices : array
Vertex numbers corresponding to the data.
tmin : scalar
Time point of the first sample in data.
tstep : scalar
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
vertno : array of shape (n_dipoles,)
The indices of the dipoles in the source space.
data : array of shape (n_dipoles, n_times)
The data in source space.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, n_times).
"""
@verbose
def __init__(self, data, vertices=None, tmin=None, tstep=None,
subject=None, verbose=None):
if not (isinstance(vertices, np.ndarray) or isinstance(vertices, list)
and len(vertices) == 1):
raise ValueError('Vertices must be a numpy array or a list with '
'one array')
_BaseSourceEstimate.__init__(self, data, vertices=vertices, tmin=tmin,
tstep=tstep, subject=subject,
verbose=verbose)
@verbose
def save(self, fname, ftype='stc', verbose=None):
"""Save the source estimates to a file
Parameters
----------
fname : string
The stem of the file name. The stem is extended with "-vl.stc"
or "-vl.w".
ftype : string
File format to use. Allowed values are "stc" (default) and "w".
The "w" format only supports a single time point.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
"""
if ftype not in ['stc', 'w']:
raise ValueError('ftype must be "stc" or "w", not "%s"' % ftype)
if ftype == 'stc':
logger.info('Writing STC to disk...')
if not (fname.endswith('-vl.stc')
or fname.endswith('-vol.stc')):
fname += '-vl.stc'
_write_stc(fname, tmin=self.tmin, tstep=self.tstep,
vertices=self.vertno, data=self.data)
elif ftype == 'w':
logger.info('Writing STC to disk (w format)...')
if not (fname.endswith('-vl.w')
or fname.endswith('-vol.w')):
fname += '-vl.w'
_write_w(fname, vertices=self.vertno, data=self.data)
logger.info('[done]')
def save_as_volume(self, fname, src, dest='mri', mri_resolution=False):
"""Save a volume source estimate in a nifti file
Parameters
----------
fname : string
The name of the generated nifti file.
src : list
The list of source spaces (should actually be of length 1)
dest : 'mri' | 'surf'
If 'mri' the volume is defined in the coordinate system of
the original T1 image. If 'surf' the coordinate system
of the FreeSurfer surface is used (Surface RAS).
        mri_resolution : bool
            If True the image is saved in MRI resolution.
WARNING: if you have many time points the file produced can be
huge.
Returns
-------
        img : instance of Nifti1Image
The image object.
"""
        return save_stc_as_volume(fname, self, src, dest=dest,
                                  mri_resolution=mri_resolution)
def as_volume(self, src, dest='mri', mri_resolution=False):
"""Export volume source estimate as a nifti object
Parameters
----------
src : list
The list of source spaces (should actually be of length 1)
dest : 'mri' | 'surf'
If 'mri' the volume is defined in the coordinate system of
the original T1 image. If 'surf' the coordinate system
of the FreeSurfer surface is used (Surface RAS).
        mri_resolution : bool
            If True the image is saved in MRI resolution.
WARNING: if you have many time points the file produced can be
huge.
Returns
-------
        img : instance of Nifti1Image
The image object.
"""
return save_stc_as_volume(None, self, src, dest=dest,
mri_resolution=mri_resolution)
def __repr__(self):
if isinstance(self.vertno, list):
nv = sum([len(v) for v in self.vertno])
else:
nv = self.vertno.size
s = "%d vertices" % nv
if self.subject is not None:
s += ", subject : %s" % self.subject
s += ", tmin : %s (ms)" % (1e3 * self.tmin)
s += ", tmax : %s (ms)" % (1e3 * self.times[-1])
s += ", tstep : %s (ms)" % (1e3 * self.tstep)
s += ", data size : %s x %s" % self.shape
return "<VolSourceEstimate | %s>" % s
def get_peak(self, tmin=None, tmax=None, mode='abs',
vert_as_index=False, time_as_index=False):
"""Get location and latency of peak amplitude
        Parameters
        ----------
        tmin : float | None
The minimum point in time to be considered for peak getting.
tmax : float | None
The maximum point in time to be considered for peak getting.
mode : {'pos', 'neg', 'abs'}
How to deal with the sign of the data. If 'pos' only positive
values will be considered. If 'neg' only negative values will
be considered. If 'abs' absolute values will be considered.
Defaults to 'abs'.
vert_as_index : bool
            Whether to return the vertex index instead of its ID.
Defaults to False.
time_as_index : bool
Whether to return the time index instead of the latency.
Defaults to False.
Returns
-------
pos : int
The vertex exhibiting the maximum response, either ID or index.
latency : float
The latency in seconds.
"""
vert_idx, time_idx = _get_peak(self.data, self.times, tmin, tmax,
mode)
return (vert_idx if vert_as_index else self.vertno[vert_idx],
time_idx if time_as_index else self.times[time_idx])
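# Sketch of exporting a VolSourceEstimate as NIfTI (names are illustrative;
# `src` is assumed to be the volume source space used for the estimate):
#     img = stc_vol.as_volume(src, dest='mri', mri_resolution=True)
#     stc_vol.save_as_volume('stc_vol.nii.gz', src, mri_resolution=True)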
###############################################################################
# Morphing
def mesh_edges(tris):
"""Returns sparse matrix with edges as an adjacency matrix
Parameters
----------
tris : array of shape [n_triangles x 3]
The triangles.
Returns
-------
edges : sparse matrix
The adjacency matrix.
"""
npoints = np.max(tris) + 1
ones_ntris = np.ones(3 * len(tris))
a, b, c = tris.T
x = np.concatenate((a, b, c))
y = np.concatenate((b, c, a))
edges = coo_matrix((ones_ntris, (x, y)), shape=(npoints, npoints))
edges = edges.tocsr()
edges = edges + edges.T
return edges
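# Tiny illustrative check of mesh_edges on a two-triangle square (not part of
# the module's API; values chosen only for the example).
def _example_mesh_edges():
    tris = np.array([[0, 1, 2], [0, 2, 3]])
    edges = mesh_edges(tris)
    # the shared diagonal edge (0, 2) ends up with weight 2, the four outer
    # edges with weight 1, and non-adjacent pairs stay 0
    return edges.toarray()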
def mesh_dist(tris, vert):
"""Compute adjacency matrix weighted by distances
It generates an adjacency matrix where the entries are the distances
between neighboring vertices.
Parameters
----------
tris : array (n_tris x 3)
Mesh triangulation
vert : array (n_vert x 3)
Vertex locations
Returns
-------
dist_matrix : scipy.sparse.csr_matrix
Sparse matrix with distances between adjacent vertices
"""
edges = mesh_edges(tris).tocoo()
# Euclidean distances between neighboring vertices
dist = np.sqrt(np.sum((vert[edges.row, :] - vert[edges.col, :]) ** 2,
axis=1))
dist_matrix = csr_matrix((dist, (edges.row, edges.col)), shape=edges.shape)
return dist_matrix
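# Companion sketch for mesh_dist on a single right triangle (illustrative
# only): adjacent entries hold the Euclidean edge lengths 1, 1 and sqrt(2).
def _example_mesh_dist():
    tris = np.array([[0, 1, 2]])
    vert = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]])
    return mesh_dist(tris, vert).toarray()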
@verbose
def _morph_buffer(data, idx_use, e, smooth, n_vertices, nearest, maps,
verbose=None):
"""Morph data from one subject's source space to another
Parameters
----------
data : array, or csr sparse matrix
A n_vertices x n_times (or other dimension) dataset to morph.
idx_use : array of int
Vertices from the original subject's data.
e : sparse matrix
The mesh edges of the "from" subject.
smooth : int
Number of smoothing iterations to perform. A hard limit of 100 is
also imposed.
n_vertices : int
Number of vertices.
nearest : array of int
Vertices on the destination surface to use.
maps : sparse matrix
Morph map from one subject to the other.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
data_morphed : array, or csr sparse matrix
The morphed data (same type as input).
"""
n_iter = 99 # max nb of smoothing iterations (minus one)
if smooth is not None:
if smooth <= 0:
raise ValueError('The number of smoothing operations ("smooth") '
'has to be at least 1.')
smooth -= 1
# make sure we're in CSR format
e = e.tocsr()
if sparse.issparse(data):
use_sparse = True
if not isinstance(data, sparse.csr_matrix):
data = data.tocsr()
else:
use_sparse = False
done = False
# do the smoothing
for k in range(n_iter + 1):
# get the row sum
mult = np.zeros(e.shape[1])
mult[idx_use] = 1
idx_use_data = idx_use
data_sum = e * mult
# new indices are non-zero sums
idx_use = np.where(data_sum)[0]
# typically want to make the next iteration have these indices
idx_out = idx_use
# figure out if this is the last iteration
if smooth is None:
if k == n_iter or len(idx_use) >= n_vertices:
# stop when vertices filled
idx_out = None
done = True
elif k == smooth:
idx_out = None
done = True
# do standard smoothing multiplication
data = _morph_mult(data, e, use_sparse, idx_use_data, idx_out)
if done is True:
break
# do standard normalization
if use_sparse:
data.data /= data_sum[idx_use].repeat(np.diff(data.indptr))
else:
data /= data_sum[idx_use][:, None]
# do special normalization for last iteration
if use_sparse:
data_sum[data_sum == 0] = 1
data.data /= data_sum.repeat(np.diff(data.indptr))
else:
data[idx_use, :] /= data_sum[idx_use][:, None]
logger.info(' %d smooth iterations done.' % (k + 1))
data_morphed = maps[nearest, :] * data
return data_morphed
def _morph_mult(data, e, use_sparse, idx_use_data, idx_use_out=None):
"""Helper for morphing
Equivalent to "data = (e[:, idx_use_data] * data)[idx_use_out]"
but faster.
"""
if len(idx_use_data) < e.shape[1]:
if use_sparse:
data = e[:, idx_use_data] * data
else:
# constructing a new sparse matrix is faster than sub-indexing
# e[:, idx_use_data]!
col, row = np.meshgrid(np.arange(data.shape[1]), idx_use_data)
d_sparse = sparse.csr_matrix((data.ravel(),
(row.ravel(), col.ravel())),
shape=(e.shape[1], data.shape[1]))
data = e * d_sparse
data = np.asarray(data.todense())
else:
data = e * data
# trim data
if idx_use_out is not None:
data = data[idx_use_out]
return data
def _get_subject_sphere_tris(subject, subjects_dir):
spheres = [os.path.join(subjects_dir, subject, 'surf',
xh + '.sphere.reg') for xh in ['lh', 'rh']]
tris = [read_surface(s)[1] for s in spheres]
return tris
@verbose
def morph_data(subject_from, subject_to, stc_from, grade=5, smooth=None,
subjects_dir=None, buffer_size=64, n_jobs=1, verbose=None):
"""Morph a source estimate from one subject to another
Parameters
----------
subject_from : string
Name of the original subject as named in the SUBJECTS_DIR
subject_to : string
Name of the subject on which to morph as named in the SUBJECTS_DIR
stc_from : SourceEstimate
Source estimates for subject "from" to morph
grade : int, list (of two arrays), or None
Resolution of the icosahedral mesh (typically 5). If None, all
vertices will be used (potentially filling the surface). If a list,
        then values will be morphed to the set of vertices specified in
        grade[0] and grade[1]. Note that specifying the vertices (e.g.,
grade=[np.arange(10242), np.arange(10242)] for fsaverage on a
standard grade 5 source space) can be substantially faster than
computing vertex locations. Note that if subject='fsaverage'
and 'grade=5', this set of vertices will automatically be used
(instead of computed) for speed, since this is a common morph.
smooth : int or None
Number of iterations for the smoothing of the surface data.
If None, smooth is automatically defined to fill the surface
with non-zero values.
subjects_dir : string, or None
Path to SUBJECTS_DIR if it is not set in the environment.
buffer_size : int
Morph data in chunks of `buffer_size` time instants.
Saves memory when morphing long time intervals.
n_jobs : int
Number of jobs to run in parallel
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
stc_to : SourceEstimate
Source estimate for the destination subject.
"""
if not isinstance(stc_from, SourceEstimate):
raise ValueError('Morphing is only possible with surface source '
'estimates')
logger.info('Morphing data...')
subjects_dir = get_subjects_dir(subjects_dir)
nearest = grade_to_vertices(subject_to, grade, subjects_dir, n_jobs)
tris = _get_subject_sphere_tris(subject_from, subjects_dir)
maps = read_morph_map(subject_from, subject_to, subjects_dir)
# morph the data
data = [stc_from.lh_data, stc_from.rh_data]
data_morphed = [None, None]
n_chunks = ceil(stc_from.data.shape[1] / float(buffer_size))
parallel, my_morph_buffer, _ = parallel_func(_morph_buffer, n_jobs)
for hemi in [0, 1]:
e = mesh_edges(tris[hemi])
e.data[e.data == 2] = 1
n_vertices = e.shape[0]
e = e + sparse.eye(n_vertices, n_vertices)
idx_use = stc_from.vertno[hemi]
if len(idx_use) == 0:
continue
data_morphed[hemi] = np.concatenate(
parallel(my_morph_buffer(data_buffer, idx_use, e, smooth,
n_vertices, nearest[hemi], maps[hemi])
for data_buffer
in np.array_split(data[hemi], n_chunks, axis=1)), axis=1)
vertices = [nearest[0], nearest[1]]
if data_morphed[0] is None:
if data_morphed[1] is None:
data = np.r_[[], []]
vertices = [np.array([], dtype=int), np.array([], dtype=int)]
else:
data = data_morphed[1]
vertices = [np.array([], dtype=int), vertices[1]]
elif data_morphed[1] is None:
data = data_morphed[0]
vertices = [vertices[0], np.array([], dtype=int)]
else:
data = np.r_[data_morphed[0], data_morphed[1]]
stc_to = SourceEstimate(data, vertices, stc_from.tmin, stc_from.tstep,
subject=subject_to, verbose=stc_from.verbose)
logger.info('[done]')
return stc_to
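# A minimal usage sketch for morph_data (hedged: the subject names, the
# stc object and the subjects_dir path below are placeholders, not taken
# from this module):
#
#     # stc is a SourceEstimate for subject 'sample' obtained elsewhere
#     stc_avg = morph_data('sample', 'fsaverage', stc, grade=5, smooth=5,
#                          subjects_dir='/path/to/subjects_dir')
#     # stc_avg has one row per fsaverage grade-5 vertex (10242 per
#     # hemisphere, see grade_to_vertices below) and the same time axis.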
@verbose
def compute_morph_matrix(subject_from, subject_to, vertices_from, vertices_to,
smooth=None, subjects_dir=None, verbose=None):
"""Get a matrix that morphs data from one subject to another
Parameters
----------
subject_from : string
Name of the original subject as named in the SUBJECTS_DIR
subject_to : string
Name of the subject on which to morph as named in the SUBJECTS_DIR
vertices_from : list of arrays of int
Vertices for each hemisphere (LH, RH) for subject_from
vertices_to : list of arrays of int
Vertices for each hemisphere (LH, RH) for subject_to
smooth : int or None
Number of iterations for the smoothing of the surface data.
If None, smooth is automatically defined to fill the surface
with non-zero values.
subjects_dir : string
        Path to SUBJECTS_DIR if it is not set in the environment
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
morph_matrix : sparse matrix
matrix that morphs data from subject_from to subject_to
"""
logger.info('Computing morph matrix...')
subjects_dir = get_subjects_dir(subjects_dir)
tris = _get_subject_sphere_tris(subject_from, subjects_dir)
maps = read_morph_map(subject_from, subject_to, subjects_dir)
morpher = [None] * 2
for hemi in [0, 1]:
e = mesh_edges(tris[hemi])
e.data[e.data == 2] = 1
n_vertices = e.shape[0]
e = e + sparse.eye(n_vertices, n_vertices)
idx_use = vertices_from[hemi]
if len(idx_use) == 0:
morpher[hemi] = []
continue
m = sparse.eye(len(idx_use), len(idx_use), format='csr')
morpher[hemi] = _morph_buffer(m, idx_use, e, smooth, n_vertices,
vertices_to[hemi], maps[hemi])
# be careful about zero-length arrays
if isinstance(morpher[0], list):
morpher = morpher[1]
elif isinstance(morpher[1], list):
morpher = morpher[0]
else:
morpher = sparse_block_diag(morpher, format='csr')
logger.info('[done]')
return morpher
@verbose
def grade_to_vertices(subject, grade, subjects_dir=None, n_jobs=1,
verbose=None):
"""Convert a grade to source space vertices for a given subject
Parameters
----------
subject : str
Name of the subject
grade : int
Resolution of the icosahedral mesh (typically 5). If None, all
vertices will be used (potentially filling the surface). If a list,
        then values will be morphed to the set of vertices specified in
        grade[0] and grade[1]. Note that specifying the vertices (e.g.,
grade=[np.arange(10242), np.arange(10242)] for fsaverage on a
standard grade 5 source space) can be substantially faster than
computing vertex locations. Note that if subject='fsaverage'
and 'grade=5', this set of vertices will automatically be used
(instead of computed) for speed, since this is a common morph.
subjects_dir : string, or None
Path to SUBJECTS_DIR if it is not set in the environment
n_jobs : int
Number of jobs to run in parallel
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
vertices : list of arrays of int
Vertex numbers for LH and RH
"""
# add special case for fsaverage for speed
if subject == 'fsaverage' and grade == 5:
return [np.arange(10242), np.arange(10242)]
subjects_dir = get_subjects_dir(subjects_dir)
spheres_to = [os.path.join(subjects_dir, subject, 'surf',
xh + '.sphere.reg') for xh in ['lh', 'rh']]
lhs, rhs = [read_surface(s)[0] for s in spheres_to]
if grade is not None: # fill a subset of vertices
if isinstance(grade, list):
if not len(grade) == 2:
raise ValueError('grade as a list must have two elements '
'(arrays of output vertices)')
vertices = grade
else:
# find which vertices to use in "to mesh"
ico = _get_ico_tris(grade, return_surf=True)
lhs /= np.sqrt(np.sum(lhs ** 2, axis=1))[:, None]
rhs /= np.sqrt(np.sum(rhs ** 2, axis=1))[:, None]
# Compute nearest vertices in high dim mesh
parallel, my_compute_nearest, _ = \
parallel_func(_compute_nearest, n_jobs)
lhs, rhs, rr = [a.astype(np.float32)
for a in [lhs, rhs, ico['rr']]]
vertices = parallel(my_compute_nearest(xhs, rr)
for xhs in [lhs, rhs])
# Make sure the vertices are ordered
vertices = [np.sort(verts) for verts in vertices]
else: # potentially fill the surface
vertices = [np.arange(lhs.shape[0]), np.arange(rhs.shape[0])]
return vertices
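# Hedged examples for grade_to_vertices ('some_subject' and the path are
# placeholders):
#
#     verts = grade_to_vertices('fsaverage', 5)
#     # special-cased above: [np.arange(10242), np.arange(10242)]
#
#     verts = grade_to_vertices('some_subject', None,
#                               subjects_dir='/path/to/subjects_dir')
#     # grade=None keeps every vertex of that subject's lh/rh sphere.reg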
def morph_data_precomputed(subject_from, subject_to, stc_from, vertices_to,
morph_mat):
"""Morph source estimate between subjects using a precomputed matrix
Parameters
----------
subject_from : string
Name of the original subject as named in the SUBJECTS_DIR.
subject_to : string
Name of the subject on which to morph as named in the SUBJECTS_DIR.
stc_from : SourceEstimate
Source estimates for subject "from" to morph.
vertices_to : list of array of int
The vertices on the destination subject's brain.
morph_mat : sparse matrix
The morphing matrix, typically from compute_morph_matrix.
Returns
-------
stc_to : SourceEstimate
Source estimate for the destination subject.
"""
if not sparse.issparse(morph_mat):
raise ValueError('morph_mat must be a sparse matrix')
if not isinstance(vertices_to, list) or not len(vertices_to) == 2:
raise ValueError('vertices_to must be a list of length 2')
if not sum(len(v) for v in vertices_to) == morph_mat.shape[0]:
raise ValueError('number of vertices in vertices_to must match '
'morph_mat.shape[0]')
if not stc_from.data.shape[0] == morph_mat.shape[1]:
        raise ValueError('stc_from.data.shape[0] must be the same as '
                         'morph_mat.shape[1]')
if stc_from.subject is not None and stc_from.subject != subject_from:
raise ValueError('stc_from.subject and subject_from must match')
data = morph_mat * stc_from.data
stc_to = SourceEstimate(data, vertices_to, stc_from.tmin, stc_from.tstep,
verbose=stc_from.verbose, subject=subject_to)
return stc_to
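# Hedged sketch of the precomputed-morph workflow (subject names, stc and
# the subjects_dir variable sd are placeholders): compute the sparse morph
# matrix once, then reuse it for many source estimates of the same subject.
#
#     vertices_to = grade_to_vertices('fsaverage', 5, subjects_dir=sd)
#     mm = compute_morph_matrix('sample', 'fsaverage', stc.vertno,
#                               vertices_to, smooth=5, subjects_dir=sd)
#     stc_avg = morph_data_precomputed('sample', 'fsaverage', stc,
#                                      vertices_to, mm)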
@verbose
def spatio_temporal_src_connectivity(src, n_times, dist=None, verbose=None):
"""Compute connectivity for a source space activation over time
Parameters
----------
src : source space
The source space.
n_times : int
Number of time instants.
dist : float, or None
Maximal geodesic distance (in m) between vertices in the
source space to consider neighbors. If None, immediate neighbors
are extracted from an ico surface.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
connectivity : sparse COO matrix
The connectivity matrix describing the spatio-temporal
graph structure. If N is the number of vertices in the
        source space, the first N nodes in the graph are the
        vertices at time 1, the nodes from N+1 to 2N are the vertices
        at time 2, etc.
"""
if dist is None:
if src[0]['use_tris'] is None:
raise Exception("The source space does not appear to be an ico "
"surface. Connectivity cannot be extracted from "
"non-ico source spaces.")
used_verts = [np.unique(s['use_tris']) for s in src]
lh_tris = np.searchsorted(used_verts[0], src[0]['use_tris'])
rh_tris = np.searchsorted(used_verts[1], src[1]['use_tris'])
tris = np.concatenate((lh_tris, rh_tris + np.max(lh_tris) + 1))
connectivity = spatio_temporal_tris_connectivity(tris, n_times)
# deal with source space only using a subset of vertices
masks = [in1d(u, s['vertno']) for s, u in zip(src, used_verts)]
if sum(u.size for u in used_verts) != connectivity.shape[0] / n_times:
raise ValueError('Used vertices do not match connectivity shape')
if [np.sum(m) for m in masks] != [len(s['vertno']) for s in src]:
raise ValueError('Vertex mask does not match number of vertices')
masks = np.concatenate(masks)
missing = 100 * float(len(masks) - np.sum(masks)) / len(masks)
if missing:
warnings.warn('%0.1f%% of original source space vertices have been'
' omitted, tri-based connectivity will have holes.\n'
'Consider using distance-based connectivity or '
'morphing data to all source space vertices.'
% missing)
masks = np.tile(masks, n_times)
masks = np.where(masks)[0]
connectivity = connectivity.tocsr()
connectivity = connectivity[masks]
connectivity = connectivity[:, masks]
# return to original format
connectivity = connectivity.tocoo()
return connectivity
else: # use distances computed and saved in the source space file
return spatio_temporal_dist_connectivity(src, n_times, dist)
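# Hedged usage sketch (src is a surface source space read elsewhere): the
# returned COO matrix has n_vertices * n_times nodes, with spatial edges
# inside each time slice and temporal edges linking each vertex to itself
# in the adjacent slices, a layout commonly consumed by spatio-temporal
# clustering statistics.
#
#     connectivity = spatio_temporal_src_connectivity(src, n_times=20)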
@verbose
def grade_to_tris(grade, verbose=None):
"""Get tris defined for a certain grade
Parameters
----------
grade : int
Grade of an icosahedral mesh.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
tris : list
2-element list containing Nx3 arrays of tris, suitable for use in
spatio_temporal_tris_connectivity.
"""
a = _get_ico_tris(grade, None, False)
tris = np.concatenate((a, a + (np.max(a) + 1)))
return tris
@verbose
def spatio_temporal_tris_connectivity(tris, n_times, verbose=None):
"""Compute connectivity from triangles and time instants
Parameters
----------
tris : array
N x 3 array defining triangles.
n_times : int
Number of time points
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
connectivity : sparse COO matrix
The connectivity matrix describing the spatio-temporal
graph structure. If N is the number of vertices in the
        source space, the first N nodes in the graph are the
        vertices at time 1, the nodes from N+1 to 2N are the vertices
        at time 2, etc.
"""
edges = mesh_edges(tris).tocoo()
return _get_connectivity_from_edges(edges, n_times)
@verbose
def spatio_temporal_dist_connectivity(src, n_times, dist, verbose=None):
"""Compute connectivity from distances in a source space and time instants
Parameters
----------
src : source space
The source space must have distances between vertices computed, such
that src['dist'] exists and is useful. This can be obtained using MNE
with a call to mne_add_patch_info with the --dist option.
n_times : int
Number of time points
dist : float
Maximal geodesic distance (in m) between vertices in the
source space to consider neighbors.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
connectivity : sparse COO matrix
The connectivity matrix describing the spatio-temporal
graph structure. If N is the number of vertices in the
        source space, the first N nodes in the graph are the
        vertices at time 1, the nodes from N+1 to 2N are the vertices
        at time 2, etc.
"""
if src[0]['dist'] is None:
raise RuntimeError('src must have distances included, consider using\n'
'mne_add_patch_info with --dist argument')
edges = sparse_block_diag([s['dist'][s['vertno'], :][:, s['vertno']]
for s in src])
edges.data[:] = np.less_equal(edges.data, dist)
# clean it up and put it in coo format
edges = edges.tocsr()
edges.eliminate_zeros()
edges = edges.tocoo()
return _get_connectivity_from_edges(edges, n_times)
@verbose
def spatial_src_connectivity(src, dist=None, verbose=None):
"""Compute connectivity for a source space activation
Parameters
----------
src : source space
The source space.
dist : float, or None
Maximal geodesic distance (in m) between vertices in the
source space to consider neighbors. If None, immediate neighbors
are extracted from an ico surface.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
connectivity : sparse COO matrix
The connectivity matrix describing the spatial graph structure.
"""
return spatio_temporal_src_connectivity(src, 1, dist)
@verbose
def spatial_tris_connectivity(tris, verbose=None):
"""Compute connectivity from triangles
Parameters
----------
tris : array
N x 3 array defining triangles.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
connectivity : sparse COO matrix
The connectivity matrix describing the spatial graph structure.
"""
return spatio_temporal_tris_connectivity(tris, 1)
def spatial_dist_connectivity(src, dist, verbose=None):
"""Compute connectivity from distances in a source space
Parameters
----------
src : source space
The source space must have distances between vertices computed, such
that src['dist'] exists and is useful. This can be obtained using MNE
with a call to mne_add_patch_info with the --dist option.
dist : float
Maximal geodesic distance (in m) between vertices in the
source space to consider neighbors.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
connectivity : sparse COO matrix
The connectivity matrix describing the spatial graph structure.
"""
return spatio_temporal_dist_connectivity(src, 1, dist)
def sparse_block_diag(mats, format=None, dtype=None):
"""An implementation of scipy.sparse.block_diag since old versions of
scipy don't have it. Forms a sparse matrix by stacking matrices in block
diagonal form.
Parameters
----------
mats : list of matrices
Input matrices.
format : str, optional
The sparse format of the result (e.g. "csr"). If not given, the
matrix is returned in "coo" format.
dtype : dtype specifier, optional
The data-type of the output matrix. If not given, the dtype is
determined from that of blocks.
Returns
-------
res : sparse matrix
"""
try:
return sparse.block_diag(mats, format=format, dtype=dtype)
except AttributeError:
nmat = len(mats)
rows = []
for ia, a in enumerate(mats):
row = [None] * nmat
row[ia] = a
rows.append(row)
return sparse.bmat(rows, format=format, dtype=dtype)
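# Small illustration (hedged): stacking two sparse blocks on the diagonal.
#
#     a = sparse.eye(2, 2)
#     b = sparse.eye(3, 3)
#     m = sparse_block_diag([a, b], format='csr')
#     # m.shape == (5, 5); a fills rows/cols 0-1, b fills rows/cols 2-4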
@verbose
def _get_connectivity_from_edges(edges, n_times, verbose=None):
"""Given edges sparse matrix, create connectivity matrix"""
n_vertices = edges.shape[0]
logger.info("-- number of connected vertices : %d" % n_vertices)
nnz = edges.col.size
    aux = n_vertices * np.arange(n_times)[:, None] * np.ones((1, nnz), int)
col = (edges.col[None, :] + aux).ravel()
row = (edges.row[None, :] + aux).ravel()
if n_times > 1: # add temporal edges
o = (n_vertices * np.arange(n_times - 1)[:, None]
+ np.arange(n_vertices)[None, :]).ravel()
d = (n_vertices * np.arange(1, n_times)[:, None]
+ np.arange(n_vertices)[None, :]).ravel()
row = np.concatenate((row, o, d))
col = np.concatenate((col, d, o))
data = np.ones(edges.data.size * n_times + 2 * n_vertices * (n_times - 1),
                   dtype=int)
connectivity = coo_matrix((data, (row, col)),
shape=(n_times * n_vertices, ) * 2)
return connectivity
@verbose
def _get_ico_tris(grade, verbose=None, return_surf=False):
"""Get triangles for ico surface."""
ico = _get_ico_surface(grade)
if not return_surf:
return ico['tris']
else:
return ico
def save_stc_as_volume(fname, stc, src, dest='mri', mri_resolution=False):
"""Save a volume source estimate in a nifti file
Parameters
----------
fname : string | None
The name of the generated nifti file. If None, the image is only
returned and not saved.
stc : instance of VolSourceEstimate
The source estimate
src : list
The list of source spaces (should actually be of length 1)
dest : 'mri' | 'surf'
If 'mri' the volume is defined in the coordinate system of
the original T1 image. If 'surf' the coordinate system
of the FreeSurfer surface is used (Surface RAS).
    mri_resolution : bool
        If True the image is saved in MRI resolution.
WARNING: if you have many time points the file produced can be
huge.
Returns
-------
    img : instance of Nifti1Image
The image object.
"""
if not isinstance(stc, VolSourceEstimate):
raise Exception('Only volume source estimates can be saved as '
'volumes')
n_times = stc.data.shape[1]
shape = src[0]['shape']
shape3d = (shape[2], shape[1], shape[0])
shape = (n_times, shape[2], shape[1], shape[0])
vol = np.zeros(shape)
    mask3d = src[0]['inuse'].reshape(shape3d).astype(bool)
if mri_resolution:
mri_shape3d = (src[0]['mri_height'], src[0]['mri_depth'],
src[0]['mri_width'])
mri_shape = (n_times, src[0]['mri_height'], src[0]['mri_depth'],
src[0]['mri_width'])
mri_vol = np.zeros(mri_shape)
interpolator = src[0]['interpolator']
for k, v in enumerate(vol):
v[mask3d] = stc.data[:, k]
if mri_resolution:
mri_vol[k] = (interpolator * v.ravel()).reshape(mri_shape3d)
if mri_resolution:
vol = mri_vol
vol = vol.T
if mri_resolution:
affine = src[0]['vox_mri_t']['trans'].copy()
else:
affine = src[0]['src_mri_t']['trans'].copy()
if dest == 'mri':
affine = np.dot(src[0]['mri_ras_t']['trans'], affine)
affine[:3] *= 1e3
try:
import nibabel as nib # lazy import to avoid dependency
except ImportError:
raise ImportError("nibabel is required to save volume images.")
header = nib.nifti1.Nifti1Header()
header.set_xyzt_units('mm', 'msec')
header['pixdim'][4] = 1e3 * stc.tstep
img = nib.Nifti1Image(vol, affine, header=header)
if fname is not None:
nib.save(img, fname)
return img
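# Hedged usage sketch (stc must be a VolSourceEstimate and src the matching
# volume source space; the filename is a placeholder):
#
#     img = save_stc_as_volume('stc_vol.nii.gz', stc, src, dest='mri',
#                              mri_resolution=True)
#     # img is a nibabel Nifti1Image; pass fname=None to only get the
#     # image object back without writing it to disk.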
def _get_label_flip(labels, label_vertidx, src):
"""Helper function to get sign-flip for labels"""
# do the import here to avoid circular dependency
from .label import label_sign_flip
# get the sign-flip vector for every label
label_flip = list()
for label, vertidx in zip(labels, label_vertidx):
if label.hemi == 'both':
raise ValueError('BiHemiLabel not supported when using sign-flip')
if vertidx is not None:
flip = label_sign_flip(label, src)[:, None]
else:
flip = None
label_flip.append(flip)
return label_flip
@verbose
def _gen_extract_label_time_course(stcs, labels, src, mode='mean',
allow_empty=False, verbose=None):
"""Generator for extract_label_time_course"""
n_labels = len(labels)
# get vertno from source space, they have to be the same as in the stcs
vertno = [s['vertno'] for s in src]
nvert = [len(vn) for vn in vertno]
# do the initialization
label_vertidx = list()
for label in labels:
if label.hemi == 'both':
# handle BiHemiLabel
sub_labels = [label.lh, label.rh]
else:
sub_labels = [label]
this_vertidx = list()
for slabel in sub_labels:
if slabel.hemi == 'lh':
this_vertno = np.intersect1d(vertno[0], slabel.vertices)
vertidx = np.searchsorted(vertno[0], this_vertno)
elif slabel.hemi == 'rh':
this_vertno = np.intersect1d(vertno[1], slabel.vertices)
vertidx = nvert[0] + np.searchsorted(vertno[1], this_vertno)
else:
raise ValueError('label %s has invalid hemi' % label.name)
this_vertidx.append(vertidx)
# convert it to an array
this_vertidx = np.concatenate(this_vertidx)
if len(this_vertidx) == 0:
msg = ('source space does not contain any vertices for label %s'
% label.name)
if not allow_empty:
raise ValueError(msg)
else:
logger.warning(msg + '. Assigning all-zero time series to '
'label.')
this_vertidx = None # to later check if label is empty
label_vertidx.append(this_vertidx)
    # mode-dependent initialization
if mode == 'mean':
pass # we have this here to catch invalid values for mode
elif mode == 'mean_flip':
# get the sign-flip vector for every label
label_flip = _get_label_flip(labels, label_vertidx, src)
elif mode == 'pca_flip':
# get the sign-flip vector for every label
label_flip = _get_label_flip(labels, label_vertidx, src)
else:
raise ValueError('%s is an invalid mode' % mode)
# loop through source estimates and extract time series
for stc in stcs:
# make sure the stc is compatible with the source space
if len(stc.vertno[0]) != nvert[0] or len(stc.vertno[1]) != nvert[1]:
raise ValueError('stc not compatible with source space')
if any([np.any(svn != vn) for svn, vn in zip(stc.vertno, vertno)]):
raise ValueError('stc not compatible with source space')
logger.info('Extracting time courses for %d labels (mode: %s)'
% (n_labels, mode))
# do the extraction
label_tc = np.zeros((n_labels, stc.data.shape[1]),
dtype=stc.data.dtype)
if mode == 'mean':
for i, vertidx in enumerate(label_vertidx):
if vertidx is not None:
label_tc[i] = np.mean(stc.data[vertidx, :], axis=0)
elif mode == 'mean_flip':
for i, (vertidx, flip) in enumerate(zip(label_vertidx,
label_flip)):
if vertidx is not None:
label_tc[i] = np.mean(flip * stc.data[vertidx, :], axis=0)
elif mode == 'pca_flip':
for i, (vertidx, flip) in enumerate(zip(label_vertidx,
label_flip)):
if vertidx is not None:
U, s, V = linalg.svd(stc.data[vertidx, :],
full_matrices=False)
# determine sign-flip
sign = np.sign(np.dot(U[:, 0], flip))
# use average power in label for scaling
scale = linalg.norm(s) / np.sqrt(len(vertidx))
label_tc[i] = sign * scale * V[0]
else:
raise ValueError('%s is an invalid mode' % mode)
# this is a generator!
yield label_tc
@verbose
def extract_label_time_course(stcs, labels, src, mode='mean_flip',
allow_empty=False, return_generator=False,
verbose=None):
"""Extract label time course for lists of labels and source estimates
This function will extract one time course for each label and source
estimate. The way the time courses are extracted depends on the mode
parameter.
Valid values for mode are:
'mean': Average within each label.
'mean_flip': Average within each label with sign flip depending on source
orientation.
'pca_flip': Apply an SVD to the time courses within each label and use the
scaled and sign-flipped first right-singular vector as the label time
course. The scaling is performed such that the power of the label time
course is the same as the average per-vertex time course power within
the label. The sign of the resulting time course is adjusted by multiplying
it with "sign(dot(u, flip))" where u is the first left-singular vector,
    and flip is a sign-flip vector based on the vertex normals. This procedure
assures that the phase does not randomly change by 180 degrees from one
stc to the next.
Parameters
----------
stcs : SourceEstimate | list (or generator) of SourceEstimate
The source estimates from which to extract the time course.
labels : Label | list of Label
The labels for which to extract the time course.
src : list
Source spaces for left and right hemisphere.
mode : str
Extraction mode, see explanation above.
allow_empty : bool
Instead of emitting an error, return all-zero time courses for labels
that do not have any vertices in the source estimate.
return_generator : bool
If True, a generator instead of a list is returned.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
label_tc : array | list (or generator) of array,
shape=(len(labels), n_times)
Extracted time course for each label and source estimate.
"""
# convert inputs to lists
if isinstance(stcs, SourceEstimate):
stcs = [stcs]
return_several = False
return_generator = False
else:
return_several = True
if not isinstance(labels, list):
labels = [labels]
label_tc = _gen_extract_label_time_course(stcs, labels, src, mode=mode,
allow_empty=allow_empty)
if not return_generator:
# do the extraction and return a list
label_tc = list(label_tc)
if not return_several:
        # input was a single SourceEstimate, return single array
label_tc = label_tc[0]
return label_tc
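# Hedged usage sketch (stcs, labels and src are placeholders obtained
# elsewhere, e.g. from an annotation and the inverse operator's source
# space):
#
#     label_tc = extract_label_time_course(stcs, labels, src,
#                                          mode='mean_flip')
#     # for a list of stcs this is a list of arrays, each of shape
#     # (len(labels), n_times); for a single SourceEstimate it is a
#     # single array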
|
the-stack_0_11927 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
TridentNet Training Script.
This script is a simplified version of the training script in detectron2/tools.
"""
import os
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch
from detectron2.evaluation import COCOEvaluator
from tridentnet import add_tridentnet_config
class Trainer(DefaultTrainer):
@classmethod
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
return COCOEvaluator(dataset_name, cfg, True, output_folder)
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
add_tridentnet_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)
return cfg
def main(args):
cfg = setup(args)
if args.eval_only:
model = Trainer.build_model(cfg)
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
cfg.MODEL.WEIGHTS, resume=args.resume
)
res = Trainer.test(cfg, model)
return res
trainer = Trainer(cfg)
trainer.resume_or_load(resume=args.resume)
return trainer.train()
if __name__ == "__main__":
args = default_argument_parser().parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
|
the-stack_0_11931 | #!/usr/bin/env python3
import argparse
import configparser
import logging
import logging.handlers
import os.path
import subprocess
import sys
import threading
import time
import traceback
from collections import Counter, defaultdict
from io import StringIO
# Global variables
config = None
email_log = None
def tee_log(infile, out_lines, log_level):
"""
Create a thread that saves all the output on infile to out_lines and
logs every line with log_level
"""
def tee_thread():
for line in iter(infile.readline, ""):
logging.log(log_level, line.rstrip())
out_lines.append(line)
infile.close()
t = threading.Thread(target=tee_thread)
t.daemon = True
t.start()
return t
def snapraid_command(command, args={}, *, allow_statuscodes=[]):
"""
Run snapraid command
Raises subprocess.CalledProcessError if errorlevel != 0
"""
arguments = ["--quiet"]
for (k, v) in args.items():
arguments.extend(["--" + k, str(v)])
p = subprocess.Popen(
[config["snapraid"]["executable"], command] + arguments,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
# Snapraid always outputs utf-8 on windows. On linux, utf-8
# also seems a sensible assumption.
encoding="utf-8",
errors="replace")
out = []
threads = [
tee_log(p.stdout, out, logging.OUTPUT),
tee_log(p.stderr, [], logging.OUTERR)]
for t in threads:
t.join()
ret = p.wait()
    # sleep for a while to prevent output mixup
time.sleep(0.3)
if ret == 0 or ret in allow_statuscodes:
return out
else:
raise subprocess.CalledProcessError(ret, "snapraid " + command)
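# Illustration (hedged): with executable=/usr/bin/snapraid configured, a
# call such as snapraid_command("scrub", {"plan": 12, "older-than": 10})
# runs roughly
#     /usr/bin/snapraid scrub --quiet --plan 12 --older-than 10
# while teeing stdout/stderr into the log as the command executes.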
def send_email(success):
import smtplib
from email.mime.text import MIMEText
from email import charset
if len(config["smtp"]["host"]) == 0:
logging.error("Failed to send email because smtp host is not set")
return
# use quoted-printable instead of the default base64
charset.add_charset("utf-8", charset.SHORTEST, charset.QP)
if success:
body = "SnapRAID job completed successfully:\n\n\n"
else:
body = "Error during SnapRAID job:\n\n\n"
log = email_log.getvalue()
maxsize = config['email'].get('maxsize', 500) * 1024
if maxsize and len(log) > maxsize:
cut_lines = log.count("\n", maxsize // 2, -maxsize // 2)
log = (
"NOTE: Log was too big for email and was shortened\n\n" +
log[:maxsize // 2] +
"[...]\n\n\n --- LOG WAS TOO BIG - {} LINES REMOVED --\n\n\n[...]".format(
cut_lines) +
log[-maxsize // 2:])
body += log
msg = MIMEText(body, "plain", "utf-8")
msg["Subject"] = config["email"]["subject"] + \
(" SUCCESS" if success else " ERROR")
msg["From"] = config["email"]["from"]
msg["To"] = config["email"]["to"]
smtp = {"host": config["smtp"]["host"]}
if config["smtp"]["port"]:
smtp["port"] = config["smtp"]["port"]
if config["smtp"]["ssl"]:
server = smtplib.SMTP_SSL(**smtp)
else:
server = smtplib.SMTP(**smtp)
if config["smtp"]["tls"]:
server.starttls()
if config["smtp"]["user"]:
server.login(config["smtp"]["user"], config["smtp"]["password"])
server.sendmail(
config["email"]["from"],
[config["email"]["to"]],
msg.as_string())
server.quit()
def finish(is_success):
if ("error", "success")[is_success] in config["email"]["sendon"]:
try:
send_email(is_success)
except Exception:
logging.exception("Failed to send email")
if is_success:
logging.info("Run finished successfully")
else:
logging.error("Run failed")
sys.exit(0 if is_success else 1)
def load_config(args):
global config
parser = configparser.RawConfigParser()
parser.read(args.conf)
sections = ["snapraid", "logging", "email", "smtp", "scrub"]
config = dict((x, defaultdict(lambda: "")) for x in sections)
for section in parser.sections():
for (k, v) in parser.items(section):
config[section][k] = v.strip()
int_options = [
("snapraid", "deletethreshold"), ("logging", "maxsize"),
("scrub", "older-than"), ("email", "maxsize"),
]
for section, option in int_options:
try:
config[section][option] = int(config[section][option])
except ValueError:
config[section][option] = 0
config["smtp"]["ssl"] = (config["smtp"]["ssl"].lower() == "true")
config["smtp"]["tls"] = (config["smtp"]["tls"].lower() == "true")
config["scrub"]["enabled"] = (config["scrub"]["enabled"].lower() == "true")
config["email"]["short"] = (config["email"]["short"].lower() == "true")
config["snapraid"]["touch"] = (config["snapraid"]["touch"].lower() == "true")
# Migration
if config["scrub"]["percentage"]:
config["scrub"]["plan"] = config["scrub"]["percentage"]
if args.scrub is not None:
config["scrub"]["enabled"] = args.scrub
if args.ignore_deletethreshold:
config["snapraid"]["deletethreshold"] = -1
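# A minimal illustrative configuration file (hedged; section and option
# names mirror what load_config() reads above, the values are placeholders):
#
#   [snapraid]
#   executable = /usr/bin/snapraid
#   deletethreshold = 40
#   touch = false
#   [logging]
#   file = snapraid.log
#   maxsize = 5000
#   [email]
#   sendon = error
#   short = true
#   subject = [SnapRAID]
#   from = snapraid@example.com
#   to = admin@example.com
#   maxsize = 500
#   [smtp]
#   host = smtp.example.com
#   ssl = false
#   tls = true
#   user =
#   password =
#   [scrub]
#   enabled = true
#   plan = 12
#   older-than = 10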
def setup_logger():
log_format = logging.Formatter(
"%(asctime)s [%(levelname)-6.6s] %(message)s")
root_logger = logging.getLogger()
logging.OUTPUT = 15
logging.addLevelName(logging.OUTPUT, "OUTPUT")
logging.OUTERR = 25
logging.addLevelName(logging.OUTERR, "OUTERR")
root_logger.setLevel(logging.OUTPUT)
console_logger = logging.StreamHandler(sys.stdout)
console_logger.setFormatter(log_format)
root_logger.addHandler(console_logger)
if config["logging"]["file"]:
max_log_size = max(config["logging"]["maxsize"], 0) * 1024
file_logger = logging.handlers.RotatingFileHandler(
config["logging"]["file"],
maxBytes=max_log_size,
backupCount=9)
file_logger.setFormatter(log_format)
root_logger.addHandler(file_logger)
if config["email"]["sendon"]:
global email_log
email_log = StringIO()
email_logger = logging.StreamHandler(email_log)
email_logger.setFormatter(log_format)
if config["email"]["short"]:
            # Don't send program stdout in email
email_logger.setLevel(logging.INFO)
root_logger.addHandler(email_logger)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--conf",
default="snapraid-runner.conf",
metavar="CONFIG",
help="Configuration file (default: %(default)s)")
parser.add_argument("--no-scrub", action='store_false',
dest='scrub', default=None,
help="Do not scrub (overrides config)")
parser.add_argument("--ignore-deletethreshold", action='store_true',
help="Sync even if configured delete threshold is exceeded")
args = parser.parse_args()
if not os.path.exists(args.conf):
print("snapraid-runner configuration file not found")
parser.print_help()
sys.exit(2)
try:
load_config(args)
except Exception:
print("unexpected exception while loading config")
print(traceback.format_exc())
sys.exit(2)
try:
setup_logger()
except Exception:
print("unexpected exception while setting up logging")
print(traceback.format_exc())
sys.exit(2)
try:
run()
except Exception:
logging.exception("Run failed due to unexpected exception:")
finish(False)
def run():
logging.info("=" * 60)
logging.info("Run started")
logging.info("=" * 60)
if not os.path.isfile(config["snapraid"]["executable"]):
logging.error("The configured snapraid executable \"{}\" does not "
"exist or is not a file".format(
config["snapraid"]["executable"]))
finish(False)
if config["snapraid"]["touch"]:
logging.info("Running touch...")
snapraid_command("touch")
logging.info("*" * 60)
logging.info("Running diff...")
diff_out = snapraid_command("diff", allow_statuscodes=[2])
logging.info("*" * 60)
diff_results = Counter(line.split(" ")[0] for line in diff_out)
diff_results = dict((x, diff_results[x]) for x in
["add", "remove", "move", "update"])
logging.info(("Diff results: {add} added, {remove} removed, " +
"{move} moved, {update} modified").format(**diff_results))
if (config["snapraid"]["deletethreshold"] >= 0 and
diff_results["remove"] > config["snapraid"]["deletethreshold"]):
logging.error(
"Deleted files exceed delete threshold of {}, aborting".format(
config["snapraid"]["deletethreshold"]))
logging.error("Run again with --ignore-deletethreshold to sync anyways")
finish(False)
if (diff_results["remove"] + diff_results["add"] + diff_results["move"] +
diff_results["update"] == 0):
logging.info("No changes detected, no sync required")
else:
logging.info("Running sync...")
try:
snapraid_command("sync")
except subprocess.CalledProcessError as e:
logging.error(e)
finish(False)
logging.info("*" * 60)
if config["scrub"]["enabled"]:
logging.info("Running scrub...")
try:
# Check if a percentage plan was given
int(config["scrub"]["plan"])
except ValueError:
scrub_args = {"plan": config["scrub"]["plan"]}
else:
scrub_args = {
"plan": config["scrub"]["plan"],
"older-than": config["scrub"]["older-than"],
}
try:
snapraid_command("scrub", scrub_args)
except subprocess.CalledProcessError as e:
logging.error(e)
finish(False)
logging.info("*" * 60)
logging.info("All done")
finish(True)
main()
|
the-stack_0_11935 | #
# Copyright 2017 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import abstractmethod
import math
import numpy as np
from pandas import isnull
from toolz import merge
from zipline.assets import Equity, Future
from zipline.errors import HistoryWindowStartsBeforeData
from zipline.finance.constants import ROOT_SYMBOL_TO_ETA, DEFAULT_ETA
from zipline.finance.shared import AllowedAssetMarker, FinancialModelMeta
from zipline.finance.transaction import create_transaction
from zipline.utils.cache import ExpiringCache
from zipline.utils.dummy import DummyMapping
from zipline.utils.input_validation import (
expect_bounded,
expect_strictly_bounded,
)
SELL = 1 << 0
BUY = 1 << 1
STOP = 1 << 2
LIMIT = 1 << 3
SQRT_252 = math.sqrt(252)
DEFAULT_EQUITY_VOLUME_SLIPPAGE_BAR_LIMIT = 0.025
DEFAULT_FUTURE_VOLUME_SLIPPAGE_BAR_LIMIT = 0.05
class LiquidityExceeded(Exception):
pass
def fill_price_worse_than_limit_price(fill_price, order):
"""
Checks whether the fill price is worse than the order's limit price.
Parameters
----------
fill_price: float
The price to check.
order: zipline.finance.order.Order
The order whose limit price to check.
Returns
-------
bool: Whether the fill price is above the limit price (for a buy) or below
the limit price (for a sell).
"""
if order.limit:
# this is tricky! if an order with a limit price has reached
# the limit price, we will try to fill the order. do not fill
# these shares if the impacted price is worse than the limit
# price. return early to avoid creating the transaction.
# buy order is worse if the impacted price is greater than
# the limit price. sell order is worse if the impacted price
# is less than the limit price
if (order.direction > 0 and fill_price > order.limit) or (
order.direction < 0 and fill_price < order.limit
):
return True
return False
class SlippageModel(metaclass=FinancialModelMeta):
"""
Abstract base class for slippage models.
Slippage models are responsible for the rates and prices at which orders
fill during a simulation.
To implement a new slippage model, create a subclass of
:class:`~zipline.finance.slippage.SlippageModel` and implement
:meth:`process_order`.
Methods
-------
process_order(data, order)
Attributes
----------
volume_for_bar : int
Number of shares that have already been filled for the
currently-filling asset in the current minute. This attribute is
maintained automatically by the base class. It can be used by
subclasses to keep track of the total amount filled if there are
multiple open orders for a single asset.
Notes
-----
Subclasses that define their own constructors should call
``super(<subclass name>, self).__init__()`` before performing other
initialization.
"""
# Asset types that are compatible with the given model.
allowed_asset_types = (Equity, Future)
def __init__(self):
self._volume_for_bar = 0
@property
def volume_for_bar(self):
return self._volume_for_bar
@abstractmethod
def process_order(self, data, order):
"""
Compute the number of shares and price to fill for ``order`` in the
current minute.
Parameters
----------
data : zipline.protocol.BarData
The data for the given bar.
order : zipline.finance.order.Order
The order to simulate.
Returns
-------
execution_price : float
The price of the fill.
execution_volume : int
The number of shares that should be filled. Must be between ``0``
and ``order.amount - order.filled``. If the amount filled is less
than the amount remaining, ``order`` will remain open and will be
passed again to this method in the next minute.
Raises
------
zipline.finance.slippage.LiquidityExceeded
May be raised if no more orders should be processed for the current
asset during the current bar.
Notes
-----
Before this method is called, :attr:`volume_for_bar` will be set to the
number of shares that have already been filled for ``order.asset`` in
the current minute.
:meth:`process_order` is not called by the base class on bars for which
there was no historical volume.
"""
raise NotImplementedError("process_order")
def simulate(self, data, asset, orders_for_asset):
self._volume_for_bar = 0
volume = data.current(asset, "volume")
if volume == 0:
return
# can use the close price, since we verified there's volume in this
# bar.
price = data.current(asset, "close")
# BEGIN
#
# Remove this block after fixing data to ensure volume always has
# corresponding price.
if isnull(price):
return
# END
dt = data.current_dt
for order in orders_for_asset:
if order.open_amount == 0:
continue
order.check_triggers(price, dt)
if not order.triggered:
continue
txn = None
try:
execution_price, execution_volume = self.process_order(
data, order
)
if execution_price is not None:
txn = create_transaction(
order,
data.current_dt,
execution_price,
execution_volume,
)
except LiquidityExceeded:
break
if txn:
self._volume_for_bar += abs(txn.amount)
yield order, txn
def asdict(self):
return self.__dict__
class NoSlippage(SlippageModel):
"""A slippage model where all orders fill immediately and completely at the
current close price.
Notes
-----
This is primarily used for testing.
"""
@staticmethod
def process_order(data, order):
return (
data.current(order.asset, "close"),
order.amount,
)
class EquitySlippageModel(SlippageModel, metaclass=AllowedAssetMarker):
"""
Base class for slippage models which only support equities.
"""
allowed_asset_types = (Equity,)
class FutureSlippageModel(SlippageModel, metaclass=AllowedAssetMarker):
"""
Base class for slippage models which only support futures.
"""
allowed_asset_types = (Future,)
class VolumeShareSlippage(SlippageModel):
"""
Model slippage as a quadratic function of percentage of historical volume.
Orders to buy will be filled at::
price * (1 + price_impact * (volume_share ** 2))
Orders to sell will be filled at::
price * (1 - price_impact * (volume_share ** 2))
where ``price`` is the close price for the bar, and ``volume_share`` is the
percentage of minutely volume filled, up to a max of ``volume_limit``.
Parameters
----------
volume_limit : float, optional
Maximum percent of historical volume that can fill in each bar. 0.5
means 50% of historical volume. 1.0 means 100%. Default is 0.025 (i.e.,
2.5%).
price_impact : float, optional
Scaling coefficient for price impact. Larger values will result in more
simulated price impact. Smaller values will result in less simulated
price impact. Default is 0.1.
"""
def __init__(
self,
volume_limit=DEFAULT_EQUITY_VOLUME_SLIPPAGE_BAR_LIMIT,
price_impact=0.1,
):
super(VolumeShareSlippage, self).__init__()
self.volume_limit = volume_limit
self.price_impact = price_impact
def __repr__(self):
return """
{class_name}(
volume_limit={volume_limit},
price_impact={price_impact})
""".strip().format(
class_name=self.__class__.__name__,
volume_limit=self.volume_limit,
price_impact=self.price_impact,
)
def process_order(self, data, order):
volume = data.current(order.asset, "volume")
max_volume = self.volume_limit * volume
# price impact accounts for the total volume of transactions
# created against the current minute bar
remaining_volume = max_volume - self.volume_for_bar
if remaining_volume < 1:
# we can't fill any more transactions
raise LiquidityExceeded()
# the current order amount will be the min of the
# volume available in the bar or the open amount.
cur_volume = int(min(remaining_volume, abs(order.open_amount)))
if cur_volume < 1:
return None, None
# tally the current amount into our total amount ordered.
# total amount will be used to calculate price impact
total_volume = self.volume_for_bar + cur_volume
volume_share = min(total_volume / volume, self.volume_limit)
price = data.current(order.asset, "close")
# BEGIN
#
# Remove this block after fixing data to ensure volume always has
# corresponding price.
if isnull(price):
return
# END
simulated_impact = (
volume_share ** 2
* math.copysign(self.price_impact, order.direction)
* price
)
impacted_price = price + simulated_impact
if fill_price_worse_than_limit_price(impacted_price, order):
return None, None
return (impacted_price, math.copysign(cur_volume, order.direction))
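# Worked example of the VolumeShareSlippage formula (hedged, illustrative
# numbers only): with the defaults volume_limit=0.025 and price_impact=0.1,
# a buy that consumes the full 2.5% of the bar's volume has
# volume_share = 0.025, so the fill price is
#     price * (1 + 0.1 * 0.025 ** 2) = price * 1.0000625
# i.e. about 0.625 basis points of simulated upward impact.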
class FixedSlippage(SlippageModel):
"""
Simple model assuming a fixed-size spread for all assets.
Parameters
----------
spread : float, optional
Size of the assumed spread for all assets.
Orders to buy will be filled at ``close + (spread / 2)``.
Orders to sell will be filled at ``close - (spread / 2)``.
Notes
-----
This model does not impose limits on the size of fills. An order for an
asset will always be filled as soon as any trading activity occurs in the
order's asset, even if the size of the order is greater than the historical
volume.
"""
def __init__(self, spread=0.0):
super(FixedSlippage, self).__init__()
self.spread = spread
def __repr__(self):
return "{class_name}(spread={spread})".format(
class_name=self.__class__.__name__,
spread=self.spread,
)
def process_order(self, data, order):
price = data.current(order.asset, "close")
return (price * (1 + self.spread / 2.0 * order.direction), order.amount)
class MarketImpactBase(SlippageModel):
"""
Base class for slippage models which compute a simulated price impact
according to a history lookback.
"""
NO_DATA_VOLATILITY_SLIPPAGE_IMPACT = 10.0 / 10000
def __init__(self):
super(MarketImpactBase, self).__init__()
self._window_data_cache = ExpiringCache()
@abstractmethod
def get_txn_volume(self, data, order):
"""
Return the number of shares we would like to order in this minute.
Parameters
----------
data : BarData
order : Order
Return
------
int : the number of shares
"""
raise NotImplementedError("get_txn_volume")
@abstractmethod
def get_simulated_impact(
self,
order,
current_price,
current_volume,
txn_volume,
mean_volume,
volatility,
):
"""
Calculate simulated price impact.
Parameters
----------
order : The order being processed.
current_price : Current price of the asset being ordered.
current_volume : Volume of the asset being ordered for the current bar.
txn_volume : Number of shares/contracts being ordered.
mean_volume : Trailing ADV of the asset.
volatility : Annualized daily volatility of returns.
Return
------
int : impact on the current price.
"""
raise NotImplementedError("get_simulated_impact")
def process_order(self, data, order):
if order.open_amount == 0:
return None, None
minute_data = data.current(order.asset, ["volume", "high", "low"])
mean_volume, volatility = self._get_window_data(data, order.asset, 20)
# Price to use is the average of the minute bar's open and close.
price = np.mean([minute_data["high"], minute_data["low"]])
volume = minute_data["volume"]
if not volume:
return None, None
txn_volume = int(
min(self.get_txn_volume(data, order), abs(order.open_amount))
)
# If the computed transaction volume is zero or a decimal value, 'int'
# will round it down to zero. In that case just bail.
if txn_volume == 0:
return None, None
if mean_volume == 0 or np.isnan(volatility):
# If this is the first day the contract exists or there is no
# volume history, default to a conservative estimate of impact.
simulated_impact = price * self.NO_DATA_VOLATILITY_SLIPPAGE_IMPACT
else:
simulated_impact = self.get_simulated_impact(
order=order,
current_price=price,
current_volume=volume,
txn_volume=txn_volume,
mean_volume=mean_volume,
volatility=volatility,
)
impacted_price = price + math.copysign(
simulated_impact, order.direction
)
if fill_price_worse_than_limit_price(impacted_price, order):
return None, None
return impacted_price, math.copysign(txn_volume, order.direction)
def _get_window_data(self, data, asset, window_length):
"""
Internal utility method to return the trailing mean volume over the
past 'window_length' days, and volatility of close prices for a
specific asset.
Parameters
----------
data : The BarData from which to fetch the daily windows.
asset : The Asset whose data we are fetching.
window_length : Number of days of history used to calculate the mean
volume and close price volatility.
Returns
-------
(mean volume, volatility)
"""
try:
values = self._window_data_cache.get(asset, data.current_session)
except KeyError:
try:
# Add a day because we want 'window_length' complete days,
# excluding the current day.
volume_history = data.history(
asset,
"volume",
window_length + 1,
"1d",
)
close_history = data.history(
asset,
"close",
window_length + 1,
"1d",
)
except HistoryWindowStartsBeforeData:
# If there is not enough data to do a full history call, return
# values as if there was no data.
return 0, np.NaN
# Exclude the first value of the percent change array because it is
# always just NaN.
close_volatility = (
close_history[:-1]
.pct_change()[1:]
.std(
skipna=False,
)
)
values = {
"volume": volume_history[:-1].mean(),
"close": close_volatility * SQRT_252,
}
self._window_data_cache.set(asset, values, data.current_session)
return values["volume"], values["close"]
class VolatilityVolumeShare(MarketImpactBase):
"""
Model slippage for futures contracts according to the following formula:
new_price = price + (price * MI / 10000),
where 'MI' is market impact, which is defined as:
MI = eta * sigma * sqrt(psi)
- ``eta`` is a constant which varies by root symbol.
- ``sigma`` is 20-day annualized volatility.
- ``psi`` is the volume traded in the given bar divided by 20-day ADV.
Parameters
----------
volume_limit : float
Maximum percentage (as a decimal) of a bar's total volume that can be
traded.
eta : float or dict
Constant used in the market impact formula. If given a float, the eta
for all futures contracts is the same. If given a dictionary, it must
map root symbols to the eta for contracts of that symbol.
"""
NO_DATA_VOLATILITY_SLIPPAGE_IMPACT = 7.5 / 10000
allowed_asset_types = (Future,)
def __init__(self, volume_limit, eta=ROOT_SYMBOL_TO_ETA):
super(VolatilityVolumeShare, self).__init__()
self.volume_limit = volume_limit
# If 'eta' is a constant, use a dummy mapping to treat it as a
# dictionary that always returns the same value.
# NOTE: This dictionary does not handle unknown root symbols, so it may
# be worth revisiting this behavior.
if isinstance(eta, (int, float)):
self._eta = DummyMapping(float(eta))
else:
# Eta is a dictionary. If the user's dictionary does not provide a
# value for a certain contract, fall back on the pre-defined eta
# values per root symbol.
self._eta = merge(ROOT_SYMBOL_TO_ETA, eta)
def __repr__(self):
if isinstance(self._eta, DummyMapping):
# Eta is a constant, so extract it.
eta = self._eta["dummy key"]
else:
eta = "<varies>"
return "{class_name}(volume_limit={volume_limit}, eta={eta})".format(
class_name=self.__class__.__name__,
volume_limit=self.volume_limit,
eta=eta,
)
def get_simulated_impact(
self,
order,
current_price,
current_volume,
txn_volume,
mean_volume,
volatility,
):
try:
eta = self._eta[order.asset.root_symbol]
except Exception:
eta = DEFAULT_ETA
psi = txn_volume / mean_volume
market_impact = eta * volatility * math.sqrt(psi)
        # We divide by 10,000 because this model computes in basis points.
# To convert from bps to % we need to divide by 100, then again to
# convert from % to fraction.
return (current_price * market_impact) / 10000
def get_txn_volume(self, data, order):
volume = data.current(order.asset, "volume")
return volume * self.volume_limit
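# Worked example of the market-impact formula (hedged, illustrative numbers
# only, not taken from ROOT_SYMBOL_TO_ETA): with eta = 0.05, 20-day
# annualized volatility sigma = 0.20 and an order equal to 1% of ADV
# (psi = 0.01),
#     MI = 0.05 * 0.20 * sqrt(0.01) = 0.001
# and get_simulated_impact() returns price * MI / 10000, so a contract
# priced at 100.0 is shifted by 100.0 * 0.001 / 10000 = 1e-5 in the
# direction of the order.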
class FixedBasisPointsSlippage(SlippageModel):
"""
Model slippage as a fixed percentage difference from historical minutely
close price, limiting the size of fills to a fixed percentage of historical
minutely volume.
Orders to buy are filled at::
historical_price * (1 + (basis_points * 0.0001))
Orders to sell are filled at::
historical_price * (1 - (basis_points * 0.0001))
Fill sizes are capped at::
historical_volume * volume_limit
Parameters
----------
basis_points : float, optional
Number of basis points of slippage to apply for each fill. Default
is 5 basis points.
volume_limit : float, optional
Fraction of trading volume that can be filled each minute. Default is
10% of trading volume.
Notes
-----
- A basis point is one one-hundredth of a percent.
- This class, default-constructed, is zipline's default slippage model for
equities.
"""
@expect_bounded(
basis_points=(0, None),
__funcname="FixedBasisPointsSlippage",
)
@expect_strictly_bounded(
volume_limit=(0, None),
__funcname="FixedBasisPointsSlippage",
)
def __init__(self, basis_points=5.0, volume_limit=0.1):
super(FixedBasisPointsSlippage, self).__init__()
self.basis_points = basis_points
self.percentage = self.basis_points / 10000.0
self.volume_limit = volume_limit
def __repr__(self):
return """
{class_name}(
basis_points={basis_points},
volume_limit={volume_limit},
)
""".strip().format(
class_name=self.__class__.__name__,
basis_points=self.basis_points,
volume_limit=self.volume_limit,
)
def process_order(self, data, order):
volume = data.current(order.asset, "volume")
max_volume = int(self.volume_limit * volume)
price = data.current(order.asset, "close")
shares_to_fill = min(
abs(order.open_amount), max_volume - self.volume_for_bar
)
if shares_to_fill == 0:
raise LiquidityExceeded()
return (
price + price * (self.percentage * order.direction),
shares_to_fill * order.direction,
)
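# Worked example (hedged, illustrative numbers): with the defaults
# basis_points=5.0 and volume_limit=0.1, a buy against a close of 100.00
# fills at 100.00 * (1 + 5 * 0.0001) = 100.05, and at most 10% of the
# bar's historical volume can be filled for the asset in that minute.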
if __name__ == "__main__":
f = EquitySlippageModel()
# print(f.__meta__)
print(f.__class__)
|
the-stack_0_11936 | # Adapted from ZJULearning/resa
# Better to use a decoupled implementation,
# costs more codes, but clear.
# Diff from RESA official code:
# 1. we use BN+ReLU in channel reducer
# 2. we always use the BUSD decoder in the paper (official code does not use BUSD in CULane)
# 3. we always use 5 RESA iterations (4 in official code)
# 4. we use a higher capacity lane existence classifier (same as ERFNet/ENet baseline)
# 5. we use the SCNN sqrt(5) init trick for RESA, which
# 5.1. enables fewer warmup steps
# 5.2. combined with 4, produces slightly better performance
# 6. we do not use horizontal flip or cutting height in loading, in which
# 6.1. flip does not help performance (at least on the val set)
# 6.2. w.o. cutting height trick probably is the main reason for our lower performance, but we can't use it since
# other pytorch-auto-drive models do not use it.
import torch.nn as nn
from ..common_models import RESA, RESAReducer, BUSD, RESALaneExist, EDLaneExist, PlainDecoder
from .._utils import IntermediateLayerGetter
from .. import resnet
class RESANet(nn.Module):
def __init__(self, num_classes, backbone_name, flattened_size, channel_reduce, pretrained_backbone=True):
super(RESANet, self).__init__()
backbone = resnet.__dict__[backbone_name](
pretrained=pretrained_backbone,
replace_stride_with_dilation=[False, True, True])
return_layers = {'layer3': 'out'}
self.backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)
in_channels = 1024 if backbone_name == 'resnet50' or backbone_name == 'resnet101' else 256
# self.channel_reducer = RESAReducer(in_channels=in_channels, reduce=channel_reduce, bn_relu=False)
self.channel_reducer = RESAReducer(in_channels=in_channels, reduce=channel_reduce)
self.spatial_conv = RESA()
self.decoder = BUSD(num_classes=num_classes)
# self.decoder = PlainDecoder(num_classes=num_classes)
self.lane_classifier = EDLaneExist(num_output=num_classes - 1, flattened_size=flattened_size)
# self.lane_classifier = RESALaneExist(num_output=num_classes - 1, flattened_size=flattened_size)
def forward(self, x):
x = self.backbone(x)['out']
x = self.channel_reducer(x)
x = self.spatial_conv(x)
res = {'out': self.decoder(x),
'lane': self.lane_classifier(x)}
return res
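# Hedged instantiation sketch (num_classes and flattened_size depend on the
# dataset and input resolution; the values below are placeholders):
#
#     model = RESANet(num_classes=5, backbone_name='resnet50',
#                     flattened_size=4500, channel_reduce=128)
#     res = model(images)   # images: [N, 3, H, W] float tensor
#     res['out']    # pixel-wise lane segmentation logits from BUSD
#     res['lane']   # lane existence logits, one per lane (num_classes - 1)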
|
the-stack_0_11937 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from ... import core
from ... import layers
from ... import framework
def append_cast_op(i, o, prog):
"""
Append a cast op in a given Program to cast input `i` to data type `o.dtype`.
Args:
i (Variable): The input Variable.
o (Variable): The output Variable.
prog (Program): The Program to append cast op.
"""
prog.global_block().append_op(
type="cast",
inputs={"X": i},
outputs={"Out": o},
attrs={"in_dtype": i.dtype,
"out_dtype": o.dtype})
def _rename_arg(op, old_name, new_name):
"""
    If an op has old_name among its input or output arguments, rename
    those arguments to new_name.
Args:
op (Operator): Current operator.
old_name (str): The old name of input args.
new_name (str): The new name of input args.
"""
op_desc = op.desc
if isinstance(op_desc, tuple):
op_desc = op_desc[0]
op_desc._rename_input(old_name, new_name)
op_desc._rename_output(old_name, new_name)
def _dtype_to_str(dtype):
"""
Convert specific variable type to its corresponding string.
Args:
dtype (VarType): Variable type.
"""
if dtype == core.VarDesc.VarType.FP16:
return 'fp16'
else:
return 'fp32'
def _insert_cast_op(block, op, idx, src_dtype, dest_dtype):
"""
Insert cast op and rename args of input and output.
Args:
block (Program): The block in which the operator is.
op (Operator): The operator to insert cast op.
idx (int): The index of current operator.
src_dtype (VarType): The input variable dtype of cast op.
        dest_dtype (VarType): The output variable dtype of cast op.
    Returns:
        num_cast_ops (int): The number of cast ops that have been inserted.
"""
num_cast_ops = 0
valid_types = [
core.VarDesc.VarType.LOD_TENSOR, core.VarDesc.VarType.SELECTED_ROWS,
core.VarDesc.VarType.LOD_TENSOR_ARRAY
]
for in_name in op.input_names:
if src_dtype == core.VarDesc.VarType.FP32 and op.type == 'batch_norm':
if in_name != 'X':
continue
for in_var_name in op.input(in_name):
in_var = block.var(in_var_name)
if in_var.type not in valid_types:
continue
if in_var.dtype == src_dtype:
cast_name = in_var.name + '.cast_' + _dtype_to_str(dest_dtype)
out_var = block.vars.get(cast_name)
if out_var is None or out_var.dtype != dest_dtype:
out_var = block.create_var(
name=cast_name,
dtype=dest_dtype,
persistable=False,
stop_gradient=False)
block._insert_op(
idx,
type="cast",
inputs={"X": in_var},
outputs={"Out": out_var},
attrs={
"in_dtype": in_var.dtype,
"out_dtype": out_var.dtype
})
num_cast_ops += 1
_rename_arg(op, in_var.name, out_var.name)
else:
if op.has_attr('in_dtype'):
op._set_attr('in_dtype', dest_dtype)
if src_dtype == core.VarDesc.VarType.FP32:
for out_name in op.output_names:
if op.type == 'batch_norm' and out_name != 'Y':
continue
for out_var_name in op.output(out_name):
out_var = block.var(out_var_name)
if out_var.type not in valid_types:
continue
if out_var.dtype == core.VarDesc.VarType.FP32:
out_var.desc.set_dtype(core.VarDesc.VarType.FP16)
if op.has_attr('out_dtype'):
op._set_attr('out_dtype', core.VarDesc.VarType.FP16)
return num_cast_ops
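# Example of the effect (added note): when a white-set op consumes an fp32 variable "X" and
# dest_dtype is FP16, a cast op producing "X.cast_fp16" is inserted in front of it and the
# op's input is renamed to "X.cast_fp16"; the cast output is looked up by name before being
# created, so consumers of the same variable can share one cast result.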
def find_true_prev_op(ops, cur_op, var_name):
"""
Find the true prev op that outputs var_name variable.
Args:
ops (list): A list of ops.
cur_op (Operator): Current operator which has var_name variable.
var_name (string): Variable name.
"""
prev_op = []
for op in ops:
if op == cur_op:
break
for out_name in op.output_names:
for out_var_name in op.output(out_name):
if out_var_name == var_name:
prev_op.append(op)
if prev_op:
if not len(prev_op) == 1:
raise ValueError("There must be only one previous op "
"that outputs {0} variable".format(var_name))
else:
return prev_op[0]
return None
def _is_in_black_varnames(op, amp_lists):
for in_name in op.input_arg_names:
if in_name in amp_lists.black_varnames:
return True
for out_name in op.output_arg_names:
if out_name in amp_lists.black_varnames:
return True
return False
def rewrite_program(main_prog, amp_lists):
"""
Traverse all ops in current block and insert cast op according to
which set current op belongs to.
1. When an op belongs to the black list, add it to black set
2. When an op belongs to the white list, add it to white set
3. When an op belongs to the gray list. If one
of its inputs is the output of black set op or black list op,
add it to black set. If all of its previous ops are not black
op and one of its inputs is the output of white set op or
white list op, add it to white set.
4. When an op isn't in the lists, add it to black op set.
5. Add necessary cast ops to make sure that black set op will be
computed in fp32 mode, while white set op will be computed in
fp16 mode.
Args:
main_prog (Program): The main program for training.
"""
block = main_prog.global_block()
ops = block.ops
white_op_set = set()
black_op_set = set()
for op in ops:
if amp_lists.black_varnames is not None and _is_in_black_varnames(
op, amp_lists):
black_op_set.add(op)
continue
if op.type in amp_lists.black_list:
black_op_set.add(op)
elif op.type in amp_lists.white_list:
white_op_set.add(op)
elif op.type in amp_lists.gray_list:
is_black_op = False
is_white_op = False
for in_name in op.input_names:
# if this op has inputs
if in_name:
for in_var_name in op.input(in_name):
in_var = block.var(in_var_name)
# this in_var isn't the output of other op
if in_var.op is None:
continue
elif in_var.op is op:
prev_op = find_true_prev_op(ops, op, in_var_name)
if prev_op is None:
continue
else:
prev_op = in_var.op
                        # classify this op by the color of the op that produced this input
if prev_op in black_op_set or \
prev_op.type in amp_lists.black_list:
is_black_op = True
elif prev_op in white_op_set or \
prev_op.type in amp_lists.white_list:
is_white_op = True
if is_black_op:
black_op_set.add(op)
elif is_white_op:
white_op_set.add(op)
else:
pass
else:
# For numerical safe, we apply fp32 computation on ops that
# are not determined which list they should stay.
black_op_set.add(op)
idx = 0
while idx < len(ops):
op = ops[idx]
num_cast_ops = 0
if op in black_op_set:
num_cast_ops = _insert_cast_op(block, op, idx,
core.VarDesc.VarType.FP16,
core.VarDesc.VarType.FP32)
elif op in white_op_set:
num_cast_ops = _insert_cast_op(block, op, idx,
core.VarDesc.VarType.FP32,
core.VarDesc.VarType.FP16)
else:
pass
idx += num_cast_ops + 1
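# Toy illustration (added; pure Python, no Paddle dependency) of the classification rule
# implemented by rewrite_program: black wins over white, gray ops inherit a color from the
# ops producing their inputs, and unknown ops default to black (fp32) for numerical safety.
def _toy_classify(op_type, pred_colors, black_list, white_list, gray_list):
    """pred_colors: colors already assigned to the ops producing this op's inputs."""
    if op_type in black_list:
        return 'black'
    if op_type in white_list:
        return 'white'
    if op_type in gray_list:
        if 'black' in pred_colors:
            return 'black'
        if 'white' in pred_colors:
            return 'white'
        return 'undecided'
    return 'black'
# e.g. _toy_classify('elementwise_add', ['white'], {'softmax'}, {'mul'}, {'elementwise_add'})
# returns 'white', so the add would run in fp16 right after a white (fp16) mul.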
def update_role_var_grad(main_prog, params_grads):
"""
    Update op_role_var attr for some ops to make sure the gradients
    transferred across gpus are FP16.
1. Check whether the op that outputs gradient is cast or not.
2. If op is cast and gradient is FP32, remove the op_role_var
and find the prev op which outputs FP16 gradient
3. Update the op_role_var of the prev op.
Args:
main_prog (Program): The main program for training.
params_grads (list): A list of params and grads.
"""
block = main_prog.global_block()
BACKWARD = core.op_proto_and_checker_maker.OpRole.Backward
OPTIMIZE = core.op_proto_and_checker_maker.OpRole.Optimize
for p, g in params_grads:
op = g.op
if g.dtype == core.VarDesc.VarType.FP32 and op.type == 'cast':
role = op.attr('op_role')
if role & int(BACKWARD) and op.has_attr('op_role_var'):
op.desc.remove_attr("op_role_var")
else:
raise ValueError("The cast op {0} must be in BACKWARD role "
"and have op_role_var attr.".format(op))
fp16_grad_name = op.input(op.input_names[0])[0]
op_for_fp16_grad = find_true_prev_op(block.ops, op, fp16_grad_name)
op_role_var_attr_name = \
core.op_proto_and_checker_maker.kOpRoleVarAttrName()
attr_val = [p.name, fp16_grad_name]
if op_for_fp16_grad.has_attr(op_role_var_attr_name):
attr_val.extend(op_for_fp16_grad.attr(op_role_var_attr_name))
op_for_fp16_grad._set_attr(op_role_var_attr_name, attr_val)
# maximize the allreduce overlap
op._set_attr('op_role', OPTIMIZE)
def update_loss_scaling(is_overall_finite, prev_loss_scaling, num_good_steps,
num_bad_steps, incr_every_n_steps,
decr_every_n_nan_or_inf, incr_ratio, decr_ratio):
"""
    Update loss scaling according to overall gradients. If all gradients are
    finite after incr_every_n_steps, loss scaling will increase by incr_ratio.
    Otherwise, loss scaling will decrease by decr_ratio after
    decr_every_n_nan_or_inf steps in which some gradients are infinite.
Args:
is_overall_finite (Variable): A boolean variable indicates whether
all gradients are finite.
prev_loss_scaling (Variable): Previous loss scaling.
num_good_steps (Variable): A variable accumulates good steps in which
all gradients are finite.
num_bad_steps (Variable): A variable accumulates bad steps in which
some gradients are infinite.
incr_every_n_steps (Variable): A variable represents increasing loss
scaling every n consecutive steps with
finite gradients.
decr_every_n_nan_or_inf (Variable): A variable represents decreasing
loss scaling every n accumulated
steps with nan or inf gradients.
incr_ratio(float): The multiplier to use when increasing the loss
scaling.
decr_ratio(float): The less-than-one-multiplier to use when decreasing
loss scaling.
"""
zero_steps = layers.fill_constant(shape=[1], dtype='int32', value=0)
with layers.Switch() as switch:
with switch.case(is_overall_finite):
should_incr_loss_scaling = layers.less_than(incr_every_n_steps,
num_good_steps + 1)
with layers.Switch() as switch1:
with switch1.case(should_incr_loss_scaling):
new_loss_scaling = prev_loss_scaling * incr_ratio
loss_scaling_is_finite = layers.isfinite(new_loss_scaling)
with layers.Switch() as switch2:
with switch2.case(loss_scaling_is_finite):
layers.assign(new_loss_scaling, prev_loss_scaling)
with switch2.default():
pass
layers.assign(zero_steps, num_good_steps)
layers.assign(zero_steps, num_bad_steps)
with switch1.default():
layers.increment(num_good_steps)
layers.assign(zero_steps, num_bad_steps)
with switch.default():
should_decr_loss_scaling = layers.less_than(decr_every_n_nan_or_inf,
num_bad_steps + 1)
with layers.Switch() as switch3:
with switch3.case(should_decr_loss_scaling):
new_loss_scaling = prev_loss_scaling * decr_ratio
static_loss_scaling = \
layers.fill_constant(shape=[1],
dtype='float32',
value=1.0)
less_than_one = layers.less_than(new_loss_scaling,
static_loss_scaling)
with layers.Switch() as switch4:
with switch4.case(less_than_one):
layers.assign(static_loss_scaling,
prev_loss_scaling)
with switch4.default():
layers.assign(new_loss_scaling, prev_loss_scaling)
layers.assign(zero_steps, num_good_steps)
layers.assign(zero_steps, num_bad_steps)
with switch3.default():
layers.assign(zero_steps, num_good_steps)
layers.increment(num_bad_steps)
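# Plain-Python sketch (added) of the control flow built above with fluid Switch blocks. It is
# only meant to make the dynamic loss-scaling policy easy to reason about; the real
# implementation operates on fluid Variables inside the program.
import math as _math
def _toy_update_loss_scaling(is_finite, scaling, good, bad,
                             incr_every_n, decr_every_n, incr_ratio, decr_ratio):
    """Return the updated (scaling, good_steps, bad_steps) triple."""
    if is_finite:
        if good + 1 > incr_every_n:
            new_scaling = scaling * incr_ratio
            if _math.isfinite(new_scaling):   # keep the old value on overflow
                scaling = new_scaling
            good, bad = 0, 0                  # counters reset whenever the threshold is hit
        else:
            good, bad = good + 1, 0
    else:
        if bad + 1 > decr_every_n:
            scaling = max(scaling * decr_ratio, 1.0)  # never drop below 1.0
            good, bad = 0, 0
        else:
            good, bad = 0, bad + 1
    return scaling, good, bad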
|
the-stack_0_11939 | import requests
import re
import lxml.html
import MySQLdb
conn = MySQLdb.connect(db='Crawler', user='cloud', passwd='1111', charset='utf8mb4')
c=conn.cursor()
delete_sql = 'DELETE FROM re_info WHERE site_name = "인크루트"'
c.execute(delete_sql)
def crawling(page_count):
front_url="http://job.incruit.com/entry/searchjob.asp?ct=12&ty=1&cd=1&page="
for i in range(1, page_count+1):
url = front_url+str(i)
list_page=requests.get(url)
root=lxml.html.fromstring(list_page.content)
for everything in root.cssselect('tbody'):
for thing in everything.cssselect('tr'):
t = 0
companies = thing.cssselect('th > div > .check_list_r > .links > a')
if not companies:
company = ' '
elif companies:
company = companies[0].text.strip()
titles = thing.cssselect('td > .subjects > .accent > a')
if not titles:
title = ' '
title_url = ' '
if titles:
title = titles[0].text_content()
title_url = titles[0].get('href')
site_name = '인크루트'
field1 = thing.cssselect('td > .subjects > .details_txts.firstChild > em')
if not field1:
field1 = ' '
elif field1:
field1 = field1[0].text
if title_url != ' ':
#title_url = "https://"+title_url
detail_page = requests.get(title_url)
detail = lxml.html.fromstring(detail_page.content)
careers = detail.cssselect('.jobpost_sider_jbinfo > div:nth-child(3) > dl:nth-child(2) > dd > div > div > em')
if not careers:
career = ' '
elif careers:
career = careers[0].text
academics = detail.cssselect('.jobpost_sider_jbinfo > div:nth-child(3) > dl:nth-child(3) > dd > div > div > em')
if not academics:
academic = ' '
elif academics:
academic = academics[0].text
working = detail.cssselect('.jobpost_sider_jbinfo > div.jobpost_sider_jbinfo_inlay.jobpost_sider_jbinfo_inlay_last > dl:nth-child(2) > dd > div > div.tooltip_layer_warp > ul > li')
if not working:
workingcondition = ''
elif working:
workingcondition = working[0].text
areas = detail.cssselect('.jobpost_sider_jbinfo > div.jobpost_sider_jbinfo_inlay.jobpost_sider_jbinfo_inlay_last > dl:nth-child(3) > dd > div > div.inset_ely_lay')
if not areas:
area = ' '
if areas:
area = areas[0].text
area = area.split('> ')[0]
deadlines = thing.cssselect('.ddays')
if not deadlines:
deadline = ' '
if deadlines:
deadline = deadlines[0].text
insert_sql = 'INSERT INTO re_info(company, title, title_url, site_name, field1, career, academic, area, workingcondition, deadline) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'
insert_val = company, title, title_url, site_name, field1, career, academic, area, workingcondition, deadline
c.execute(insert_sql, insert_val)
conn.commit()
def main():
page_count = 6
crawling(page_count)
conn.close()
main()
|
the-stack_0_11941 | # -*- coding: utf-8 -*-
#
# Copyright 2020-2021 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tree level output for python
This module defines functions that generate python code to make local
predictions
"""
from bigml.tree_utils import INDENT, COMPOSED_FIELDS
from bigml.predict_utils.common import missing_branch, \
none_value, get_node, get_predicate, mintree_split
from bigml.generators.tree_common import value_to_print, map_data, \
missing_prefix_code, filter_nodes, split_condition_code
MISSING_OPERATOR = {
"=": "is",
"!=": "is not"
}
def missing_check_code(tree, offsets, fields, objective_id,
field, depth, input_map, cmv, metric):
"""Builds the code to predict when the field is missing
"""
code = "%sif (%s is None):\n" % \
(INDENT * depth,
map_data(fields[field]['slug'], input_map, True))
node = get_node(tree)
value = value_to_print(node[offsets["output"]],
fields[objective_id]['optype'])
code += "%sreturn {\"prediction\": %s," \
" \"%s\": %s}\n" % \
(INDENT * (depth + 1), value, metric, node[offsets["confidence"]])
cmv.append(fields[field]['slug'])
return code
def plug_in_body(tree, offsets, fields, objective_id, regression,
depth=1, cmv=None, input_map=False,
ids_path=None, subtree=True):
"""Translate the model into a set of "if" python statements.
`depth` controls the size of indentation. As soon as a value is missing
that node is returned without further evaluation.
"""
# label for the confidence measure and initialization
metric = "error" if regression else "confidence"
if cmv is None:
cmv = []
body = ""
term_analysis_fields = []
item_analysis_fields = []
node = get_node(tree)
children = [] if node[offsets["children#"]] == 0 else \
node[offsets["children"]]
children = filter_nodes(children, offsets, ids=ids_path,
subtree=subtree)
if children:
# field used in the split
field = mintree_split(children)
has_missing_branch = (missing_branch(children) or
none_value(children))
# the missing is singled out as a special case only when there's
# no missing branch in the children list
one_branch = not has_missing_branch or \
fields[field]['optype'] in COMPOSED_FIELDS
if (one_branch and
not fields[field]['slug'] in cmv):
body += missing_check_code(tree, offsets, fields, objective_id,
field, depth, input_map, cmv, metric)
for child in children:
[_, field, value, _, _] = get_predicate(child)
pre_condition = ""
# code when missing_splits has been used
if has_missing_branch and value is not None:
pre_condition = missing_prefix_code(child, fields, field,
input_map, cmv)
# complete split condition code
body += split_condition_code( \
child, fields, depth, input_map, pre_condition,
term_analysis_fields, item_analysis_fields, cmv)
# value to be determined in next node
next_level = plug_in_body(child, offsets, fields, objective_id,
regression, depth + 1, cmv=cmv[:],
input_map=input_map, ids_path=ids_path,
subtree=subtree)
body += next_level[0]
term_analysis_fields.extend(next_level[1])
item_analysis_fields.extend(next_level[2])
else:
value = value_to_print(node[offsets["output"]],
fields[objective_id]['optype'])
body = "%sreturn {\"prediction\":%s, \"%s\":%s}\n" % ( \
INDENT * depth, value, metric, node[offsets["confidence"]])
return body, term_analysis_fields, item_analysis_fields
|
the-stack_0_11942 | from datetime import datetime
from typing import Union
from dateutil import tz
def format_iso_string(iso_string: str) -> str:
utc_time = datetime.fromisoformat(iso_string)
local_time = utc_time.astimezone(tz.tzlocal())
return local_time.strftime("%Y-%m-%d %H:%M:%S")
def auto_unit(number: Union[int, float]) -> str:
"""
Returns a human-readable formatted size
credit: glances
"""
if number is None:
return "-"
units = [
(1208925819614629174706176, "Y"),
(1180591620717411303424, "Z"),
(1152921504606846976, "E"),
(1125899906842624, "P"),
(1099511627776, "T"),
(1073741824, "G"),
(1048576, "M"),
(1024, "K"),
]
for unit, suffix in units:
value = float(number) / unit
if value > 1:
precision = 0
if value < 10:
precision = 2
elif value < 100:
precision = 1
if suffix == "K":
precision = 0
return "{:.{decimal}f}{suffix}".format(value, decimal=precision, suffix=suffix)
return "{!s}".format(number)
|
the-stack_0_11944 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google import auth # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v4.resources.types import campaign_label
from google.ads.googleads.v4.services.types import campaign_label_service
from .base import CampaignLabelServiceTransport, DEFAULT_CLIENT_INFO
class CampaignLabelServiceGrpcTransport(CampaignLabelServiceTransport):
"""gRPC backend transport for CampaignLabelService.
Service to manage labels on campaigns.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]): The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
DeprecationWarning,
)
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host, credentials=credentials, client_info=client_info,
)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
            host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def get_campaign_label(
self,
) -> Callable[
[campaign_label_service.GetCampaignLabelRequest],
campaign_label.CampaignLabel,
]:
r"""Return a callable for the get campaign label method over gRPC.
Returns the requested campaign-label relationship in
full detail.
Returns:
Callable[[~.GetCampaignLabelRequest],
~.CampaignLabel]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_campaign_label" not in self._stubs:
self._stubs["get_campaign_label"] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v4.services.CampaignLabelService/GetCampaignLabel",
request_serializer=campaign_label_service.GetCampaignLabelRequest.serialize,
response_deserializer=campaign_label.CampaignLabel.deserialize,
)
return self._stubs["get_campaign_label"]
@property
def mutate_campaign_labels(
self,
) -> Callable[
[campaign_label_service.MutateCampaignLabelsRequest],
campaign_label_service.MutateCampaignLabelsResponse,
]:
r"""Return a callable for the mutate campaign labels method over gRPC.
Creates and removes campaign-label relationships.
Operation statuses are returned.
Returns:
Callable[[~.MutateCampaignLabelsRequest],
~.MutateCampaignLabelsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "mutate_campaign_labels" not in self._stubs:
self._stubs[
"mutate_campaign_labels"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v4.services.CampaignLabelService/MutateCampaignLabels",
request_serializer=campaign_label_service.MutateCampaignLabelsRequest.serialize,
response_deserializer=campaign_label_service.MutateCampaignLabelsResponse.deserialize,
)
return self._stubs["mutate_campaign_labels"]
__all__ = ("CampaignLabelServiceGrpcTransport",)
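# Usage sketch (added, not part of the generated code): the transport is normally built by
# the primary client, but it can also be constructed directly, either from application
# default credentials or from a pre-made channel (my_credentials is a placeholder):
#
#   transport = CampaignLabelServiceGrpcTransport()            # ADC, default endpoint
#
#   channel = CampaignLabelServiceGrpcTransport.create_channel(
#       "googleads.googleapis.com", credentials=my_credentials)
#   transport = CampaignLabelServiceGrpcTransport(channel=channel)
#
# transport.get_campaign_label and transport.mutate_campaign_labels then return the
# callables that issue the underlying RPCs.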
|
the-stack_0_11946 | from restfly.iterator import APIIterator
from box import BoxList
from copy import copy
class OTIterator(APIIterator):
_path = None
limit = 500
offset = 0
def __init__(self, api, **kwargs):
self._path = kwargs.pop('path')
self._payload = kwargs.pop('payload', {})
self.limit = kwargs.get('limit', self.limit)
self.offset = kwargs.get('offset', self.offset)
super(OTIterator, self).__init__(api, **kwargs)
def _get_page(self):
'''
Retrieves the next page of data
'''
# if the size of the page is less than the limit, then we will simply
# bail and let iterator stop.
if self.num_pages > 0 and len(self.page) < self.limit:
raise StopIteration()
# make a copy of the payload (so not to pollute it) and then set the
# offset and limits.
p = copy(self._payload)
p['offset'] = self.offset
p['limit'] = self.limit
# make the call and update the offset.
self.page = self._api.post(self._path, json=p, box=BoxList)
        self.offset += self.limit
|
the-stack_0_11948 | #
# Metrix++, Copyright 2009-2013, Metrix++ Project
# Link: http://metrixplusplus.sourceforge.net
#
# This file is a part of Metrix++ Tool.
#
# Metrix++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# Metrix++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Metrix++. If not, see <http://www.gnu.org/licenses/>.
#
import logging
import re
import mpp.api
import mpp.utils
import mpp.cout
class Plugin(mpp.api.Plugin, mpp.api.IConfigurable, mpp.api.IRunable):
MODE_NEW = 0x01
MODE_TREND = 0x03
MODE_TOUCHED = 0x07
MODE_ALL = 0x15
def declare_configuration(self, parser):
self.parser = parser
parser.add_option("--hotspots", "--hs", default=None, help="If not set (none), all exceeded limits are printed."
" If set, exceeded limits are sorted (the worst is the first) and only first HOTSPOTS limits are printed."
" [default: %default]", type=int)
parser.add_option("--disable-suppressions", "--ds", action="store_true", default=False,
help = "If not set (none), all suppressions are ignored"
" and associated warnings are printed. [default: %default]")
parser.add_option("--warn-mode", "--wm", default='all', choices=['new', 'trend', 'touched', 'all'],
help="Defines the warnings mode. "
"'all' - all warnings active, "
"'new' - warnings for new regions/files only, "
"'trend' - warnings for new regions/files and for bad trend of modified regions/files, "
"'touched' - warnings for new and modified regions/files "
"[default: %default]")
parser.add_option("--min-limit", "--min", action="multiopt",
help="A threshold per 'namespace:field' metric in order to select regions, "
"which have got metric value less than the specified limit. "
"This option can be specified multiple times, if it is necessary to apply several limits. "
"Should be in the format: <namespace>:<field>:<limit-value>, for example: "
"'std.code.lines:comments:1'.")
parser.add_option("--max-limit", "--max", action="multiopt",
help="A threshold per 'namespace:field' metric in order to select regions, "
"which have got metric value more than the specified limit. "
"This option can be specified multiple times, if it is necessary to apply several limits. "
"Should be in the format: <namespace>:<field>:<limit-value>, for example: "
"'std.code.complexity:cyclomatic:7'.")
def configure(self, options):
self.hotspots = options.__dict__['hotspots']
self.no_suppress = options.__dict__['disable_suppressions']
if options.__dict__['warn_mode'] == 'new':
self.mode = self.MODE_NEW
elif options.__dict__['warn_mode'] == 'trend':
self.mode = self.MODE_TREND
elif options.__dict__['warn_mode'] == 'touched':
self.mode = self.MODE_TOUCHED
elif options.__dict__['warn_mode'] == 'all':
self.mode = self.MODE_ALL
if self.mode != self.MODE_ALL and options.__dict__['db_file_prev'] == None:
self.parser.error("option --warn-mode: The mode '" + options.__dict__['warn_mode'] + "' requires '--db-file-prev' option set")
class Limit(object):
def __init__(self, limit_type, limit, namespace, field, db_filter):
self.type = limit_type
self.limit = limit
self.namespace = namespace
self.field = field
self.filter = db_filter
def __repr__(self):
return "namespace '" + self.namespace + "', filter '" + str(self.filter) + "'"
self.limits = []
pattern = re.compile(r'''([^:]+)[:]([^:]+)[:]([-+]?[0-9]+(?:[.][0-9]+)?)''')
if options.__dict__['max_limit'] != None:
for each in options.__dict__['max_limit']:
match = re.match(pattern, each)
if match == None:
self.parser.error("option --max-limit: Invalid format: " + each)
limit = Limit("max", float(match.group(3)), match.group(1), match.group(2), (match.group(2), '>', float(match.group(3))))
self.limits.append(limit)
if options.__dict__['min_limit'] != None:
for each in options.__dict__['min_limit']:
match = re.match(pattern, each)
if match == None:
self.parser.error("option --min-limit: Invalid format: " + each)
limit = Limit("min", float(match.group(3)), match.group(1), match.group(2), (match.group(2), '<', float(match.group(3))))
self.limits.append(limit)
def initialize(self):
super(Plugin, self).initialize()
db_loader = self.get_plugin('mpp.dbf').get_loader()
self._verify_namespaces(db_loader.iterate_namespace_names())
for each in db_loader.iterate_namespace_names():
self._verify_fields(each, db_loader.get_namespace(each).iterate_field_names())
def _verify_namespaces(self, valid_namespaces):
valid = []
for each in valid_namespaces:
valid.append(each)
for each in self.limits:
if each.namespace not in valid:
self.parser.error("option --{0}-limit: metric '{1}:{2}' is not available in the database file.".
format(each.type, each.namespace, each.field))
def _verify_fields(self, namespace, valid_fields):
valid = []
for each in valid_fields:
valid.append(each)
for each in self.limits:
if each.namespace == namespace:
if each.field not in valid:
self.parser.error("option --{0}-limit: metric '{1}:{2}' is not available in the database file.".
format(each.type, each.namespace, each.field))
def iterate_limits(self):
for each in self.limits:
yield each
def is_mode_matched(self, limit, value, diff, is_modified):
if is_modified == None:
# means new region, True in all modes
return True
if self.mode == self.MODE_ALL:
return True
if self.mode == self.MODE_TOUCHED and is_modified == True:
return True
if self.mode == self.MODE_TREND and is_modified == True:
if limit < value and diff > 0:
return True
if limit > value and diff < 0:
return True
return False
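    # Example (added note): with --warn-mode=trend and a max limit of 7, a modified region
    # whose metric moved from 8 to 9 (diff=+1) is reported, while one that moved from 9 to 8
    # (diff=-1) is not; new regions (is_modified is None) are reported in every mode.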
def run(self, args):
return main(self, args)
def main(plugin, args):
exit_code = 0
loader_prev = plugin.get_plugin('mpp.dbf').get_loader_prev()
loader = plugin.get_plugin('mpp.dbf').get_loader()
paths = None
if len(args) == 0:
paths = [""]
else:
paths = args
# Try to optimise iterative change scans
modified_file_ids = None
if plugin.mode != plugin.MODE_ALL:
modified_file_ids = get_list_of_modified_files(loader, loader_prev)
for path in paths:
path = mpp.utils.preprocess_path(path)
for limit in plugin.iterate_limits():
logging.info("Applying limit: " + str(limit))
filters = [limit.filter]
if modified_file_ids != None:
filters.append(('file_id', 'IN', modified_file_ids))
sort_by = None
limit_by = None
limit_warnings = None
if plugin.hotspots != None:
sort_by = limit.field
if limit.type == "max":
sort_by = "-" + sort_by
if plugin.mode == plugin.MODE_ALL:
# if it is not ALL mode, the tool counts number of printed warnings below
limit_by = plugin.hotspots
limit_warnings = plugin.hotspots
selected_data = loader.load_selected_data(limit.namespace,
fields = [limit.field],
path=path,
filters = filters,
sort_by=sort_by,
limit_by=limit_by)
if selected_data == None:
mpp.utils.report_bad_path(path)
exit_code += 1
continue
for select_data in selected_data:
if limit_warnings != None and limit_warnings <= 0:
break
is_modified = None
diff = None
file_data = loader.load_file_data(select_data.get_path())
file_data_prev = loader_prev.load_file_data(select_data.get_path())
if file_data_prev != None:
if file_data.get_checksum() == file_data_prev.get_checksum():
diff = 0
is_modified = False
else:
matcher = mpp.utils.FileRegionsMatcher(file_data, file_data_prev)
prev_id = matcher.get_prev_id(select_data.get_region().get_id())
if matcher.is_matched(select_data.get_region().get_id()):
if matcher.is_modified(select_data.get_region().get_id()):
is_modified = True
else:
is_modified = False
diff = mpp.api.DiffData(select_data,
file_data_prev.get_region(prev_id)).get_data(limit.namespace, limit.field)
if (plugin.is_mode_matched(limit.limit,
select_data.get_data(limit.namespace, limit.field),
diff,
is_modified) == False):
continue
is_sup = is_metric_suppressed(limit.namespace, limit.field, loader, select_data)
if is_sup == True and plugin.no_suppress == False:
continue
exit_code += 1
region_cursor = 0
region_name = None
if select_data.get_region() != None:
region_cursor = select_data.get_region().cursor
region_name = select_data.get_region().name
report_limit_exceeded(select_data.get_path(),
region_cursor,
limit.namespace,
limit.field,
region_name,
select_data.get_data(limit.namespace, limit.field),
diff,
limit.limit,
is_modified,
is_sup)
if limit_warnings != None:
limit_warnings -= 1
return exit_code
def get_list_of_modified_files(loader, loader_prev):
logging.info("Identifying changed files...")
old_files_map = {}
for each in loader_prev.iterate_file_data():
old_files_map[each.get_path()] = each.get_checksum()
if len(old_files_map) == 0:
return None
modified_file_ids = []
for each in loader.iterate_file_data():
if len(modified_file_ids) > 1000: # If more than 1000 files changed, skip optimisation
return None
if (each.get_path() not in old_files_map.keys()) or old_files_map[each.get_path()] != each.get_checksum():
modified_file_ids.append(str(each.get_id()))
old_files_map = None
if len(modified_file_ids) != 0:
modified_file_ids = " , ".join(modified_file_ids)
modified_file_ids = "(" + modified_file_ids + ")"
return modified_file_ids
return None
def is_metric_suppressed(metric_namespace, metric_field, loader, select_data):
data = loader.load_file_data(select_data.get_path())
if select_data.get_region() != None:
data = data.get_region(select_data.get_region().get_id())
sup_data = data.get_data('std.suppress', 'list')
else:
sup_data = data.get_data('std.suppress.file', 'list')
if sup_data != None and sup_data.find('[' + metric_namespace + ':' + metric_field + ']') != -1:
return True
return False
def report_limit_exceeded(path, cursor, namespace, field, region_name,
stat_level, trend_value, stat_limit,
is_modified, is_suppressed):
if region_name != None:
message = "Metric '" + namespace + ":" + field + "' for region '" + region_name + "' exceeds the limit."
else:
message = "Metric '" + namespace + ":" + field + "' exceeds the limit."
details = [("Metric name", namespace + ":" + field),
("Region name", region_name),
("Metric value", stat_level),
("Modified", is_modified),
("Change trend", '{0:{1}}'.format(trend_value, '+' if trend_value else '')),
("Limit", stat_limit),
("Suppressed", is_suppressed)]
mpp.cout.notify(path, cursor, mpp.cout.SEVERITY_WARNING, message, details)
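# Example invocation (added note; the exact executable name depends on how Metrix++ is
# installed and launched):
#   metrix++ limit --max-limit=std.code.complexity:cyclomatic:7 --warn-mode=trend
# reports regions whose cyclomatic complexity exceeds 7, limited to new regions and to
# modified regions whose value moved further past the limit (trend mode additionally needs
# the previous database via --db-file-prev).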
|
the-stack_0_11949 | # -*- coding: utf-8 -*-
import datapackage
import os
# filenames
FD_DIR = "../data/2020-02-21_fd/"
RAW_DIR = "../data/2020-02-21/"
files = os.listdir(FD_DIR)
fd_files = [os.path.join(FD_DIR, f) for f in files]
raw_files = []
for file in files:
base = os.path.splitext(file)[0]
path = os.path.join(RAW_DIR, base + ".txt")
raw_files.append(path)
# compute number of spikes
for raw, fd in zip(raw_files, fd_files):
with open(raw) as f:
lines = f.readlines()
# +1 might not be needed on Unix
active_channels_raw = 0
for i in range(len(lines) - 1):
if lines[i] == "[ms] \t[µV] \t \n" and \
lines[i+1] != "\n":
active_channels_raw += 1
spikes_raw = (len(lines) - 60 * 4 - 2 + 1 + active_channels_raw) / 76
package = datapackage.Package(fd)
spikes_fd = len(package.get_resource("spikes").read())
active_channels_fd = len(package.get_resource("spike-trains").read())
assert spikes_raw == spikes_fd, "Difference in number of spikes in file " \
+ raw
assert active_channels_raw == active_channels_fd, "Difference in number of\
active channels in file " + raw
|
the-stack_0_11950 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# TopicDB documentation build configuration file, created by
# sphinx-quickstart on Sat Dec 24 10:06:55 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation ROOT, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of STRING:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'TopicDB'
copyright = '2016, Brett Alistair Kromkamp'
author = 'Brett Alistair Kromkamp'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'TopicDBdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'TopicDB.tex', 'TopicDB Documentation',
'Brett Alistair Kromkamp', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'topicdb', 'TopicDB Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'TopicDB', 'TopicDB Documentation',
author, 'TopicDB', 'One line description of project.',
'Miscellaneous'),
]
|
the-stack_0_11952 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import random  # used by random.seed() when --enable_ce is set
import numpy as np
import time
import os
import math
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.framework as framework
from paddle.fluid.executor import Executor
import data
from args import *
import lm_model
import logging
logging.basicConfig()
import pickle
def prepare_batch_input(batch, args):
x = batch['token_ids']
x_r = batch['token_ids_reverse']
y = batch['next_token_id']
y_r = batch['next_token_id_reverse']
inst = []
for i in range(len(x)):
if args.use_custom_samples:
custom_samples_array = np.zeros(
(args.num_steps, args.n_negative_samples_batch + 1),
dtype='int64')
custom_samples_array_r = np.zeros(
(args.num_steps, args.n_negative_samples_batch + 1),
dtype='int64')
custom_probabilities_array = np.zeros(
(args.num_steps, args.n_negative_samples_batch + 1),
dtype='float32')
for j in range(args.num_steps):
for k in range(args.n_negative_samples_batch + 1):
custom_samples_array[j][k] = k
custom_samples_array_r[j][k] = k
custom_probabilities_array[j][k] = 1.0
custom_samples_array[j][0] = y[i][j]
custom_samples_array_r[j][0] = y_r[i][j]
inst.append([
x[i], y[i], x_r[i], y_r[i], custom_samples_array,
custom_samples_array_r, custom_probabilities_array
])
else:
inst.append([x[i], y[i], x_r[i], y_r[i]])
return inst
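# Shape note (added): with use_custom_samples enabled, each instance carries one
# (num_steps, n_negative_samples_batch + 1) sample matrix per direction whose row j is
# [next_token_id, 1, 2, ..., n_negative_samples_batch] with all probabilities set to 1.0,
# i.e. the true next token always occupies column 0 of the candidate list.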
def batch_reader(batch_list, args):
res = []
for batch in batch_list:
res.append(prepare_batch_input(batch, args))
return res
def read_multiple(reader, batch_size, count, clip_last=True):
"""
Stack data from reader for multi-devices.
"""
def __impl__():
# one time read batch_size * count data for rnn
for data in reader():
inst_num_per_part = batch_size
split_data = {}
len_check = True
for k in data.keys():
if data[k] is not None:
if len(data[k]) != batch_size * count:
len_check = False
print("data check error!!, data=" + data[k] + ", k=" + k)
break
if len_check:
res = []
for i in range(count):
split_data = {}
for k in data.keys():
if data[k] is not None:
split_data[k] = data[k][inst_num_per_part * i:inst_num_per_part * (i + 1)]
res.append(split_data)
yield res
return __impl__
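# Example (added note): with batch_size=4 and count=2 (two devices), every dict yielded by
# the wrapped reader must hold 4 * 2 = 8 instances per key; __impl__ re-yields it as a list
# of two dicts holding 4 instances each, one per device, and skips (after printing an error)
# any batch that fails the length check.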
def LodTensor_Array(lod_tensor):
lod = lod_tensor.lod()
array = np.array(lod_tensor)
new_array = []
for i in range(len(lod[0]) - 1):
new_array.append(array[lod[0][i]:lod[0][i + 1]])
return new_array
def get_current_model_para(train_prog, train_exe):
param_list = train_prog.block(0).all_parameters()
param_name_list = [p.name for p in param_list]
vals = {}
for p_name in param_name_list:
p_array = np.array(fluid.global_scope().find_var(p_name).get_tensor())
vals[p_name] = p_array
return vals
def save_para_npz(train_prog, train_exe):
logger.info("begin to save model to model_base")
param_list = train_prog.block(0).all_parameters()
param_name_list = [p.name for p in param_list]
vals = {}
for p_name in param_name_list:
p_array = np.array(fluid.global_scope().find_var(p_name).get_tensor())
vals[p_name] = p_array
emb = vals["embedding_para"]
logger.info("begin to save model to model_base")
np.savez("mode_base", **vals)
def prepare_input(batch, epoch_id=0, with_lr=True):
x, y = batch
inst = []
for i in range(len(x)):
inst.append([x[i], y[i]])
return inst
def eval(vocab, infer_progs, dev_count, logger, args):
infer_prog, infer_startup_prog, infer_model = infer_progs
feed_order = infer_model.feed_order
loss = infer_model.loss
# prepare device
place = core.CUDAPlace(0) if args.use_gpu else core.CPUPlace()
exe = Executor(place)
if not args.use_gpu:
place = fluid.CPUPlace()
import multiprocessing
dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
else:
place = fluid.CUDAPlace(0)
dev_count = fluid.core.get_cuda_device_count()
total_loss = 0.0
total_cnt = 0
n_batch_cnt = 0
n_batch_loss = 0.0
val_feed_list = [
infer_prog.global_block().var(var_name) for var_name in feed_order
]
val_feeder = fluid.DataFeeder(val_feed_list, place)
dev_data = data.BidirectionalLMDataset(
args.test_path, vocab, test=True, shuffle_on_load=False)
dev_data_iter = lambda: dev_data.iter_batches(args.batch_size * dev_count, args.num_steps)
dev_reader = read_multiple(dev_data_iter, args.batch_size, dev_count)
last_hidden_values = np.zeros(
(dev_count, args.num_layers * 2 * args.batch_size * args.embed_size),
dtype='float32')
last_cell_values = np.zeros(
(dev_count, args.num_layers * 2 * args.batch_size * args.hidden_size),
dtype='float32')
for batch_id, batch_list in enumerate(dev_reader(), 1):
feed_data = batch_reader(batch_list, args)
feed = list(val_feeder.feed_parallel(feed_data, dev_count))
for i in range(dev_count):
init_hidden_tensor = fluid.core.LoDTensor()
if args.use_gpu:
placex = fluid.CUDAPlace(i)
else:
placex = fluid.CPUPlace()
init_hidden_tensor.set(last_hidden_values[i], placex)
init_cell_tensor = fluid.core.LoDTensor()
init_cell_tensor.set(last_cell_values[i], placex)
feed[i]['init_hiddens'] = init_hidden_tensor
feed[i]['init_cells'] = init_cell_tensor
last_hidden_values = []
last_cell_values = []
for i in range(dev_count):
val_fetch_outs = exe.run(
program=infer_prog,
feed=feed[i],
fetch_list=[
infer_model.loss.name, infer_model.last_hidden.name,
infer_model.last_cell.name
],
return_numpy=False)
last_hidden_values.append(np.array(val_fetch_outs[1]))
last_cell_values.append(np.array(val_fetch_outs[2]))
total_loss += np.array(val_fetch_outs[0]).sum()
n_batch_cnt += len(np.array(val_fetch_outs[0]))
total_cnt += len(np.array(val_fetch_outs[0]))
n_batch_loss += np.array(val_fetch_outs[0]).sum()
last_hidden_values = np.array(last_hidden_values).reshape((
dev_count, args.num_layers * 2 * args.batch_size * args.embed_size))
last_cell_values = np.array(last_cell_values).reshape(
(dev_count,
args.num_layers * 2 * args.batch_size * args.hidden_size))
log_every_n_batch = args.log_interval
if log_every_n_batch > 0 and batch_id % log_every_n_batch == 0:
logger.info('Average dev loss from batch {} to {} is {}'.format(
batch_id - log_every_n_batch + 1, batch_id, "%.10f" % (
n_batch_loss / n_batch_cnt)))
n_batch_loss = 0.0
n_batch_cnt = 0
batch_offset = 0
ppl = np.exp(total_loss / total_cnt)
return ppl
def train():
args = parse_args()
if args.random_seed == 0:
args.random_seed = None
print("random seed is None")
if args.enable_ce:
random.seed(args.random_seed)
np.random.seed(args.random_seed)
logger = logging.getLogger("lm")
logger.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(formatter)
logger.info('Running with args : {}'.format(args))
logger.info('Running paddle : {}'.format(paddle.version.commit))
hidden_size = args.hidden_size
batch_size = args.batch_size
data_path = args.data_path
logger.info("begin to load vocab")
vocab = data.Vocabulary(args.vocab_path, validate_file=True)
vocab_size = vocab.size
logger.info("finished load vocab")
logger.info('build the model...')
# build model
train_prog = fluid.Program()
train_startup_prog = fluid.Program()
if args.enable_ce:
train_prog.random_seed = args.random_seed
train_startup_prog.random_seed = args.random_seed
# build infer model
infer_prog = fluid.Program()
infer_startup_prog = fluid.Program()
with fluid.program_guard(infer_prog, infer_startup_prog):
with fluid.unique_name.guard():
# Infer process
infer_model = lm_model.LanguageModel(
args, vocab_size, test_mode=True)
infer_model.build()
infer_progs = infer_prog, infer_startup_prog, infer_model
with fluid.program_guard(train_prog, train_startup_prog):
with fluid.unique_name.guard():
# Training process
train_model = lm_model.LanguageModel(
args, vocab_size, test_mode=False)
train_model.build()
fluid.clip.set_gradient_clip(
clip=fluid.clip.GradientClipByGlobalNorm(
clip_norm=args.max_grad_norm))
# build optimizer
if args.optim == 'adagrad':
optimizer = fluid.optimizer.Adagrad(
learning_rate=args.learning_rate,
epsilon=0.0,
initial_accumulator_value=1.0)
elif args.optim == 'sgd':
optimizer = fluid.optimizer.SGD(
learning_rate=args.learning_rate)
elif args.optim == 'adam':
optimizer = fluid.optimizer.Adam(
learning_rate=args.learning_rate)
elif args.optim == 'rprop':
optimizer = fluid.optimizer.RMSPropOptimizer(
learning_rate=args.learning_rate)
else:
logger.error('Unsupported optimizer: {}'.format(args.optim))
exit(-1)
optimizer.minimize(train_model.loss * args.num_steps)
# initialize parameters
place = core.CUDAPlace(0) if args.use_gpu else core.CPUPlace()
exe = Executor(place)
train_progs = train_prog, train_startup_prog, train_model
if args.local:
logger.info("local start_up:")
train_loop(args, logger, vocab, train_progs, infer_progs, optimizer)
else:
if args.update_method == "nccl2":
trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0"))
if args.test_nccl:
worker_endpoints_env = os.getenv("PADDLE_WORK_ENDPOINTS")
worker_endpoints = worker_endpoints_env.split(',')
trainers_num = len(worker_endpoints)
current_endpoint = worker_endpoints[trainer_id]
else:
port = os.getenv("PADDLE_PORT")
worker_ips = os.getenv("PADDLE_TRAINERS")
worker_endpoints = []
for ip in worker_ips.split(","):
worker_endpoints.append(':'.join([ip, port]))
worker_endpoints_env = ','.join(worker_endpoints)
trainers_num = len(worker_endpoints)
current_endpoint = os.getenv("POD_IP") + ":" + port
if trainer_id == 0:
logger.info("train_id == 0, sleep 60s")
time.sleep(60)
logger.info("trainers_num:{}".format(trainers_num))
logger.info("worker_endpoints:{}".format(worker_endpoints))
logger.info("current_endpoint:{}".format(current_endpoint))
config = fluid.DistributeTranspilerConfig()
config.mode = "nccl2"
t = fluid.DistributeTranspiler(config=config)
t.transpile(
trainer_id,
trainers=worker_endpoints_env,
current_endpoint=current_endpoint,
program=train_prog,
startup_program=train_startup_prog)
train_progs = train_prog, train_startup_prog, train_model
train_loop(args, logger, vocab, train_progs, infer_progs, optimizer,
trainers_num, trainer_id, worker_endpoints)
else:
port = os.getenv("PADDLE_PORT", "6174")
pserver_ips = os.getenv("PADDLE_PSERVERS")
eplist = []
for ip in pserver_ips.split(","):
eplist.append(':'.join([ip, port]))
pserver_endpoints = ",".join(eplist)
trainers = int(os.getenv("PADDLE_TRAINERS_NUM", "0"))
current_endpoint = os.getenv("POD_IP") + ":" + port
            trainer_id = int(os.getenv("PADDLE_TRAINER_ID"))
            # training_role is needed by the PSERVER/TRAINER branches below; "TRAINING_ROLE"
            # is the conventional Paddle env var name (assumed, it was missing here).
            training_role = os.getenv("TRAINING_ROLE", "TRAINER")
logger.info("pserver_endpoints:{}".format(pserver_endpoints))
logger.info("current_endpoint:{}".format(current_endpoint))
logger.info("trainer_id:{}".format(trainer_id))
logger.info("pserver_ips:{}".format(pserver_ips))
logger.info("port:{}".format(port))
t = fluid.DistributeTranspiler()
t.transpile(
trainer_id,
pservers=pserver_endpoints,
trainers=trainers,
program=train_prog,
                startup_program=train_startup_prog)
if training_role == "PSERVER":
logger.info("distributed: pserver started")
current_endpoint = os.getenv("POD_IP") + ":" + os.getenv(
"PADDLE_PORT")
if not current_endpoint:
logger.critical("need env SERVER_ENDPOINT")
exit(1)
pserver_prog = t.get_pserver_program(current_endpoint)
pserver_startup = t.get_startup_program(current_endpoint,
pserver_prog)
exe.run(pserver_startup)
exe.run(pserver_prog)
elif training_role == "TRAINER":
logger.info("distributed: trainer started")
trainer_prog = t.get_trainer_program()
train_loop(args, logger, vocab, train_progs, infer_progs,
optimizer)
else:
logger.critical(
"environment var TRAINER_ROLE should be TRAINER os PSERVER")
exit(1)
def train_loop(args,
logger,
vocab,
train_progs,
infer_progs,
optimizer,
nccl2_num_trainers=1,
nccl2_trainer_id=0,
worker_endpoints=None):
train_prog, train_startup_prog, train_model = train_progs
infer_prog, infer_startup_prog, infer_model = infer_progs
# prepare device
place = core.CUDAPlace(0) if args.use_gpu else core.CPUPlace()
exe = Executor(place)
if not args.use_gpu:
place = fluid.CPUPlace()
import multiprocessing
dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
else:
place = fluid.CUDAPlace(0)
dev_count = fluid.core.get_cuda_device_count()
if args.load_dir:
logger.info('load pretrained checkpoints from {}'.format(args.load_dir))
fluid.io.load_persistables(exe, args.load_dir, main_program=train_prog)
elif args.load_pretraining_params:
logger.info('load pretrained params from {}'.format(args.load_pretraining_params))
exe.run(train_startup_prog)
init_pretraining_params(exe, args.load_pretraining_params, main_program=train_prog)
else:
exe.run(train_startup_prog)
# prepare data
feed_list = [
train_prog.global_block().var(var_name)
for var_name in train_model.feed_order
]
feeder = fluid.DataFeeder(feed_list, place)
logger.info('Training the model...')
exe_strategy = fluid.parallel_executor.ExecutionStrategy()
parallel_executor = fluid.ParallelExecutor(
loss_name=train_model.loss.name,
main_program=train_prog,
use_cuda=bool(args.use_gpu),
exec_strategy=exe_strategy,
num_trainers=nccl2_num_trainers,
trainer_id=nccl2_trainer_id)
logger.info("begin to load data")
train_data = data.BidirectionalLMDataset(
args.train_path,
vocab,
test=(not args.shuffle),
shuffle_on_load=args.shuffle)
logger.info("finished load vocab")
# get train epoch size
log_interval = args.log_interval
total_time = 0.0
batch_size = args.batch_size
hidden_size = args.hidden_size
custom_samples_array = np.zeros(
(batch_size, args.num_steps, args.n_negative_samples_batch + 1),
dtype='int64')
custom_probabilities_array = np.zeros(
(batch_size, args.num_steps, args.n_negative_samples_batch + 1),
dtype='float32')
for i in range(batch_size):
for j in range(0, args.num_steps):
for k in range(0, args.n_negative_samples_batch + 1):
custom_samples_array[i][j][k] = k
custom_probabilities_array[i][j][k] = 1.0
start_time = time.time()
train_data_iter = lambda: train_data.iter_batches(batch_size * dev_count, args.num_steps)
train_reader = read_multiple(train_data_iter, batch_size, dev_count)
total_num = 0
n_batch_loss = 0.0
n_batch_cnt = 0
last_hidden_values = np.zeros(
(dev_count, args.num_layers * 2 * batch_size * args.embed_size),
dtype='float32')
last_cell_values = np.zeros(
(dev_count, args.num_layers * 2 * batch_size * hidden_size),
dtype='float32')
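    # The LSTM states are carried across batches (truncated-BPTT style): one flattened
    # hidden-state vector of length num_layers * 2 * batch_size * embed_size and one cell-state
    # vector of length num_layers * 2 * batch_size * hidden_size are kept per device, fed in as
    # init_hiddens / init_cells below, and overwritten with the fetched last_hidden / last_cell
    # values after every step.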
n_tokens_per_batch = args.batch_size * args.num_steps
n_batches_per_epoch = int(args.all_train_tokens / n_tokens_per_batch)
n_batches_total = args.max_epoch * n_batches_per_epoch
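    # Since all_train_tokens is known up front, an "epoch" is defined arithmetically:
    # n_batches_per_epoch = all_train_tokens / (batch_size * num_steps), and training stops
    # after max_epoch * n_batches_per_epoch batches.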
begin_time = time.time()
for batch_id, batch_list in enumerate(train_reader(), 1):
if batch_id > n_batches_total:
break
feed_data = batch_reader(batch_list, args)
feed = list(feeder.feed_parallel(feed_data, dev_count))
for i in range(dev_count):
init_hidden_tensor = fluid.core.LoDTensor()
if args.use_gpu:
placex = fluid.CUDAPlace(i)
else:
placex = fluid.CPUPlace()
init_hidden_tensor.set(last_hidden_values[i], placex)
init_cell_tensor = fluid.core.LoDTensor()
init_cell_tensor.set(last_cell_values[i], placex)
feed[i]['init_hiddens'] = init_hidden_tensor
feed[i]['init_cells'] = init_cell_tensor
fetch_outs = parallel_executor.run(
feed=feed,
fetch_list=[
train_model.loss.name, train_model.last_hidden.name,
train_model.last_cell.name
],
return_numpy=False)
cost_train = np.array(fetch_outs[0]).mean()
last_hidden_values = np.array(fetch_outs[1])
last_hidden_values = last_hidden_values.reshape(
(dev_count, args.num_layers * 2 * batch_size * args.embed_size))
last_cell_values = np.array(fetch_outs[2])
last_cell_values = last_cell_values.reshape((
dev_count, args.num_layers * 2 * batch_size * args.hidden_size))
total_num += args.batch_size * dev_count
n_batch_loss += np.array(fetch_outs[0]).sum()
n_batch_cnt += len(np.array(fetch_outs[0]))
if batch_id > 0 and batch_id % log_interval == 0:
smoothed_ppl = np.exp(n_batch_loss / n_batch_cnt)
ppl = np.exp(
np.array(fetch_outs[0]).sum() /
len(np.array(fetch_outs[0])))
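            # Both metrics are exp(mean loss): ppl is computed over the current batch only,
            # while smoothed_ppl averages the loss accumulated since the last log point
            # (n_batch_loss / n_batch_cnt, which are reset below after logging).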
used_time = time.time() - begin_time
speed = log_interval / used_time
logger.info(
"[train] step:{}, loss:{:.3f}, ppl:{:.3f}, smoothed_ppl:{:.3f}, speed:{:.3f}".
format(batch_id, n_batch_loss / n_batch_cnt, ppl,
smoothed_ppl, speed))
n_batch_loss = 0.0
n_batch_cnt = 0
begin_time = time.time()
if batch_id > 0 and batch_id % args.dev_interval == 0:
valid_ppl = eval(vocab, infer_progs, dev_count, logger, args)
logger.info("valid ppl {}".format(valid_ppl))
if batch_id > 0 and batch_id % args.save_interval == 0:
model_path = os.path.join(args.para_save_dir,
str(batch_id + epoch_id))
if not os.path.isdir(model_path):
os.makedirs(model_path)
fluid.io.save_persistables(
executor=exe, dirname=model_path, main_program=train_prog)
end_time = time.time()
total_time += end_time - start_time
epoch_id = int(batch_id/n_batches_per_epoch)
model_path = os.path.join(args.para_save_dir, str(epoch_id))
if not os.path.isdir(model_path):
os.makedirs(model_path)
fluid.io.save_persistables(
executor=exe, dirname=model_path, main_program=train_prog)
valid_ppl = eval(vocab, infer_progs, dev_count, logger, args)
logger.info("valid ppl {}".format(valid_ppl))
test_ppl = eval(vocab, infer_progs, dev_count, logger, args)
if __name__ == '__main__':
train()
|
the-stack_0_11953 | import copy
import gym
import os
import sys
import random
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from gym import wrappers
from datetime import datetime
from scipy.misc import imresize
##### testing only
MAX_EXPERIENCES = 10000
MIN_EXPERIENCES = 1000
#MAX_EXPERIENCES = 500000
#MIN_EXPERIENCES = 50000
TARGET_UPDATE_PERIOD = 10000
IM_SIZE = 80
K = 4 #env.action_space.n
def downsample_image(A):
B = A[31:195] # select the important parts of the image
B = B.mean(axis=2) # convert to grayscale
# downsample image
# changing aspect ratio doesn't significantly distort the image
# nearest neighbor interpolation produces a much sharper image
# than default bilinear
B = imresize(B, size=(IM_SIZE, IM_SIZE), interp='nearest')
return B
def update_state(state, obs):
obs_small = downsample_image(obs)
return np.append(state[1:], np.expand_dims(obs_small, 0), axis=0)
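# Illustrative shapes (assuming the standard Breakout-v0 observations of (210, 160, 3)):
# downsample_image crops rows 31:195, averages over the color channels, and resizes with
# nearest-neighbor interpolation to an (IM_SIZE, IM_SIZE) = (80, 80) grayscale frame.
# update_state then keeps a rolling stack of the 4 most recent frames: the oldest frame
# (state[0]) is dropped and the new (80, 80) observation is appended, so the network input
# keeps the shape (4, 80, 80).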
class DQN:
def __init__(self, K, conv_layer_sizes, hidden_layer_sizes, gamma, scope):
self.K = K
self.scope = scope
with tf.variable_scope(scope):
# inputs and targets
self.X = tf.placeholder(tf.float32, shape=(None, 4, IM_SIZE, IM_SIZE), name='X')
# tensorflow convolution needs the order to be:
# (num_samples, height, width, "color")
            # so we need to transpose later
self.G = tf.placeholder(tf.float32, shape=(None,), name='G')
self.actions = tf.placeholder(tf.int32, shape=(None,), name='actions')
# calculate output and cost
# convolutional layers
# these built-in layers are faster and don't require us to
# calculate the size of the output of the final conv layer!
Z = self.X / 255.0
Z = tf.transpose(Z, [0, 2, 3, 1])
for num_output_filters, filtersz, poolsz in conv_layer_sizes:
Z = tf.contrib.layers.conv2d(
Z,
num_output_filters,
filtersz,
poolsz,
activation_fn=tf.nn.relu
)
# fully connected layers
Z = tf.contrib.layers.flatten(Z)
for M in hidden_layer_sizes:
Z = tf.contrib.layers.fully_connected(Z, M)
# final output layer
self.predict_op = tf.contrib.layers.fully_connected(Z, K)
selected_action_values = tf.reduce_sum(
self.predict_op * tf.one_hot(self.actions, K),
reduction_indices=[1]
)
cost = tf.reduce_mean(tf.square(self.G - selected_action_values))
# self.train_op = tf.train.AdamOptimizer(1e-2).minimize(cost)
# self.train_op = tf.train.AdagradOptimizer(1e-2).minimize(cost)
# self.train_op = tf.train.RMSPropOptimizer(2.5e-4, decay=0.99, epsilon=1e-3).minimize(cost)
self.train_op = tf.train.RMSPropOptimizer(0.00025, 0.99, 0.0, 1e-6).minimize(cost)
# self.train_op = tf.train.MomentumOptimizer(1e-3, momentum=0.9).minimize(cost)
# self.train_op = tf.train.GradientDescentOptimizer(1e-4).minimize(cost)
self.cost = cost
def copy_from(self, other):
mine = [t for t in tf.trainable_variables() if t.name.startswith(self.scope)]
mine = sorted(mine, key=lambda v: v.name)
theirs = [t for t in tf.trainable_variables() if t.name.startswith(other.scope)]
theirs = sorted(theirs, key=lambda v: v.name)
ops = []
for p, q in zip(mine, theirs):
actual = self.session.run(q)
op = p.assign(actual)
ops.append(op)
self.session.run(ops)
def set_session(self, session):
self.session = session
def predict(self, states):
return self.session.run(self.predict_op, feed_dict={self.X: states})
def update(self, states, actions, targets):
c, _ = self.session.run(
[self.cost, self.train_op],
feed_dict={
self.X: states,
self.G: targets,
self.actions: actions
}
)
return c
def sample_action(self, x, eps):
if np.random.random() < eps:
return np.random.choice(self.K)
else:
return np.argmax(self.predict([x])[0])
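    # sample_action implements a plain epsilon-greedy policy: with probability eps a uniformly
    # random action index in [0, K) is returned, otherwise the argmax of the predicted Q-values.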
def learn(model, target_model, experience_replay_buffer, gamma, batch_size):
# Sample experiences
samples = random.sample(experience_replay_buffer, batch_size)
states, actions, rewards, next_states, dones = map(np.array, zip(*samples))
# Calculate targets
next_Qs = target_model.predict(next_states)
next_Q = np.amax(next_Qs, axis=1)
targets = rewards + np.invert(dones).astype(np.float32) * gamma * next_Q
# Update model
loss = model.update(states, actions, targets)
return loss
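# The targets above are the standard one-step Q-learning (Bellman) backup computed with the
# target network:
#   target = r                                    for terminal transitions (done == True)
#   target = r + gamma * max_a' Q_target(s', a')  otherwise,
# since np.invert(dones) zeroes out the bootstrap term for finished episodes.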
def play_one(
env,
total_t,
experience_replay_buffer,
model,
target_model,
gamma,
batch_size,
epsilon,
epsilon_change,
epsilon_min):
t0 = datetime.now()
# Reset the environment
obs = env.reset()
obs_small = downsample_image(obs)
state = np.stack([obs_small] * 4, axis=0)
assert(state.shape == (4, 80, 80))
loss = None
total_time_training = 0
num_steps_in_episode = 0
episode_reward = 0
done = False
while not done:
# Update target network
if total_t % TARGET_UPDATE_PERIOD == 0:
target_model.copy_from(model)
print("Copied model parameters to target network. total_t = %s, period = %s" % (total_t, TARGET_UPDATE_PERIOD))
# Take action
action = model.sample_action(state, epsilon)
obs, reward, done, _ = env.step(action)
obs_small = downsample_image(obs)
next_state = np.append(state[1:], np.expand_dims(obs_small, 0), axis=0)
# assert(state.shape == (4, 80, 80))
episode_reward += reward
# Remove oldest experience if replay buffer is full
if len(experience_replay_buffer) == MAX_EXPERIENCES:
experience_replay_buffer.pop(0)
# Save the latest experience
experience_replay_buffer.append((state, action, reward, next_state, done))
# Train the model, keep track of time
t0_2 = datetime.now()
loss = learn(model, target_model, experience_replay_buffer, gamma, batch_size)
dt = datetime.now() - t0_2
total_time_training += dt.total_seconds()
num_steps_in_episode += 1
state = next_state
total_t += 1
epsilon = max(epsilon - epsilon_change, epsilon_min)
return total_t, episode_reward, (datetime.now() - t0), num_steps_in_episode, total_time_training/num_steps_in_episode, epsilon
if __name__ == '__main__':
# hyperparams and initialize stuff
conv_layer_sizes = [(32, 8, 4), (64, 4, 2), (64, 3, 1)]
hidden_layer_sizes = [512]
gamma = 0.99
batch_sz = 32
num_episodes = 2
total_t = 0
experience_replay_buffer = []
episode_rewards = np.zeros(num_episodes)
# epsilon
# decays linearly until 0.1
epsilon = 1.0
epsilon_min = 0.1
epsilon_change = (epsilon - epsilon_min) / 500000
# Create environment
env = gym.envs.make("Breakout-v0")
# Create models
model = DQN(
K=K,
conv_layer_sizes=conv_layer_sizes,
hidden_layer_sizes=hidden_layer_sizes,
gamma=gamma,
scope="model")
target_model = DQN(
K=K,
conv_layer_sizes=conv_layer_sizes,
hidden_layer_sizes=hidden_layer_sizes,
gamma=gamma,
scope="target_model"
)
with tf.Session() as sess:
model.set_session(sess)
target_model.set_session(sess)
sess.run(tf.global_variables_initializer())
print("Populating experience replay buffer...")
obs = env.reset()
obs_small = downsample_image(obs)
state = np.stack([obs_small] * 4, axis=0)
# assert(state.shape == (4, 80, 80))
for i in range(MIN_EXPERIENCES):
action = np.random.choice(K)
obs, reward, done, _ = env.step(action)
next_state = update_state(state, obs)
# assert(state.shape == (4, 80, 80))
experience_replay_buffer.append((state, action, reward, next_state, done))
if done:
obs = env.reset()
obs_small = downsample_image(obs)
state = np.stack([obs_small] * 4, axis=0)
# assert(state.shape == (4, 80, 80))
else:
state = next_state
# Play a number of episodes and learn!
for i in range(num_episodes):
total_t, episode_reward, duration, num_steps_in_episode, time_per_step, epsilon = play_one(
env,
total_t,
experience_replay_buffer,
model,
target_model,
gamma,
batch_sz,
epsilon,
epsilon_change,
epsilon_min,
)
episode_rewards[i] = episode_reward
last_100_avg = episode_rewards[max(0, i - 100):i + 1].mean()
print("Episode:", i,
"Duration:", duration,
"Num steps:", num_steps_in_episode,
"Reward:", episode_reward,
"Training time per step:", "%.3f" % time_per_step,
"Avg Reward (Last 100):", "%.3f" % last_100_avg,
"Epsilon:", "%.3f" % epsilon
)
sys.stdout.flush()
|
the-stack_0_11957 | # The actual simulation goes here
# This is the main application framework for the Race Simulation which contains the MainWindow,
# based on PyQt, and spawns a Qthread SimulationThread thread. Qt signals/slots are used to
# communicate in both directions between them to control (start/pause/stop) and report results
# between them.
#
#
# To Execute: python3 simulation.py
#
# Dependencies: python3, PyQt5 etc.
#
# Description: MainWindow is created by the app, which in turn starts a SimulationThread thread.
# Note: the MainWindow is not a QMainWindow, rather a QWidget which allows for more flexibility
# in placing controls, plots, etc.
# The MainWindow contains user controls such as a push button (QPushButton) that, when pressed,
# emits a signal that is captured by the "slot" on the SimulationThread thread, which acts on it
# (thread_start_calculating).
# Likewise, the SimulationThread thread emits various signals which are captured by associated slots
# in the MainWindow and acted upon.
# In either direction data (e.g. input parameters to the SimulationThread thread or results of
# calculation from the SimulationThread thread) passed with the emitted signal is then displayed on the
# PushButton.
#
# This is based on :
# https://stackoverflow.com/questions/52993677/how-do-i-setup-signals-and-slots-in-pyqt-with-qthreads-in-both-directions
# Author: RMH 10/28/2020
#
# Status:
# 11/25/20 This version does NO simulating and provides only the very basic GUI framework
# with a simple placeholder graph/plot, threading, and signalling between the thread and
# the main window.
# 12/1/20 Adding a data storage area to share between the SimulationThread and MainWindow thread
# which incorporates a mutex mechanism (QReadWriteLock) to allow coordinating sharing of the
# data which MainWindow will be consuming (reading).
# 12/52/20 Manual merge in branch 'one-lock-rules-them-all' simulation code with the QThread
# architecture framed in from the previous versions of this branch
# USE ONLY SI UNITS
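# A minimal sketch of the Qt signal/slot wiring this file relies on (the names in this
# snippet are illustrative only, not the classes defined below):
#
#     class Worker(QThread):
#         progressSignal = pyqtSignal(str)            # worker -> GUI
#
#         @pyqtSlot(int)
#         def thread_start(self, value):              # GUI -> worker
#             self.progressSignal.emit("started")
#
#     worker = Worker()
#     worker.progressSignal.connect(window.onProgress)    # GUI slot receives worker updates
#     window.startSignal.connect(worker.thread_start)     # worker slot receives GUI commands
#     worker.start()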
import sys
import time
import logging
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
import pyqtgraph as pg
import cProfile
from datastore import (DataStore, RacingSimulationResults)
from logging_config import configure_logging
from physics_equations import (max_negative_power_physics_simulation,
max_positive_power_physics_simulation,
constrained_velocity_physics_simulation,
)
from electric_car_properties import ElectricCarProperties
from track_properties import (TrackProperties,
high_plains_raceway)
logger = logging.getLogger(__name__)
class MainWindow(QWidget):
# define the SIGNALs that MainWindow will send to other threads
mainWindowStartCalculatingSignal = pyqtSignal(int)
def __init__(self, *args, **kwargs):
QWidget.__init__(self, parent=None)
self.data_store = DataStore()
logger.info("MainWindow: DataStore initialized",
extra={'sim_index': self.data_store.get_simulation_index()})
# Create GUI related resources
self.setWindowTitle('Race Simulation')
# create the user play controls and data results graphs to run the simulation
self.createUserDisplayControls()
        # create placeholders for the plots MainWindow will be delivering (updating)
# data into.
self.graphs = pg.GraphicsLayoutWidget(show=True, title="Race Sim plots")
self.graphs.resize(1000, 540)
self.p1 = self.graphs.addPlot(name="Plot1", title="Time (s)")
self.p2 = self.graphs.addPlot(name="Plot2", title="Distance (m)")
self.p2.hide()
self.p3 = self.graphs.addPlot(name="Plot3", title="Velocity (m/s)")
self.p3.hide()
self.p4 = self.graphs.addPlot(name="Plot4", title="Acceleration (m/s^2)")
self.p4.hide()
self.p5 = self.graphs.addPlot(name="Plot5", title="Motor Power")
self.p5.hide()
self.p6 = self.graphs.addPlot(name="Plot6", title="Battery Power")
self.p6.hide()
self.p7 = self.graphs.addPlot(name="Plot7", title="Battery Energy (joules)")
self.p7.hide()
# Links user X-coordinate movements of all plots together. Practically, there has
        # to be one plot they all link to, and in this case it's self.p1 (Time)
self.p2.setXLink(self.p1)
self.p3.setXLink(self.p1)
self.p4.setXLink(self.p1)
self.p5.setXLink(self.p1)
self.p6.setXLink(self.p1)
self.p7.setXLink(self.p1)
# Layout the major GUI components
#self.layout = QtGui.QVBoxLayout()
self.layout = QHBoxLayout()
self.layout.addWidget(self.userDisplayControlsGroup)
self.layout.addWidget(self.graphs)
self.setLayout(self.layout)
# Create the instances of our worker threads
self.simulationThread = SimulationThread(self.data_store)
self.plotRefreshTimingThread = PlotRefreshTimingThread()
# Setup the SIGNALs to be received from the worker threads
self.simulationThread.simulationThreadSignal.connect(self.signalRcvFromSimulationThread)
self.plotRefreshTimingThread.plotRefreshTimingSignal.connect(self.signalPlotRefresh)
# TODO - what mechanism and what to do when SimulationThread or dies like
# refresh GUI and save/close results file??
#self.simulationThread.finished.connect(self.simulationThreadFinished)
#self.simulationThread.terminated.connect(self.simulationThreadTerminated)
# Now that the SimulationThread has been created (but not yet running), connect the
# Button clicked in MainWindow - call a SimulationThread method to do something
self.buttonRun.clicked.connect(self.createStartCalculatingSignal)
self.buttonStop.clicked.connect(self.simulationThread.thread_stop_calculating)
self.checkboxDistanceBreakpoint.clicked.connect(self.enableBreakpointSpinbox)
self.simulationThread.start()
self.plotRefreshTimingThread.start()
def enableBreakpointSpinbox(self):
if self.checkboxDistanceBreakpoint.isChecked() == True:
self.spinboxDistanceBreakpoint.setEnabled(True)
self.spinboxDistanceBreakpoint.setReadOnly(False)
else:
self.spinboxDistanceBreakpoint.setEnabled(False)
self.spinboxDistanceBreakpoint.setReadOnly(True)
def createStartCalculatingSignal(self):
"""
Send a SIGNAL to the simulation thread to start the simulation calculations.
Based on the user's control settings in the GUI, figure out what "distance" value
to send with the signal to Simulation Thread to start/continue simulation
"distance" value sent to the SimulationThread is overload with these meanings:
>0 distance in meters from the start on the track...
=0 singlestep,
<0 whole track,
"""
if self.checkboxDistanceBreakpoint.isChecked() == True:
distance = self.spinboxDistanceBreakpoint.value()
else:
# No breakpoint indicated on GUI so run the whole track or
# until user hits "pause" button
distance = -1
# signal the thread
self.simulationThread.thread_start_calculating(distance)
def createUserDisplayControls(self):
self.labelDisplayControl = QLabel("Display Control")
# Note - FYI - created in the order the controls appear on screen
self.labelStatus = QLabel("Status")
self.textboxStatus = QLineEdit("Initialized", self)
self.textboxStatus.setReadOnly(True)
self.buttonRun = QPushButton('Run/Continue', self)
self.buttonRun.setEnabled(True)
self.buttonStop = QPushButton('Pause', self)
self.buttonStop.setEnabled(True)
self.checkboxDistanceBreakpoint = QCheckBox('Distance Breakpoint (m)', self)
self.checkboxDistanceBreakpoint.setChecked(False)
self.spinboxDistanceBreakpoint = QDoubleSpinBox()
self.spinboxDistanceBreakpoint.setReadOnly(True)
self.spinboxDistanceBreakpoint.setRange(0,999999)
#outputs of simulation
self.labelSimulationIndex = QLabel("Current Sim. Index")
self.textboxSimulationIndex = QLineEdit("0",self)
self.textboxSimulationIndex.setReadOnly(False)
self.checkboxTime = QCheckBox('Time (s)', self)
self.checkboxTime.setChecked(False)
self.spinboxTime = QDoubleSpinBox()
self.spinboxTime.setReadOnly(True)
self.spinboxTime.setRange(0, 999999)
self.checkboxDistance = QCheckBox('Distance (m)', self)
self.checkboxDistance.setChecked(False)
self.spinboxDistance = QDoubleSpinBox()
self.spinboxDistance.setReadOnly(True)
self.spinboxDistance.setRange(0,999999)
self.checkboxVelocity = QCheckBox('Velocity (m/s)', self)
self.checkboxVelocity.setChecked(False)
self.spinboxVelocity = QDoubleSpinBox()
self.spinboxVelocity.setReadOnly(True)
self.spinboxVelocity.setRange(0,999999)
self.checkboxAcceleration = QCheckBox('Acceleration (m/s^2)', self)
self.checkboxAcceleration.setChecked(False)
self.spinboxAcceleration = QDoubleSpinBox()
self.spinboxAcceleration.setReadOnly(True)
self.checkboxMotorPower = QCheckBox('Motor Power', self)
self.checkboxMotorPower.setChecked(False)
self.spinboxMotorPower = QDoubleSpinBox()
self.spinboxMotorPower.setReadOnly(True)
self.spinboxMotorPower.setRange(0,999999)
self.checkboxBatteryPower = QCheckBox('Battery Power', self)
self.checkboxBatteryPower.setChecked(False)
self.spinboxBatteryPower = QDoubleSpinBox()
self.spinboxBatteryPower.setReadOnly(True)
self.spinboxBatteryPower.setRange(0,999999)
self.checkboxBatteryEnergy = QCheckBox('Battery Energy (j)', self)
self.checkboxBatteryEnergy.setChecked(False)
self.spinboxBatteryEnergy = QDoubleSpinBox()
self.spinboxBatteryEnergy.setReadOnly(True)
self.spinboxBatteryEnergy.setRange(0,999999)
#self.userDisplayControlsGroup = QtGui.QGroupBox('User Display Controls')
self.userDisplayControlsGroup = QGroupBox('User Display Controls')
#self.userDisplayControlsLayout= QtGui.QGridLayout()
self.userDisplayControlsLayout= QGridLayout()
self.userDisplayControlsLayout.addWidget(self.labelStatus, 0, 0)
self.userDisplayControlsLayout.addWidget(self.textboxStatus, 0, 1)
self.userDisplayControlsLayout.addWidget(self.buttonRun, 1, 0)
self.userDisplayControlsLayout.addWidget(self.buttonStop, 1, 1)
self.userDisplayControlsLayout.addWidget(self.checkboxDistanceBreakpoint, 2, 0)
self.userDisplayControlsLayout.addWidget(self.spinboxDistanceBreakpoint, 2, 1)
self.userDisplayControlsLayout.addWidget(self.labelSimulationIndex, 3, 0)
self.userDisplayControlsLayout.addWidget(self.textboxSimulationIndex, 3, 1)
self.userDisplayControlsLayout.addWidget(self.checkboxTime, 4, 0)
self.userDisplayControlsLayout.addWidget(self.spinboxTime, 4, 1)
self.userDisplayControlsLayout.addWidget(self.checkboxDistance, 5, 0)
self.userDisplayControlsLayout.addWidget(self.spinboxDistance, 5, 1)
self.userDisplayControlsLayout.addWidget(self.checkboxVelocity, 6, 0)
self.userDisplayControlsLayout.addWidget(self.spinboxVelocity, 6, 1)
self.userDisplayControlsLayout.addWidget(self.checkboxAcceleration, 7, 0)
self.userDisplayControlsLayout.addWidget(self.spinboxAcceleration, 7, 1)
self.userDisplayControlsLayout.addWidget(self.checkboxMotorPower, 8, 0)
self.userDisplayControlsLayout.addWidget(self.spinboxMotorPower, 8, 1)
self.userDisplayControlsLayout.addWidget(self.checkboxBatteryPower, 9, 0)
self.userDisplayControlsLayout.addWidget(self.spinboxBatteryPower, 9, 1)
self.userDisplayControlsLayout.addWidget(self.checkboxBatteryEnergy, 10, 0)
self.userDisplayControlsLayout.addWidget(self.spinboxBatteryEnergy, 10, 1)
self.userDisplayControlsGroup.setLayout(self.userDisplayControlsLayout)
def simulationThreadResultsDataDisplay(self):
        # TODO placeholder for real work to be done when the SimulationThread (a QThread)
        # SIGNALs MainWindow that new data is available in shared memory
print('Window SIGNAL from SimulationThread: Results_data_ready')
def simulationThreadFinished(self):
# TODO placeholder for SimulationThread SIGNALs ??exiting
# data is available in shared memory
print('Window: SIGNAL From SimulationThread: Finished')
def simulationThreadTerminated(self):
# TODO placeholder for SimulationThread SIGNALs terminated
print('Window: SIGNAL From SimulationThread: Terminated')
"""
Slots routines to handle SIGNALs sent to MainWindow from other threads
"""
@pyqtSlot(str)
def signalRcvFromSimulationThread(self, text):
#self.buttonRun.setText(text)
self.textboxStatus.setText(text)
@pyqtSlot()
def signalPlotRefresh(self):
#Display/update the window to display computation status, data, and plots selected by the user
# This is called periodically because of the signal emitted from PlotRefreshTimingThread
current_sim_index = (self.data_store.get_simulation_index())
logger.info("MainWindow:", extra={'sim_index': current_sim_index})
self.textboxSimulationIndex.setText("{}".format(current_sim_index))
"""
        Only refresh data if the simulation calculations have begun, indicated by
        current_sim_index > 0
        Note: current_sim_index is decremented by 1 ("-1") for the following calls
        because the lap_velocity_simulation calculations may be incomplete for that index
        when this "plot" signal was received and interrupted them. That is, the
        SimulationThread is/could still be updating the DataStore data (lists) records at
        simulation_index and not all lists have been calculated, so we should
        just plot up to the last complete record.
"""
if current_sim_index > 0 :
# Get the current data values and update the corresponding display field textbox
time = self.data_store.get_time_at_index(current_sim_index-1)
self.spinboxTime.setValue(time)
distance = self.data_store.get_distance_at_index(current_sim_index-1)
self.spinboxDistance.setValue(distance)
velocity = self.data_store.get_velocity_at_index(current_sim_index-1)
self.spinboxVelocity.setValue(velocity)
acceleration = self.data_store.get_acceleration_at_index(current_sim_index-1)
self.spinboxAcceleration.setValue(acceleration)
motor_power = self.data_store.get_motor_power_at_index(current_sim_index-1)
self.spinboxMotorPower.setValue(motor_power)
battery_power = self.data_store.get_battery_power_at_index(current_sim_index-1)
self.spinboxBatteryPower.setValue(battery_power)
# TBD not yet implemented in physics_equations
#battery_energy = self.data_store.get_battery_energy_at_index(current_sim_index-1)
#self.spinboxBatteryEnergy.setValue(battery_energy)
# Display the data values
# create a new plot for every point simulated so far
x = [z for z in range(current_sim_index)]
_time = []
_distance = []
_velocity = []
_max_velocity = []
_acceleration = []
_motor_power = []
_battery_power = []
_battery_energy = []
_time = self.data_store.get_time_list(current_sim_index)
_distance = self.data_store.get_distance_list(current_sim_index)
_velocity = self.data_store.get_velocity_list(current_sim_index)
_max_velocity = self.data_store.get_track_max_velocity_list(current_sim_index)
_acceleration = self.data_store.get_acceleration_list(current_sim_index)
_motor_power = self.data_store.get_motor_power_list(current_sim_index)
_battery_power = self.data_store.get_battery_power_list(current_sim_index)
#TODO not yet implemented
#_battery_energy = self.data_store.get_battery_energy_list(current_sim_index)
self.p1.plot(x=x, y=_time, name="Plot1", title="Time")
# selectively display the plots based on the checkboxes
if self.checkboxDistance.isChecked() == True :
self.p2.show()
self.p2.plot(x=x, y=_distance, name="Plot2", title="Distance (m)")
else:
self.p2.hide()
if self.checkboxVelocity.isChecked() == True :
self.p3.show()
self.p3.plot(x=x, y=_max_velocity, name="Plot3", title="Max Velocity (m/sec)", pen='r')
self.p3.plot(x=x, y=_velocity, name="Plot3", title="Velocity (m/sec)")
else:
self.p3.hide()
if self.checkboxAcceleration.isChecked() == True :
self.p4.show()
self.p4.plot(x=x, y=_acceleration, name="Plot4", title="Acceleration (m/sec^2)")
else:
self.p4.hide()
if self.checkboxMotorPower.isChecked() == True :
self.p5.show()
self.p5.plot(x=x, y=_motor_power, name="Plot5", title="Motor Power")
else:
self.p5.hide()
if self.checkboxBatteryPower.isChecked() == True :
self.p6.show()
self.p6.plot(x=x, y=_battery_power, name="Plot6", title="Battery Power")
else:
self.p6.hide()
"""TBD - to be added once Battery Energy is working in physics_equations
if self.checkboxBatteryEnergy.isChecked() == True :
self.p7.show()
self.p7.plot(x=x, y=_battery_energy, name="Plot7", title="Battery Energy (joules)")
else:
self.p7.hide()
"""
class SimulationThread(QThread):
# Define the Signals we'll be emitting to the MainWindow
simulationThreadSignal = pyqtSignal(str)
simulationThreadPlotSignal = pyqtSignal(int)
breakpointDistance = 0
def __init__(self, passed_data_store, parent=None):
QThread.__init__(self, parent)
self.exiting = False
self.setObjectName("SimulationThread")
""" SimulationComputing is used for staring/stopping loop control logic which is
controlled ( signalled) from the MainWindow.
Start without compution in the simulationThread running
"""
self.simulationComputing = False
self.breakpointDistance = 0
# Initialize the simulation universe
self._data_store = passed_data_store
self.initialize_race()
#print('SimulationThread: __init()__')
#print("SimulationThread: Simulation Index = {}".format(self._data_store.get_simulation_index()))
#connect some signals from the main window to us
#self.connect(self, QtCore.SIGNAL('To_End',self.processToEnd)
def __del__(self):
# Before a SimulationThread object is destroyed, we need to ensure that it stops processing.
# For this reason, we implement the following method in a way that indicates to
# the part of the object that performs the processing that it must stop, and waits
# until it does so.
self.exiting = True
self.wait()
# rotational inertia estimation: http://www.hpwizard.com/rotational-inertia.html
def initialize_race(self):
segment_distance = 0.005 # meters, this must be very very small
battery_power = 40000 # 40kW
motor_efficiency = 0.8
wheel_radius = 0.25 # m, ~20 in OD on tires
rotational_inertia = 10 # kg*m^2
mass = 1000 # kg
drag_coefficient = 0.4
frontal_area = 7 # m^2
air_density = 1 # kg/m^3
wheel_pressure_bar = 3 # bar
track = TrackProperties()
track.set_air_density(air_density)
for distance in high_plains_raceway:
track.add_critical_point(distance, high_plains_raceway[distance], track.FREE_ACCELERATION)
track.generate_track_list(segment_distance)
car = ElectricCarProperties()
car.set_car_parameters(mass=mass, rotational_inertia=rotational_inertia,
motor_power=battery_power, motor_efficiency=motor_efficiency,
battery_capacity=10, drag_coefficient=drag_coefficient,
frontal_area=frontal_area, wheel_radius=wheel_radius,
wheel_pressure_bar=wheel_pressure_bar)
self._data_store.initialize_lap_lists(len(track.distance_list))
self._data_store.set_car_properties(car)
self._data_store.set_track_properties(track)
""" SimulationThread signal handling routines. This is the collection of SLOTS
that get signaled (emitted) from the MainWindow and tell the SimulationThread
what to do, like change states and start calculating, pause, etc.
"""
@pyqtSlot()
def thread_start_calculating(self, distance_value):
"""
This signal (slot) handler takes the distance value
and updates SimulationThread computing state and interprets
the distance_value into appropriate values for "breakpoints" to,
if necessary, to stop computing.
"""
print("Breakpoint Distance value:{}".format(distance_value))
logger.info('Slot:thread_start_calculating :',
extra={'sim_index': self._data_store.get_simulation_index()})
if distance_value == 0:
logger.info('Slot:thread_start_calculating SINGLE STEP NOT IMPLEMENTED:',
extra={'sim_index': self._data_store.get_simulation_index()})
#TODO - finish this breakpoint case
self.simulationComputing = False
elif distance_value == -1:
logger.info('Slot:thread_start_calculating RUN TO COMPLETION :',
extra={'sim_index': self._data_store.get_simulation_index()})
# set the breakpoint to be a very large number to indicate run to completion
self.breakpointDistance = 9999999
self.simulationComputing = True
else:
# run to the distance value point in the track
sim_index = self._data_store.get_simulation_index()
if distance_value > self._data_store.get_distance_at_index(sim_index) :
logger.info('Slot:thread_start_calculating RUN TO DISTANCE :',
extra={'sim_index': sim_index})
# requested breakpoint is further down the track
self.breakpointDistance = distance_value
# Start computing and acknowledge to MainWindow by sending a signal back
self.simulationThreadSignal.emit("Calculating...")
# "state" variable indicating thread should be calculating
self.simulationComputing = True
else:
logger.info('Slot:thread_start_calculating PAST REQUESTED DISTANCE :',
extra={'sim_index': sim_index})
# simulation has already past this point in the track, don't proceed
self.simulationComputing = False
@pyqtSlot()
def thread_stop_calculating(self):
logger.info('Slot:thread_stop_calculating :',
extra={'sim_index': self._data_store.get_simulation_index()})
# Now send a signal back to the main window
self.simulationThreadSignal.emit("Paused")
# "state" variable indicating thread should stop calculating
self.simulationComputing = False
def racing_simulation(self):
"""Function accepts a car and a track and executes
a simulation to ouput critical metrics related
to battery life and track speed.
Args:
Nothing, all required vars are defined in class
Returns:
Nothing, all required vars are defined in class
"""
results = RacingSimulationResults()
self.lap_velocity_simulation()
# only calculate results if the simulation ran through without an interruption
if not self._data_store.exit_event.is_set():
lap_results = self._data_store.get_lap_results()
# TODO fix this
#results.laps_per_pit_stop = car["battery_capacity"] / lap_results.motor_energy_list[-1]
results.lap_time = lap_results.end_velocity
results.lap_results = lap_results
self._data_store.set_race_results(results)
def lap_velocity_simulation(self):
"""Function calculates the velocity profile of a car with
car_properties on a track with track_properties. The car
        starts with an initial velocity of initial_velocity.
Args:
data_store (DataStore): Thread safe storage for all simulation data
Returns:
Nothing (all data saved in the datastore)
"""
# performance increases by assigning local functions
# https://towardsdatascience.com/10-techniques-to-speed-up-python-runtime-95e213e925dc
add_physics_result_to_datastore = self._data_store.add_physics_results_to_lap_results
get_velocity = self._data_store.get_velocity_at_index
track = self._data_store.get_track_properties()
air_density = track.get_air_density()
car = self._data_store.get_car_properties()
# need to populate the time profile be the same length as the distance list
# to complete a lap of simulation
list_len = len(track.distance_list)
logger.debug('track.distance_list length={}'.format(list_len),
extra={'sim_index': self._data_store.get_simulation_index()})
# TODO - Add self.simulationComputing to loop control to while
while self._data_store.get_simulation_index() < list_len:
# get the new index we are going to calculate
sim_index = self._data_store.get_simulation_index()
if self._data_store.exit_event.is_set():
break
distance_of_travel = (track.distance_list[sim_index] -
track.distance_list[sim_index - 1])
# only continue simulation computing if the GUI says to do so.
if (self.simulationComputing == True and self.breakpointDistance > track.distance_list[sim_index]):
velocity = get_velocity(sim_index - 1)
physics_results = max_positive_power_physics_simulation(velocity,
distance_of_travel,
car,
air_density)
add_physics_result_to_datastore(physics_results, sim_index)
# check if velocity constraints are violated
if get_velocity(sim_index) > track.max_velocity_list[sim_index]:
# velocity constraint violated!!
# start walking back until velocity constraint at sim_index is met
logger.info("velocity constraint violated starting walk back, current v: {}, max: {}"
.format(physics_results.final_velocity, track.max_velocity_list[sim_index]),
extra={'sim_index': self._data_store.get_simulation_index()})
max_velocity_constraint = track.max_velocity_list[sim_index]
while get_velocity(sim_index) > max_velocity_constraint:
"""This while loop's purpose is to recalculate a portion of the
car's car profile because the car ended up going too fast at a point on the
track. To recalculate the following happens:
1. a "walk back" index is used to track how far back the recalculation occurs
2. from the index (sim_index - walk_back_index) to (sim_index - 1) the results
are calculated as a maximum regeneration effort by the motor
3. at the sim_index the results are calculated as a constrained velocity
- if the results of the calculation are realistic then the walk back is done
- if the results are not realistic then increment the
walk back counter and recalculate
"""
walk_back_counter = self._data_store.get_walk_back_counter()
recalculation_start_index = sim_index - walk_back_counter
logger.debug("starting and ending walkback index: {}, {}"
.format(recalculation_start_index, sim_index),
extra={'sim_index': self._data_store.get_simulation_index()})
for i in range(recalculation_start_index, sim_index):
velocity = get_velocity(i - 1)
logger.debug("velocity: {}"
.format(velocity),
extra={'sim_index': i})
# recalculate with negative motor power
physics_results = max_negative_power_physics_simulation(velocity,
distance_of_travel,
car,
air_density)
logger.debug("next velocity: {}"
.format(physics_results.final_velocity),
extra={'sim_index': i})
add_physics_result_to_datastore(physics_results, i)
velocity = get_velocity(sim_index - 1)
# last deceleration will be a constrained velocity because
# it will be neither max positive or negative motor power
physics_results = \
constrained_velocity_physics_simulation(velocity,
max_velocity_constraint,
distance_of_travel,
car,
air_density)
logger.debug("velocity start, end, max: {} {} {}"
.format(velocity,
physics_results.final_velocity,
max_velocity_constraint),
extra={'sim_index': sim_index})
# check if constrained velocity calculation is realistic
# TODO other checks here can be on acceleration or wheel force
if physics_results.motor_power < -car["motor_power"]:
logger.debug(
"velocity constraint still violated, calculated power: {}, max power: {}"
.format(physics_results.motor_power, car["motor_power"]),
extra={'sim_index': sim_index})
logger.debug("sim_index, walkback: {} {}, incrementing walk back"
.format(sim_index, walk_back_counter),
extra={'sim_index': sim_index})
self._data_store.increment_walk_back_counter()
else:
logger.info(
"velocity constraint accepted, calculated power: {}, max power: {}"
.format(physics_results.motor_power, car["motor_power"]),
extra={'sim_index': sim_index})
logger.info("constrained velocity equation accepted",
extra={'sim_index': sim_index})
add_physics_result_to_datastore(physics_results, sim_index)
                    # end of while get_velocity(sim_index) > max_velocity_constraint:
# walk back complete, reset walk back index for next time
self._data_store.reset_walk_back_counter()
# completed calculation for the latest simulation index,
self._data_store.increment_simulation_index()
else:
# self.simulationComputing is False or we've reached a breakpoint,
# so wait for GUI user to indicate proceed
if self.simulationComputing == True :
# if we're computing and got here, we must have hit a breakpoint, therefore pause
# Now send a signal back to the main window
self.simulationThreadSignal.emit("Paused")
# "state" variable indicating thread should stop calculating
self.simulationComputing = False
#else:
                # we either started out not computing or a breakpoint has already sent us here,
                # so do nothing more than wait
# in any case, wait until user gives us a new condition to continue computing
time.sleep(1.0)
logger.debug("waiting for simulationComputing==True",
extra={'sim_index': sim_index})
# end of while data_store.get_simulation_index() < list_len:
logger.info("SIMULATION COMPLETE!", extra={'sim_index': 'N/A'})
self.simulationThreadSignal.emit("Finished!")
self._data_store.exit_event.set()
def run(self):
# Note: This is never called directly. It is called by Qt once the
# thread environment with the thread's start() method has been setup,
# and then runs "continuously"
logger.info("SimulationThread: entering cProfile.runctx() ",
extra={'sim_index': 'N/A'})
# profiling tool, look at results with runsnake:
# https://kupczynski.info/2015/01/16/profiling-python-scripts.html
# this has relatively little overhead for the overall runtime of the program
# I have only been able to get the runsnake files to work on linux
# alternative profile results viewer for windows (untried): https://sourceforge.net/projects/qcachegrindwin/
cProfile.runctx("self.racing_simulation()", globals(), locals(), 'profile-simulation.out')
class PlotRefreshTimingThread(QThread):
# Thread responsible for a periodic signal to the MainWindow which when received causes
# MainWindow to refresh it's plots.
# Define the Signals we'll be emitting to the MainWindow
plotRefreshTimingSignal = pyqtSignal()
    # start without computation in the simulationThread running
def __init__(self, parent=None):
QThread.__init__(self, parent)
self.exiting = False
logger.info("PlotRefreshTimingThread: __init()__",
extra={'sim_index': 'N/A'})
# TODO connect some signals from the main window to us
#self.connect(self, QtCore.SIGNAL('To_End',self.processToEnd)
def __del__(self):
# Before a PlotRefreshTimingThread object is destroyed, we need to ensure that it stops
# processing. For this reason, we implement the following method in a way that
# indicates to the part of the object that performs the processing that it must stop,
# and waits until it does so.
self.exiting = True
self.wait()
def run(self):
# Note: This is never called directly. It is called by Qt once the
# thread environment with the thread's start() method has been setup,
# and then runs "continuously" to do the work of the thread as it's main
# processing loop
logger.info("PlotRefreshTimingThread: entering while() ",
extra={'sim_index': 'N/A'})
while True:
time.sleep(5.0)
self.plotRefreshTimingSignal.emit()
if __name__ == "__main__":
MainApp = QApplication(sys.argv)
if __name__ == "__main__":
configure_logging()
window = MainWindow()
window.show()
sys.exit(cProfile.runctx("MainApp.exec_()", globals(), locals(), 'profile-display.out'))
|
the-stack_0_11958 | """
Plot a geodesic of SO(3) equipped
with its left-invariant canonical METRIC.
"""
import matplotlib.pyplot as plt
import numpy as np
import os
import geomstats.visualization as visualization
from geomstats.special_orthogonal_group import SpecialOrthogonalGroup
SO3_GROUP = SpecialOrthogonalGroup(n=3)
METRIC = SO3_GROUP.bi_invariant_metric
def main():
initial_point = SO3_GROUP.identity
initial_tangent_vec = [0.5, 0.5, 0.8]
geodesic = METRIC.geodesic(initial_point=initial_point,
initial_tangent_vec=initial_tangent_vec)
n_steps = 10
t = np.linspace(0, 1, n_steps)
points = geodesic(t)
visualization.plot(points, space='SO3_GROUP')
plt.show()
if __name__ == "__main__":
if os.environ['GEOMSTATS_BACKEND'] == 'tensorflow':
print('Examples with visualizations are only implemented '
'with numpy backend.\n'
'To change backend, write: '
'export GEOMSTATS_BACKEND = \'numpy\'.')
else:
main()
|
the-stack_0_11959 | from __future__ import division
import random
import pprint
import sys
import time
import numpy as np
import cv2
from optparse import OptionParser
import pickle
import os
import traceback
from keras import backend as K
from keras.optimizers import Adam, SGD, RMSprop
from keras.layers import Input
from keras.models import Model, load_model, model_from_json
from keras_frcnn import config, data_generators
from keras_frcnn import losses as losses
import keras_frcnn.roi_helpers as roi_helpers
from keras.utils import generic_utils
if 'tensorflow' == K.backend():
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config2 = tf.ConfigProto()
config2.gpu_options.allow_growth = True
set_session(tf.Session(config=config2))
sys.setrecursionlimit(40000)
def kl_div(P, Q):
return np.nansum([p * np.log2(p / (q + 1e-8)) for p, q in zip(P, Q) if p != 0])
def js_distance(P, Q):
M = 0.5 * (P + Q)
return np.sqrt(0.5 * kl_div(P, M) + 0.5 * kl_div(Q, M))
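# kl_div / js_distance implement the Jensen-Shannon distance between two discrete
# distributions P and Q:
#   M = (P + Q) / 2,   JSD(P, Q) = sqrt(0.5 * KL(P || M) + 0.5 * KL(Q || M)),
# with KL computed in log base 2 and a small epsilon added for numerical stability.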
def get_optimal_alpha(p_img, p_curr, rule_mode = "max"):
js_dist_list = [js_distance(p_img[0,i,:], p_curr[0,i,:]) for i in range(p_img.shape[1])]
if rule_mode == "max":
dist_diff = np.nanmax(js_dist_list)
elif rule_mode == "min":
dist_diff = np.nanmin(js_dist_list)
else:
dist_diff = np.nanmean(js_dist_list)
return np.max([alpha_final, dist_diff / (1 - dist_diff + 1e-8)])
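# The returned alpha maps a Jensen-Shannon distance d in [0, 1) to d / (1 - d) (clipped from
# below by the global alpha_final), so larger disagreement between the image classifier and
# the current detector yields a larger blending weight for the image classifier.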
def make_target_probas(p_img, p_curr, alpha, constrain_hard_examples = False):
target_probas = (np.log(p_curr[0] + 1e-8) + alpha * np.log(p_img[0] + 1e-8)) / (1 + alpha)
target_probas = np.exp(target_probas) / np.exp(target_probas).sum(axis = 1)[:, None]
idx = []
if constrain_hard_examples:
# Confident predictions in img_classifier
idx_conf = np.where(p_img[0] >= 0.90)
target_probas[idx_conf[0],:] = 0
target_probas[idx_conf] = 1
# Easy predictions (agreement between img and current)
idx_agree = np.where((p_img[0].argmax(1) == p_curr[0].argmax(1)) & (p_curr[0].max(1) >= 0.50))[0]
cols_agree = p_curr[0].argmax(1)[idx_agree]
target_probas[idx_agree,:] = 0
target_probas[idx_agree, cols_agree] = 1
idx = np.unique(idx_conf[0].tolist() + idx_agree.tolist()).tolist()
return np.expand_dims(target_probas, axis = 0), idx
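# Up to normalization, the blended class distribution above is a weighted geometric mean of
# the two predictions:
#   target ∝ (p_curr * p_img^alpha)^(1 / (1 + alpha)),
# computed in log space and renormalized per ROI. The optional hard constraints overwrite
# rows with one-hot targets where the image classifier is confident (>= 0.90) or where both
# models agree on the argmax with p_curr >= 0.50.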
def make_target_bbs(bb_curr, bb_phase1, alpha):
target_bbs = (bb_curr + alpha * bb_phase1) / (1 + alpha)
return target_bbs
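# Bounding boxes are blended with the same weight but as a simple arithmetic mean:
#   bb_target = (bb_curr + alpha * bb_phase1) / (1 + alpha).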
def get_img_probas(img_path, P_cls, P_regr, ROIs, C, f):
img = cv2.imread(img_path)
new_height = 299
new_width = 299
img_probas = np.zeros((P_cls.shape[1], len(class_mapping)))
for ii in range(P_cls.shape[1]):
(x, y, w, h) = ROIs[0, ii, :]
cls_num = np.argmax(P_cls[0, ii, :])
try:
(tx, ty, tw, th) = P_regr[0, ii, 4 * cls_num:4 * (cls_num + 1)]
tx /= C.classifier_regr_std[0]
ty /= C.classifier_regr_std[1]
tw /= C.classifier_regr_std[2]
th /= C.classifier_regr_std[3]
x, y, w, h = roi_helpers.apply_regr(x, y, w, h, tx, ty, tw, th)
except:
pass
# Get the true BB coordinates
x1, y1, x2, y2 = C.rpn_stride * x, C.rpn_stride * y, C.rpn_stride * (x + w), C.rpn_stride * (y + h)
x1, y1, x2, y2 = data_generators.get_real_coordinates(f, x1, y1, x2, y2)
# Get the probabilities from the image classifier
cropped_img = img[y1:y2, x1:x2, :]
x_resized = cv2.resize(np.copy(cropped_img), (int(new_width), int(new_height)), interpolation = cv2.INTER_CUBIC)
x_resized = x_resized / 255.
x_resized = np.expand_dims(x_resized, axis = 0)
img_probas[ii, :] = img_classifier.predict(x_resized)[0]
return np.expand_dims(img_probas, axis = 0)
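# get_img_probas maps each ROI back to image coordinates (applying the class-specific bbox
# regression, the rpn_stride, and the resize ratio f), crops that region from the original
# image, rescales the crop to 299x299, and runs the phase-2 image classifier on it to obtain
# a per-ROI class-probability matrix of shape (1, n_ROIs, n_classes).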
def rpn_to_class_inputs(X, img_data, C, mode = "source", eps = 0.05):
[Y1, Y2] = model_rpn.predict(X)
R = roi_helpers.rpn_to_roi(Y1, Y2, C, K.image_dim_ordering(), use_regr = True, overlap_thresh = 0.4, max_boxes = 300)
X2, Y1, Y2, _ = roi_helpers.calc_iou(R, img_data, C, class_mapping, mode, eps)
if X2 is None:
rpn_accuracy_rpn_monitor.append(0)
rpn_accuracy_for_epoch.append(0)
raise NameError('No quality ROIs in X2. Training on another sample')
neg_samples = np.where(Y1[0, :, :].argmax(1) == len(class_mapping) - 1)
pos_samples = np.where(Y1[0, :, :].argmax(1) != len(class_mapping) - 1)
if len(neg_samples) > 0:
neg_samples = neg_samples[0]
else:
neg_samples = []
if len(pos_samples) > 0:
pos_samples = pos_samples[0]
else:
pos_samples = []
rpn_accuracy_rpn_monitor.append(len(pos_samples))
rpn_accuracy_for_epoch.append((len(pos_samples)))
if C.num_rois > 1:
if len(pos_samples) < C.num_rois//2:
selected_pos_samples = pos_samples.tolist()
else:
selected_pos_samples = np.random.choice(pos_samples, C.num_rois//2, replace=False).tolist()
try:
selected_neg_samples = np.random.choice(neg_samples, C.num_rois - len(selected_pos_samples), replace=False).tolist()
except:
selected_neg_samples = np.random.choice(neg_samples, C.num_rois - len(selected_pos_samples), replace=True).tolist()
sel_samples = selected_pos_samples + selected_neg_samples
else:
# In the extreme case where num_rois = 1, we pick a random pos or neg sample
selected_pos_samples = pos_samples.tolist()
selected_neg_samples = neg_samples.tolist()
if np.random.randint(0, 2):
sel_samples = random.choice(neg_samples)
else:
sel_samples = random.choice(pos_samples)
X2 = X2[:, sel_samples, :]
Y1 = Y1[:, sel_samples, :]
Y2 = Y2[:, sel_samples, :]
return X2, Y1, Y2, len(selected_pos_samples)
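# ROI sampling above follows the usual Faster R-CNN recipe: at most num_rois // 2 positive
# ROIs per image, with the remaining slots filled by negatives (sampled with replacement if
# there are not enough of them).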
def get_target_img_data(X_target, img_data, alpha, constrain_hard_examples = False, use_optimal_alpha = False):
[Y1, Y2, F] = phase1_rpn.predict(X_target)
R = roi_helpers.rpn_to_roi(Y1, Y2, C, K.image_dim_ordering(), overlap_thresh = 0.7)
# convert from (x1,y1,x2,y2) to (x,y,w,h)
R[:, 2] -= R[:, 0]
R[:, 3] -= R[:, 1]
# apply the spatial pyramid pooling to the proposed regions
bboxes = {}
probs = {}
all_probs = {}
for jk in range(R.shape[0] // C.num_rois + 1):
ROIs = np.expand_dims(R[C.num_rois * jk:C.num_rois * (jk + 1), :], axis = 0)
if ROIs.shape[1] == 0:
break
if jk == R.shape[0] // C.num_rois:
# Pad R
curr_shape = ROIs.shape
target_shape = (curr_shape[0], C.num_rois, curr_shape[2])
ROIs_padded = np.zeros(target_shape).astype(ROIs.dtype)
ROIs_padded[:, :curr_shape[1], :] = ROIs
ROIs_padded[0, curr_shape[1]:, :] = ROIs[0, 0, :]
ROIs = ROIs_padded
# Make predictions with current FRCNN and phase 1 detector
[_, P_regr_phase1] = phase1_classifier.predict([F, ROIs])
[P_cls_curr, P_regr_curr] = model_classifier.predict([X_target, ROIs]) # <- This returns a (1, n_ROIs, n_class) and (1, n_ROIs, 4) tensors
# Get the probabilities from the image classifier
img_probas = get_img_probas(filepath, P_cls_curr, P_regr_curr, ROIs, C, f)
# Optional re-computation of the alpha parameter
if use_optimal_alpha:
alpha = get_optimal_alpha(img_probas, P_cls_curr, "mean")
# Get the target probabilities
P_cls, no_change_bb_idx = make_target_probas(img_probas, P_cls_curr, alpha, constrain_hard_examples)
for ii in range(P_cls.shape[1]):
# If the detected object is bg skip
if np.argmax(P_cls[0, ii, :]) == (P_cls.shape[2] - 1):
continue
cls_name = inv_map[np.argmax(P_cls[0, ii, :])]
if cls_name not in bboxes:
bboxes[cls_name] = []
probs[cls_name] = []
all_probs[cls_name] = []
cls_num = np.argmax(P_cls[0, ii, :])
(x1, y1, w1, h1) = ROIs[0, ii, :]
(x2, y2, w2, h2) = ROIs[0, ii, :]
try:
(tx, ty, tw, th) = P_regr_phase1[0, ii, 4 * cls_num:4 * (cls_num + 1)]
tx /= C.classifier_regr_std[0]
ty /= C.classifier_regr_std[1]
tw /= C.classifier_regr_std[2]
th /= C.classifier_regr_std[3]
x1, y1, w1, h1 = roi_helpers.apply_regr(x1, y1, w1, h1, tx, ty, tw, th)
except:
pass
try:
(tx, ty, tw, th) = P_regr_curr[0, ii, 4 * cls_num:4 * (cls_num + 1)]
tx /= C.classifier_regr_std[0]
ty /= C.classifier_regr_std[1]
tw /= C.classifier_regr_std[2]
th /= C.classifier_regr_std[3]
x2, y2, w2, h2 = roi_helpers.apply_regr(x2, y2, w2, h2, tx, ty, tw, th)
except:
pass
if ii in no_change_bb_idx:
x, y, w, h = x2, y2, w2, h2
else:
x, y, w, h = make_target_bbs(np.array([x2, y2, w2, h2]), np.array([x1, y1, w1, h1]), alpha)
bboxes[cls_name].append([C.rpn_stride * x, C.rpn_stride * y, C.rpn_stride * (x + w), C.rpn_stride * (y + h)])
probs[cls_name].append(np.max(P_cls[0, ii, :]))
all_probs[cls_name].append(P_cls[0, ii, :])
for key in bboxes:
new_boxes, _, chosen_idx = roi_helpers.non_max_suppression_fast(np.array(bboxes[key]), np.array(probs[key]), overlap_thresh = 0.1)
probas = np.array(all_probs[key])[chosen_idx, :]
# img_data = {"filepath" : filepath, "width" : width, "height" : height, "bboxes" : []}
# all_imgs[filename]['bboxes'].append({'class': class_name, 'x1': int(x1), 'x2': int(x2), 'y1': int(y1), 'y2': int(y2)})
for jk in range(new_boxes.shape[0]):
(x1, y1, x2, y2) = new_boxes[jk, :]
(x1, y1, x2, y2) = data_generators.get_real_coordinates(f, x1, y1, x2, y2)
img_data["bboxes"].append({'class': key, 'x1': int(x1), 'x2': int(x2), 'y1': int(y1), 'y2': int(y2), 'probas': probas[jk, :]})
return img_data
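# In short, get_target_img_data builds soft pseudo-annotations for a target-domain image:
# the phase-1 RPN proposes ROIs, the current detector and the phase-2 image classifier are
# blended via make_target_probas / make_target_bbs (optionally with a re-estimated alpha),
# per-class non-max suppression (overlap 0.1) prunes duplicates, and the surviving boxes plus
# their class-probability vectors are appended to img_data["bboxes"].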
parser = OptionParser()
parser.add_option("-s", "--source_path", dest="source_path", help="Path to source training txt file.")
parser.add_option("-t", "--target_path", dest="target_path", help="Path to target training detections txt file.")
parser.add_option("-p", "--parser", dest="parser", help="Parser to use. One of general or pascal_voc", default="general")
parser.add_option("-r", "--num_rois", type="int", dest="num_rois", help="Number of ROIs to process at once.", default=32)
parser.add_option("--num_epochs", type="int", dest="num_epochs", help="Number of epochs.", default=50)
parser.add_option("--elen", dest="epoch_length", help="Set the epoch length. def=1000", default=1000)
parser.add_option("--opt", dest="optimizers", help="Set the optimizer to use", default="SGD")
parser.add_option("--lr", dest="lr", help="Initial learning rate", type=float, default=1e-3)
parser.add_option("--load_checkpoint", dest="load_checkpoint", help="Path to model weights from past checkpoint. Used to resume training.", default=None)
parser.add_option("--alpha_init", type=float, dest="alpha_init", help="Starting alpha value.", default=100.)
parser.add_option("--alpha_final", type=float, dest="alpha_final", help="Final/smallest alpha value.", default=0.5)
parser.add_option("--hard_constraints", dest="hard_constraints", help="Set hard thresholds on confident predictions", action="store_true", default=False)
parser.add_option("--recompute_alpha", dest="recompute_alpha", help="Recompute alpha automatically using Hausdorf distance.", action="store_true", default=False)
parser.add_option("--phase1_config_file", dest="phase1_config", help="Path of the config file of phase 1 F-RCNN.", default="config.pickle")
parser.add_option("--phase1_weights", dest="phase1_weights", help="Path to .hdf5 file with phase 1 F-RCNN model weights")
parser.add_option("--img_json", dest="img_json_path", help="Path to JSON file with phase 2 img model architecture")
parser.add_option("--img_weights", dest="img_weight_path", help="Path to .hdf5 file with phase 2 img model weights")
parser.add_option("--output_config_file", dest="output_config", help="Path to save final phase 3 config file (for testing)", default="config_phase3.pickle")
parser.add_option("--output_weight_path", dest="output_weight_path", help="Output path for weights.", default='models/phase3/phase3_weights.hdf5')
(options, args) = parser.parse_args()
# Check for user errors
if not options.phase1_weights:
parser.error('Error: path to phase 1 weights must be specified. Pass --phase1_weights to command line')
if not options.img_json_path:
parser.error('Error: path to phase 2 JSON file must be specified. Pass --img_json to command line')
if not options.img_weight_path:
parser.error('Error: path to phase 2 weights must be specified. Pass --img_weights to command line')
if not options.source_path:
parser.error('Error: path to source training data must be specified. Pass --source_path to command line')
if not options.target_path:
parser.error('Error: path to target training data must be specified. Pass --target_path to command line')
# Loading the selected parser
if options.parser == 'pascal_voc':
from keras_frcnn.pascal_voc_parser import get_data
elif options.parser == "general":
from keras_frcnn.general_parser import get_data
else:
raise ValueError("Command line option parser must be a valid one")
# mkdir to save models.
if not os.path.isdir("models"):
os.mkdir("models")
if not os.path.isdir("models/phase3"):
os.mkdir("models/phase3")
# Loading the config file from phase 1
with open(options.phase1_config, 'rb') as f_in:
C = pickle.load(f_in)
C.num_rois = int(options.num_rois)
C.model_path = options.output_weight_path
# Select the proper backbone configuration
if C.network == 'vgg16':
from keras_frcnn import vgg as nn
feature_dim = 512
elif C.network == 'resnet50':
from keras_frcnn import resnet as nn
feature_dim = 1024
elif C.network == 'vgg19':
from keras_frcnn import vgg19 as nn
feature_dim = 512
elif C.network == 'mobilenetv1':
from keras_frcnn import mobilenetv1 as nn
feature_dim = 512
elif C.network == 'mobilenetv2':
from keras_frcnn import mobilenetv2 as nn
feature_dim = 320
elif C.network == 'densenet':
from keras_frcnn import densenet as nn
feature_dim = 1024
else:
print('Check network name in phase 1 config file.')
raise ValueError
# Load source and target data and creating the generators
source_imgs, classes_count, _ = get_data(options.source_path)
target_imgs, _, _ = get_data(options.target_path)
class_mapping = C.class_mapping
if 'bg' not in classes_count:
classes_count['bg'] = 0
if 'bg' not in class_mapping:
class_mapping['bg'] = len(class_mapping)
inv_map = {v: k for k, v in class_mapping.items()}
print('Source training images per class:')
pprint.pprint(classes_count)
print('Num source classes (including bg) = {}'.format(len(classes_count)))
with open(options.output_config, 'wb') as config_f:
pickle.dump(C, config_f)
print('Config has been written to {}, and can be loaded when testing to ensure correct results'.format(options.output_config))
source_train_imgs = [s for s in source_imgs if s['imageset'] == 'train']
target_train_imgs = [s for s in target_imgs if s['imageset'] == 'train']
source_val_imgs = [s for s in source_imgs if s['imageset'] == 'test'] # Feeling pretty, might delete later
random.shuffle(source_train_imgs)
random.shuffle(source_val_imgs)
random.shuffle(target_train_imgs)
print('Num source train images {}'.format(len(source_train_imgs)))
#print('Num source val images {}'.format(len(source_val_imgs)))
print('Num target train images {}'.format(len(target_train_imgs)))
data_gen_source_train = data_generators.get_anchor_gt(source_train_imgs, classes_count, C, nn.get_img_output_length, K.image_dim_ordering(), mode = 'train')
#data_gen_source_val = data_generators.get_anchor_gt(source_val_imgs, classes_count, C, nn.get_img_output_length, K.image_dim_ordering(), mode = 'val')
data_gen_target_train = data_generators.get_anchor_gt(target_train_imgs, classes_count, C, nn.get_img_output_length, K.image_dim_ordering(), mode = 'val')
if K.image_dim_ordering() == 'th':
input_shape_img = (3, None, None)
input_shape_features = (feature_dim, None, None)
else:
input_shape_img = (None, None, 3)
input_shape_features = (None, None, feature_dim)
# Loading the phase 1 detector
img_input = Input(shape = input_shape_img)
roi_input = Input(shape = (C.num_rois, 4))
feature_map_input = Input(shape = input_shape_features)
shared_layers = nn.nn_base(img_input, trainable = True)
num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios)
rpn_layers = nn.rpn(shared_layers, num_anchors)
classifier = nn.classifier(feature_map_input, roi_input, C.num_rois, nb_classes = len(class_mapping), trainable = True)
phase1_rpn = Model(img_input, rpn_layers)
phase1_classifier = Model([feature_map_input, roi_input], classifier)
phase1_rpn.load_weights(options.phase1_weights, by_name = True)
phase1_classifier.load_weights(options.phase1_weights, by_name = True)
phase1_rpn.compile(optimizer = 'sgd', loss = 'mse')
phase1_classifier.compile(optimizer = 'sgd', loss = 'mse')
print("Loaded phase 1 Faster R-CNN detector")
# Loading the image classifier
# load json and create model
json_file = open(options.img_json_path, 'r')
img_classifier = model_from_json(json_file.read())
json_file.close()
# load weights into new model
img_classifier.load_weights(options.img_weight_path)
print("Loaded phase 2 image classifier")
# Creating the phase 3 detector
img_input = Input(shape = input_shape_img)
roi_input = Input(shape = (None, 4))
shared_layers = nn.nn_base(img_input, trainable = True)
num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios)
rpn = nn.rpn(shared_layers, num_anchors)
classifier = nn.classifier(shared_layers, roi_input, C.num_rois, nb_classes = len(classes_count), trainable = True)
model_rpn = Model(img_input, rpn[:2])
model_classifier = Model([img_input, roi_input], classifier)
# This is a model that holds both the RPN and the classifier, used to load/save weights for the models
model_all = Model([img_input, roi_input], rpn[:2] + classifier)
# Load pretrained Imagenet weights
try:
print('Loading weights from {}'.format(C.base_net_weights))
model_rpn.load_weights(C.base_net_weights, by_name = True)
model_classifier.load_weights(C.base_net_weights, by_name = True)
except Exception:
print('Could not load pretrained model weights. Weights can be found in the keras application folder \
https://github.com/fchollet/keras/tree/master/keras/applications')
# Use this to resume from previous training. Specify the frcnn model to load
if options.load_checkpoint is not None:
print("Loading previous model from", options.load_checkpoint)
model_rpn.load_weights(options.load_checkpoint, by_name = True)
model_classifier.load_weights(options.load_checkpoint, by_name = True)
else:
print("No previous model checkpoint was loaded")
# Optimizer setup
clipnorm_val = 1e-5
lr_val = options.lr
if options.optimizers == "SGD":
optimizer = SGD(lr = lr_val, momentum = 0.9, clipnorm = clipnorm_val)
optimizer_classifier = SGD(lr = lr_val, momentum = 0.9, clipnorm = clipnorm_val)
else:
optimizer = Adam(lr = lr_val, clipnorm = clipnorm_val)
optimizer_classifier = Adam(lr = lr_val, clipnorm = clipnorm_val / 1)
# Compile the model AFTER loading weights!
model_rpn.compile(optimizer=optimizer, loss=[losses.rpn_loss_cls(num_anchors), losses.rpn_loss_regr(num_anchors)])
model_classifier.compile(optimizer=optimizer_classifier, loss=[losses.class_loss_cls, losses.class_loss_regr(len(classes_count)-1)], metrics={'dense_class_{}'.format(len(classes_count)): 'accuracy'})
model_all.compile(optimizer = 'sgd', loss = 'mae')
epoch_length = int(options.epoch_length)
num_epochs = int(options.num_epochs)
iter_num = 0
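# NOTE: from this point on, `losses` names the per-iteration loss buffer below,
# shadowing whatever `losses` referred to in the compile calls above; those calls
# have already run, so behaviour is unchanged, but a distinct name would be clearer.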
losses = np.zeros((epoch_length, 5))
rpn_accuracy_rpn_monitor = []
rpn_accuracy_for_epoch = []
best_loss = np.Inf
class_mapping_inv = {v: k for k, v in class_mapping.items()}
# Hyperparameters of the robust F-RCNN
eps = 0.05
alpha_init = float(options.alpha_init)
alpha_final = float(options.alpha_final)
constant_thresh = int(5 / 7 * epoch_length * num_epochs)
iter_count = 0
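# Annealing schedule used inside the training loop below: alpha decays linearly from
# alpha_init to alpha_final over the first 5/7 of all iterations (constant_thresh) and
# is held fixed afterwards; when not resuming from a checkpoint, the learning rate is
# also dropped by 10x at that point.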
print('Starting training')
for epoch_num in range(num_epochs):
start_time = time.time()
progbar = generic_utils.Progbar(epoch_length, stateful_metrics = ["rpn_cls", "rpn_regr", "detector_cls", "detector_regr", "avg nb of objects"])
print('Epoch {} / {}'.format(epoch_num + 1, num_epochs))
# if epoch_num > 0 and epoch_num < 45:
# clipnorm_val = np.array(clipnorm_val * 0.95)
# lr_val = lr_val * 0.95
# K.set_value(model_rpn.optimizer.lr, lr_val)
# K.set_value(model_classifier.optimizer.lr, lr_val)
# K.set_value(model_rpn.optimizer.clipnorm, clipnorm_val)
# K.set_value(model_classifier.optimizer.clipnorm, clipnorm_val)
while True:
try:
if iter_count <= constant_thresh:
alpha = alpha_init - iter_count * (alpha_init - alpha_final) / constant_thresh
if iter_count == constant_thresh and options.load_checkpoint is None:
lr_val = lr_val * 0.1
K.set_value(model_rpn.optimizer.lr, lr_val)
K.set_value(model_classifier.optimizer.lr, lr_val)
if len(rpn_accuracy_rpn_monitor) == epoch_length and C.verbose:
mean_overlapping_bboxes = float(sum(rpn_accuracy_rpn_monitor))/len(rpn_accuracy_rpn_monitor)
rpn_accuracy_rpn_monitor = []
print('\nAverage number of overlapping bounding boxes from RPN = {} for {} previous iterations'.format(mean_overlapping_bboxes, epoch_length))
if mean_overlapping_bboxes == 0:
print('RPN is not producing bounding boxes that overlap the ground truth boxes. Check RPN settings or keep training.')
# Get next batch samples
X, Y, img_data = next(data_gen_source_train)
#X, Y, img_data = next(data_gen_source_val)
# Unaltered RPN training with source data
loss_rpn = model_rpn.train_on_batch(X, Y)
# NOTE: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format
# Y1 is the output with the one-hot hard labels [0,0,0,0,1,0]
# X2 is the 1 x R x 4 tensor with the ROI coordinates to be trained, they're already in (x1,y1,w,h) format
X2, Y1, Y2, n_pos_samples_1 = rpn_to_class_inputs(X, img_data, C, mode = "source")
loss_class_1 = model_classifier.train_on_batch([X, X2], [Y1, Y2])
# VERY IMPORTANT: This loop guarantees that there will always be one target step per source step
while True:
try:
X_target, filepath, width, height, f = next(data_gen_target_train)
img_data = {"filepath" : filepath, "width" : width, "height" : height, "bboxes" : []}
img_data = get_target_img_data(X_target, img_data, alpha, options.hard_constraints, options.recompute_alpha)
X2, Y1, Y2, n_pos_samples_2 = rpn_to_class_inputs(X_target, img_data, C, mode = "target", eps = eps)
loss_class_2 = model_classifier.train_on_batch([X_target, X2], [Y1, Y2])
break
except Exception as e:
#print(traceback.format_exc())
#print('Exception: {} at line {}'.format(e, sys.exc_info()[2].tb_lineno))
continue
losses[iter_num, 0] = loss_rpn[1]
losses[iter_num, 1] = loss_rpn[2]
losses[iter_num, 2] = loss_class_1[1] + loss_class_2[1]
losses[iter_num, 3] = loss_class_1[2] + loss_class_2[2]
losses[iter_num, 4] = np.mean([loss_class_1[3], loss_class_2[3]])
progbar.update(iter_num, [('rpn_cls', losses[iter_num, 0]), ('rpn_regr', losses[iter_num, 1]),
('detector_cls', losses[iter_num, 2]), ('detector_regr', losses[iter_num, 3]),
("avg nb of objects", np.mean([n_pos_samples_1, n_pos_samples_2]))])
iter_num += 1
iter_count += 1
if iter_num == epoch_length:
loss_rpn_cls = np.mean(losses[:, 0])
loss_rpn_regr = np.mean(losses[:, 1])
loss_class_cls = np.mean(losses[:, 2])
loss_class_regr = np.mean(losses[:, 3])
class_acc = np.mean(losses[:, 4]).round(1)
curr_loss = loss_rpn_cls + loss_rpn_regr + loss_class_cls + loss_class_regr
                mean_overlapping_bboxes = (float(sum(rpn_accuracy_for_epoch)) / len(rpn_accuracy_for_epoch)) if rpn_accuracy_for_epoch else 0.0  # guard against an empty list
rpn_accuracy_for_epoch = []
if C.verbose:
print('\nMean number of bounding boxes from RPN overlapping ground truth boxes: {}'.format(mean_overlapping_bboxes))
print('Classifier accuracy for bounding boxes from RPN: {}'.format(class_acc))
print('Loss RPN classifier: {}'.format(loss_rpn_cls))
print('Loss RPN regression: {}'.format(loss_rpn_regr))
print('Loss Detector classifier: {}'.format(loss_class_cls))
print('Loss Detector regression: {}'.format(loss_class_regr))
print('Total Loss: {}'.format(curr_loss))
print('Elapsed time: {}'.format(time.time() - start_time))
iter_num = 0
if curr_loss < best_loss:
if C.verbose:
print('Total loss decreased from {} to {}, saving weights'.format(best_loss, curr_loss))
best_loss = curr_loss
model_all.save_weights(C.model_path)
break
except Exception as e:
#print(traceback.format_exc())
#print('Exception: {} at line {}'.format(e, sys.exc_info()[2].tb_lineno))
continue
print('Training complete, exiting.')
|
the-stack_0_11960 | # This file contains various useful constants for py3status
GENERAL_DEFAULTS = {
"color_bad": "#FF0000",
"color_degraded": "#FFFF00",
"color_good": "#00FF00",
"color_separator": "#333333",
"colors": False,
"interval": 5,
"output_format": "i3bar",
}
MAX_NESTING_LEVELS = 4
TIME_FORMAT = "%Y-%m-%d %H:%M:%S"
TZTIME_FORMAT = "%Y-%m-%d %H:%M:%S %Z"
TIME_MODULES = ["time", "tztime"]
I3S_INSTANCE_MODULES = [
"battery",
"cpu_temperature",
"disk",
"ethernet",
"memory",
"path_exists",
"run_watch",
"tztime",
"volume",
"wireless",
]
I3S_SINGLE_NAMES = ["cpu_usage", "ddate", "ipv6", "load", "time"]
I3S_ALLOWED_COLORS = ["color_bad", "color_good", "color_degraded"]
# i3status modules that allow colors to be passed.
# general section also allows colors so is included.
I3S_COLOR_MODULES = ["general", "battery", "cpu_temperature", "disk", "load"]
I3S_MODULE_NAMES = I3S_SINGLE_NAMES + I3S_INSTANCE_MODULES
CONFIG_FILE_SPECIAL_SECTIONS = ["general", "py3status"]
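# Fallback bar configuration, presumably used when the user's own config cannot be
# parsed: a "CONFIG ERROR" block grouped with the full error text, next to a clock.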
ERROR_CONFIG = """
general {colors = true interval = 60}
order += "static_string py3status"
order += "tztime local"
order += "group error"
static_string py3status {format = "py3status"}
tztime local {format = "%c"}
group error{
button_next = 1
button_prev = 0
fixed_width = False
format = "{output}"
static_string error_min {format = "CONFIG ERROR" color = "#FF0000"}
static_string error {format = "$error" color = "#FF0000"}
}
"""
COLOR_NAMES_EXCLUDED = ["good", "bad", "degraded", "separator", "threshold", "None"]
COLOR_NAMES = {
"aliceblue": "#F0F8FF",
"antiquewhite": "#FAEBD7",
"aqua": "#00FFFF",
"aquamarine": "#7FFFD4",
"azure": "#F0FFFF",
"beige": "#F5F5DC",
"bisque": "#FFE4C4",
"black": "#000000",
"blanchedalmond": "#FFEBCD",
"blue": "#0000FF",
"blueviolet": "#8A2BE2",
"brown": "#A52A2A",
"burlywood": "#DEB887",
"cadetblue": "#5F9EA0",
"chartreuse": "#7FFF00",
"chocolate": "#D2691E",
"coral": "#FF7F50",
"cornflowerblue": "#6495ED",
"cornsilk": "#FFF8DC",
"crimson": "#DC143C",
"cyan": "#00FFFF",
"darkblue": "#00008B",
"darkcyan": "#008B8B",
"darkgoldenrod": "#B8860B",
"darkgray": "#A9A9A9",
"darkgrey": "#A9A9A9",
"darkgreen": "#006400",
"darkkhaki": "#BDB76B",
"darkmagenta": "#8B008B",
"darkolivegreen": "#556B2F",
"darkorange": "#FF8C00",
"darkorchid": "#9932CC",
"darkred": "#8B0000",
"darksalmon": "#E9967A",
"darkseagreen": "#8FBC8F",
"darkslateblue": "#483D8B",
"darkslategray": "#2F4F4F",
"darkslategrey": "#2F4F4F",
"darkturquoise": "#00CED1",
"darkviolet": "#9400D3",
"deeppink": "#FF1493",
"deepskyblue": "#00BFFF",
"dimgray": "#696969",
"dimgrey": "#696969",
"dodgerblue": "#1E90FF",
"firebrick": "#B22222",
"floralwhite": "#FFFAF0",
"forestgreen": "#228B22",
"fuchsia": "#FF00FF",
"gainsboro": "#DCDCDC",
"ghostwhite": "#F8F8FF",
"gold": "#FFD700",
"goldenrod": "#DAA520",
"gray": "#808080",
"grey": "#808080",
"green": "#008000",
"greenyellow": "#ADFF2F",
"honeydew": "#F0FFF0",
"hotpink": "#FF69B4",
"indianred": "#CD5C5C",
"indigo": "#4B0082",
"ivory": "#FFFFF0",
"khaki": "#F0E68C",
"lavender": "#E6E6FA",
"lavenderblush": "#FFF0F5",
"lawngreen": "#7CFC00",
"lemonchiffon": "#FFFACD",
"lightblue": "#ADD8E6",
"lightcoral": "#F08080",
"lightcyan": "#E0FFFF",
"lightgoldenrodyellow": "#FAFAD2",
"lightgray": "#D3D3D3",
"lightgrey": "#D3D3D3",
"lightgreen": "#90EE90",
"lightpink": "#FFB6C1",
"lightsalmon": "#FFA07A",
"lightseagreen": "#20B2AA",
"lightskyblue": "#87CEFA",
"lightslategray": "#778899",
"lightslategrey": "#778899",
"lightsteelblue": "#B0C4DE",
"lightyellow": "#FFFFE0",
"lime": "#00FF00",
"limegreen": "#32CD32",
"linen": "#FAF0E6",
"magenta": "#FF00FF",
"maroon": "#800000",
"mediumaquamarine": "#66CDAA",
"mediumblue": "#0000CD",
"mediumorchid": "#BA55D3",
"mediumpurple": "#9370DB",
"mediumseagreen": "#3CB371",
"mediumslateblue": "#7B68EE",
"mediumspringgreen": "#00FA9A",
"mediumturquoise": "#48D1CC",
"mediumvioletred": "#C71585",
"midnightblue": "#191970",
"mintcream": "#F5FFFA",
"mistyrose": "#FFE4E1",
"moccasin": "#FFE4B5",
"navajowhite": "#FFDEAD",
"navy": "#000080",
"oldlace": "#FDF5E6",
"olive": "#808000",
"olivedrab": "#6B8E23",
"orange": "#FFA500",
"orangered": "#FF4500",
"orchid": "#DA70D6",
"palegoldenrod": "#EEE8AA",
"palegreen": "#98FB98",
"paleturquoise": "#AFEEEE",
"palevioletred": "#DB7093",
"papayawhip": "#FFEFD5",
"peachpuff": "#FFDAB9",
"peru": "#CD853F",
"pink": "#FFC0CB",
"plum": "#DDA0DD",
"powderblue": "#B0E0E6",
"purple": "#800080",
"rebeccapurple": "#663399",
"red": "#FF0000",
"rosybrown": "#BC8F8F",
"royalblue": "#4169E1",
"saddlebrown": "#8B4513",
"salmon": "#FA8072",
"sandybrown": "#F4A460",
"seagreen": "#2E8B57",
"seashell": "#FFF5EE",
"sienna": "#A0522D",
"silver": "#C0C0C0",
"skyblue": "#87CEEB",
"slateblue": "#6A5ACD",
"slategray": "#708090",
"slategrey": "#708090",
"snow": "#FFFAFA",
"springgreen": "#00FF7F",
"steelblue": "#4682B4",
"tan": "#D2B48C",
"teal": "#008080",
"thistle": "#D8BFD8",
"tomato": "#FF6347",
"turquoise": "#40E0D0",
"violet": "#EE82EE",
"wheat": "#F5DEB3",
"white": "#FFFFFF",
"whitesmoke": "#F5F5F5",
"yellow": "#FFFF00",
"yellowgreen": "#9ACD32",
}
ON_TRIGGER_ACTIONS = ["refresh", "refresh_and_freeze"]
POSITIONS = ["left", "center", "right"]
RETIRED_MODULES = {
"nvidia_temp": {
"new": ["nvidia_smi"],
"msg": "Module {old} has been replaced with a module {new}.",
},
"scratchpad_async": {
"new": ["scratchpad"],
"msg": "Module {old} has been replaced with a consolidated module {new}.",
},
"scratchpad_counter": {
"new": ["scratchpad"],
"msg": "Module {old} has been replaced with a consolidated module {new}.",
},
"window_title": {
"new": ["window"],
"msg": "Module {old} has been replaced with a consolidated module {new}.",
},
"window_title_async": {
"new": ["window"],
"msg": "Module {old} has been replaced with a consolidated module {new}.",
},
"weather_yahoo": {
"new": ["weather_owm"],
"msg": "Module {old} is no longer available due to retired Yahoo Weather APIs and new Oath requirements. You can try a different module {new}.",
},
"xkb_layouts": {
"new": ["xkb_input"],
"msg": "Module {old} has been replaced with a module {new} to support sway too.",
},
}
MARKUP_LANGUAGES = ["pango", "none"]
|
the-stack_0_11963 | import os
import numpy as np
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import data_prep_util
import indoor3d_util
# Constants
data_dir = os.path.join(ROOT_DIR, 'data')
indoor3d_data_dir = os.path.join(data_dir, 'stanford_indoor3d')
NUM_POINT = 4096
H5_BATCH_SIZE = 1000
data_dim = [NUM_POINT, 9]
label_dim = [NUM_POINT]
data_dtype = 'float32'
label_dtype = 'uint8'
# Set paths
filelist = os.path.join(BASE_DIR, 'meta/all_data_label.txt')
data_label_files = [os.path.join(indoor3d_data_dir, line.rstrip()) for line in open(filelist)]
output_dir = os.path.join(data_dir, 'indoor3d_sem_seg_hdf5_data')
if not os.path.exists(output_dir):
os.mkdir(output_dir)
output_filename_prefix = os.path.join(output_dir, 'ply_data_all')
output_room_filelist = os.path.join(output_dir, 'room_filelist.txt')
fout_room = open(output_room_filelist, 'w')
# --------------------------------------
# ----- BATCH WRITE TO HDF5 -----
# --------------------------------------
batch_data_dim = [H5_BATCH_SIZE] + data_dim
batch_label_dim = [H5_BATCH_SIZE] + label_dim
h5_batch_data = np.zeros(batch_data_dim, dtype=np.float32)
h5_batch_label = np.zeros(batch_label_dim, dtype=np.uint8)
buffer_size = 0 # state: record how many samples are currently in buffer
h5_index = 0 # state: the next h5 file to save
def insert_batch(data, label, last_batch=False):
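    """Accumulate blocks in the fixed-size buffer and flush them to a numbered
    ply_data_all_<i>.h5 file whenever the buffer fills; pass last_batch=True on
    the final call so any remaining samples are written out as well."""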
global h5_batch_data, h5_batch_label
global buffer_size, h5_index
data_size = data.shape[0]
# If there is enough space, just insert
if buffer_size + data_size <= h5_batch_data.shape[0]:
h5_batch_data[buffer_size:buffer_size + data_size, ...] = data
h5_batch_label[buffer_size:buffer_size + data_size] = label
buffer_size += data_size
else: # not enough space
capacity = h5_batch_data.shape[0] - buffer_size
assert (capacity >= 0)
if capacity > 0:
h5_batch_data[buffer_size:buffer_size + capacity, ...] = data[0:capacity, ...]
h5_batch_label[buffer_size:buffer_size + capacity, ...] = label[0:capacity, ...]
# Save batch data and label to h5 file, reset buffer_size
h5_filename = output_filename_prefix + '_' + str(h5_index) + '.h5'
data_prep_util.save_h5(h5_filename, h5_batch_data, h5_batch_label, data_dtype, label_dtype)
print('Stored {0} with size {1}'.format(h5_filename, h5_batch_data.shape[0]))
h5_index += 1
buffer_size = 0
# recursive call
insert_batch(data[capacity:, ...], label[capacity:, ...], last_batch)
if last_batch and buffer_size > 0:
h5_filename = output_filename_prefix + '_' + str(h5_index) + '.h5'
data_prep_util.save_h5(h5_filename, h5_batch_data[0:buffer_size, ...], h5_batch_label[0:buffer_size, ...],
data_dtype, label_dtype)
print('Stored {0} with size {1}'.format(h5_filename, buffer_size))
h5_index += 1
buffer_size = 0
return
sample_cnt = 0
for i, data_label_filename in enumerate(data_label_files):
print(data_label_filename)
data, label = indoor3d_util.room2blocks_wrapper_normalized(data_label_filename, NUM_POINT, block_size=1.0,
stride=0.5,
random_sample=False, sample_num=None)
print('{0}, {1}'.format(data.shape, label.shape))
for _ in range(data.shape[0]):
fout_room.write(os.path.basename(data_label_filename)[0:-4] + '\n')
sample_cnt += data.shape[0]
insert_batch(data, label, i == len(data_label_files) - 1)
fout_room.close()
print("Total samples: {0}".format(sample_cnt))
|
the-stack_0_11964 | import base64
import pytest
from h.models.auth_client import GrantType
class TestUpdateGroup:
def test_it_returns_http_200_with_valid_payload_and_user_token(
self, app, token_auth_header, first_party_group
):
group = {"name": "Rename My Group"}
res = app.patch_json(
"/api/groups/{id}".format(id=first_party_group.pubid),
group,
headers=token_auth_header,
)
assert res.status_code == 200
assert res.json_body["name"] == "Rename My Group"
assert res.json_body["groupid"] is None
def test_it_does_not_update_group_if_empty_payload_and_user_token(
self, app, token_auth_header, first_party_group
):
payload = {}
res = app.patch_json(
"/api/groups/{id}".format(id=first_party_group.pubid),
payload,
headers=token_auth_header,
)
assert res.status_code == 200
assert res.json_body["name"] == "My First Group"
assert res.json_body["groupid"] is None
def test_it_ignores_non_whitelisted_fields_in_payload_and_user_token(
self, app, token_auth_header, first_party_group
):
group = {
"id": "fbdzzz",
"name": "My Group",
"organization": "foobar",
"joinable_by": "whoever",
}
res = app.patch_json(
"/api/groups/{id}".format(id=first_party_group.pubid),
group,
headers=token_auth_header,
)
assert res.status_code == 200
assert res.json_body["id"] != group["id"]
assert res.json_body["organization"] is None
def test_it_returns_http_400_with_invalid_payload_and_user_token(
self, app, token_auth_header, first_party_group
):
group = {
"name": "Oooopoooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo"
}
res = app.patch_json(
"/api/groups/{id}".format(id=first_party_group.pubid),
group,
headers=token_auth_header,
expect_errors=True,
)
assert res.status_code == 400
def test_it_returns_http_400_if_groupid_set_on_default_authority_and_user_token(
self, app, token_auth_header, first_party_group
):
group = {"groupid": "3434kjkjk"}
res = app.patch_json(
"/api/groups/{id}".format(id=first_party_group.pubid),
group,
headers=token_auth_header,
expect_errors=True,
)
assert res.status_code == 400
def test_it_returns_http_404_if_no_authenticated_user(self, app, first_party_group):
group = {"name": "My Group"}
res = app.patch_json(
"/api/groups/{id}".format(id=first_party_group.pubid),
group,
expect_errors=True,
)
assert res.status_code == 404
def test_it_returns_http_404_if_token_user_unauthorized(
self, app, token_auth_header, factories, db_session
):
# Not created by user represented by token_auth_header
group = factories.Group()
db_session.commit()
group_payload = {"name": "My Group"}
res = app.patch_json(
"/api/groups/{id}".format(id=group.pubid),
group_payload,
headers=token_auth_header,
expect_errors=True,
)
assert res.status_code == 404
def test_it_allows_auth_client_with_valid_forwarded_user(
self, app, auth_client_header, third_party_user, factories, db_session
):
group = factories.Group(
creator=third_party_user, authority=third_party_user.authority
)
db_session.commit()
headers = auth_client_header
headers["X-Forwarded-User"] = third_party_user.userid
group_payload = {"name": "My Group"}
path = "/api/groups/{id}".format(id=group.pubid)
res = app.patch_json(path, group_payload, headers=headers)
assert res.status_code == 200
assert res.json_body["name"] == "My Group"
def test_it_allows_auth_client_with_matching_authority(
self, app, auth_client_header, third_party_user, factories, db_session
):
group = factories.Group(
creator=third_party_user, authority=third_party_user.authority
)
db_session.commit()
group_payload = {"name": "My Group"}
path = "/api/groups/{id}".format(id=group.pubid)
res = app.patch_json(path, group_payload, headers=auth_client_header)
assert res.status_code == 200
assert res.json_body["name"] == "My Group"
def test_it_does_not_allow_auth_client_with_mismatched_authority(
self, app, auth_client_header, factories, db_session
):
group = factories.Group(authority="rando.biz")
db_session.commit()
group_payload = {"name": "My Group"}
path = "/api/groups/{id}".format(id=group.pubid)
res = app.patch_json(
path, group_payload, headers=auth_client_header, expect_errors=True
)
assert res.status_code == 404
def test_it_allows_groupid_from_auth_client_with_forwarded_user(
self, app, auth_client_header, third_party_user, factories, db_session
):
group = factories.Group(
creator=third_party_user, authority=third_party_user.authority
)
db_session.commit()
headers = auth_client_header
headers["X-Forwarded-User"] = third_party_user.userid
group_payload = {
"name": "My Group",
"groupid": "group:[email protected]",
}
path = "/api/groups/{id}".format(id=group.pubid)
res = app.patch_json(path, group_payload, headers=headers)
assert res.status_code == 200
assert "groupid" in res.json_body
assert res.json_body["groupid"] == "group:[email protected]"
def test_it_returns_HTTP_Conflict_if_groupid_is_duplicate(
self, app, auth_client_header, third_party_user, factories, db_session
):
group1 = factories.Group(
creator=third_party_user,
authority=third_party_user.authority,
groupid="group:[email protected]",
)
group2 = factories.Group(
creator=third_party_user,
authority=third_party_user.authority,
groupid="group:[email protected]",
)
db_session.commit()
headers = auth_client_header
headers["X-Forwarded-User"] = third_party_user.userid
group_payload = {"groupid": "group:[email protected]"}
# Attempting to set group2's `groupid` to one already taken by group1
path = "/api/groups/{id}".format(id=group2.pubid)
res = app.patch_json(path, group_payload, headers=headers, expect_errors=True)
assert group1.groupid in res.json_body["reason"]
assert res.status_code == 409
@pytest.fixture
def first_party_user(db_session, factories):
user = factories.User()
db_session.commit()
return user
@pytest.fixture
def first_party_group(db_session, factories, first_party_user):
group = factories.Group(
name="My First Group",
description="Original description",
creator=first_party_user,
authority=first_party_user.authority,
)
db_session.commit()
return group
@pytest.fixture
def user_with_token(db_session, factories, first_party_user):
token = factories.DeveloperToken(userid=first_party_user.userid)
db_session.add(token)
db_session.commit()
return (first_party_user, token)
@pytest.fixture
def token_auth_header(user_with_token):
user, token = user_with_token
return {"Authorization": "Bearer {}".format(token.value)}
@pytest.fixture
def third_party_user(factories, db_session):
user = factories.User(authority="thirdparty.com")
db_session.commit()
return user
@pytest.fixture
def auth_client(db_session, factories):
auth_client = factories.ConfidentialAuthClient(
authority="thirdparty.com", grant_type=GrantType.client_credentials
)
db_session.commit()
return auth_client
@pytest.fixture
def auth_client_header(auth_client):
user_pass = "{client_id}:{secret}".format(
client_id=auth_client.id, secret=auth_client.secret
)
encoded = base64.standard_b64encode(user_pass.encode("utf-8"))
return {"Authorization": "Basic {creds}".format(creds=encoded.decode("ascii"))}
|
the-stack_0_11966 | from typing import Optional
from typing import Union
import pytest
from _pytest.config import Config
from _pytest.config import ExitCode
from _pytest.config.argparsing import Parser
from _pytest.fixtures import FixtureDef
from _pytest.fixtures import SubRequest
def pytest_addoption(parser: Parser) -> None:
group = parser.getgroup("debugconfig")
group.addoption(
"--setupplan",
"--setup-plan",
action="store_true",
help="Show what fixtures and tests would be executed but "
"don't execute anything",
)
@pytest.hookimpl(tryfirst=True)
def pytest_fixture_setup(
fixturedef: FixtureDef[object], request: SubRequest
) -> Optional[object]:
# Will return a dummy fixture if the setuponly option is provided.
if request.config.option.setupplan:
my_cache_key = fixturedef.cache_key(request)
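        # cached_result stores (result, cache key, captured exception info); filling it
        # with a dummy entry marks the fixture as already set up, so no real fixture
        # code runs when only the setup plan is being shown.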
fixturedef.cached_result = (None, my_cache_key, None)
return fixturedef.cached_result
return None
@pytest.hookimpl(tryfirst=True)
def pytest_cmdline_main(config: Config) -> Optional[Union[int, ExitCode]]:
if config.option.setupplan:
config.option.setuponly = True
config.option.setupshow = True
return None
|
the-stack_0_11967 | """HTML slide show Exporter class"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from IPython.utils.traitlets import Unicode
from IPython.nbconvert import preprocessors
from IPython.config import Config
from .html import HTMLExporter
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class SlidesExporter(HTMLExporter):
"""Exports HTML slides with reveal.js"""
file_extension = Unicode(
'slides.html', config=True,
help="Extension of the file that should be written to disk"
)
output_mimetype = 'text/html'
default_template = Unicode('reveal', config=True, help="""Template of the
data format to use. I.E. 'reveal'""")
@property
def default_config(self):
c = Config({
'RevealHelpPreprocessor': {
'enabled': True,
},
})
c.merge(super(SlidesExporter,self).default_config)
return c
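# A minimal usage sketch (the notebook and output file names are illustrative only):
#
#   exporter = SlidesExporter()
#   (body, resources) = exporter.from_filename("notebook.ipynb")
#   with open("notebook.slides.html", "w") as f:
#       f.write(body)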
|
the-stack_0_11970 | import os
from collections import OrderedDict
from itertools import chain
import torch
from torch import nn as nn
from models.alexnet import Id
from models.model_utils import ReverseLayerF
from torch.autograd import Variable
import numpy.random as npr
import numpy as np
import torch.nn.functional as F
import random
class AlexNetCaffe(nn.Module):
def __init__(self, jigsaw_classes=1000, n_classes=100, domains=3, dropout=True):
super(AlexNetCaffe, self).__init__()
print("Using Caffe AlexNet")
self.features = nn.Sequential(OrderedDict([
("conv1", nn.Conv2d(3, 96, kernel_size=11, stride=4)),
("relu1", nn.ReLU(inplace=True)),
("pool1", nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)),
("norm1", nn.LocalResponseNorm(5, 1.e-4, 0.75)),
("conv2", nn.Conv2d(96, 256, kernel_size=5, padding=2, groups=2)),
("relu2", nn.ReLU(inplace=True)),
("pool2", nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)),
("norm2", nn.LocalResponseNorm(5, 1.e-4, 0.75)),
("conv3", nn.Conv2d(256, 384, kernel_size=3, padding=1)),
("relu3", nn.ReLU(inplace=True)),
("conv4", nn.Conv2d(384, 384, kernel_size=3, padding=1, groups=2)),
("relu4", nn.ReLU(inplace=True)),
("conv5", nn.Conv2d(384, 256, kernel_size=3, padding=1, groups=2)),
("relu5", nn.ReLU(inplace=True)),
("pool5", nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)),
]))
self.classifier = nn.Sequential(OrderedDict([
("fc6", nn.Linear(256 * 6 * 6, 4096)),
("relu6", nn.ReLU(inplace=True)),
("drop6", nn.Dropout() if dropout else Id()),
("fc7", nn.Linear(4096, 4096)),
("relu7", nn.ReLU(inplace=True)),
("drop7", nn.Dropout() if dropout else Id())]))
self.jigsaw_classifier = nn.Linear(4096, jigsaw_classes)
self.class_classifier = nn.Linear(4096, n_classes)
# self.domain_classifier = nn.Sequential(
# nn.Linear(256 * 6 * 6, 1024),
# nn.ReLU(),
# nn.Dropout(),
# nn.Linear(1024, 1024),
# nn.ReLU(),
# nn.Dropout(),
# nn.Linear(1024, domains))
def get_params(self, base_lr):
return [{"params": self.features.parameters(), "lr": 0.},
{"params": chain(self.classifier.parameters(), self.jigsaw_classifier.parameters()
, self.class_classifier.parameters()#, self.domain_classifier.parameters()
), "lr": base_lr}]
def is_patch_based(self):
return False
def forward(self, x, lambda_val=0):
x = self.features(x*57.6) #57.6 is the magic number needed to bring torch data back to the range of caffe data, based on used std
x = x.view(x.size(0), -1)
#d = ReverseLayerF.apply(x, lambda_val)
x = self.classifier(x)
return self.jigsaw_classifier(x), self.class_classifier(x)#, self.domain_classifier(d)
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
class AlexNetCaffeAvgPool(AlexNetCaffe):
def __init__(self, jigsaw_classes=1000, n_classes=100):
super().__init__()
print("Global Average Pool variant")
self.features = nn.Sequential(OrderedDict([
("conv1", nn.Conv2d(3, 96, kernel_size=11, stride=4)),
("relu1", nn.ReLU(inplace=True)),
("pool1", nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)),
("norm1", nn.LocalResponseNorm(5, 1.e-4, 0.75)),
("conv2", nn.Conv2d(96, 256, kernel_size=5, padding=2, groups=2)),
("relu2", nn.ReLU(inplace=True)),
("pool2", nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)),
("norm2", nn.LocalResponseNorm(5, 1.e-4, 0.75)),
("conv3", nn.Conv2d(256, 384, kernel_size=3, padding=1)),
("relu3", nn.ReLU(inplace=True)),
("conv4", nn.Conv2d(384, 384, kernel_size=3, padding=1, groups=2)),
("relu4", nn.ReLU(inplace=True)),
("conv5", nn.Conv2d(384, 256, kernel_size=3, padding=1, groups=2)),
# ("relu5", nn.ReLU(inplace=True)),
# ("pool5", nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)),
]))
self.classifier = nn.Sequential(
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, 512, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.Conv2d(512, 1024, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(inplace=True))
self.jigsaw_classifier = nn.Sequential(
nn.Conv2d(1024, 128, kernel_size=3, stride=2, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
Flatten(),
nn.Linear(128 * 6 * 6, jigsaw_classes)
)
self.class_classifier = nn.Sequential(
nn.Conv2d(1024, n_classes, kernel_size=3, padding=1, bias=False),
nn.AvgPool2d(13),
Flatten(),
# nn.Linear(1024, n_classes)
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
nn.init.constant_(m.bias, 0.)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
class AlexNetCaffeFC7(AlexNetCaffe):
def __init__(self, jigsaw_classes=1000, n_classes=100, dropout=True):
super(AlexNetCaffeFC7, self).__init__()
print("FC7 branching variant")
self.features = nn.Sequential(OrderedDict([
("conv1", nn.Conv2d(3, 96, kernel_size=11, stride=4)),
("relu1", nn.ReLU(inplace=True)),
("pool1", nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)),
("norm1", nn.LocalResponseNorm(5, 1.e-4, 0.75)),
("conv2", nn.Conv2d(96, 256, kernel_size=5, padding=2, groups=2)),
("relu2", nn.ReLU(inplace=True)),
("pool2", nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)),
("norm2", nn.LocalResponseNorm(5, 1.e-4, 0.75)),
("conv3", nn.Conv2d(256, 384, kernel_size=3, padding=1)),
("relu3", nn.ReLU(inplace=True)),
("conv4", nn.Conv2d(384, 384, kernel_size=3, padding=1, groups=2)),
("relu4", nn.ReLU(inplace=True)),
("conv5", nn.Conv2d(384, 256, kernel_size=3, padding=1, groups=2)),
("relu5", nn.ReLU(inplace=True)),
("pool5", nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)),
]))
self.classifier = nn.Sequential(OrderedDict([
("fc6", nn.Linear(256 * 6 * 6, 4096)),
("relu6", nn.ReLU(inplace=True)),
("drop6", nn.Dropout() if dropout else Id())]))
self.jigsaw_classifier = nn.Sequential(OrderedDict([
("fc7", nn.Linear(4096, 4096)),
("relu7", nn.ReLU(inplace=True)),
("drop7", nn.Dropout()),
("fc8", nn.Linear(4096, jigsaw_classes))]))
self.class_classifier = nn.Sequential(OrderedDict([
("fc7", nn.Linear(4096, 4096)),
("relu7", nn.ReLU(inplace=True)),
("drop7", nn.Dropout()),
("fc8", nn.Linear(4096, n_classes))]))
def caffenet(jigsaw_classes, classes):
model = AlexNetCaffe(jigsaw_classes, classes)
for m in model.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight, .1)
nn.init.constant_(m.bias, 0.)
state_dict = torch.load(os.path.join(os.path.dirname(__file__), "pretrained/alexnet_caffe.pth.tar"))
del state_dict["classifier.fc8.weight"]
del state_dict["classifier.fc8.bias"]
model.load_state_dict(state_dict, strict=False)
return model
def caffenet_gap(jigsaw_classes, classes):
model = AlexNetCaffe(jigsaw_classes, classes)
state_dict = torch.load(os.path.join(os.path.dirname(__file__), "pretrained/alexnet_caffe.pth.tar"))
del state_dict["classifier.fc6.weight"]
del state_dict["classifier.fc6.bias"]
del state_dict["classifier.fc7.weight"]
del state_dict["classifier.fc7.bias"]
del state_dict["classifier.fc8.weight"]
del state_dict["classifier.fc8.bias"]
model.load_state_dict(state_dict, strict=False)
# weights are initialized in the constructor
return model
def caffenet_fc7(jigsaw_classes, classes):
model = AlexNetCaffeFC7(jigsaw_classes, classes)
state_dict = torch.load("models/pretrained/alexnet_caffe.pth.tar")
state_dict["jigsaw_classifier.fc7.weight"] = state_dict["classifier.fc7.weight"]
state_dict["jigsaw_classifier.fc7.bias"] = state_dict["classifier.fc7.bias"]
state_dict["class_classifier.fc7.weight"] = state_dict["classifier.fc7.weight"]
state_dict["class_classifier.fc7.bias"] = state_dict["classifier.fc7.bias"]
del state_dict["classifier.fc8.weight"]
del state_dict["classifier.fc8.bias"]
del state_dict["classifier.fc7.weight"]
del state_dict["classifier.fc7.bias"]
model.load_state_dict(state_dict, strict=False)
nn.init.xavier_uniform_(model.jigsaw_classifier.fc8.weight, .1)
nn.init.constant_(model.jigsaw_classifier.fc8.bias, 0.)
nn.init.xavier_uniform_(model.class_classifier.fc8.weight, .1)
nn.init.constant_(model.class_classifier.fc8.bias, 0.)
return model
class AlexNetCaffeRSC(nn.Module):
def __init__(self, n_classes=100, percent=6, dropout=True):
super(AlexNetCaffeRSC, self).__init__()
print("Using Caffe AlexNet")
self.percent = percent
print("Using Total Percent Sample: 1 / {}".format(self.percent))
self.features = nn.Sequential(OrderedDict([
("conv1", nn.Conv2d(3, 96, kernel_size=11, stride=4)),
("relu1", nn.ReLU(inplace=True)),
("pool1", nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)),
("norm1", nn.LocalResponseNorm(5, 1.e-4, 0.75)),
("conv2", nn.Conv2d(96, 256, kernel_size=5, padding=2, groups=2)),
("relu2", nn.ReLU(inplace=True)),
("pool2", nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)),
("norm2", nn.LocalResponseNorm(5, 1.e-4, 0.75)),
("conv3", nn.Conv2d(256, 384, kernel_size=3, padding=1)),
("relu3", nn.ReLU(inplace=True)),
("conv4", nn.Conv2d(384, 384, kernel_size=3, padding=1, groups=2)),
("relu4", nn.ReLU(inplace=True)),
("conv5", nn.Conv2d(384, 256, kernel_size=3, padding=1, groups=2)),
("relu5", nn.ReLU(inplace=True)),
("pool5", nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)),
]))
self.classifier = nn.Sequential(OrderedDict([
("fc6", nn.Linear(256 * 6 * 6, 4096)),
("relu6", nn.ReLU(inplace=True)),
("drop6", nn.Dropout() if dropout else Id()),
("fc7", nn.Linear(4096, 4096)),
("relu7", nn.ReLU(inplace=True)),
("drop7", nn.Dropout() if dropout else Id())]))
# self.jigsaw_classifier = nn.Linear(4096, jigsaw_classes)
self.class_classifier = nn.Linear(4096, n_classes)
# self.domain_classifier = nn.Sequential(
# nn.Linear(256 * 6 * 6, 1024),
# nn.ReLU(),
# nn.Dropout(),
# nn.Linear(1024, 1024),
# nn.ReLU(),
# nn.Dropout(),
# nn.Linear(1024, domains))
# def get_params(self, base_lr):
# return [{"params": self.features.parameters(), "lr": 0.},
# {"params": chain(self.classifier.parameters()
# , self.class_classifier.parameters()#, self.domain_classifier.parameters()
# ), "lr": base_lr}]
def is_patch_based(self):
return False
def forward(self, x, gt=None, flag=None):
# x = self.features(x*57.6) #57.6 is the magic number needed to bring torch data back to the range of caffe data, based on used std
# x = x.view(x.size(0), -1)
# #d = ReverseLayerF.apply(x, lambda_val)
# x = self.classifier(x)
# return self.class_classifier(x)#, self.domain_classifier(d)
# -------------------------------------------------------------------
x = self.features(x * 57.6)
# x = self.features.conv1(x * 57.6)
# x = self.features.relu1(x)
# x = self.features.pool1(x)
# x = self.features.norm1(x)
# x = self.features.conv2(x)
# x = self.features.relu2(x)
# x = self.features.pool2(x)
# x = self.features.norm2(x)
# x = self.features.conv3(x)
# x = self.features.relu3(x)
# x = self.features.conv4(x)
# x = self.features.relu4(x)
# x = self.features.conv5(x)
# x = self.features.relu5(x)
# x = self.features.pool5(x)
if flag:
self.eval()
x_new = x.clone().detach()
# x_new = self.features.conv4(x_new)
# x_new = self.features.relu4(x_new)
# x_new = self.features.conv5(x_new)
# x_new = self.features.relu5(x_new)
# x_new = self.features.pool5(x_new)
x_new = Variable(x_new.data, requires_grad=True)
x_new_view = x_new.view(x_new.size(0), -1)
x_new_view = self.classifier(x_new_view)
output = self.class_classifier(x_new_view)
class_num = output.shape[1]
index = gt
num_rois = x_new.shape[0]
num_channel = x_new.shape[1]
H = x_new.shape[2]
HW = x_new.shape[2] * x_new.shape[3]
one_hot = torch.zeros((1), dtype=torch.float32).cuda()
one_hot = Variable(one_hot, requires_grad=False)
sp_i = torch.ones([2, num_rois]).long()
sp_i[0, :] = torch.arange(num_rois)
sp_i[1, :] = index
sp_v = torch.ones([num_rois])
one_hot_sparse = torch.sparse.FloatTensor(sp_i, sp_v, torch.Size([num_rois, class_num])).to_dense().cuda()
one_hot_sparse = Variable(one_hot_sparse, requires_grad=False) # [256, 21]
one_hot = torch.sum(output * one_hot_sparse)
self.zero_grad()
one_hot.backward()
grads_val = x_new.grad.clone().detach()
grad_channel_mean = torch.mean(grads_val.view(num_rois, num_channel, -1), dim=2)
channel_mean = grad_channel_mean
spatial_mean = torch.mean(grads_val, dim=1)
spatial_mean = spatial_mean.view(num_rois, H, H).view(num_rois, HW)
self.zero_grad()
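            # Randomly pick one of two self-challenging modes with equal probability:
            # drop roughly 1/3 of the spatial locations with the highest gradients
            # (choose_one <= 4 below) or roughly 1/3 of the channels with the highest
            # mean gradients (the else branch).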
choose_one = random.randint(0, 9)
if choose_one <= 4:
# ---------------------------- spatial -----------------------
spatial_drop_num = int(HW * 1 / 3.0)
th_mask_value = torch.sort(spatial_mean, dim=1, descending=True)[0][:, spatial_drop_num]
th_mask_value = th_mask_value.view(num_rois, 1).expand(num_rois, HW)
mask_all_cuda = torch.where(spatial_mean >= th_mask_value, torch.zeros(spatial_mean.shape).cuda(),
torch.ones(spatial_mean.shape).cuda())
mask_all = mask_all_cuda.detach().cpu().numpy()
for q in range(num_rois):
mask_all_temp = np.ones((HW), dtype=np.float32)
zero_index = np.where(mask_all[q, :] == 0)[0]
num_zero_index = zero_index.size
if num_zero_index >= spatial_drop_num:
dumy_index = npr.choice(zero_index, size=spatial_drop_num, replace=False)
else:
zero_index = np.arange(HW)
dumy_index = npr.choice(zero_index, size=spatial_drop_num, replace=False)
mask_all_temp[dumy_index] = 0
mask_all[q, :] = mask_all_temp
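                # NOTE: the reshape below hard-codes a 7x7 feature map; it only works
                # when pool5 actually yields H == 7, otherwise H should be used here.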
mask_all = torch.from_numpy(mask_all.reshape(num_rois, 7, 7)).cuda()
mask_all = mask_all.view(num_rois, 1, 7, 7)
else:
# -------------------------- channel ----------------------------
mask_all = torch.zeros((num_rois, num_channel, 1, 1)).cuda()
vector_thresh_percent = int(num_channel * 1 / 3.0)
vector_thresh_value = torch.sort(channel_mean, dim=1, descending=True)[0][:, vector_thresh_percent]
vector_thresh_value = vector_thresh_value.view(num_rois, 1).expand(num_rois, num_channel)
vector = torch.where(channel_mean > vector_thresh_value,
torch.zeros(channel_mean.shape).cuda(),
torch.ones(channel_mean.shape).cuda())
vector_all = vector.detach().cpu().numpy()
channel_drop_num = int(num_channel * 1 / 3.2)
vector_all_new = np.ones((num_rois, num_channel), dtype=np.float32)
for q in range(num_rois):
vector_all_temp = np.ones((num_channel), dtype=np.float32)
zero_index = np.where(vector_all[q, :] == 0)[0]
num_zero_index = zero_index.size
if num_zero_index >= channel_drop_num:
dumy_index = npr.choice(zero_index, size=channel_drop_num, replace=False)
else:
zero_index = np.arange(num_channel)
dumy_index = npr.choice(zero_index, size=channel_drop_num, replace=False)
vector_all_temp[dumy_index] = 0
vector_all_new[q, :] = vector_all_temp
vector = torch.from_numpy(vector_all_new).cuda()
for m in range(num_rois):
index_channel = vector[m, :].nonzero()[:, 0].long()
index_channel = index_channel.detach().cpu().numpy().tolist()
mask_all[m, index_channel, :, :] = 1
# ----------------------------------- batch ----------------------------------------
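            # Re-run the classifier with the masks applied and compare the ground-truth
            # class probability before vs. after; only the ~1/3 of samples whose
            # probability dropped the most keep their masks, every other sample gets an
            # all-ones mask (no feature suppression).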
cls_prob_before = F.softmax(output, dim=1)
x_new_view_after = x_new * mask_all
x_new_view_after = x_new_view_after.view(x_new_view_after.size(0), -1)
x_new_view_after = self.classifier(x_new_view_after)
x_new_view_after = self.class_classifier(x_new_view_after)
cls_prob_after = F.softmax(x_new_view_after, dim=1)
sp_i = torch.ones([2, num_rois]).long()
sp_i[0, :] = torch.arange(num_rois)
sp_i[1, :] = index
sp_v = torch.ones([num_rois])
one_hot_sparse = torch.sparse.FloatTensor(sp_i, sp_v, torch.Size([num_rois, class_num])).to_dense().cuda()
before_vector = torch.sum(one_hot_sparse * cls_prob_before, dim=1)
after_vector = torch.sum(one_hot_sparse * cls_prob_after, dim=1)
change_vector = before_vector - after_vector - 0.0001
change_vector = torch.where(change_vector > 0, change_vector, torch.zeros(change_vector.shape).cuda())
th_fg_value = torch.sort(change_vector, dim=0, descending=True)[0][int(round(float(num_rois) * 1 / 3.0))]
drop_index_fg = change_vector.gt(th_fg_value)
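            # NOTE: `1 - drop_index_fg` below relies on the comparison returning a
            # uint8 tensor (PyTorch < 1.2); on newer versions the mask is bool and
            # `~drop_index_fg` would be needed instead.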
ignore_index_fg = 1 - drop_index_fg
not_01_ignore_index_fg = ignore_index_fg.nonzero()[:, 0]
mask_all[not_01_ignore_index_fg.long(), :] = 1
self.train()
mask_all = Variable(mask_all, requires_grad=True)
x = x * mask_all
x = x.view(x.size(0), -1)
x = self.classifier(x)
return self.class_classifier(x) # , self.domain_classifier(d)
def caffenetRSC(classes, percent):
model = AlexNetCaffeRSC(classes, percent)
for m in model.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight, .1)
nn.init.constant_(m.bias, 0.)
state_dict = torch.load(os.path.join(os.path.dirname(__file__), "pretrained/alexnet_caffe.pth.tar"))
del state_dict["classifier.fc8.weight"]
del state_dict["classifier.fc8.bias"]
model.load_state_dict(state_dict, strict=False)
return model
|
the-stack_0_11972 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Chris Caron <[email protected]>
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import
from __future__ import print_function
from .NotifyBase import NotifyBase
from ..common import NotifyImageSize
from ..common import NotifyType
from ..utils import parse_bool
from ..AppriseLocale import gettext_lazy as _
# Default our global support flag
NOTIFY_GNOME_SUPPORT_ENABLED = False
try:
# 3rd party modules (Gnome Only)
import gi
# require_version() call is required otherwise we generate a warning
gi.require_version("Notify", "0.7")
# We can import the actual libraries we care about now:
from gi.repository import Notify
from gi.repository import GdkPixbuf
# We're good to go!
NOTIFY_GNOME_SUPPORT_ENABLED = True
except (ImportError, ValueError, AttributeError):
# No problem; we just simply can't support this plugin; we could
# be in microsoft windows, or we just don't have the python-gobject
# library available to us (or maybe one we don't support)?
    # Alternatively, a ValueError will get thrown upon calling
# gi.require_version() if the requested Notify namespace isn't available
pass
# Urgencies
class GnomeUrgency(object):
LOW = 0
NORMAL = 1
HIGH = 2
GNOME_URGENCIES = {
GnomeUrgency.LOW: 'low',
GnomeUrgency.NORMAL: 'normal',
GnomeUrgency.HIGH: 'high',
}
GNOME_URGENCY_MAP = {
# Maps against string 'low'
'l': GnomeUrgency.LOW,
# Maps against string 'moderate'
'm': GnomeUrgency.LOW,
# Maps against string 'normal'
'n': GnomeUrgency.NORMAL,
# Maps against string 'high'
'h': GnomeUrgency.HIGH,
# Maps against string 'emergency'
'e': GnomeUrgency.HIGH,
# Entries to additionally support (so more like Gnome's API)
'0': GnomeUrgency.LOW,
'1': GnomeUrgency.NORMAL,
'2': GnomeUrgency.HIGH,
}
class NotifyGnome(NotifyBase):
"""
A wrapper for local Gnome Notifications
"""
# Set our global enabled flag
enabled = NOTIFY_GNOME_SUPPORT_ENABLED
requirements = {
# Define our required packaging in order to work
'details': _('A local Gnome environment is required.')
}
# The default descriptive name associated with the Notification
service_name = _('Gnome Notification')
# The service URL
service_url = 'https://www.gnome.org/'
# The default protocol
protocol = 'gnome'
# A URL that takes you to the setup/help of the specific protocol
setup_url = 'https://github.com/caronc/apprise/wiki/Notify_gnome'
# Allows the user to specify the NotifyImageSize object
image_size = NotifyImageSize.XY_128
# Disable throttle rate for Gnome requests since they are normally
# local anyway
request_rate_per_sec = 0
    # Limit results to just the first 10 lines otherwise there is just too much
# content to display
body_max_line_count = 10
# A title can not be used for Gnome Messages. Setting this to zero will
# cause any title (if defined) to get placed into the message body.
title_maxlen = 0
# Define object templates
templates = (
'{schema}://',
)
# Define our template arguments
template_args = dict(NotifyBase.template_args, **{
'urgency': {
'name': _('Urgency'),
'type': 'choice:int',
'values': GNOME_URGENCIES,
'default': GnomeUrgency.NORMAL,
},
'priority': {
# Apprise uses 'priority' everywhere; it's just a nice consistent
# feel to be able to use it here as well. Just map the
# value back to 'priority'
'alias_of': 'urgency',
},
'image': {
'name': _('Include Image'),
'type': 'bool',
'default': True,
'map_to': 'include_image',
},
})
def __init__(self, urgency=None, include_image=True, **kwargs):
"""
Initialize Gnome Object
"""
super(NotifyGnome, self).__init__(**kwargs)
# The urgency of the message
self.urgency = int(
NotifyGnome.template_args['urgency']['default']
if urgency is None else
next((
v for k, v in GNOME_URGENCY_MAP.items()
if str(urgency).lower().startswith(k)),
NotifyGnome.template_args['urgency']['default']))
# Track whether or not we want to send an image with our notification
# or not.
self.include_image = include_image
return
def send(self, body, title='', notify_type=NotifyType.INFO, **kwargs):
"""
Perform Gnome Notification
"""
try:
# App initialization
Notify.init(self.app_id)
# image path
icon_path = None if not self.include_image \
else self.image_path(notify_type, extension='.ico')
# Build message body
notification = Notify.Notification.new(body)
# Assign urgency
notification.set_urgency(self.urgency)
# Always call throttle before any remote server i/o is made
self.throttle()
if icon_path:
try:
# Use Pixbuf to create the proper image type
image = GdkPixbuf.Pixbuf.new_from_file(icon_path)
# Associate our image to our notification
notification.set_icon_from_pixbuf(image)
notification.set_image_from_pixbuf(image)
except Exception as e:
self.logger.warning(
"Could not load Gnome notification icon ({}): {}"
.format(icon_path, e))
notification.show()
self.logger.info('Sent Gnome notification.')
except Exception:
self.logger.warning('Failed to send Gnome notification.')
self.logger.exception('Gnome Exception')
return False
return True
def url(self, privacy=False, *args, **kwargs):
"""
Returns the URL built dynamically based on specified arguments.
"""
# Define any URL parameters
params = {
'image': 'yes' if self.include_image else 'no',
'urgency':
GNOME_URGENCIES[self.template_args['urgency']['default']]
if self.urgency not in GNOME_URGENCIES
else GNOME_URGENCIES[self.urgency],
}
# Extend our parameters
params.update(self.url_parameters(privacy=privacy, *args, **kwargs))
return '{schema}://?{params}'.format(
schema=self.protocol,
params=NotifyGnome.urlencode(params),
)
@staticmethod
def parse_url(url):
"""
        There are no parameters necessary for this protocol; simply having
gnome:// is all you need. This function just makes sure that
is in place.
"""
results = NotifyBase.parse_url(url, verify_host=False)
# Include images with our message
results['include_image'] = \
parse_bool(results['qsd'].get('image', True))
        # Gnome supports urgency, but we also support the keyword priority
# so that it is consistent with some of the other plugins
if 'priority' in results['qsd'] and len(results['qsd']['priority']):
# We intentionally store the priority in the urgency section
results['urgency'] = \
NotifyGnome.unquote(results['qsd']['priority'])
if 'urgency' in results['qsd'] and len(results['qsd']['urgency']):
results['urgency'] = \
NotifyGnome.unquote(results['qsd']['urgency'])
return results
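# A minimal usage sketch (assumes the apprise package and a local Gnome session):
#
#   import apprise
#   apobj = apprise.Apprise()
#   apobj.add('gnome://?urgency=high&image=yes')
#   apobj.notify(body='Hello from Apprise')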
|
the-stack_0_11975 | """
Given two binary strings, return their sum (also a binary string).
The input strings are both non-empty and contains only characters 1 or 0.
Example 1:
Input: a = "11", b = "1"
Output: "100"
Example 2:
Input: a = "1010", b = "1011"
Output: "10101"
"""
# 2018-6-23
# Add Binary
class Solution:
def addBinary(self, a, b):
"""
:type a: str
:type b: str
:rtype: str
"""
res = ''
carry = '0'
i = 0
lena = len(a)
lenb = len(b)
while i < max(lena, lenb) or carry == '1':
aa = a[-1 - i] if i < lena else '0'
bb = b[-1 - i] if i < lenb else '0'
sums = int(aa) + int(bb) + int(carry)
res = str(sums%2) + res
carry = '1' if sums//2 > 0 else '0'
i += 1
return res
# test
a = "1010"
b = "1011"
test = Solution()
res = test.addBinary(a,b)
print(res) |
the-stack_0_11977 | # Copyright (C) 2002, Thomas Hamelryck ([email protected])
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Map residues of two structures to each other based on a FASTA alignment."""
from __future__ import print_function
from Bio.Data import SCOPData
from Bio.PDB import Selection
from Bio.PDB.Polypeptide import is_aa
class StructureAlignment(object):
"""Class to align two structures based on an alignment of their sequences."""
def __init__(self, fasta_align, m1, m2, si=0, sj=1):
"""Initialize.
Attributes:
- fasta_align - Alignment object
- m1, m2 - two models
- si, sj - the sequences in the Alignment object that
correspond to the structures
"""
length = fasta_align.get_alignment_length()
# Get the residues in the models
rl1 = Selection.unfold_entities(m1, "R")
rl2 = Selection.unfold_entities(m2, "R")
# Residue positions
p1 = 0
p2 = 0
# Map equivalent residues to each other
map12 = {}
map21 = {}
# List of residue pairs (None if -)
duos = []
for i in range(length):
column = fasta_align[:, i]
aa1 = column[si]
aa2 = column[sj]
if aa1 != "-":
# Position in seq1 is not -
while True:
# Loop until an aa is found
r1 = rl1[p1]
p1 = p1 + 1
if is_aa(r1):
break
self._test_equivalence(r1, aa1)
else:
r1 = None
if aa2 != "-":
# Position in seq2 is not -
while True:
# Loop until an aa is found
r2 = rl2[p2]
p2 = p2 + 1
if is_aa(r2):
break
self._test_equivalence(r2, aa2)
else:
r2 = None
if r1:
# Map residue in seq1 to its equivalent in seq2
map12[r1] = r2
if r2:
# Map residue in seq2 to its equivalent in seq1
map21[r2] = r1
# Append aligned pair (r is None if gap)
duos.append((r1, r2))
self.map12 = map12
self.map21 = map21
self.duos = duos
def _test_equivalence(self, r1, aa1):
"""Test if aa in sequence fits aa in structure (PRIVATE)."""
resname = r1.get_resname()
resname = SCOPData.protein_letters_3to1[resname]
assert(aa1 == resname)
def get_maps(self):
"""Map residues between the structures.
Return two dictionaries that map a residue in one structure to
        the equivalent residue in the other structure.
"""
return self.map12, self.map21
def get_iterator(self):
"""Create an iterator over all residue pairs."""
        for duo in self.duos:
            yield duo
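# A minimal usage sketch (file names and model selection are illustrative only):
#
#   from Bio import AlignIO
#   from Bio.PDB import PDBParser
#
#   fasta_align = AlignIO.read("alignment.fasta", "fasta")
#   parser = PDBParser(QUIET=True)
#   m1 = parser.get_structure("s1", "structure1.pdb")[0]
#   m2 = parser.get_structure("s2", "structure2.pdb")[0]
#   aligned = StructureAlignment(fasta_align, m1, m2)
#   map12, map21 = aligned.get_maps()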
|
the-stack_0_11980 | # Copyright 2019 The Dreamer Authors. Copyright 2020 Plan2Explore Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from plan2explore import tools
def cross_entropy_method(
cell, objective, state, obs_shape, action_shape, horizon, graph,
beams=1000, topk=100, iterations=10, min_action=-1, max_action=1):
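  # Cross-entropy method over action sequences: sample `beams` candidate sequences of
  # length `horizon` from a diagonal Gaussian belief, roll them out through the
  # recurrent `cell`, score the resulting states with `objective`, refit the Gaussian
  # to the `topk` best candidates, and repeat for `iterations` rounds.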
obs_shape, action_shape = tuple(obs_shape), tuple(action_shape)
batch = tools.shape(tools.nested.flatten(state)[0])[0]
initial_state = tools.nested.map(lambda tensor: tf.tile(
tensor, [beams] + [1] * (tensor.shape.ndims - 1)), state)
extended_batch = tools.shape(tools.nested.flatten(initial_state)[0])[0]
use_obs = tf.zeros([extended_batch, horizon, 1], tf.bool)
obs = tf.zeros((extended_batch, horizon) + obs_shape)
def iteration(index, mean, stddev):
# Sample action proposals from belief.
normal = tf.random_normal((batch, beams, horizon) + action_shape)
action = normal * stddev[:, None] + mean[:, None]
action = tf.clip_by_value(action, min_action, max_action)
# Evaluate proposal actions.
action = tf.reshape(
action, (extended_batch, horizon) + action_shape)
(_, state), _ = tf.nn.dynamic_rnn(
cell, (0 * obs, action, use_obs), initial_state=initial_state)
return_ = objective(state)
return_ = tf.reshape(return_, (batch, beams))
# Re-fit belief to the best ones.
_, indices = tf.nn.top_k(return_, topk, sorted=False)
indices += tf.range(batch)[:, None] * beams
best_actions = tf.gather(action, indices)
mean, variance = tf.nn.moments(best_actions, 1)
stddev = tf.sqrt(variance + 1e-6)
return index + 1, mean, stddev
mean = tf.zeros((batch, horizon) + action_shape)
stddev = tf.ones((batch, horizon) + action_shape)
_, mean, std = tf.while_loop(
lambda index, mean, stddev: index < iterations, iteration,
(0, mean, stddev), back_prop=False)
return mean
def action_head_policy(
cell, objective, state, obs_shape, action_shape, graph, config, strategy, min_action=-1, max_action=1):
features = cell.features_from_state(state)
policy = graph.heads.action(features)
if strategy == 'sample':
action = policy.sample()
elif strategy == 'mode':
action = policy.mode()
elif strategy == 'curious_sample':
curious_policy = graph.heads.curious_action(features)
action = curious_policy.sample()
elif strategy == 'random_sample':
batch = tools.shape(tools.nested.flatten(features)[0])[0]
mean = tf.zeros((batch,action_shape[0]))
stddev = tf.ones((batch,action_shape[0]))
normal = tf.random_normal((batch,action_shape[0]))
action = normal * stddev + mean
action = tf.clip_by_value(action, min_action, max_action)
else:
raise NotImplementedError(strategy)
plan = action[:, None, :]
return plan
|
the-stack_0_11981 | # -*- coding: utf-8 -*-
# Copyright (C) 2012 Anaconda, Inc
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import absolute_import, division, print_function, unicode_literals
import hashlib
import json
import os
from os.path import abspath, basename, dirname, isdir, isfile, islink, join
import re
import tarfile
import tempfile
from ..auxlib.entity import EntityEncoder
from ..base.constants import CONDA_PACKAGE_EXTENSION_V1
from ..base.context import context
from ..common.compat import PY3
from ..common.path import paths_equal
from ..core.prefix_data import PrefixData
from ..gateways.disk.delete import rmtree
from ..install import PREFIX_PLACEHOLDER
from ..misc import untracked
def remove(prefix, files):
"""
Remove files for a given prefix.
"""
dst_dirs = set()
for f in files:
dst = join(prefix, f)
dst_dirs.add(dirname(dst))
os.unlink(dst)
for path in sorted(dst_dirs, key=len, reverse=True):
try:
os.rmdir(path)
except OSError: # directory might not be empty
pass
def execute(args, parser):
prefix = context.target_prefix
if args.which:
for path in args.which:
for prec in which_package(path):
print('%-50s %s' % (path, prec.dist_str()))
return
print('# prefix:', prefix)
if args.reset:
remove(prefix, untracked(prefix))
return
if args.untracked:
files = sorted(untracked(prefix))
print('# untracked files: %d' % len(files))
for fn in files:
print(fn)
return
make_tarbz2(prefix,
name=args.pkg_name.lower(),
version=args.pkg_version,
build_number=int(args.pkg_build))
def get_installed_version(prefix, name):
for info in PrefixData(prefix).iter_records():
if info['name'] == name:
return str(info['version'])
return None
def create_info(name, version, build_number, requires_py):
d = dict(
name=name,
version=version,
platform=context.platform,
arch=context.arch_name,
build_number=int(build_number),
build=str(build_number),
depends=[],
)
if requires_py:
d['build'] = ('py%d%d_' % requires_py) + d['build']
d['depends'].append('python %d.%d*' % requires_py)
return d
shebang_pat = re.compile(r'^#!.+$', re.M)
def fix_shebang(tmp_dir, path):
    # compare as bytes so the check also works on Python 3
    if open(path, 'rb').read(2) != b'#!':
return False
with open(path) as fi:
data = fi.read()
m = shebang_pat.match(data)
if not (m and 'python' in m.group()):
return False
data = shebang_pat.sub('#!%s/bin/python' % PREFIX_PLACEHOLDER,
data, count=1)
tmp_path = join(tmp_dir, basename(path))
with open(tmp_path, 'w') as fo:
fo.write(data)
os.chmod(tmp_path, int('755', 8))
return True
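# Illustrative example (hypothetical path, not part of the module): if a script
# starts with "#!/opt/myenv/bin/python", fix_shebang() writes a copy into
# `tmp_dir` whose first line becomes "#!<PREFIX_PLACEHOLDER>/bin/python", so the
# interpreter path can be rewritten to the real prefix at install time.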
def _add_info_dir(t, tmp_dir, files, has_prefix, info):
info_dir = join(tmp_dir, 'info')
os.mkdir(info_dir)
with open(join(info_dir, 'files'), 'w') as fo:
for f in files:
fo.write(f + '\n')
with open(join(info_dir, 'index.json'), 'w') as fo:
json.dump(info, fo, indent=2, sort_keys=True, cls=EntityEncoder)
if has_prefix:
with open(join(info_dir, 'has_prefix'), 'w') as fo:
for f in has_prefix:
fo.write(f + '\n')
for fn in os.listdir(info_dir):
t.add(join(info_dir, fn), 'info/' + fn)
def create_conda_pkg(prefix, files, info, tar_path, update_info=None):
"""
    create a conda package at `tar_path` from `files` (paths relative to
    `prefix`) together with the `info` metadata, and return a list of warning strings
"""
files = sorted(files)
warnings = []
has_prefix = []
tmp_dir = tempfile.mkdtemp()
t = tarfile.open(tar_path, 'w:bz2')
h = hashlib.new('sha1')
for f in files:
assert not (f.startswith('/') or f.endswith('/') or '\\' in f or f == ''), f
path = join(prefix, f)
if f.startswith('bin/') and fix_shebang(tmp_dir, path):
path = join(tmp_dir, basename(path))
has_prefix.append(f)
t.add(path, f)
h.update(f.encode('utf-8'))
h.update(b'\x00')
if islink(path):
link = os.readlink(path)
if PY3 and isinstance(link, str):
h.update(bytes(link, 'utf-8'))
else:
h.update(link)
if link.startswith('/'):
warnings.append('found symlink to absolute path: %s -> %s' %
(f, link))
elif isfile(path):
h.update(open(path, 'rb').read())
if path.endswith('.egg-link'):
warnings.append('found egg link: %s' % f)
info['file_hash'] = h.hexdigest()
if update_info:
update_info(info)
_add_info_dir(t, tmp_dir, files, has_prefix, info)
t.close()
rmtree(tmp_dir)
return warnings
def make_tarbz2(prefix, name='unknown', version='0.0', build_number=0,
files=None):
if files is None:
files = untracked(prefix)
print("# files: %d" % len(files))
if len(files) == 0:
print("# failed: nothing to do")
return None
if any('/site-packages/' in f for f in files):
python_version = get_installed_version(prefix, 'python')
assert python_version is not None
        # split before slicing so two-digit minor versions (e.g. 3.10) parse correctly
        requires_py = tuple(int(x) for x in python_version.split('.')[:2])
else:
requires_py = False
info = create_info(name, version, build_number, requires_py)
tarbz2_fn = ('%(name)s-%(version)s-%(build)s' % info) + CONDA_PACKAGE_EXTENSION_V1
create_conda_pkg(prefix, files, info, tarbz2_fn)
print('# success')
print(tarbz2_fn)
return tarbz2_fn
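# Illustrative usage sketch (hypothetical prefix and package name):
#
#     make_tarbz2('/opt/envs/scratch', name='mytool', version='0.1',
#                 build_number=0)
#
# would bundle the untracked files of that environment into a tarball such as
# 'mytool-0.1-0.tar.bz2' in the current working directory and return the file
# name (the build string gains a pyXY prefix when site-packages files are
# included), or return None if there is nothing to package.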
def which_package(path):
"""
given the path (of a (presumably) conda installed file) iterate over
the conda packages the file came from. Usually the iteration yields
only one package.
"""
path = abspath(path)
prefix = which_prefix(path)
if prefix is None:
from ..exceptions import CondaVerificationError
raise CondaVerificationError("could not determine conda prefix from: %s" % path)
for prec in PrefixData(prefix).iter_records():
if any(paths_equal(join(prefix, f), path) for f in prec['files'] or ()):
yield prec
def which_prefix(path):
"""
given the path (to a (presumably) conda installed file) return the
environment prefix in which the file in located
"""
prefix = abspath(path)
while True:
if isdir(join(prefix, 'conda-meta')):
            # we found it, so let's return it
return prefix
if prefix == dirname(prefix):
# we cannot chop off any more directories, so we didn't find it
return None
prefix = dirname(prefix)
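# Illustrative behaviour (hypothetical layout): for a file installed at
# '/opt/envs/py38/lib/python3.8/site-packages/foo.py', which_prefix() walks up
# the directory tree until it finds a 'conda-meta' directory and returns
# '/opt/envs/py38' (or None if no prefix is found); which_package() then yields
# every package record in that prefix whose 'files' list contains the path.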
|
the-stack_0_11983 | """
Created by: Rob Mulla
Sep 24
IEEE Fraud Detection Model
- FE013
- Yang's Features
    - Raddar's Features
"""
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import sys
import matplotlib.pylab as plt
from sklearn.model_selection import KFold
from datetime import datetime
import time
import logging
from sklearn.metrics import roc_auc_score
from catboost import CatBoostClassifier, Pool
from timeit import default_timer as timer
import lightgbm as lgb
import gc
start = timer()
##################
# PARAMETERS
###################
run_id = "{:%m%d_%H%M}".format(datetime.now())
KERNEL_RUN = False
MODEL_NUMBER = os.path.basename(__file__).split('.')[0]
if KERNEL_RUN:
INPUT_DIR = '../input/champs-scalar-coupling/'
FE_DIR = '../input/molecule-fe024/'
FOLDS_DIR = '../input/champs-3fold-ids/'
TARGET = "isFraud"
N_ESTIMATORS = 100000
N_META_ESTIMATORS = 500000
LEARNING_RATE = 0.005
VERBOSE = 100
EARLY_STOPPING_ROUNDS = 100
RANDOM_STATE = 529
N_THREADS = 58
DEPTH = -1 #14
N_FOLDS = 5
SHUFFLE = False
FE_SET = 'FE013' # Feature Engineering Version
MODEL_TYPE = "lightgbm"
#####################
## SETUP LOGGER
#####################
def get_logger():
"""
credits to: https://www.kaggle.com/ogrellier/user-level-lightgbm-lb-1-4480
"""
os.environ["TZ"] = "US/Eastern"
time.tzset()
FORMAT = "[%(levelname)s]%(asctime)s:%(name)s:%(message)s"
logging.basicConfig(format=FORMAT)
logger = logging.getLogger("main")
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
fhandler = logging.FileHandler(f'../logs/{MODEL_NUMBER}_{run_id}.log')
formatter = logging.Formatter(FORMAT)
handler.setFormatter(formatter)
# logger.addHandler(handler)
logger.addHandler(fhandler)
return logger
logger = get_logger()
logger.info(f'Running for Model Number {MODEL_NUMBER}')
##################
# PARAMETERS
###################
if MODEL_TYPE == 'xgboost':
EVAL_METRIC = "AUC"
elif MODEL_TYPE == 'lightgbm':
EVAL_METRIC = 'auc'
elif MODEL_TYPE == 'catboost':
EVAL_METRIC = "AUC"
##################
# TRACKING FUNCTION
###################
def update_tracking(run_id,
field,
value, csv_file="../tracking/tracking.csv", integer=False, digits=None, drop_incomplete_rows=False):
"""
Function to update the tracking CSV with information about the model
"""
try:
df = pd.read_csv(csv_file, index_col=[0])
except FileNotFoundError:
df = pd.DataFrame()
if integer:
value = round(value)
elif digits is not None:
value = round(value, digits)
if drop_incomplete_rows:
df = df.loc[~df['AUC'].isna()]
df.loc[run_id, field] = value # Model number is index
df.to_csv(csv_file)
update_tracking(run_id, "model_number", MODEL_NUMBER, drop_incomplete_rows=True)
update_tracking(run_id, "n_estimators", N_ESTIMATORS)
update_tracking(run_id, "early_stopping_rounds", EARLY_STOPPING_ROUNDS)
update_tracking(run_id, "random_state", RANDOM_STATE)
update_tracking(run_id, "n_threads", N_THREADS)
update_tracking(run_id, "learning_rate", LEARNING_RATE)
update_tracking(run_id, "n_fold", N_FOLDS)
update_tracking(run_id, "model_type", MODEL_TYPE)
update_tracking(run_id, "eval_metric", EVAL_METRIC)
update_tracking(run_id, "depth", DEPTH)
update_tracking(run_id, "shuffle", SHUFFLE)
update_tracking(run_id, "fe", FE_SET)
#####################
# PREPARE MODEL DATA
#####################
folds = KFold(n_splits=N_FOLDS, random_state=RANDOM_STATE, shuffle=SHUFFLE)
logger.info('Loading Data...')
train_df = pd.read_parquet(f'../data/train_{FE_SET}.parquet')
test_df = pd.read_parquet(f'../data/test_{FE_SET}.parquet')
logger.info('Done loading Data...')
###########
# FEATURES
###########
FEATURES = ['V1max', 'V2max', 'V3max', 'V4max', 'V5max', 'V6max', 'V7max',
'V8max', 'V9max', 'V10max', 'V11max', 'V12max', 'V13max', 'V14max', 'V15max',
'V16max', 'V17max', 'V18max', 'V19max', 'V20max', 'V21max', 'V22max',
'V23max', 'V24max', 'V25max', 'V26max', 'V27max', 'V28max', 'V29max',
'V30max', 'V31max', 'V32max', 'V33max', 'V34max', 'V35max', 'V36max',
'V37max', 'V38max', 'V39max', 'V40max', 'V41max', 'V42max', 'V43max',
'V44max', 'V45max', 'V46max', 'V47max', 'V48max', 'V49max', 'V50max',
'V51max', 'V52max', 'V53max', 'V54max', 'V55max', 'V56max', 'V57max',
'V58max', 'V59max', 'V60max', 'V61max', 'V62max', 'V63max', 'V64max',
'V65max', 'V66max', 'V67max', 'V68max', 'V69max', 'V70max', 'V71max',
'V72max', 'V73max', 'V74max', 'V75max', 'V76max', 'V77max', 'V78max',
'V79max', 'V80max', 'V81max', 'V82max', 'V83max', 'V84max', 'V85max',
'V86max', 'V87max', 'V88max', 'V89max', 'V90max', 'V91max', 'V92max',
'V93max', 'V94max', 'V95max', 'V96max', 'V97max', 'V98max', 'V99max',
'V100max', 'V101max', 'V102max', 'V103max', 'V104max', 'V105max', 'V106max',
'V107max', 'V108max', 'V109max', 'V110max', 'V111max', 'V112max', 'V113max',
'V114max', 'V115max', 'V116max', 'V117max', 'V118max', 'V119max', 'V120max',
'V121max', 'V122max', 'V123max', 'V124max', 'V125max', 'V126max', 'V127max',
'V128max', 'V129max', 'V130max', 'V131max', 'V132max', 'V133max', 'V134max',
'V135max', 'V136max', 'V137max', 'V138max', 'V139max', 'V140max', 'V141max',
'V142max', 'V143max', 'V144max', 'V145max', 'V146max', 'V147max', 'V148max',
'V149max', 'V150max', 'V151max', 'V152max', 'V153max', 'V154max', 'V155max',
'V156max', 'V157max', 'V158max', 'V159max', 'V160max', 'V161max', 'V162max',
'V163max', 'V164max', 'V165max', 'V166max', 'V167max', 'V168max', 'V169max',
'V170max', 'V171max', 'V172max', 'V173max', 'V174max', 'V175max', 'V176max',
'V177max', 'V178max', 'V179max', 'V180max', 'V181max', 'V182max', 'V183max',
'V184max', 'V185max', 'V186max', 'V187max', 'V188max', 'V189max', 'V190max',
'V191max', 'V192max', 'V193max', 'V194max', 'V195max', 'V196max', 'V197max',
'V198max', 'V199max', 'V200max', 'V201max', 'V202max', 'V203max', 'V204max',
'V205max', 'V206max', 'V207max', 'V208max', 'V209max', 'V210max', 'V211max',
'V212max', 'V213max', 'V214max', 'V215max', 'V216max', 'V217max', 'V218max',
'V219max', 'V220max', 'V221max', 'V222max', 'V223max', 'V224max', 'V225max',
'V226max', 'V227max', 'V228max', 'V229max', 'V230max', 'V231max', 'V232max',
'V233max', 'V234max', 'V235max', 'V236max', 'V237max', 'V238max', 'V239max',
'V240max', 'V241max', 'V242max', 'V243max', 'V244max', 'V245max', 'V246max',
'V247max', 'V248max', 'V249max', 'V250max', 'V251max', 'V252max', 'V253max',
'V254max', 'V255max', 'V256max', 'V257max', 'V258max', 'V259max', 'V260max',
'V261max', 'V262max', 'V263max', 'V264max', 'V265max', 'V266max', 'V267max',
'V268max', 'V269max', 'V270max', 'V271max', 'V272max', 'V273max', 'V274max',
'V275max', 'V276max', 'V277max', 'V278max', 'V279max', 'V280max', 'V281max',
'V282max', 'V283max', 'V284max', 'V285max', 'V286max', 'V287max', 'V288max',
'V289max', 'V290max', 'V291max', 'V292max', 'V293max', 'V294max', 'V295max',
'V296max', 'V297max', 'V298max', 'V299max', 'V300max', 'V301max', 'V302max',
'V303max', 'V304max', 'V305max', 'V306max', 'V307max', 'V308max', 'V309max',
'V310max', 'V311max', 'V312max', 'V313max', 'V314max', 'V315max', 'V316max',
'V317max', 'V318max', 'V319max', 'V320max', 'V321max', 'V322max', 'V323max',
'V324max', 'V325max', 'V326max', 'V327max', 'V328max', 'V329max', 'V330max',
'V331max', 'V332max', 'V333max', 'V334max', 'V335max', 'V336max', 'V337max',
'V338max', 'V339max', 'ntrans', 'min_amt', 'mean_amt', 'max_amt',
'num_trans_ints', 'minC1', 'minC2', 'minC3', 'minC4', 'minC5', 'minC6',
'minC7', 'minC8', 'minC9', 'minC10', 'minC11', 'minC12', 'minC13', 'minC14',
'maxC1', 'maxC2', 'maxC3', 'maxC4', 'maxC5', 'maxC6', 'maxC7', 'maxC8',
'maxC9', 'maxC10', 'maxC11', 'maxC12', 'maxC13', 'maxC14', 'countC1_inc',
'countC2_inc', 'countC3_inc', 'countC4_inc', 'countC5_inc', 'countC6_inc',
'countC7_inc', 'countC8_inc', 'countC9_inc', 'countC10_inc', 'countC11_inc',
'countC12_inc', 'countC13_inc', 'countC14_inc', 'ndistM1', 'ndistM2',
'ndistM3', 'ndistM4', 'ndistM5', 'ndistM6', 'ndistM7', 'ndistM8', 'ndistM9']
CAT_FEATURES = ['ProductCD', 'card4', 'card6',
'id_12', 'id_13', 'id_14',
'id_15', 'id_16', 'id_17',
'id_18', 'id_19', 'id_20',
'id_21',
'id_22',
'id_23',
'id_24',
'id_25',
'id_26',
'id_27',
'id_28',
'id_29',
'id_32',
'id_34',
'id_35',
'id_36', 'id_37', 'id_38',
'DeviceType', 'DeviceInfo',
'M4','P_emaildomain',
'R_emaildomain', 'addr1', 'addr2',
'M1', 'M2', 'M3', 'M5', 'M6', 'M7', 'M8', 'M9',
'ProductCD_W_95cents','ProductCD_W_00cents','ProductCD_W_50cents',
'ProductCD_W_50_95_0_cents','ProductCD_W_NOT_50_95_0_cents']
CAT_FEATURES = [c for c in CAT_FEATURES if c in FEATURES]
X = train_df[FEATURES].copy()
y = train_df[TARGET].copy()
X_test = test_df[FEATURES].copy()
X = X.fillna(-9999)
X_test = X_test.fillna(-9999)
logger.info('Running with features...')
logger.info(FEATURES)
logger.info(f'Target is {TARGET}')
update_tracking(run_id, "n_features", len(FEATURES), integer=True)
############################
#### TRAIN MODELS FUNCTIONS
############################
def train_catboost(X_train, y_train, X_valid, y_valid, X_test, CAT_FEATURES, fold_n, feature_importance):
train_dataset = Pool(data=X_train, label=y_train, cat_features=CAT_FEATURES)
valid_dataset = Pool(data=X_valid, label=y_valid, cat_features=CAT_FEATURES)
test_dataset = Pool(data=X_test, cat_features=CAT_FEATURES)
model = CatBoostClassifier(
iterations=N_ESTIMATORS,
learning_rate=LEARNING_RATE,
depth=DEPTH,
eval_metric=EVAL_METRIC,
verbose=VERBOSE,
random_state=RANDOM_STATE,
thread_count=N_THREADS,
task_type="GPU")
model.fit(
train_dataset,
eval_set=valid_dataset,
early_stopping_rounds=EARLY_STOPPING_ROUNDS,
)
y_pred_valid = model.predict_proba(valid_dataset)[:,1]
y_pred = model.predict_proba(test_dataset)[:,1]
fold_importance = pd.DataFrame()
fold_importance["feature"] = model.feature_names_
fold_importance["importance"] = model.get_feature_importance()
fold_importance["fold"] = fold_n + 1
feature_importance = pd.concat([feature_importance, fold_importance],
axis=0)
best_iteration = model.best_iteration_
return y_pred, y_pred_valid, feature_importance, best_iteration
lgb_params = {
'objective':'binary',
'boosting_type':'gbdt',
'metric': EVAL_METRIC,
'n_jobs':N_THREADS,
'learning_rate':LEARNING_RATE,
'num_leaves': 2**8,
'max_depth':DEPTH,
'tree_learner':'serial',
'colsample_bytree': 0.85,
'subsample_freq':1,
'subsample':0.85,
'n_estimators':N_ESTIMATORS,
'max_bin':255,
'verbose':-1,
'seed': RANDOM_STATE,
#'early_stopping_rounds':EARLY_STOPPING_ROUNDS,
'reg_alpha':0.3,
    'reg_lambda':0.243,
#'categorical_feature': CAT_FEATURES
}
# lgb_params = {
# 'min_data_in_leaf': 106,
# 'num_leaves': 500,
# 'learning_rate': LEARNING_RATE, #0.008,
# 'min_child_weight': 0.03454472573214212,
# 'bagging_fraction': 0.4181193142567742,
# 'feature_fraction': 0.3797454081646243,
# 'reg_lambda': 0.6485237330340494,
# 'reg_alpha': 0.3899927210061127,
# 'max_depth': DEPTH, #-1,
# 'objective': 'binary',
# 'seed': RANDOM_STATE, #13,
# 'feature_fraction_seed': RANDOM_STATE, #13,
# 'bagging_seed': RANDOM_STATE, #13,
# 'drop_seed': RANDOM_STATE, #13,
# 'data_random_seed': RANDOM_STATE, #13,
# 'boosting_type': 'gbdt',
# 'verbose': 1,
# 'metric':'auc',
# 'n_estimators':N_ESTIMATORS,
# }
def train_lightgbm(X_train, y_train, X_valid, y_valid, X_test, CAT_FEATURES, fold_n, feature_importance):
X_train = X_train.copy()
X_valid = X_valid.copy()
X_test = X_test.copy()
if len(CAT_FEATURES) > 0:
X_train[CAT_FEATURES] = X_train[CAT_FEATURES].astype('category')
X_valid[CAT_FEATURES] = X_valid[CAT_FEATURES].astype('category')
X_test[CAT_FEATURES] = X_test[CAT_FEATURES].astype('category')
model = lgb.LGBMClassifier(**lgb_params)
model.fit(X_train, y_train,
eval_set = [(X_train, y_train),
(X_valid, y_valid)],
verbose = VERBOSE,
early_stopping_rounds=EARLY_STOPPING_ROUNDS)
y_pred_valid = model.predict_proba(X_valid)[:,1]
y_pred = model.predict_proba(X_test)[:,1]
fold_importance = pd.DataFrame()
fold_importance["feature"] = X_train.columns
fold_importance["importance"] = model.feature_importances_
fold_importance["fold"] = fold_n + 1
feature_importance = pd.concat([feature_importance, fold_importance],
axis=0)
best_iteration = model.best_iteration_
return y_pred, y_pred_valid, feature_importance, best_iteration
################################
# Dataframes for storing results
#################################
feature_importance = pd.DataFrame()
oof = np.zeros(len(X))
pred = np.zeros(len(X_test))
oof_df = train_df[['isFraud']].copy()
oof_df['oof'] = np.nan
oof_df['fold'] = np.nan
scores = []
best_iterations = []
del train_df, test_df
gc.collect()
for fold_n, (train_idx, valid_idx) in enumerate(folds.split(X, y)):
X_train = X.iloc[train_idx]
y_train = y.iloc[train_idx]
X_valid = X.iloc[valid_idx]
y_valid = y.iloc[valid_idx]
if MODEL_TYPE == "catboost":
y_pred, y_pred_valid, feature_importance, best_iteration = train_catboost(X_train, y_train, X_valid, y_valid, X_test, CAT_FEATURES, fold_n, feature_importance)
if MODEL_TYPE == 'lightgbm':
y_pred, y_pred_valid, feature_importance, best_iteration = train_lightgbm(X_train, y_train, X_valid, y_valid, X_test, CAT_FEATURES, fold_n, feature_importance)
best_iterations.append(best_iteration)
fold_score = roc_auc_score(y_valid, y_pred_valid)
scores.append(fold_score)
update_tracking(run_id, "AUC_f{}".format(fold_n + 1),
fold_score,
integer=False,)
logger.info('Fold {} of {} CV mean AUC score: {:.4f}. Best iteration {}'.format(fold_n + 1,
N_FOLDS,
fold_score,
best_iteration))
oof_df.iloc[valid_idx, oof_df.columns.get_loc('oof')] = y_pred_valid.reshape(-1)
oof_df.iloc[valid_idx, oof_df.columns.get_loc('fold')] = fold_n + 1
pred += y_pred
update_tracking(run_id, 'avg_best_iteration',
np.mean(best_iterations),
integer=True)
###############
# Store Results
###############
pred /= N_FOLDS
score = np.mean(scores)
sub = pd.read_csv('../input/sample_submission.csv')
sub['isFraud'] = pred
sub.to_csv(f'../sub/sub_{MODEL_NUMBER}_{run_id}_{score:.4f}.csv', index=False)
oof_df.to_csv(f'../oof/oof_{MODEL_NUMBER}_{run_id}_{score:.4f}.csv')
logger.info('CV mean AUC score: {:.4f}, std: {:.4f}.'.format(np.mean(scores),
np.std(scores)))
total_score = roc_auc_score(oof_df['isFraud'], oof_df['oof'])
feature_importance.to_csv(f'../fi/fi_{MODEL_NUMBER}_{run_id}_{score:.4f}.csv')
update_tracking(run_id, "AUC",
total_score,
integer=False,)
logger.info('OOF AUC Score: {:.4f}'.format(total_score))
end = timer()
update_tracking(run_id, "training_time", (end - start), integer=True)
logger.info('Done!')
|
the-stack_0_11985 | import torch
import torch.nn as nn
import torch.nn.functional as F
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
class AugmentedConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, dk, dv, Nh, relative):
super(AugmentedConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.dk = dk
self.dv = dv
self.Nh = Nh
self.relative = relative
self.conv_out = nn.Conv2d(self.in_channels, self.out_channels - self.dv, self.kernel_size, padding=1)
self.qkv_conv = nn.Conv2d(self.in_channels, 2 * self.dk + self.dv, kernel_size=1)
self.attn_out = nn.Conv2d(self.dv, self.dv, 1)
def forward(self, x):
# Input x
# (batch_size, channels, height, width)
batch, _, height, width = x.size()
# conv_out
# (batch_size, out_channels, height, width)
conv_out = self.conv_out(x)
# flat_q, flat_k, flat_v
# (batch_size, Nh, height * width, dvh or dkh)
# dvh = dv / Nh, dkh = dk / Nh
# q, k, v
# (batch_size, Nh, height, width, dv or dk)
flat_q, flat_k, flat_v, q, k, v = self.compute_flat_qkv(x, self.dk, self.dv, self.Nh)
logits = torch.matmul(flat_q.transpose(2, 3), flat_k)
if self.relative:
h_rel_logits, w_rel_logits = self.relative_logits(q)
logits += h_rel_logits
logits += w_rel_logits
weights = F.softmax(logits, dim=-1)
# attn_out
# (batch, Nh, height * width, dvh)
attn_out = torch.matmul(weights, flat_v.transpose(2, 3))
attn_out = torch.reshape(attn_out, (batch, self.Nh, self.dv // self.Nh, height, width))
# combine_heads_2d
# (batch, out_channels, height, width)
attn_out = self.combine_heads_2d(attn_out)
attn_out = self.attn_out(attn_out)
return torch.cat((conv_out, attn_out), dim=1)
def compute_flat_qkv(self, x, dk, dv, Nh):
N, _, H, W = x.size()
qkv = self.qkv_conv(x)
q, k, v = torch.split(qkv, [dk, dk, dv], dim=1)
q = self.split_heads_2d(q, Nh)
k = self.split_heads_2d(k, Nh)
v = self.split_heads_2d(v, Nh)
dkh = dk // Nh
q *= dkh ** -0.5
flat_q = torch.reshape(q, (N, Nh, dk // Nh, H * W))
flat_k = torch.reshape(k, (N, Nh, dk // Nh, H * W))
flat_v = torch.reshape(v, (N, Nh, dv // Nh, H * W))
return flat_q, flat_k, flat_v, q, k, v
def split_heads_2d(self, x, Nh):
batch, channels, height, width = x.size()
ret_shape = (batch, Nh, channels // Nh, height, width)
split = torch.reshape(x, ret_shape)
return split
def combine_heads_2d(self, x):
batch, Nh, dv, H, W = x.size()
ret_shape = (batch, Nh * dv, H, W)
return torch.reshape(x, ret_shape)
def relative_logits(self, q):
B, Nh, dk, H, W = q.size()
q = torch.transpose(q, 2, 4).transpose(2, 3)
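        # NOTE: these relative-position embeddings are created anew on every
        # forward pass, so they are never registered with the module and are
        # not updated by the optimizer; a trainable variant would allocate them
        # once in __init__ for a fixed feature-map size.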
key_rel_w = nn.Parameter(torch.randn((2 * W - 1, dk), requires_grad=True)).to(device)
rel_logits_w = self.relative_logits_1d(q, key_rel_w, H, W, Nh, "w")
key_rel_h = nn.Parameter(torch.randn((2 * H - 1, dk), requires_grad=True)).to(device)
rel_logits_h = self.relative_logits_1d(torch.transpose(q, 2, 3), key_rel_h, W, H, Nh, "h")
return rel_logits_h, rel_logits_w
def relative_logits_1d(self, q, rel_k, H, W, Nh, case):
rel_logits = torch.einsum('bhxyd,md->bhxym', q, rel_k)
rel_logits = torch.reshape(rel_logits, (-1, Nh * H, W, 2 * W - 1))
rel_logits = self.rel_to_abs(rel_logits)
rel_logits = torch.reshape(rel_logits, (-1, Nh, H, W, W))
rel_logits = torch.unsqueeze(rel_logits, dim=3)
rel_logits = rel_logits.repeat((1, 1, 1, H, 1, 1))
if case == "w":
rel_logits = torch.transpose(rel_logits, 3, 4)
elif case == "h":
rel_logits = torch.transpose(rel_logits, 2, 4).transpose(4, 5).transpose(3, 5)
rel_logits = torch.reshape(rel_logits, (-1, Nh, H * W, H * W))
return rel_logits
def rel_to_abs(self, x):
B, Nh, L, _ = x.size()
col_pad = torch.zeros((B, Nh, L, 1)).to(device)
x = torch.cat((x, col_pad), dim=3)
flat_x = torch.reshape(x, (B, Nh, L * 2 * L))
flat_pad = torch.zeros((B, Nh, L - 1)).to(device)
flat_x_padded = torch.cat((flat_x, flat_pad), dim=2)
final_x = torch.reshape(flat_x_padded, (B, Nh, L + 1, 2 * L - 1))
final_x = final_x[:, :, :L, L - 1:]
return final_x
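if __name__ == "__main__":
    # Minimal smoke test (illustrative only, not part of the original module).
    # Constraints assumed from the code above: dk and dv must be divisible by
    # Nh, out_channels must be larger than dv, and kernel_size=3 matches the
    # hard-coded padding=1 of conv_out so spatial dimensions are preserved.
    layer = AugmentedConv(in_channels=3, out_channels=20, kernel_size=3,
                          dk=40, dv=4, Nh=4, relative=True).to(device)
    dummy = torch.randn(2, 3, 16, 16).to(device)
    out = layer(dummy)
    print(out.shape)  # expected: torch.Size([2, 20, 16, 16])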
|
the-stack_0_11986 | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A collection of dictionary-based wrappers around the "vanilla" transforms for utility functions
defined in :py:class:`monai.transforms.utility.array`.
Class names are ended with 'd' to denote dictionary-based transforms.
"""
import logging
import re
from copy import deepcopy
from typing import Any, Callable, Dict, Hashable, List, Mapping, Optional, Sequence, Tuple, Union
import numpy as np
import torch
from monai.config import DtypeLike, KeysCollection
from monai.config.type_definitions import NdarrayOrTensor
from monai.data.utils import no_collation
from monai.transforms.inverse import InvertibleTransform
from monai.transforms.transform import MapTransform, Randomizable, RandomizableTransform
from monai.transforms.utility.array import (
AddChannel,
AddExtremePointsChannel,
AsChannelFirst,
AsChannelLast,
CastToType,
ClassesToIndices,
ConvertToMultiChannelBasedOnBratsClasses,
CuCIM,
DataStats,
EnsureChannelFirst,
EnsureType,
FgBgToIndices,
Identity,
IntensityStats,
LabelToMask,
Lambda,
MapLabelValue,
RemoveRepeatedChannel,
RepeatChannel,
SimulateDelay,
SplitChannel,
SqueezeDim,
ToCupy,
ToDevice,
ToNumpy,
ToPIL,
TorchVision,
ToTensor,
Transpose,
)
from monai.transforms.utils import extreme_points_to_image, get_extreme_points
from monai.transforms.utils_pytorch_numpy_unification import concatenate
from monai.utils import convert_to_numpy, ensure_tuple, ensure_tuple_rep
from monai.utils.enums import TraceKeys, TransformBackends
from monai.utils.type_conversion import convert_to_dst_type
__all__ = [
"AddChannelD",
"AddChannelDict",
"AddChanneld",
"AddExtremePointsChannelD",
"AddExtremePointsChannelDict",
"AddExtremePointsChanneld",
"AsChannelFirstD",
"AsChannelFirstDict",
"AsChannelFirstd",
"AsChannelLastD",
"AsChannelLastDict",
"AsChannelLastd",
"CastToTypeD",
"CastToTypeDict",
"CastToTyped",
"ConcatItemsD",
"ConcatItemsDict",
"ConcatItemsd",
"ConvertToMultiChannelBasedOnBratsClassesD",
"ConvertToMultiChannelBasedOnBratsClassesDict",
"ConvertToMultiChannelBasedOnBratsClassesd",
"CopyItemsD",
"CopyItemsDict",
"CopyItemsd",
"CuCIMd",
"CuCIMD",
"CuCIMDict",
"DataStatsD",
"DataStatsDict",
"DataStatsd",
"DeleteItemsD",
"DeleteItemsDict",
"DeleteItemsd",
"EnsureChannelFirstD",
"EnsureChannelFirstDict",
"EnsureChannelFirstd",
"EnsureTypeD",
"EnsureTypeDict",
"EnsureTyped",
"FgBgToIndicesD",
"FgBgToIndicesDict",
"FgBgToIndicesd",
"IdentityD",
"IdentityDict",
"Identityd",
"IntensityStatsd",
"IntensityStatsD",
"IntensityStatsDict",
"LabelToMaskD",
"LabelToMaskDict",
"LabelToMaskd",
"LambdaD",
"LambdaDict",
"Lambdad",
"MapLabelValueD",
"MapLabelValueDict",
"MapLabelValued",
"RandCuCIMd",
"RandCuCIMD",
"RandCuCIMDict",
"RandLambdaD",
"RandLambdaDict",
"RandLambdad",
"RandTorchVisionD",
"RandTorchVisionDict",
"RandTorchVisiond",
"RemoveRepeatedChannelD",
"RemoveRepeatedChannelDict",
"RemoveRepeatedChanneld",
"RepeatChannelD",
"RepeatChannelDict",
"RepeatChanneld",
"SelectItemsD",
"SelectItemsDict",
"SelectItemsd",
"SimulateDelayD",
"SimulateDelayDict",
"SimulateDelayd",
"SplitChannelD",
"SplitChannelDict",
"SplitChanneld",
"SqueezeDimD",
"SqueezeDimDict",
"SqueezeDimd",
"ToCupyD",
"ToCupyDict",
"ToCupyd",
"ToDeviced",
"ToDeviceD",
"ToDeviceDict",
"ToNumpyD",
"ToNumpyDict",
"ToNumpyd",
"ToPILD",
"ToPILDict",
"ToPILd",
"ToTensorD",
"ToTensorDict",
"ToTensord",
"TorchVisionD",
"TorchVisionDict",
"TorchVisiond",
"Transposed",
"TransposeDict",
"TransposeD",
"ClassesToIndicesd",
"ClassesToIndicesD",
"ClassesToIndicesDict",
]
class Identityd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.Identity`.
"""
backend = Identity.backend
def __init__(self, keys: KeysCollection, allow_missing_keys: bool = False) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
allow_missing_keys: don't raise exception if key is missing.
"""
super().__init__(keys, allow_missing_keys)
self.identity = Identity()
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key in self.key_iterator(d):
d[key] = self.identity(d[key])
return d
class AsChannelFirstd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.AsChannelFirst`.
"""
backend = AsChannelFirst.backend
def __init__(self, keys: KeysCollection, channel_dim: int = -1, allow_missing_keys: bool = False) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
channel_dim: which dimension of input image is the channel, default is the last dimension.
allow_missing_keys: don't raise exception if key is missing.
"""
super().__init__(keys, allow_missing_keys)
self.converter = AsChannelFirst(channel_dim=channel_dim)
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key in self.key_iterator(d):
d[key] = self.converter(d[key])
return d
class AsChannelLastd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.AsChannelLast`.
"""
backend = AsChannelLast.backend
def __init__(self, keys: KeysCollection, channel_dim: int = 0, allow_missing_keys: bool = False) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
channel_dim: which dimension of input image is the channel, default is the first dimension.
allow_missing_keys: don't raise exception if key is missing.
"""
super().__init__(keys, allow_missing_keys)
self.converter = AsChannelLast(channel_dim=channel_dim)
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key in self.key_iterator(d):
d[key] = self.converter(d[key])
return d
class AddChanneld(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.AddChannel`.
"""
backend = AddChannel.backend
def __init__(self, keys: KeysCollection, allow_missing_keys: bool = False) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
allow_missing_keys: don't raise exception if key is missing.
"""
super().__init__(keys, allow_missing_keys)
self.adder = AddChannel()
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key in self.key_iterator(d):
d[key] = self.adder(d[key])
return d
class EnsureChannelFirstd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.EnsureChannelFirst`.
"""
backend = EnsureChannelFirst.backend
def __init__(
self,
keys: KeysCollection,
meta_keys: Optional[KeysCollection] = None,
meta_key_postfix: str = "meta_dict",
strict_check: bool = True,
) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
meta_keys: explicitly indicate the key of the corresponding meta data dictionary.
for example, for data with key `image`, the metadata by default is in `image_meta_dict`.
the meta data is a dictionary object which contains: filename, original_shape, etc.
                it can be a sequence of strings, mapping to the `keys`.
if None, will try to construct meta_keys by `key_{meta_key_postfix}`.
meta_key_postfix: if meta_keys is None and `key_{postfix}` was used to store the metadata in `LoadImaged`.
So need the key to extract metadata for channel dim information, default is `meta_dict`.
For example, for data with key `image`, metadata by default is in `image_meta_dict`.
strict_check: whether to raise an error when the meta information is insufficient.
"""
super().__init__(keys)
self.adjuster = EnsureChannelFirst(strict_check=strict_check)
self.meta_keys = ensure_tuple_rep(meta_keys, len(self.keys))
self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, len(self.keys))
def __call__(self, data) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key, meta_key, meta_key_postfix in zip(self.keys, self.meta_keys, self.meta_key_postfix):
d[key] = self.adjuster(d[key], d[meta_key or f"{key}_{meta_key_postfix}"])
return d
class RepeatChanneld(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.RepeatChannel`.
"""
backend = RepeatChannel.backend
def __init__(self, keys: KeysCollection, repeats: int, allow_missing_keys: bool = False) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
repeats: the number of repetitions for each element.
allow_missing_keys: don't raise exception if key is missing.
"""
super().__init__(keys, allow_missing_keys)
self.repeater = RepeatChannel(repeats)
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key in self.key_iterator(d):
d[key] = self.repeater(d[key])
return d
class RemoveRepeatedChanneld(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.RemoveRepeatedChannel`.
"""
backend = RemoveRepeatedChannel.backend
def __init__(self, keys: KeysCollection, repeats: int, allow_missing_keys: bool = False) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
repeats: the number of repetitions for each element.
allow_missing_keys: don't raise exception if key is missing.
"""
super().__init__(keys, allow_missing_keys)
self.repeater = RemoveRepeatedChannel(repeats)
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key in self.key_iterator(d):
d[key] = self.repeater(d[key])
return d
class SplitChanneld(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.SplitChannel`.
All the input specified by `keys` should be split into same count of data.
"""
backend = SplitChannel.backend
def __init__(
self,
keys: KeysCollection,
output_postfixes: Optional[Sequence[str]] = None,
channel_dim: int = 0,
allow_missing_keys: bool = False,
) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
output_postfixes: the postfixes to construct keys to store split data.
for example: if the key of input data is `pred` and split 2 classes, the output
data keys will be: pred_(output_postfixes[0]), pred_(output_postfixes[1])
if None, using the index number: `pred_0`, `pred_1`, ... `pred_N`.
channel_dim: which dimension of input image is the channel, default to 0.
allow_missing_keys: don't raise exception if key is missing.
"""
super().__init__(keys, allow_missing_keys)
self.output_postfixes = output_postfixes
self.splitter = SplitChannel(channel_dim=channel_dim)
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key in self.key_iterator(d):
rets = self.splitter(d[key])
postfixes: Sequence = list(range(len(rets))) if self.output_postfixes is None else self.output_postfixes
if len(postfixes) != len(rets):
raise AssertionError("count of split results must match output_postfixes.")
for i, r in enumerate(rets):
split_key = f"{key}_{postfixes[i]}"
if split_key in d:
raise RuntimeError(f"input data already contains key {split_key}.")
d[split_key] = r
return d
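# Illustrative usage sketch (hypothetical data, not part of the library docs):
#
#     data = {"pred": np.zeros((2, 8, 8))}  # 2 channels on dim 0
#     splitter = SplitChanneld(keys="pred", output_postfixes=["a", "b"],
#                              channel_dim=0)
#     out = splitter(data)  # adds "pred_a" and "pred_b", one per channel,
#                           # while keeping the original "pred" entry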
class CastToTyped(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.CastToType`.
"""
backend = CastToType.backend
def __init__(
self,
keys: KeysCollection,
dtype: Union[Sequence[Union[DtypeLike, torch.dtype]], DtypeLike, torch.dtype] = np.float32,
allow_missing_keys: bool = False,
) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
dtype: convert image to this data type, default is `np.float32`.
it also can be a sequence of dtypes or torch.dtype,
each element corresponds to a key in ``keys``.
allow_missing_keys: don't raise exception if key is missing.
"""
MapTransform.__init__(self, keys, allow_missing_keys)
self.dtype = ensure_tuple_rep(dtype, len(self.keys))
self.converter = CastToType()
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key, dtype in self.key_iterator(d, self.dtype):
d[key] = self.converter(d[key], dtype=dtype)
return d
class ToTensord(MapTransform, InvertibleTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.ToTensor`.
"""
backend = ToTensor.backend
def __init__(
self,
keys: KeysCollection,
dtype: Optional[torch.dtype] = None,
device: Optional[torch.device] = None,
allow_missing_keys: bool = False,
) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
dtype: target data content type to convert, for example: torch.float, etc.
device: specify the target device to put the Tensor data.
allow_missing_keys: don't raise exception if key is missing.
"""
super().__init__(keys, allow_missing_keys)
self.converter = ToTensor(dtype=dtype, device=device)
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key in self.key_iterator(d):
self.push_transform(d, key)
d[key] = self.converter(d[key])
return d
def inverse(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = deepcopy(dict(data))
for key in self.key_iterator(d):
# Create inverse transform
inverse_transform = ToNumpy()
# Apply inverse
d[key] = inverse_transform(d[key])
# Remove the applied transform
self.pop_transform(d, key)
return d
class EnsureTyped(MapTransform, InvertibleTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.EnsureType`.
Ensure the input data to be a PyTorch Tensor or numpy array, support: `numpy array`, `PyTorch Tensor`,
`float`, `int`, `bool`, `string` and `object` keep the original.
If passing a dictionary, list or tuple, still return dictionary, list or tuple and recursively convert
every item to the expected data type.
Note: Currently, we only convert tensor data to numpy array or scalar number in the inverse operation.
"""
backend = EnsureType.backend
def __init__(
self,
keys: KeysCollection,
data_type: str = "tensor",
dtype: Optional[Union[DtypeLike, torch.dtype]] = None,
device: Optional[torch.device] = None,
allow_missing_keys: bool = False,
) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
data_type: target data type to convert, should be "tensor" or "numpy".
dtype: target data content type to convert, for example: np.float32, torch.float, etc.
device: for Tensor data type, specify the target device.
allow_missing_keys: don't raise exception if key is missing.
"""
super().__init__(keys, allow_missing_keys)
self.converter = EnsureType(data_type=data_type, dtype=dtype, device=device)
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key in self.key_iterator(d):
self.push_transform(d, key)
d[key] = self.converter(d[key])
return d
def inverse(self, data: Mapping[Hashable, Any]) -> Dict[Hashable, Any]:
d = deepcopy(dict(data))
for key in self.key_iterator(d):
# FIXME: currently, only convert tensor data to numpy array or scalar number,
# need to also invert numpy array but it's not easy to determine the previous data type
d[key] = convert_to_numpy(d[key])
# Remove the applied transform
self.pop_transform(d, key)
return d
class ToNumpyd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.ToNumpy`.
"""
backend = ToNumpy.backend
def __init__(self, keys: KeysCollection, dtype: DtypeLike = None, allow_missing_keys: bool = False) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
dtype: target data type when converting to numpy array.
allow_missing_keys: don't raise exception if key is missing.
"""
super().__init__(keys, allow_missing_keys)
self.converter = ToNumpy(dtype=dtype)
def __call__(self, data: Mapping[Hashable, Any]) -> Dict[Hashable, Any]:
d = dict(data)
for key in self.key_iterator(d):
d[key] = self.converter(d[key])
return d
class ToCupyd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.ToCupy`.
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
dtype: data type specifier. It is inferred from the input by default.
allow_missing_keys: don't raise exception if key is missing.
"""
backend = ToCupy.backend
def __init__(self, keys: KeysCollection, dtype=None, allow_missing_keys: bool = False) -> None:
super().__init__(keys, allow_missing_keys)
self.converter = ToCupy(dtype=dtype)
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key in self.key_iterator(d):
d[key] = self.converter(d[key])
return d
class ToPILd(MapTransform):
"""
    Dictionary-based wrapper of :py:class:`monai.transforms.ToPIL`.
"""
backend = ToPIL.backend
def __init__(self, keys: KeysCollection, allow_missing_keys: bool = False) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
allow_missing_keys: don't raise exception if key is missing.
"""
super().__init__(keys, allow_missing_keys)
self.converter = ToPIL()
def __call__(self, data: Mapping[Hashable, Any]) -> Dict[Hashable, Any]:
d = dict(data)
for key in self.key_iterator(d):
d[key] = self.converter(d[key])
return d
class Transposed(MapTransform, InvertibleTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.Transpose`.
"""
backend = Transpose.backend
def __init__(
self, keys: KeysCollection, indices: Optional[Sequence[int]], allow_missing_keys: bool = False
) -> None:
super().__init__(keys, allow_missing_keys)
self.transform = Transpose(indices)
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key in self.key_iterator(d):
d[key] = self.transform(d[key])
# if None was supplied then numpy uses range(a.ndim)[::-1]
indices = self.transform.indices or range(d[key].ndim)[::-1]
self.push_transform(d, key, extra_info={"indices": indices})
return d
def inverse(self, data: Mapping[Hashable, Any]) -> Dict[Hashable, Any]:
d = deepcopy(dict(data))
for key in self.key_iterator(d):
transform = self.get_most_recent_transform(d, key)
# Create inverse transform
fwd_indices = np.array(transform[TraceKeys.EXTRA_INFO]["indices"])
inv_indices = np.argsort(fwd_indices)
inverse_transform = Transpose(inv_indices.tolist())
# Apply inverse
d[key] = inverse_transform(d[key])
# Remove the applied transform
self.pop_transform(d, key)
return d
class DeleteItemsd(MapTransform):
"""
Delete specified items from data dictionary to release memory.
It will remove the key-values and copy the others to construct a new dictionary.
"""
backend = [TransformBackends.TORCH, TransformBackends.NUMPY]
def __init__(self, keys: KeysCollection, sep: str = ".", use_re: Union[Sequence[bool], bool] = False) -> None:
"""
Args:
keys: keys of the corresponding items to delete, can be "A{sep}B{sep}C"
to delete key `C` in nested dictionary, `C` can be regular expression.
See also: :py:class:`monai.transforms.compose.MapTransform`
sep: the separator tag to define nested dictionary keys, default to ".".
use_re: whether the specified key is a regular expression, it also can be
a list of bool values, map the to keys.
"""
super().__init__(keys)
self.sep = sep
self.use_re = ensure_tuple_rep(use_re, len(self.keys))
def __call__(self, data):
def _delete_item(keys, d, use_re: bool = False):
key = keys[0]
if len(keys) > 1:
d[key] = _delete_item(keys[1:], d[key], use_re)
return d
return {k: v for k, v in d.items() if (use_re and not re.search(key, k)) or (not use_re and k != key)}
d = dict(data)
for key, use_re in zip(self.keys, self.use_re):
d = _delete_item(key.split(self.sep), d, use_re)
return d
class SelectItemsd(MapTransform):
"""
Select only specified items from data dictionary to release memory.
    It will copy the selected key-values and construct a new dictionary.
"""
backend = [TransformBackends.TORCH, TransformBackends.NUMPY]
def __call__(self, data):
return {key: data[key] for key in self.key_iterator(data)}
class SqueezeDimd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.SqueezeDim`.
"""
backend = SqueezeDim.backend
def __init__(self, keys: KeysCollection, dim: int = 0, allow_missing_keys: bool = False) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
dim: dimension to be squeezed. Default: 0 (the first dimension)
allow_missing_keys: don't raise exception if key is missing.
"""
super().__init__(keys, allow_missing_keys)
self.converter = SqueezeDim(dim=dim)
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key in self.key_iterator(d):
d[key] = self.converter(d[key])
return d
class DataStatsd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.DataStats`.
"""
backend = DataStats.backend
def __init__(
self,
keys: KeysCollection,
prefix: Union[Sequence[str], str] = "Data",
data_type: Union[Sequence[bool], bool] = True,
data_shape: Union[Sequence[bool], bool] = True,
value_range: Union[Sequence[bool], bool] = True,
data_value: Union[Sequence[bool], bool] = False,
additional_info: Optional[Union[Sequence[Callable], Callable]] = None,
logger_handler: Optional[logging.Handler] = None,
allow_missing_keys: bool = False,
) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
prefix: will be printed in format: "{prefix} statistics".
it also can be a sequence of string, each element corresponds to a key in ``keys``.
data_type: whether to show the type of input data.
it also can be a sequence of bool, each element corresponds to a key in ``keys``.
data_shape: whether to show the shape of input data.
it also can be a sequence of bool, each element corresponds to a key in ``keys``.
value_range: whether to show the value range of input data.
it also can be a sequence of bool, each element corresponds to a key in ``keys``.
data_value: whether to show the raw value of input data.
it also can be a sequence of bool, each element corresponds to a key in ``keys``.
a typical example is to print some properties of Nifti image: affine, pixdim, etc.
additional_info: user can define callable function to extract
additional info from input data. it also can be a sequence of string, each element
corresponds to a key in ``keys``.
logger_handler: add additional handler to output data: save to file, etc.
add existing python logging handlers: https://docs.python.org/3/library/logging.handlers.html
the handler should have a logging level of at least `INFO`.
allow_missing_keys: don't raise exception if key is missing.
"""
super().__init__(keys, allow_missing_keys)
self.prefix = ensure_tuple_rep(prefix, len(self.keys))
self.data_type = ensure_tuple_rep(data_type, len(self.keys))
self.data_shape = ensure_tuple_rep(data_shape, len(self.keys))
self.value_range = ensure_tuple_rep(value_range, len(self.keys))
self.data_value = ensure_tuple_rep(data_value, len(self.keys))
self.additional_info = ensure_tuple_rep(additional_info, len(self.keys))
self.logger_handler = logger_handler
self.printer = DataStats(logger_handler=logger_handler)
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key, prefix, data_type, data_shape, value_range, data_value, additional_info in self.key_iterator(
d, self.prefix, self.data_type, self.data_shape, self.value_range, self.data_value, self.additional_info
):
d[key] = self.printer(d[key], prefix, data_type, data_shape, value_range, data_value, additional_info)
return d
class SimulateDelayd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.SimulateDelay`.
"""
backend = SimulateDelay.backend
def __init__(
self, keys: KeysCollection, delay_time: Union[Sequence[float], float] = 0.0, allow_missing_keys: bool = False
) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
delay_time: The minimum amount of time, in fractions of seconds, to accomplish this identity task.
It also can be a sequence of string, each element corresponds to a key in ``keys``.
allow_missing_keys: don't raise exception if key is missing.
"""
super().__init__(keys, allow_missing_keys)
self.delay_time = ensure_tuple_rep(delay_time, len(self.keys))
self.delayer = SimulateDelay()
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key, delay_time in self.key_iterator(d, self.delay_time):
d[key] = self.delayer(d[key], delay_time=delay_time)
return d
class CopyItemsd(MapTransform):
"""
Copy specified items from data dictionary and save with different key names.
It can copy several items together and copy several times.
"""
backend = [TransformBackends.TORCH, TransformBackends.NUMPY]
def __init__(
self, keys: KeysCollection, times: int, names: KeysCollection, allow_missing_keys: bool = False
) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
times: expected copy times, for example, if keys is "img", times is 3,
it will add 3 copies of "img" data to the dictionary.
names: the names corresponding to the newly copied data,
the length should match `len(keys) x times`. for example, if keys is ["img", "seg"]
and times is 2, names can be: ["img_1", "seg_1", "img_2", "seg_2"].
allow_missing_keys: don't raise exception if key is missing.
Raises:
ValueError: When ``times`` is nonpositive.
ValueError: When ``len(names)`` is not ``len(keys) * times``. Incompatible values.
"""
super().__init__(keys, allow_missing_keys)
if times < 1:
raise ValueError(f"times must be positive, got {times}.")
self.times = times
names = ensure_tuple(names)
if len(names) != (len(self.keys) * times):
raise ValueError(
"len(names) must match len(keys) * times, "
f"got len(names)={len(names)} len(keys) * times={len(self.keys) * times}."
)
self.names = names
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
"""
Raises:
KeyError: When a key in ``self.names`` already exists in ``data``.
"""
d = dict(data)
key_len = len(self.keys)
for i in range(self.times):
for key, new_key in self.key_iterator(d, self.names[i * key_len : (i + 1) * key_len]):
if new_key in d:
raise KeyError(f"Key {new_key} already exists in data.")
val = d[key]
if isinstance(val, torch.Tensor):
d[new_key] = val.detach().clone()
else:
d[new_key] = deepcopy(val)
return d
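# Illustrative usage sketch (hypothetical data, not part of the library docs):
#
#     data = {"img": np.zeros((1, 4, 4)), "seg": np.ones((1, 4, 4))}
#     copier = CopyItemsd(keys=["img", "seg"], times=1,
#                         names=["img_copy", "seg_copy"])
#     sorted(copier(data).keys())
#     # -> ['img', 'img_copy', 'seg', 'seg_copy']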
class ConcatItemsd(MapTransform):
"""
Concatenate specified items from data dictionary together on the first dim to construct a big array.
Expect all the items are numpy array or PyTorch Tensor.
"""
backend = [TransformBackends.TORCH, TransformBackends.NUMPY]
def __init__(self, keys: KeysCollection, name: str, dim: int = 0, allow_missing_keys: bool = False) -> None:
"""
Args:
keys: keys of the corresponding items to be concatenated together.
See also: :py:class:`monai.transforms.compose.MapTransform`
name: the name corresponding to the key to store the concatenated data.
dim: on which dimension to concatenate the items, default is 0.
allow_missing_keys: don't raise exception if key is missing.
"""
super().__init__(keys, allow_missing_keys)
self.name = name
self.dim = dim
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
"""
Raises:
TypeError: When items in ``data`` differ in type.
TypeError: When the item type is not in ``Union[numpy.ndarray, torch.Tensor]``.
"""
d = dict(data)
output = []
data_type = None
for key in self.key_iterator(d):
if data_type is None:
data_type = type(d[key])
elif not isinstance(d[key], data_type):
raise TypeError("All items in data must have the same type.")
output.append(d[key])
if len(output) == 0:
return d
if data_type is np.ndarray:
d[self.name] = np.concatenate(output, axis=self.dim)
elif data_type is torch.Tensor:
d[self.name] = torch.cat(output, dim=self.dim) # type: ignore
else:
raise TypeError(f"Unsupported data type: {data_type}, available options are (numpy.ndarray, torch.Tensor).")
return d
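# Illustrative usage sketch (hypothetical data, not part of the library docs):
#
#     data = {"img1": np.zeros((1, 4, 4)), "img2": np.ones((1, 4, 4))}
#     concat = ConcatItemsd(keys=["img1", "img2"], name="img", dim=0)
#     concat(data)["img"].shape
#     # -> (2, 4, 4), with the original "img1" and "img2" entries kept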
class Lambdad(MapTransform, InvertibleTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.Lambda`.
For example:
.. code-block:: python
:emphasize-lines: 2
input_data={'image': np.zeros((10, 2, 2)), 'label': np.ones((10, 2, 2))}
lambd = Lambdad(keys='label', func=lambda x: x[:4, :, :])
print(lambd(input_data)['label'].shape)
(4, 2, 2)
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
func: Lambda/function to be applied. It also can be a sequence of Callable,
each element corresponds to a key in ``keys``.
inv_func: Lambda/function of inverse operation if want to invert transforms, default to `lambda x: x`.
It also can be a sequence of Callable, each element corresponds to a key in ``keys``.
        overwrite: whether to overwrite the original data in the input dictionary with lambda function output.
default to True. it also can be a sequence of bool, each element corresponds to a key in ``keys``.
allow_missing_keys: don't raise exception if key is missing.
    Note: The inverse operation does not allow defining `extra_info` or accessing other information, such as the
    image's original size. If you need such information, please write a new InvertibleTransform directly.
"""
backend = Lambda.backend
def __init__(
self,
keys: KeysCollection,
func: Union[Sequence[Callable], Callable],
inv_func: Union[Sequence[Callable], Callable] = no_collation,
overwrite: Union[Sequence[bool], bool] = True,
allow_missing_keys: bool = False,
) -> None:
super().__init__(keys, allow_missing_keys)
self.func = ensure_tuple_rep(func, len(self.keys))
self.inv_func = ensure_tuple_rep(inv_func, len(self.keys))
self.overwrite = ensure_tuple_rep(overwrite, len(self.keys))
self._lambd = Lambda()
def _transform(self, data: Any, func: Callable):
return self._lambd(data, func=func)
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key, func, overwrite in self.key_iterator(d, self.func, self.overwrite):
ret = self._transform(data=d[key], func=func)
if overwrite:
d[key] = ret
self.push_transform(d, key)
return d
def _inverse_transform(self, transform_info: Dict, data: Any, func: Callable):
return self._lambd(data, func=func)
def inverse(self, data):
d = deepcopy(dict(data))
for key, inv_func, overwrite in self.key_iterator(d, self.inv_func, self.overwrite):
transform = self.get_most_recent_transform(d, key)
ret = self._inverse_transform(transform_info=transform, data=d[key], func=inv_func)
if overwrite:
d[key] = ret
self.pop_transform(d, key)
return d
class RandLambdad(Lambdad, RandomizableTransform):
"""
    Randomizable version of :py:class:`monai.transforms.Lambdad`: the input `func` may contain random logic,
    or randomly execute the function based on `prob`, so `CacheDataset` will not execute it and cache the results.
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
func: Lambda/function to be applied. It also can be a sequence of Callable,
each element corresponds to a key in ``keys``.
inv_func: Lambda/function of inverse operation if want to invert transforms, default to `lambda x: x`.
It also can be a sequence of Callable, each element corresponds to a key in ``keys``.
        overwrite: whether to overwrite the original data in the input dictionary with lambda function output.
default to True. it also can be a sequence of bool, each element corresponds to a key in ``keys``.
prob: probability of executing the random function, default to 1.0, with 100% probability to execute.
note that all the data specified by `keys` will share the same random probability to execute or not.
allow_missing_keys: don't raise exception if key is missing.
For more details, please check :py:class:`monai.transforms.Lambdad`.
    Note: The inverse operation doesn't allow defining `extra_info` or accessing other information, such as the
        image's original size. If you need this additional information, please write a new InvertibleTransform directly.
"""
backend = Lambda.backend
def __init__(
self,
keys: KeysCollection,
func: Union[Sequence[Callable], Callable],
inv_func: Union[Sequence[Callable], Callable] = no_collation,
overwrite: Union[Sequence[bool], bool] = True,
prob: float = 1.0,
allow_missing_keys: bool = False,
) -> None:
Lambdad.__init__(
self=self,
keys=keys,
func=func,
inv_func=inv_func,
overwrite=overwrite,
allow_missing_keys=allow_missing_keys,
)
RandomizableTransform.__init__(self=self, prob=prob, do_transform=True)
def _transform(self, data: Any, func: Callable):
return self._lambd(data, func=func) if self._do_transform else data
def __call__(self, data):
self.randomize(data)
return super().__call__(data)
def _inverse_transform(self, transform_info: Dict, data: Any, func: Callable):
return self._lambd(data, func=func) if transform_info[TraceKeys.DO_TRANSFORM] else data
class LabelToMaskd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.LabelToMask`.
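    A minimal usage sketch (illustrative, not from the original docs; assumes a NumPy
    single-channel label):

    .. code-block:: python

        input_data = {"label": np.array([[[0, 1], [2, 3]]])}  # shape (1, 2, 2)
        out = LabelToMaskd(keys="label", select_labels=[1, 2])(input_data)
        # out["label"] keeps the (1, 2, 2) shape and marks only the voxels whose label is 1 or 2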
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
        select_labels: labels to generate the mask from. For a single-channel label, `select_labels`
            contains the expected label values, e.g. [1, 2, 3]; for a One-Hot format label,
            `select_labels` contains the expected channel indices.
merge_channels: whether to use `np.any()` to merge the result on channel dim.
if yes, will return a single channel mask with binary data.
allow_missing_keys: don't raise exception if key is missing.
"""
backend = LabelToMask.backend
def __init__( # pytype: disable=annotation-type-mismatch
self,
keys: KeysCollection,
select_labels: Union[Sequence[int], int],
merge_channels: bool = False,
allow_missing_keys: bool = False,
) -> None: # pytype: disable=annotation-type-mismatch
super().__init__(keys, allow_missing_keys)
self.converter = LabelToMask(select_labels=select_labels, merge_channels=merge_channels)
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key in self.key_iterator(d):
d[key] = self.converter(d[key])
return d
class FgBgToIndicesd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.FgBgToIndices`.
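    A minimal usage sketch (illustrative, not from the original docs; uses the default postfixes):

    .. code-block:: python

        input_data = {"label": np.array([[[0, 1], [1, 0]]])}
        out = FgBgToIndicesd(keys="label")(input_data)
        # out now also contains "label_fg_indices" and "label_bg_indices"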
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
fg_postfix: postfix to save the computed foreground indices in dict.
for example, if computed on `label` and `postfix = "_fg_indices"`, the key will be `label_fg_indices`.
bg_postfix: postfix to save the computed background indices in dict.
for example, if computed on `label` and `postfix = "_bg_indices"`, the key will be `label_bg_indices`.
        image_key: if image_key is not None, use ``label == 0 & image > image_threshold`` to determine
            the negative samples (background), so the output items will not map to all the voxels in the label.
image_threshold: if enabled image_key, use ``image > image_threshold`` to determine
the valid image content area and select background only in this area.
output_shape: expected shape of output indices. if not None, unravel indices to specified shape.
allow_missing_keys: don't raise exception if key is missing.
"""
backend = FgBgToIndices.backend
def __init__(
self,
keys: KeysCollection,
fg_postfix: str = "_fg_indices",
bg_postfix: str = "_bg_indices",
image_key: Optional[str] = None,
image_threshold: float = 0.0,
output_shape: Optional[Sequence[int]] = None,
allow_missing_keys: bool = False,
) -> None:
super().__init__(keys, allow_missing_keys)
self.fg_postfix = fg_postfix
self.bg_postfix = bg_postfix
self.image_key = image_key
self.converter = FgBgToIndices(image_threshold, output_shape)
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
image = d[self.image_key] if self.image_key else None
for key in self.key_iterator(d):
d[str(key) + self.fg_postfix], d[str(key) + self.bg_postfix] = self.converter(d[key], image)
return d
class ClassesToIndicesd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.ClassesToIndices`.
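    A minimal usage sketch (illustrative, not from the original docs; assumes an argmax-style
    label with 3 classes):

    .. code-block:: python

        input_data = {"label": np.array([[[0, 1], [2, 1]]])}
        out = ClassesToIndicesd(keys="label", num_classes=3)(input_data)
        # out["label_cls_indices"] is a list of index arrays, one entry per class 0, 1 and 2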
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
indices_postfix: postfix to save the computed indices of all classes in dict.
for example, if computed on `label` and `postfix = "_cls_indices"`, the key will be `label_cls_indices`.
num_classes: number of classes for argmax label, not necessary for One-Hot label.
image_key: if image_key is not None, use ``image > image_threshold`` to define valid region, and only select
the indices within the valid region.
image_threshold: if enabled image_key, use ``image > image_threshold`` to determine the valid image content
area and select only the indices of classes in this area.
output_shape: expected shape of output indices. if not None, unravel indices to specified shape.
allow_missing_keys: don't raise exception if key is missing.
"""
backend = ClassesToIndices.backend
def __init__(
self,
keys: KeysCollection,
indices_postfix: str = "_cls_indices",
num_classes: Optional[int] = None,
image_key: Optional[str] = None,
image_threshold: float = 0.0,
output_shape: Optional[Sequence[int]] = None,
allow_missing_keys: bool = False,
) -> None:
super().__init__(keys, allow_missing_keys)
self.indices_postfix = indices_postfix
self.image_key = image_key
self.converter = ClassesToIndices(num_classes, image_threshold, output_shape)
def __call__(self, data: Mapping[Hashable, Any]):
d = dict(data)
image = d[self.image_key] if self.image_key else None
for key in self.key_iterator(d):
d[str(key) + self.indices_postfix] = self.converter(d[key], image)
return d
class ConvertToMultiChannelBasedOnBratsClassesd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.ConvertToMultiChannelBasedOnBratsClasses`.
Convert labels to multi channels based on brats18 classes:
label 1 is the necrotic and non-enhancing tumor core
    label 2 is the peritumoral edema
label 4 is the GD-enhancing tumor
The possible classes are TC (Tumor core), WT (Whole tumor)
and ET (Enhancing tumor).
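    A minimal usage sketch (illustrative, not from the original docs; assumes a NumPy label
    without a channel dimension):

    .. code-block:: python

        input_data = {"label": np.array([[0, 1], [2, 4]])}
        out = ConvertToMultiChannelBasedOnBratsClassesd(keys="label")(input_data)
        # out["label"] has shape (3, 2, 2): one channel each for TC, WT and ET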
"""
backend = ConvertToMultiChannelBasedOnBratsClasses.backend
def __init__(self, keys: KeysCollection, allow_missing_keys: bool = False):
super().__init__(keys, allow_missing_keys)
self.converter = ConvertToMultiChannelBasedOnBratsClasses()
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key in self.key_iterator(d):
d[key] = self.converter(d[key])
return d
class AddExtremePointsChanneld(Randomizable, MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.AddExtremePointsChannel`.
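    A minimal usage sketch (illustrative, not from the original docs; the label must be
    single-channel and contain some foreground):

    .. code-block:: python

        label = np.zeros((1, 8, 8))
        label[0, 2:6, 2:6] = 1
        input_data = {"img": np.zeros((1, 8, 8), dtype=np.float32), "label": label}
        out = AddExtremePointsChanneld(keys="img", label_key="label", sigma=1.0)(input_data)
        # out["img"] has shape (2, 8, 8): the original channel plus an extreme-points heatmap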
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
label_key: key to label source to get the extreme points.
background: Class index of background label, defaults to 0.
pert: Random perturbation amount to add to the points, defaults to 0.0.
sigma: if a list of values, must match the count of spatial dimensions of input data,
and apply every value in the list to 1 spatial dimension. if only 1 value provided,
use it for all spatial dimensions.
rescale_min: minimum value of output data.
rescale_max: maximum value of output data.
allow_missing_keys: don't raise exception if key is missing.
"""
backend = AddExtremePointsChannel.backend
def __init__(
self,
keys: KeysCollection,
label_key: str,
background: int = 0,
pert: float = 0.0,
sigma: Union[Sequence[float], float, Sequence[torch.Tensor], torch.Tensor] = 3.0,
rescale_min: float = -1.0,
rescale_max: float = 1.0,
allow_missing_keys: bool = False,
):
MapTransform.__init__(self, keys, allow_missing_keys)
self.background = background
self.pert = pert
self.points: List[Tuple[int, ...]] = []
self.label_key = label_key
self.sigma = sigma
self.rescale_min = rescale_min
self.rescale_max = rescale_max
def randomize(self, label: NdarrayOrTensor) -> None:
self.points = get_extreme_points(label, rand_state=self.R, background=self.background, pert=self.pert)
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
label = d[self.label_key]
if label.shape[0] != 1:
raise ValueError("Only supports single channel labels!")
# Generate extreme points
self.randomize(label[0, :])
for key in self.key_iterator(d):
img = d[key]
points_image = extreme_points_to_image(
points=self.points,
label=label,
sigma=self.sigma,
rescale_min=self.rescale_min,
rescale_max=self.rescale_max,
)
points_image, *_ = convert_to_dst_type(points_image, img) # type: ignore
d[key] = concatenate([img, points_image], axis=0)
return d
class TorchVisiond(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.TorchVision` for non-randomized transforms.
For randomized transforms of TorchVision use :py:class:`monai.transforms.RandTorchVisiond`.
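    A minimal usage sketch (illustrative, not from the original docs; wraps
    ``torchvision.transforms.CenterCrop`` as an example):

    .. code-block:: python

        input_data = {"img": torch.rand(1, 64, 64)}
        out = TorchVisiond(keys="img", name="CenterCrop", size=32)(input_data)
        # out["img"] has shape (1, 32, 32)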
Note:
        As most of the TorchVision transforms only work for PIL image and PyTorch Tensor, this transform expects input
        data to be a dict of PyTorch Tensors; users can easily call the `ToTensord` transform to convert NumPy arrays to Tensors.
"""
backend = TorchVision.backend
def __init__(self, keys: KeysCollection, name: str, allow_missing_keys: bool = False, *args, **kwargs) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
name: The transform name in TorchVision package.
allow_missing_keys: don't raise exception if key is missing.
args: parameters for the TorchVision transform.
kwargs: parameters for the TorchVision transform.
"""
super().__init__(keys, allow_missing_keys)
self.trans = TorchVision(name, *args, **kwargs)
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key in self.key_iterator(d):
d[key] = self.trans(d[key])
return d
class RandTorchVisiond(Randomizable, MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.TorchVision` for randomized transforms.
For deterministic non-randomized transforms of TorchVision use :py:class:`monai.transforms.TorchVisiond`.
Note:
        - As most of the TorchVision transforms only work for PIL image and PyTorch Tensor, this transform expects
          input data to be a dict of PyTorch Tensors; users can easily call the `ToTensord` transform to convert
          NumPy arrays to Tensors.
- This class inherits the ``Randomizable`` purely to prevent any dataset caching to skip the transform
computation. If the random factor of the underlying torchvision transform is not derived from `self.R`,
the results may not be deterministic.
See Also: :py:class:`monai.transforms.Randomizable`.
"""
backend = TorchVision.backend
def __init__(self, keys: KeysCollection, name: str, allow_missing_keys: bool = False, *args, **kwargs) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
name: The transform name in TorchVision package.
allow_missing_keys: don't raise exception if key is missing.
args: parameters for the TorchVision transform.
kwargs: parameters for the TorchVision transform.
"""
MapTransform.__init__(self, keys, allow_missing_keys)
self.trans = TorchVision(name, *args, **kwargs)
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key in self.key_iterator(d):
d[key] = self.trans(d[key])
return d
class MapLabelValued(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.MapLabelValue`.
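    A minimal usage sketch (illustrative, not from the original docs; the output dtype
    defaults to float32):

    .. code-block:: python

        input_data = {"label": np.array([[3, 5], [5, 8]])}
        out = MapLabelValued(keys="label", orig_labels=[3, 5, 8], target_labels=[0, 1, 2])(input_data)
        # out["label"] is [[0., 1.], [1., 2.]]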
"""
backend = MapLabelValue.backend
def __init__(
self,
keys: KeysCollection,
orig_labels: Sequence,
target_labels: Sequence,
dtype: DtypeLike = np.float32,
allow_missing_keys: bool = False,
) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
orig_labels: original labels that map to others.
target_labels: expected label values, 1: 1 map to the `orig_labels`.
dtype: convert the output data to dtype, default to float32.
allow_missing_keys: don't raise exception if key is missing.
"""
super().__init__(keys, allow_missing_keys)
self.mapper = MapLabelValue(orig_labels=orig_labels, target_labels=target_labels, dtype=dtype)
def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key in self.key_iterator(d):
d[key] = self.mapper(d[key])
return d
class IntensityStatsd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.IntensityStats`.
Compute statistics for the intensity values of input image and store into the meta data dictionary.
    For example: if `ops=[lambda x: np.mean(x), "max"]` and `key_prefix="orig"`, it may generate the stats below:
`{"orig_custom_0": 1.5, "orig_max": 3.0}`.
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
ops: expected operations to compute statistics for the intensity.
if a string, will map to the predefined operations, supported: ["mean", "median", "max", "min", "std"]
mapping to `np.nanmean`, `np.nanmedian`, `np.nanmax`, `np.nanmin`, `np.nanstd`.
if a callable function, will execute the function on input image.
key_prefix: the prefix to combine with `ops` name to generate the key to store the results in the
meta data dictionary. if some `ops` are callable functions, will use "{key_prefix}_custom_{index}"
as the key, where index counts from 0.
        mask_keys: if not None, specify the mask array for the image to extract only the area of interest to compute
            statistics; the mask must have the same shape as the image.
it should be a sequence of strings or None, map to the `keys`.
channel_wise: whether to compute statistics for every channel of input image separately.
if True, return a list of values for every operation, default to False.
meta_keys: explicitly indicate the key of the corresponding meta data dictionary.
used to store the computed statistics to the meta dict.
for example, for data with key `image`, the metadata by default is in `image_meta_dict`.
the meta data is a dictionary object which contains: filename, original_shape, etc.
it can be a sequence of string, map to the `keys`.
if None, will try to construct meta_keys by `key_{meta_key_postfix}`.
        meta_key_postfix: if meta_keys is None, use `key_{postfix}` to fetch the meta data according
to the key data, default is `meta_dict`, the meta data is a dictionary object.
used to store the computed statistics to the meta dict.
allow_missing_keys: don't raise exception if key is missing.
"""
backend = IntensityStats.backend
def __init__(
self,
keys: KeysCollection,
ops: Sequence[Union[str, Callable]],
key_prefix: str,
mask_keys: Optional[KeysCollection] = None,
channel_wise: bool = False,
meta_keys: Optional[KeysCollection] = None,
meta_key_postfix: str = "meta_dict",
allow_missing_keys: bool = False,
) -> None:
super().__init__(keys, allow_missing_keys)
self.stats = IntensityStats(ops=ops, key_prefix=key_prefix, channel_wise=channel_wise)
self.mask_keys = ensure_tuple_rep(None, len(self.keys)) if mask_keys is None else ensure_tuple(mask_keys)
self.meta_keys = ensure_tuple_rep(None, len(self.keys)) if meta_keys is None else ensure_tuple(meta_keys)
if len(self.keys) != len(self.meta_keys):
raise ValueError("meta_keys should have the same length as keys.")
self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, len(self.keys))
def __call__(self, data) -> Dict[Hashable, NdarrayOrTensor]:
d = dict(data)
for key, mask_key, meta_key, meta_key_postfix in self.key_iterator(
d, self.mask_keys, self.meta_keys, self.meta_key_postfix
):
meta_key = meta_key or f"{key}_{meta_key_postfix}"
d[key], d[meta_key] = self.stats(
img=d[key], meta_data=d.get(meta_key), mask=d.get(mask_key) if mask_key is not None else None
)
return d
class ToDeviced(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.ToDevice`.
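    A minimal usage sketch (illustrative, not from the original docs; use a CUDA device
    string such as "cuda:0" if a GPU is available):

    .. code-block:: python

        input_data = {"img": torch.rand(2, 3)}
        out = ToDeviced(keys="img", device="cpu")(input_data)
        # out["img"] is the same tensor moved to the requested device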
"""
backend = ToDevice.backend
def __init__(
self, keys: KeysCollection, device: Union[torch.device, str], allow_missing_keys: bool = False, **kwargs
) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
device: target device to move the Tensor, for example: "cuda:1".
allow_missing_keys: don't raise exception if key is missing.
kwargs: other args for the PyTorch `Tensor.to()` API, for more details:
https://pytorch.org/docs/stable/generated/torch.Tensor.to.html.
"""
super().__init__(keys, allow_missing_keys)
self.converter = ToDevice(device=device, **kwargs)
def __call__(self, data: Mapping[Hashable, torch.Tensor]) -> Dict[Hashable, torch.Tensor]:
d = dict(data)
for key in self.key_iterator(d):
d[key] = self.converter(d[key])
return d
class CuCIMd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.CuCIM` for non-randomized transforms.
For randomized transforms of CuCIM use :py:class:`monai.transforms.RandCuCIMd`.
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
name: The transform name in CuCIM package.
allow_missing_keys: don't raise exception if key is missing.
args: parameters for the CuCIM transform.
kwargs: parameters for the CuCIM transform.
Note:
        CuCIM transforms only work with CuPy arrays, so this transform expects input data to be `cupy.ndarray`.
        Users can call the `ToCuPy` transform to convert a numpy array or torch tensor to a cupy array.
"""
def __init__(self, keys: KeysCollection, name: str, allow_missing_keys: bool = False, *args, **kwargs) -> None:
super().__init__(keys=keys, allow_missing_keys=allow_missing_keys)
self.trans = CuCIM(name, *args, **kwargs)
def __call__(self, data):
"""
Args:
data: Dict[Hashable, `cupy.ndarray`]
Returns:
Dict[Hashable, `cupy.ndarray`]
"""
d = dict(data)
for key in self.key_iterator(d):
d[key] = self.trans(d[key])
return d
class RandCuCIMd(CuCIMd, RandomizableTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.CuCIM` for randomized transforms.
For deterministic non-randomized transforms of CuCIM use :py:class:`monai.transforms.CuCIMd`.
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
name: The transform name in CuCIM package.
apply_prob: the probability to apply the transform (default=1.0)
allow_missing_keys: don't raise exception if key is missing.
args: parameters for the CuCIM transform.
kwargs: parameters for the CuCIM transform.
Note:
        - CuCIM transforms only work with CuPy arrays, so this transform expects input data to be `cupy.ndarray`.
          Users can call the `ToCuPy` transform to convert a numpy array or torch tensor to a cupy array.
        - If the cuCIM transform is already randomized, the `apply_prob` argument has nothing to do with
          the randomness of the underlying cuCIM transform. `apply_prob` defines whether the transform (either randomized
          or non-randomized) is applied at all, so it can apply non-randomized transforms randomly; be careful
          with setting `apply_prob` to anything other than 1.0 when using it along with cuCIM's randomized transforms.
- If the random factor of the underlying cuCIM transform is not derived from `self.R`,
the results may not be deterministic. See Also: :py:class:`monai.transforms.Randomizable`.
"""
def __init__(self, apply_prob: float = 1.0, *args, **kwargs) -> None:
CuCIMd.__init__(self, *args, **kwargs)
RandomizableTransform.__init__(self, prob=apply_prob)
def __call__(self, data):
"""
Args:
data: Dict[Hashable, `cupy.ndarray`]
Returns:
Dict[Hashable, `cupy.ndarray`]
"""
self.randomize(data)
if not self._do_transform:
return dict(data)
return super().__call__(data)
IdentityD = IdentityDict = Identityd
AsChannelFirstD = AsChannelFirstDict = AsChannelFirstd
AsChannelLastD = AsChannelLastDict = AsChannelLastd
AddChannelD = AddChannelDict = AddChanneld
EnsureChannelFirstD = EnsureChannelFirstDict = EnsureChannelFirstd
RemoveRepeatedChannelD = RemoveRepeatedChannelDict = RemoveRepeatedChanneld
RepeatChannelD = RepeatChannelDict = RepeatChanneld
SplitChannelD = SplitChannelDict = SplitChanneld
CastToTypeD = CastToTypeDict = CastToTyped
ToTensorD = ToTensorDict = ToTensord
EnsureTypeD = EnsureTypeDict = EnsureTyped
ToNumpyD = ToNumpyDict = ToNumpyd
ToCupyD = ToCupyDict = ToCupyd
ToPILD = ToPILDict = ToPILd
TransposeD = TransposeDict = Transposed
DeleteItemsD = DeleteItemsDict = DeleteItemsd
SelectItemsD = SelectItemsDict = SelectItemsd
SqueezeDimD = SqueezeDimDict = SqueezeDimd
DataStatsD = DataStatsDict = DataStatsd
SimulateDelayD = SimulateDelayDict = SimulateDelayd
CopyItemsD = CopyItemsDict = CopyItemsd
ConcatItemsD = ConcatItemsDict = ConcatItemsd
LambdaD = LambdaDict = Lambdad
LabelToMaskD = LabelToMaskDict = LabelToMaskd
FgBgToIndicesD = FgBgToIndicesDict = FgBgToIndicesd
ClassesToIndicesD = ClassesToIndicesDict = ClassesToIndicesd
ConvertToMultiChannelBasedOnBratsClassesD = (
ConvertToMultiChannelBasedOnBratsClassesDict
) = ConvertToMultiChannelBasedOnBratsClassesd
AddExtremePointsChannelD = AddExtremePointsChannelDict = AddExtremePointsChanneld
TorchVisionD = TorchVisionDict = TorchVisiond
RandTorchVisionD = RandTorchVisionDict = RandTorchVisiond
RandLambdaD = RandLambdaDict = RandLambdad
MapLabelValueD = MapLabelValueDict = MapLabelValued
IntensityStatsD = IntensityStatsDict = IntensityStatsd
ToDeviceD = ToDeviceDict = ToDeviced
CuCIMD = CuCIMDict = CuCIMd
RandCuCIMD = RandCuCIMDict = RandCuCIMd
|
the-stack_0_11989 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import numpy as np
import unittest
from paddle.jit import to_static, ProgramTranslator
class NetWithParameterList(paddle.nn.Layer):
def __init__(self, in_size, out_size):
super(NetWithParameterList, self).__init__()
weight = self.create_parameter([in_size, out_size])
bias = self.create_parameter([out_size], is_bias=True)
self.params = paddle.nn.ParameterList([weight, bias])
@to_static
def forward(self, x):
out = paddle.matmul(x, self.params[0])
out = paddle.add(out, self.params[1])
out = paddle.tanh(out)
return out
class NetWithParameterListIter(NetWithParameterList):
def __init__(self, in_size, out_size):
super(NetWithParameterListIter, self).__init__(in_size, out_size)
@to_static
def forward(self, x):
# NOTE: manually trigger `__iter__` logic.
params = list(self.params.__iter__())
out = paddle.matmul(x, params[0])
out = paddle.add(out, params[1])
out = paddle.tanh(out)
return out
class TestParameterList(unittest.TestCase):
def setUp(self):
self.seed = 2021
self.iter_num = 5
self.prog_trans = ProgramTranslator()
def train(self, is_iter, to_static):
paddle.seed(self.seed)
np.random.seed(self.seed)
self.prog_trans.enable(to_static)
        if is_iter:
            net = NetWithParameterListIter(10, 3)
        else:
            net = NetWithParameterList(10, 3)
sgd = paddle.optimizer.SGD(0.1, parameters=net.parameters())
for batch_id in range(self.iter_num):
x = paddle.rand([4, 10], dtype='float32')
out = net(x)
loss = paddle.mean(out)
loss.backward()
sgd.step()
sgd.clear_grad()
return loss
def test_parameter_list(self):
static_loss = self.train(False, to_static=True)
dygraph_loss = self.train(False, to_static=False)
self.assertTrue(np.allclose(dygraph_loss, static_loss),
msg='dygraph result is {}\nstatic result is {}'.format(
dygraph_loss, static_loss))
def test_parameter_list_iter(self):
static_loss = self.train(True, to_static=True)
dygraph_loss = self.train(True, to_static=False)
self.assertTrue(np.allclose(dygraph_loss, static_loss),
msg='dygraph result is {}\nstatic result is {}'.format(
dygraph_loss, static_loss))
class NetWithRawParamList(paddle.nn.Layer):
def __init__(self, in_size, out_size):
super(NetWithRawParamList, self).__init__()
weight = self.add_parameter('w',
self.create_parameter([in_size, out_size]))
bias = self.add_parameter(
'b', self.create_parameter([out_size], is_bias=True))
self.params = [weight]
self.bias_dict = {'b': bias}
@to_static
def forward(self, x):
out = paddle.matmul(x, self.params[0])
out = paddle.add(out, self.bias_dict['b'])
out = paddle.tanh(out)
return out
class TestRawParameterList(unittest.TestCase):
def setUp(self):
self.seed = 2021
self.iter_num = 5
self.prog_trans = ProgramTranslator()
def init_net(self):
self.net = NetWithRawParamList(10, 3)
def train(self, to_static):
paddle.seed(self.seed)
np.random.seed(self.seed)
self.prog_trans.enable(to_static)
self.init_net()
sgd = paddle.optimizer.SGD(0.1, parameters=self.net.parameters())
for batch_id in range(self.iter_num):
x = paddle.rand([4, 10], dtype='float32')
out = self.net(x)
loss = paddle.mean(out)
loss.backward()
sgd.step()
sgd.clear_grad()
return loss
def test_parameter_list(self):
static_loss = self.train(to_static=True)
dygraph_loss = self.train(to_static=False)
self.assertTrue(np.allclose(dygraph_loss, static_loss),
msg='dygraph result is {}\nstatic result is {}'.format(
dygraph_loss, static_loss))
class NetWithSubLayerParamList(paddle.nn.Layer):
def __init__(self, sub_layer):
super(NetWithSubLayerParamList, self).__init__()
self.sub_layer = sub_layer
self.params = [sub_layer.weight]
self.bias_dict = {'b': sub_layer.bias}
@to_static
def forward(self, x):
out = paddle.matmul(x, self.params[0])
out = paddle.add(out, self.bias_dict['b'])
out = paddle.tanh(out)
return out
class TestSubLayerParameterList(TestRawParameterList):
def init_net(self):
fc = paddle.nn.Linear(10, 3)
self.net = NetWithSubLayerParamList(fc)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_11992 | import copy
from django.conf import settings
from olympia.constants.promoted import RECOMMENDED
import olympia.core.logger
from olympia import amo
from olympia.amo.utils import attach_trans_dict
from olympia.amo.celery import create_chunked_tasks_signatures
from olympia.amo.utils import to_language
from olympia.constants.search import SEARCH_LANGUAGE_TO_ANALYZER
from olympia.lib.es.utils import create_index
from olympia.versions.compare import version_int
log = olympia.core.logger.getLogger('z.es')
class AddonIndexer:
"""
Base Indexer class for add-ons.
"""
@classmethod
def attach_translation_mappings(cls, mapping, field_names):
"""
For each field in field_names, attach a dict to the ES mapping
properties making "<field_name>_translations" an object containing
"string" and "lang" as non-indexed strings.
Used to store non-indexed, non-analyzed translations in ES that will be
sent back by the API for each item. It does not take care of the
indexed content for search, it's there only to store and return
raw translations.
"""
for field_name in field_names:
# _translations is the suffix in TranslationSerializer.
mapping['properties'][
'%s_translations' % field_name
] = cls.get_translations_definition()
@classmethod
def get_translations_definition(cls):
"""
Return the mapping to use for raw translations (to be returned directly
by the API, not used for analysis).
See attach_translation_mappings() for more information.
"""
return {
'type': 'object',
'properties': {
'lang': {'type': 'text', 'index': False},
'string': {'type': 'text', 'index': False},
},
}
@classmethod
def get_raw_field_definition(cls):
"""
Return the mapping to use for the "raw" version of a field. Meant to be
used as part of a 'fields': {'raw': ... } definition in the mapping of
an existing field.
Used for exact matches and sorting
"""
        # It needs to be a keyword to turn off all analysis; that means we
# don't get the lowercase filter applied by the standard &
# language-specific analyzers, so we need to do that ourselves through
# a custom normalizer for exact matches to work in a case-insensitive
# way.
return {
'type': 'keyword',
'normalizer': 'lowercase_keyword_normalizer',
}
@classmethod
def attach_language_specific_analyzers(cls, mapping, field_names):
"""
For each field in field_names, attach language-specific mappings that
will use specific analyzers for these fields in every language that we
support.
These mappings are used by the search filtering code if they exist.
"""
for lang, analyzer in SEARCH_LANGUAGE_TO_ANALYZER.items():
for field in field_names:
property_name = '%s_l10n_%s' % (field, lang)
mapping['properties'][property_name] = {
'type': 'text',
'analyzer': analyzer,
}
@classmethod
def attach_language_specific_analyzers_with_raw_variant(cls, mapping, field_names):
"""
        Like attach_language_specific_analyzers() but with an extra field to
        store the "raw" variant of the value, for exact matches.
"""
for lang, analyzer in SEARCH_LANGUAGE_TO_ANALYZER.items():
for field in field_names:
property_name = '%s_l10n_%s' % (field, lang)
mapping['properties'][property_name] = {
'type': 'text',
'analyzer': analyzer,
'fields': {
'raw': cls.get_raw_field_definition(),
},
}
@classmethod
def extract_field_api_translations(cls, obj, field, db_field=None):
"""
Returns a dict containing translations that we need to store for
the API. Empty translations are skipped entirely.
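        Illustrative shape of the returned dict for ``field='name'`` (example values,
        not taken from real data)::

            {'name_translations': [
                {'lang': 'en-US', 'string': 'My Add-on'},
                {'lang': 'fr', 'string': 'Mon module'},
            ]}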
"""
if db_field is None:
db_field = '%s_id' % field
extend_with_me = {
'%s_translations'
% field: [
{'lang': to_language(lang), 'string': str(string)}
for lang, string in obj.translations[getattr(obj, db_field)]
if string
]
}
return extend_with_me
@classmethod
def extract_field_search_translation(cls, obj, field, default_locale):
"""
Returns the translation for this field in the object's default locale,
in the form a dict with one entry (the field being the key and the
translation being the value, or an empty string if none was found).
That field will be analyzed and indexed by ES *without*
language-specific analyzers.
"""
translations = dict(obj.translations[getattr(obj, '%s_id' % field)])
default_locale = default_locale.lower() if default_locale else None
value = translations.get(default_locale, getattr(obj, field))
return {field: str(value) if value else ''}
@classmethod
def extract_field_analyzed_translations(cls, obj, field, db_field=None):
"""
Returns a dict containing translations for each language that we have
an analyzer for, for the given field.
        When no translation exists for a given language+field combo, the value
returned is an empty string, to avoid storing the word "None" as the
field does not understand null values.
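        Illustrative shape of the returned dict for ``field='name'`` (example values,
        not taken from real data)::

            {'name_l10n_en-us': 'My Add-on', 'name_l10n_fr': '', ...}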
"""
if db_field is None:
db_field = '%s_id' % field
translations = dict(obj.translations[getattr(obj, db_field)])
return {
'%s_l10n_%s' % (field, lang): translations.get(lang) or ''
for lang in SEARCH_LANGUAGE_TO_ANALYZER
}
# Fields we don't need to expose in the results, only used for filtering
# or sorting.
hidden_fields = (
'*.raw',
'boost',
'colors',
'hotness',
# Translated content that is used for filtering purposes is stored
# under 3 different fields:
# - One field with all translations (e.g., "name").
# - One field for each language, using corresponding analyzer
# (e.g., "name_l10n_en-us", "name_l10n_fr", etc.)
# - One field with all translations in separate objects for the API
# (e.g. "name_translations")
# Only that last one with all translations needs to be returned.
'name',
'description',
'name_l10n_*',
'description_l10n_*',
'summary',
'summary_l10n_*',
)
index_settings = {
'analysis': {
'analyzer': {
'standard_with_word_split': {
# This analyzer tries to split the text into words by using
                    # various methods. It also lowercases them and makes sure
                    # each token is only returned once.
# Only use for short things with extremely meaningful
# content like add-on name - it makes too many
# modifications to be useful for things like descriptions,
# for instance.
'tokenizer': 'standard',
'filter': [
'custom_word_delimiter',
'lowercase',
'stop',
'custom_dictionary_decompounder',
'unique',
],
},
'trigram': {
# Analyzer that splits the text into trigrams.
'tokenizer': 'ngram_tokenizer',
'filter': [
'lowercase',
],
},
},
'tokenizer': {
'ngram_tokenizer': {
'type': 'ngram',
'min_gram': 3,
'max_gram': 3,
'token_chars': ['letter', 'digit'],
}
},
'normalizer': {
'lowercase_keyword_normalizer': {
# By default keywords are indexed 'as-is', but for exact
# name matches we need to lowercase them before indexing,
# so this normalizer does that for us.
'type': 'custom',
'filter': ['lowercase'],
},
},
'filter': {
'custom_word_delimiter': {
# This filter is useful for add-on names that have multiple
                    # words stuck together in a way that is easy to
# recognize, like FooBar, which should be indexed as FooBar
# and Foo Bar. (preserve_original: True makes us index both
# the original and the split version.)
'type': 'word_delimiter',
'preserve_original': True,
},
'custom_dictionary_decompounder': {
# This filter is also useful for add-on names that have
                    # multiple words stuck together, but without a pattern
# that we can automatically recognize. To deal with those,
# we use a small dictionary of common words. It allows us
# to index 'awesometabpassword' as 'awesome tab password',
# helping users looking for 'tab password' find that addon.
'type': 'dictionary_decompounder',
'word_list': [
'all',
'auto',
'ball',
'bar',
'block',
'blog',
'bookmark',
'browser',
'bug',
'button',
'cat',
'chat',
'click',
'clip',
'close',
'color',
'context',
'cookie',
'cool',
'css',
'delete',
'dictionary',
'down',
'download',
'easy',
'edit',
'fill',
'fire',
'firefox',
'fix',
'flag',
'flash',
'fly',
'forecast',
'fox',
'foxy',
'google',
'grab',
'grease',
'html',
'http',
'image',
'input',
'inspect',
'inspector',
'iris',
'js',
'key',
'keys',
'lang',
'link',
'mail',
'manager',
'map',
'mega',
'menu',
'menus',
'monkey',
'name',
'net',
'new',
'open',
'password',
'persona',
'privacy',
'query',
'screen',
'scroll',
'search',
'secure',
'select',
'smart',
'spring',
'status',
'style',
'super',
'sync',
'tab',
'text',
'think',
'this',
'time',
'title',
'translate',
'tree',
'undo',
'upload',
'url',
'user',
'video',
'window',
'with',
'word',
'zilla',
],
},
},
}
}
@classmethod
def get_model(cls):
from olympia.addons.models import Addon
return Addon
@classmethod
def get_index_alias(cls):
"""Return the index alias name."""
return settings.ES_INDEXES.get('default')
@classmethod
def get_mapping(cls):
appver_mapping = {
'properties': {
'max': {'type': 'long'},
'min': {'type': 'long'},
'max_human': {'type': 'keyword', 'index': False},
'min_human': {'type': 'keyword', 'index': False},
}
}
version_mapping = {
'type': 'object',
'properties': {
'compatible_apps': {
'properties': {app.id: appver_mapping for app in amo.APP_USAGE}
},
# Keep '<version>.id' indexed to be able to run exists queries
# on it.
'id': {'type': 'long'},
'reviewed': {'type': 'date', 'index': False},
'files': {
'type': 'object',
'properties': {
'id': {'type': 'long', 'index': False},
'created': {'type': 'date', 'index': False},
'hash': {'type': 'keyword', 'index': False},
'filename': {'type': 'keyword', 'index': False},
'is_mozilla_signed_extension': {'type': 'boolean'},
'size': {'type': 'long', 'index': False},
'strict_compatibility': {'type': 'boolean', 'index': False},
'status': {'type': 'byte'},
'permissions': {'type': 'keyword', 'index': False},
'optional_permissions': {'type': 'keyword', 'index': False},
},
},
'license': {
'type': 'object',
'properties': {
'id': {'type': 'long', 'index': False},
'builtin': {'type': 'short', 'index': False},
'name_translations': cls.get_translations_definition(),
'url': {'type': 'text', 'index': False},
},
},
'release_notes_translations': cls.get_translations_definition(),
'version': {'type': 'keyword', 'index': False},
},
}
mapping = {
'properties': {
'id': {'type': 'long'},
'app': {'type': 'byte'},
'average_daily_users': {'type': 'long'},
'bayesian_rating': {'type': 'double'},
'boost': {'type': 'float', 'null_value': 1.0},
'category': {'type': 'integer'},
'colors': {
'type': 'nested',
'properties': {
'h': {'type': 'integer'},
's': {'type': 'integer'},
'l': {'type': 'integer'},
'ratio': {'type': 'double'},
},
},
'contributions': {'type': 'text'},
'created': {'type': 'date'},
'current_version': version_mapping,
'default_locale': {'type': 'keyword', 'index': False},
'description': {'type': 'text', 'analyzer': 'snowball'},
'guid': {'type': 'keyword'},
'has_eula': {'type': 'boolean', 'index': False},
'has_privacy_policy': {'type': 'boolean', 'index': False},
'hotness': {'type': 'double'},
'icon_hash': {'type': 'keyword', 'index': False},
'icon_type': {'type': 'keyword', 'index': False},
'is_disabled': {'type': 'boolean'},
'is_experimental': {'type': 'boolean'},
'is_recommended': {'type': 'boolean'},
'last_updated': {'type': 'date'},
'listed_authors': {
'type': 'object',
'properties': {
'id': {'type': 'long'},
'name': {'type': 'text'},
'username': {'type': 'keyword'},
'is_public': {'type': 'boolean', 'index': False},
},
},
'modified': {'type': 'date', 'index': False},
'name': {
'type': 'text',
# Adding word-delimiter to split on camelcase, known
# words like 'tab', and punctuation, and eliminate
# duplicates.
'analyzer': 'standard_with_word_split',
'fields': {
# Raw field for exact matches and sorting.
'raw': cls.get_raw_field_definition(),
# Trigrams for partial matches.
'trigrams': {
'type': 'text',
'analyzer': 'trigram',
},
},
},
'previews': {
'type': 'object',
'properties': {
'id': {'type': 'long', 'index': False},
'caption_translations': cls.get_translations_definition(),
'modified': {'type': 'date', 'index': False},
'position': {'type': 'long', 'index': False},
'sizes': {
'type': 'object',
'properties': {
'thumbnail': {'type': 'short', 'index': False},
'image': {'type': 'short', 'index': False},
},
},
},
},
'promoted': {
'type': 'object',
'properties': {
'group_id': {'type': 'byte'},
'approved_for_apps': {'type': 'byte'},
},
},
'ratings': {
'type': 'object',
'properties': {
'count': {'type': 'short', 'index': False},
'average': {'type': 'float'},
},
},
'slug': {'type': 'keyword'},
'requires_payment': {'type': 'boolean', 'index': False},
'status': {'type': 'byte'},
'summary': {'type': 'text', 'analyzer': 'snowball'},
'tags': {'type': 'keyword'},
'type': {'type': 'byte'},
'weekly_downloads': {'type': 'long'},
},
}
# Add fields that we expect to return all translations without being
# analyzed/indexed.
cls.attach_translation_mappings(
mapping,
(
'description',
'developer_comments',
'homepage',
'name',
'summary',
'support_email',
'support_url',
),
)
# Add language-specific analyzers for localized fields that are
# analyzed/indexed.
cls.attach_language_specific_analyzers(mapping, ('description', 'summary'))
cls.attach_language_specific_analyzers_with_raw_variant(mapping, ('name',))
return mapping
@classmethod
def extract_version(cls, obj, version_obj):
from olympia.versions.models import License, Version
data = (
{
'id': version_obj.pk,
'compatible_apps': cls.extract_compatibility_info(obj, version_obj),
'files': [
{
'id': version_obj.file.id,
'created': version_obj.file.created,
'filename': version_obj.file.filename,
'hash': version_obj.file.hash,
'is_mozilla_signed_extension': (
version_obj.file.is_mozilla_signed_extension
),
'size': version_obj.file.size,
'status': version_obj.file.status,
'strict_compatibility': version_obj.file.strict_compatibility,
'permissions': version_obj.file.permissions,
'optional_permissions': version_obj.file.optional_permissions,
}
],
'reviewed': version_obj.reviewed,
'version': version_obj.version,
}
if version_obj
else None
)
if data and version_obj:
attach_trans_dict(Version, [version_obj])
data.update(
cls.extract_field_api_translations(
version_obj, 'release_notes', db_field='release_notes_id'
)
)
if version_obj.license:
data['license'] = {
'id': version_obj.license.id,
'builtin': version_obj.license.builtin,
'url': version_obj.license.url,
}
attach_trans_dict(License, [version_obj.license])
data['license'].update(
cls.extract_field_api_translations(version_obj.license, 'name')
)
return data
@classmethod
def extract_compatibility_info(cls, obj, version_obj):
"""Return compatibility info for the specified version_obj, as will be
indexed in ES."""
compatible_apps = {}
for app, appver in version_obj.compatible_apps.items():
if appver:
min_, max_ = appver.min.version_int, appver.max.version_int
min_human, max_human = appver.min.version, appver.max.version
if not version_obj.file.strict_compatibility:
# The files attached to this version are not using strict
# compatibility, so the max version essentially needs to be
# ignored - let's fake a super high one. We leave max_human
# alone to leave the API representation intact.
max_ = version_int('*')
else:
# Fake wide compatibility for add-ons with no info. We don't
# want to reindex every time a new version of the app is
# released, so we directly index a super high version as the
# max.
min_human, max_human = (
amo.DEFAULT_WEBEXT_MIN_VERSIONS.get(
app, amo.DEFAULT_WEBEXT_MIN_VERSION
),
amo.FAKE_MAX_VERSION,
)
min_, max_ = version_int(min_human), version_int(max_human)
compatible_apps[app.id] = {
'min': min_,
'min_human': min_human,
'max': max_,
'max_human': max_human,
}
return compatible_apps
@classmethod
def extract_document(cls, obj):
"""Extract indexable attributes from an add-on."""
from olympia.addons.models import Preview
attrs = (
'id',
'average_daily_users',
'bayesian_rating',
'contributions',
'created',
'default_locale',
'guid',
'hotness',
'icon_hash',
'icon_type',
'is_disabled',
'is_experimental',
'last_updated',
'modified',
'requires_payment',
'slug',
'status',
'type',
'weekly_downloads',
)
data = {attr: getattr(obj, attr) for attr in attrs}
data['colors'] = None
# Extract dominant colors from static themes.
if obj.type == amo.ADDON_STATICTHEME:
if obj.current_previews:
data['colors'] = obj.current_previews[0].colors
data['app'] = [app.id for app in obj.compatible_apps.keys()]
        # Boost by the number of users on a sub-linear scale (fifth root).
data['boost'] = float(data['average_daily_users'] ** 0.2)
# Quadruple the boost if the add-on is public.
if (
obj.status == amo.STATUS_APPROVED
and not obj.is_experimental
and 'boost' in data
):
data['boost'] = float(max(data['boost'], 1) * 4)
# We can use all_categories because the indexing code goes through the
# transformer that sets it.
data['category'] = [cat.id for cat in obj.all_categories]
data['current_version'] = cls.extract_version(obj, obj.current_version)
data['listed_authors'] = [
{
'name': a.name,
'id': a.id,
'username': a.username,
'is_public': a.is_public,
}
for a in obj.listed_authors
]
data['has_eula'] = bool(obj.eula)
data['has_privacy_policy'] = bool(obj.privacy_policy)
data['is_recommended'] = bool(
obj.promoted and obj.promoted.group == RECOMMENDED
)
data['previews'] = [
{
'id': preview.id,
'modified': preview.modified,
'sizes': preview.sizes,
'position': preview.position,
}
for preview in obj.current_previews
]
data['promoted'] = (
{
'group_id': obj.promoted.group_id,
# store the app approvals because .approved_applications needs it.
'approved_for_apps': [
app.id for app in obj.promoted.approved_applications
],
}
if obj.promoted
else None
)
data['ratings'] = {
'average': obj.average_rating,
'count': obj.total_ratings,
'text_count': obj.text_ratings_count,
}
# We can use tag_list because the indexing code goes through the
# transformer that sets it (attach_tags).
data['tags'] = getattr(obj, 'tag_list', [])
# Handle localized fields.
# First, deal with the 3 fields that need everything:
for field in ('description', 'name', 'summary'):
data.update(cls.extract_field_api_translations(obj, field))
data.update(
cls.extract_field_search_translation(obj, field, obj.default_locale)
)
data.update(cls.extract_field_analyzed_translations(obj, field))
# Then add fields that only need to be returned to the API without
# contributing to search relevancy.
for field in ('developer_comments', 'homepage', 'support_email', 'support_url'):
data.update(cls.extract_field_api_translations(obj, field))
if obj.type != amo.ADDON_STATICTHEME:
# Also do that for preview captions, which are set on each preview
# object.
attach_trans_dict(Preview, obj.current_previews)
for i, preview in enumerate(obj.current_previews):
data['previews'][i].update(
cls.extract_field_api_translations(preview, 'caption')
)
return data
@classmethod
def create_new_index(cls, index_name):
"""
Create a new index for addons in ES.
Intended to be used by reindexation (and tests), generally a bad idea
to call manually.
"""
index_settings = copy.deepcopy(cls.index_settings)
config = {
'mappings': cls.get_mapping(),
'settings': {
# create_index will add its own index settings like number of
# shards and replicas.
'index': index_settings
},
}
create_index(index_name, config)
@classmethod
def reindex_tasks_group(cls, index_name):
"""
Return the group of tasks to execute for a full reindex of addons on
the index called `index_name` (which is not an alias but the real
index name).
"""
from olympia.addons.tasks import index_addons
ids = cls.get_model().unfiltered.values_list('id', flat=True).order_by('id')
chunk_size = 150
return create_chunked_tasks_signatures(
index_addons, list(ids), chunk_size, task_kwargs={'index': index_name}
)
|
the-stack_0_11995 | #Michael Astwood 2018 with help from https://apmonitor.com/wiki/index.php/Main/GekkoPythonOptimization
#This program is built to control light
#levels based on data taken during experiments.
from gekko import GEKKO
import numpy as np
import matplotlib.pyplot as plt
m = GEKKO()
m.time = np.linspace(0,20,41)
# Parameters for model
mass = 500
b = m.Param(value=50)
K = m.Param(value=0.8)
# Manipulated variable (in our system it will be the light intensity)
p = m.MV(value=50, lb=0, ub=100) #value is initial value, ub is upper bound, lb is lower bound
p.STATUS = 1 # allow optimizer to change
p.DCOST = 0 # penalty on changes in intensity (0 = changes are not penalized)
p.DMAX = 10 # slow down changes in intensity
# Controlled Variable (in our system it will be the ratio)
v = m.CV(value=0)
v.STATUS = 1 # add the setpoint to the objective
m.options.CV_TYPE = 2 # squared-error objective (sum of squared errors)
v.SP = 40 # set point
v.TR_INIT = 0 # set point trajectory (0 = straight line at SP, 1 = starts at zero and ramps up)
v.TAU = 5 # time constant of trajectory
# Process model (this is a model for car acceleration)
m.Equation(mass*v.dt() == -v*b + K*b*p) #a differential equation in terms of our controlled variable
#linear drag vs gas pedal input
m.options.IMODE = 6 #this puts the library in MPC mode
m.solve(disp=True) #this finalizes our controller for this prediction cycle
# get additional solution information
import json
with open(m.path+'//results.json') as f:
results = json.load(f)
#plotting the results
plt.figure()
plt.subplot(2,1,1)
plt.plot(m.time,p.value,'b-',label='MV Optimized')
plt.legend()
plt.ylabel('Input')
plt.subplot(2,1,2)
plt.plot(m.time,results['v1.tr'],'k-',label='Reference Trajectory')
plt.plot(m.time,v.value,'r--',label='CV Response')
plt.ylabel('Output')
plt.xlabel('Time')
plt.legend(loc='best')
plt.show()
|
the-stack_0_11997 | from django.urls import include, path
from event import views
from rest_framework.routers import SimpleRouter
from rest_framework_nested import routers
router = SimpleRouter()
router.register(r"event", views.EventViewSet)
event_router = routers.NestedSimpleRouter(router, r"event", lookup="event")
event_router.register(
r"participants", views.EventParticipant, basename="event-participants"
)
event_router.register(
r"competitions", views.EventCompetitionListCreate, basename="competitions"
)
router.register(
r"competition", views.EventCompetitionRetrieveUpdateDestroy, basename="competition"
)
event_participants_router = routers.NestedSimpleRouter(
router, r"competition", lookup="competition"
)
event_participants_router.register(
r"participants",
views.EventCompetitionParticipants,
basename="competition-participants",
)
event_participants_router.register(
r"nominations", views.NominationView, basename="competition-nominations"
)
router.register(r"tickets", views.TicketViewSet, basename="tickets")
router.register(r"scans", views.TicketScanViewSet, basename="scans")
router.register(r"quotas", views.EventQuotaViewSet, basename="quotas")
app_name = "event"
urlpatterns = [
path("", include(router.urls)),
path("", include(event_router.urls)),
path("", include(event_participants_router.urls)),
]
|
the-stack_0_11998 | #!/usr/bin/python2.7
"""Public interface to top-level pytype functions."""
from __future__ import print_function
import contextlib
import logging
import os
import sys
import tokenize
import traceback
from pytype import __version__
from pytype import analyze
from pytype import directors
from pytype import errors
from pytype import load_pytd
from pytype import utils
from pytype.pyc import pyc
from pytype.pyi import parser
from pytype.pytd import optimize
from pytype.pytd import pytd
from pytype.pytd import pytd_utils
from pytype.pytd import serialize_ast
from pytype.pytd import visitors
from pytype.pytd.parse import builtins as pytd_builtins
import six
log = logging.getLogger(__name__)
# Webpage explaining the pytype error codes
ERROR_DOC_URL = "https://google.github.io/pytype/errors.html"
def read_source_file(input_filename):
try:
with open(input_filename, "r") as fi:
return fi.read()
except IOError:
raise utils.UsageError("Could not load input file %s" % input_filename)
def _call(analyze_types, input_filename, errorlog, options, loader):
"""Helper function to call analyze.check/infer_types."""
src = read_source_file(input_filename)
# 'deep' tells the analyzer whether to analyze functions not called from main.
deep = not options.main_only
return analyze_types(
src=src,
filename=input_filename,
errorlog=errorlog,
options=options,
loader=loader,
deep=deep)
def check_py(input_filename, errorlog, options, loader):
"""Check the types of one file."""
_call(analyze.check_types, input_filename, errorlog, options, loader)
def generate_pyi(input_filename, errorlog, options, loader):
"""Run the inferencer on one file, producing output.
Args:
input_filename: name of the file to process
errorlog: Where error messages go. Instance of errors.ErrorLog.
options: config.Options object.
loader: A load_pytd.Loader instance.
Returns:
A tuple, (PYI Ast as string, TypeDeclUnit).
Raises:
CompileError: If we couldn't parse the input file.
UsageError: If the input filepath is invalid.
"""
mod, builtins = _call(
analyze.infer_types, input_filename, errorlog, options, loader)
mod.Visit(visitors.VerifyVisitor())
mod = optimize.Optimize(mod,
builtins,
# TODO(kramm): Add FLAGs for these
lossy=False,
use_abcs=False,
max_union=7,
remove_mutable=False)
mod = pytd_utils.CanonicalOrdering(mod, sort_signatures=True)
result = pytd.Print(mod)
log.info("=========== pyi optimized =============")
log.info("\n%s", result)
log.info("========================================")
if not result.endswith("\n"):
result += "\n"
result_prefix = ""
if options.quick:
result_prefix += "# (generated with --quick)\n"
if result_prefix:
result = result_prefix + "\n" + result
return result, mod
def check_or_generate_pyi(options, errorlog, loader):
"""Returns generated errors and result pyi or None if it's only check.
Args:
options: config.Options object.
errorlog: errors.ErrorLog object.
loader: load_pytd.Loader object.
Returns:
A tuple, (PYI Ast as string, AST) or None.
"""
result = pytd_builtins.DEFAULT_SRC
ast = pytd_builtins.GetDefaultAst(options.python_version)
try:
if options.check:
check_py(input_filename=options.input,
errorlog=errorlog,
options=options,
loader=loader)
return None
else:
result, ast = generate_pyi(input_filename=options.input,
errorlog=errorlog,
options=options,
loader=loader)
except utils.UsageError as e:
raise
except pyc.CompileError as e:
errorlog.python_compiler_error(options.input, e.lineno, e.error)
except IndentationError as e:
errorlog.python_compiler_error(options.input, e.lineno, e.msg)
except tokenize.TokenError as e:
msg, (lineno, unused_column) = e.args # pylint: disable=unbalanced-tuple-unpacking
errorlog.python_compiler_error(options.input, lineno, msg)
except directors.SkipFile:
result += "# skip-file found, file not analyzed"
except Exception as e: # pylint: disable=broad-except
if options.nofail:
log.warn("***Caught exception: %s", str(e), exc_info=True)
if not options.check:
result += ( # pytype: disable=name-error
"# Caught error in pytype: " + str(e).replace("\n", "\n#")
+ "\n# " + "\n# ".join(traceback.format_exc().splitlines()))
else:
e.args = (
str(utils.message(e)) + "\nFile: %s" % options.input,) + e.args[1:]
raise
return (result, ast)
def _write_pyi_output(options, contents, filename):
assert filename
if filename == "-":
sys.stdout.write(contents)
else:
log.info("write pyi %r => %r", options.input, filename)
with open(filename, "w") as fi:
fi.write(contents)
def process_one_file(options):
"""Check a .py file or generate a .pyi for it, according to options.
Args:
options: config.Options object.
Returns:
An error code (0 means no error).
"""
log.info("Process %s => %s", options.input, options.output)
errorlog = errors.ErrorLog()
loader = load_pytd.create_loader(options)
try:
generated_values = check_or_generate_pyi(options, errorlog, loader)
except utils.UsageError as e:
logging.error("Usage error: %s\n", utils.message(e))
return 1
if not options.check:
result, ast = generated_values
if options.pickle_output:
pyi_output = options.verify_pickle
else:
pyi_output = options.output
# Write out the pyi file.
if pyi_output:
_write_pyi_output(options, result, pyi_output)
# Write out the pickle file.
if options.pickle_output:
log.info("write pickle %r => %r", options.input, options.output)
write_pickle(ast, loader, options)
exit_status = handle_errors(errorlog, options)
# If we have set return_success, set exit_status to 0 after the regular error
# handler has been called.
if options.return_success:
exit_status = 0
# Touch output file upon success.
if options.touch and not exit_status:
with open(options.touch, "a"):
os.utime(options.touch, None)
return exit_status
def write_pickle(ast, loader, options):
"""Dump a pickle of the ast to a file."""
try:
ast = serialize_ast.PrepareForExport(
options.module_name, options.python_version, ast, loader)
except parser.ParseError as e:
if options.nofail:
ast = serialize_ast.PrepareForExport(
options.module_name, options.python_version,
pytd_builtins.GetDefaultAst(options.python_version), loader)
log.warn("***Caught exception: %s", str(e), exc_info=True)
else:
raise
if options.verify_pickle:
ast1 = ast.Visit(visitors.LateTypeToClassType())
ast1 = ast1.Visit(visitors.ClearClassPointers())
ast2 = loader.load_file(options.module_name, options.verify_pickle)
ast2 = ast2.Visit(visitors.ClearClassPointers())
if not ast1.ASTeq(ast2):
raise AssertionError()
serialize_ast.StoreAst(ast, options.output)
def print_error_doc_url(errorlog):
names = {e.name for e in errorlog}
if names:
doclink = "\nFor more details, see %s" % ERROR_DOC_URL
if len(names) == 1:
doclink += "#" + names.pop()
print(doclink + ".", file=sys.stderr)
def handle_errors(errorlog, options):
"""Handle the errorlog according to the given options."""
if not options.report_errors:
return 0
if options.output_errors_csv:
errorlog.print_to_csv_file(options.output_errors_csv)
return 0 # Command is successful regardless of errors.
errorlog.print_to_stderr()
print_error_doc_url(errorlog)
return 1 if errorlog.has_error() else 0 # exit code
def parse_pyi(options):
"""Tries parsing a PYI file."""
loader = load_pytd.create_loader(options)
ast = loader.load_file(options.module_name, options.input)
ast = loader.finish_and_verify_ast(ast)
if options.output:
result = "# Internal AST parsed and postprocessed from %s\n\n%s" % (
options.input, pytd.Print(ast))
_write_pyi_output(options, result, options.output)
def get_pytype_version():
return __version__.__version__
@contextlib.contextmanager
def wrap_pytype_exceptions(exception_type, filename=""):
"""Catch pytype errors and reraise them as a single exception type.
NOTE: This will also wrap non-pytype errors thrown within the body of the
code block; it is therefore recommended to use this to wrap a single function
call.
Args:
exception_type: The class to wrap exceptions in.
filename: A filename to use in error messages.
Yields:
nothing, just calls the code block.
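  Example (an illustrative sketch; `MyToolError` stands in for the caller's own
  exception class, and `errorlog`, `options`, `loader` are assumed to exist):

    with wrap_pytype_exceptions(MyToolError, filename=options.input):
      check_py(options.input, errorlog, options, loader)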
"""
try:
yield
except utils.UsageError as e:
raise exception_type("Pytype usage error: %s" % utils.message(e))
except pyc.CompileError as e:
raise exception_type("Error reading file %s at line %s: %s" %
(filename, e.lineno, e.error))
except tokenize.TokenError as e:
msg, (lineno, unused_column) = e.args # pylint: disable=unbalanced-tuple-unpacking
raise exception_type("Error reading file %s at line %s: %s" %
(filename, lineno, msg))
except directors.SkipFile:
raise exception_type("Pytype could not analyze file %s: "
"'# skip-file' directive found" % filename)
except Exception as e: # pylint: disable=broad-except
msg = "Pytype error: %s: %s" % (e.__class__.__name__, e.args[0])
# We need the version check here because six.reraise doesn't work properly
# in python3
if sys.version_info[0] == 2:
_, _, tb = sys.exc_info()
six.reraise(exception_type, exception_type(msg), tb)
else:
raise exception_type(msg).with_traceback(e.__traceback__)
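# Usage sketch for wrap_pytype_exceptions (MyToolError and run_single_file are
# hypothetical names on the caller's side, not part of pytype):
#
#   with wrap_pytype_exceptions(MyToolError, filename=input_path):
#     run_single_file(input_path)
#
# Any pytype (or other) exception raised inside the block is re-raised as
# MyToolError; as the docstring recommends, wrap a single function call.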
|
the-stack_0_12002 | import sys
import warnings
if not sys.warnoptions:
warnings.simplefilter('ignore')
import numpy as np
from fuzzywuzzy import fuzz
import json
import tensorflow as tf
from collections import Counter
from ._utils._utils import load_graph, check_file
from .num2word import to_cardinal
from .texts._text_functions import (
normalizer_textcleaning,
stemmer_str_idx,
pad_sentence_batch,
)
from .texts._tatabahasa import (
rules_normalizer,
consonants,
vowels,
sounds,
GO,
PAD,
EOS,
UNK,
)
from .spell import _return_possible, _edit_normalizer, _return_known
from .topic_influencer import is_location
from ._utils._paths import MALAY_TEXT, PATH_NORMALIZER, S3_PATH_NORMALIZER
class _DEEP_NORMALIZER:
def __init__(self, x, logits, sess, dicts):
self._sess = sess
self._x = x
self._logits = logits
self._dicts = dicts
self._dicts['rev_dictionary_to'] = {
int(k): v for k, v in self._dicts['rev_dictionary_to'].items()
}
def normalize(self, string):
"""
Normalize a string.
Parameters
----------
string : str
Returns
-------
string: normalized string
"""
assert isinstance(string, str), 'input must be a string'
token_strings = normalizer_textcleaning(string).split()
idx = stemmer_str_idx(token_strings, self._dicts['dictionary_from'])
predicted = self._sess.run(
self._logits, feed_dict = {self._x: pad_sentence_batch(idx, PAD)[0]}
)
results = []
for word in predicted:
results.append(
''.join(
[
self._dicts['rev_dictionary_to'][c]
for c in word
if c not in [GO, PAD, EOS, UNK]
]
)
)
return ' '.join(results)
class _SPELL_NORMALIZE:
def __init__(self, corpus):
self.corpus = Counter(corpus)
def normalize(self, string, debug = True):
"""
Normalize a string
Parameters
----------
string : str
debug : bool, optional (default=True)
If true, it will print character similarity distances.
Returns
-------
string: normalized string
"""
assert isinstance(string, str), 'input must be a string'
result = []
for word in normalizer_textcleaning(string).split():
if word.istitle():
result.append(word)
continue
if word[0] == 'x' and len(word) > 1:
result_string = 'tak '
word = word[1:]
else:
result_string = ''
if word[-2:] == 'la':
end_result_string = ' lah'
word = word[:-2]
elif word[-3:] == 'lah':
end_result_string = ' lah'
word = word[:-3]
else:
end_result_string = ''
if word in sounds:
result.append(result_string + sounds[word] + end_result_string)
continue
if word in rules_normalizer:
result.append(
result_string + rules_normalizer[word] + end_result_string
)
continue
if word in self.corpus:
result.append(result_string + word + end_result_string)
continue
candidates = (
_return_known([word], self.corpus)
or _return_known(_edit_normalizer(word), self.corpus)
or _return_possible(word, self.corpus, _edit_normalizer)
or [word]
)
candidates = list(candidates)
candidates = [
(candidate, is_location(candidate))
for candidate in list(candidates)
]
if debug:
print([(k, fuzz.ratio(string, k[0])) for k in candidates], '\n')
strings = [fuzz.ratio(string, k[0]) for k in candidates]
descending_sort = np.argsort(strings)[::-1]
selected = None
for index in descending_sort:
if not candidates[index][1]:
selected = candidates[index][0]
break
selected = (
candidates[descending_sort[0]][0] if not selected else selected
)
result.append(result_string + selected + end_result_string)
return ' '.join(result)
class _FUZZY_NORMALIZE:
def __init__(self, normalized, corpus):
self.normalized = normalized
self.corpus = corpus
def normalize(self, string):
"""
Normalize a string.
Parameters
----------
string : str
Returns
-------
string: normalized string
"""
assert isinstance(string, str), 'input must be a string'
result = []
for word in normalizer_textcleaning(string).split():
if word.istitle():
result.append(word)
continue
if word[0] == 'x' and len(word) > 1:
result_string = 'tak '
word = word[1:]
else:
result_string = ''
if word[-2:] == 'la':
end_result_string = ' lah'
word = word[:-2]
elif word[-3:] == 'lah':
end_result_string = ' lah'
word = word[:-3]
else:
end_result_string = ''
if word in sounds:
result.append(result_string + sounds[word] + end_result_string)
continue
if word in rules_normalizer:
result.append(
result_string + rules_normalizer[word] + end_result_string
)
continue
if word in self.corpus:
result.append(result_string + word + end_result_string)
continue
results = []
for i in range(len(self.normalized)):
results.append(
np.mean([fuzz.ratio(word, k) for k in self.normalized[i]])
)
if len(np.where(np.array(results) > 70)[0]) < 1:
result.append(result_string + word + end_result_string)
continue
result.append(
result_string
+ self.corpus[np.argmax(results)]
+ end_result_string
)
return ' '.join(result)
def fuzzy(corpus):
"""
Train a fuzzy logic Normalizer
Parameters
----------
    corpus : list of str. Preferably the output of malaya.load_malay_dictionary().
Returns
-------
FUZZY_NORMALIZE: Trained malaya.normalizer._FUZZY_NORMALIZE class
"""
assert isinstance(corpus, list) and isinstance(
corpus[0], str
), 'input must be list of strings'
transform = []
for i in corpus:
i = i.lower()
result = []
result.append(i)
result.append(''.join(char for char in i if char not in vowels))
if i[0] in consonants and i[-1] in consonants:
result.append(i[0] + i[-1])
if i[-1] == 'a':
result.append(i[:-1] + 'e')
result.append(i + 'k')
if i[1] in vowels and i[0] in consonants:
result.append(i[0] + i[2:])
if i[-2] in vowels and i[-1] in consonants:
result.append(i[:-2] + i[-1])
result.append(i[0] + i[-1])
if i[-2:] == 'ar':
result.append(i[:-2] + 'o')
if i[:2] == 'ha':
result.append(i[1:])
transform.append(list(set(result)))
return _FUZZY_NORMALIZE(transform, corpus)
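# Usage sketch for fuzzy() (the input string is illustrative; the intended
# source of `corpus` is malaya.load_malay_dictionary()):
#
#   >>> normalizer = fuzzy(malaya.load_malay_dictionary())
#   >>> normalizer.normalize('xsuke')
#
# The leading 'x' is expanded to 'tak ' as implemented in
# _FUZZY_NORMALIZE.normalize above; the remaining word is matched against the
# corpus, so the exact output depends on the dictionary contents.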
def spell(corpus):
"""
Train a Spelling Normalizer
Parameters
----------
    corpus : list of str. Preferably the output of malaya.load_malay_dictionary().
Returns
-------
SPELL_NORMALIZE: Trained malaya.normalizer._SPELL_NORMALIZE class
"""
assert isinstance(corpus, list) and isinstance(
corpus[0], str
), 'input must be list of strings'
return _SPELL_NORMALIZE(corpus)
def basic(string):
"""
Use basic rules-based to normalize a string.
Parameters
----------
string: str
Returns
-------
string: normalized string
"""
assert isinstance(string, str), 'input must be a string'
result = []
for word in normalizer_textcleaning(string).split():
if word.istitle():
result.append(word)
continue
if word in sounds:
result.append(sounds[word])
elif word[-1] == '2':
result.append(word[:-1])
else:
result.append(word)
return ' '.join(result)
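# Usage sketch for basic() (input is illustrative; the exact output also
# depends on normalizer_textcleaning and the `sounds` table):
#
#   >>> basic('jalan2 la situ')
#
# Words ending in '2' lose the shorthand suffix ('jalan2' -> 'jalan'), words
# found in `sounds` are replaced by their mapped form, and everything else is
# kept as-is.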
def deep_model():
"""
    Load a deep-learning model to normalize a string. Note: this model currently performs worse than the fuzzy-based normalizer and still needs further work.
Returns
-------
DEEP_NORMALIZER: malaya.normalizer._DEEP_NORMALIZER class
"""
check_file(PATH_NORMALIZER['deep'], S3_PATH_NORMALIZER['deep'])
try:
with open(PATH_NORMALIZER['deep']['setting'], 'r') as fopen:
dic_normalizer = json.load(fopen)
g = load_graph(PATH_NORMALIZER['deep']['model'])
except:
raise Exception(
"model corrupted due to some reasons, please run malaya.clear_cache('normalizer') and try again"
)
return _DEEP_NORMALIZER(
g.get_tensor_by_name('import/Placeholder:0'),
g.get_tensor_by_name('import/logits:0'),
tf.InteractiveSession(graph = g),
dic_normalizer,
)
|
the-stack_0_12004 | # -*- coding: utf-8 -*- #
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resource definitions for cloud platform apis."""
import enum
BASE_URL = 'https://logging.googleapis.com/v2/'
DOCS_URL = 'https://cloud.google.com/logging/docs/'
class Collections(enum.Enum):
"""Collections for all supported apis."""
BILLINGACCOUNTS = (
'billingAccounts',
'billingAccounts/{billingAccountsId}',
{},
[u'billingAccountsId'],
True
)
BILLINGACCOUNTS_BUCKETS = (
'billingAccounts.buckets',
'{+name}',
{
'':
'billingAccounts/{billingAccountsId}/buckets/{bucketsId}',
},
[u'name'],
True
)
BILLINGACCOUNTS_EXCLUSIONS = (
'billingAccounts.exclusions',
'{+name}',
{
'':
'billingAccounts/{billingAccountsId}/exclusions/{exclusionsId}',
},
[u'name'],
True
)
BILLINGACCOUNTS_SINKS = (
'billingAccounts.sinks',
'{+sinkName}',
{
'':
'billingAccounts/{billingAccountsId}/sinks/{sinksId}',
},
[u'sinkName'],
True
)
BUCKETS = (
'buckets',
'{+name}',
{
'':
'{v2Id}/{v2Id1}/buckets/{bucketsId}',
},
[u'name'],
True
)
EXCLUSIONS = (
'exclusions',
'{+name}',
{
'':
'{v2Id}/{v2Id1}/exclusions/{exclusionsId}',
},
[u'name'],
True
)
FOLDERS = (
'folders',
'folders/{foldersId}',
{},
[u'foldersId'],
True
)
FOLDERS_EXCLUSIONS = (
'folders.exclusions',
'{+name}',
{
'':
'folders/{foldersId}/exclusions/{exclusionsId}',
},
[u'name'],
True
)
FOLDERS_LOCATIONS = (
'folders.locations',
'folders/{foldersId}/locations/{locationsId}',
{},
[u'foldersId', u'locationsId'],
True
)
FOLDERS_LOCATIONS_BUCKETS = (
'folders.locations.buckets',
'{+name}',
{
'':
'folders/{foldersId}/locations/{locationsId}/buckets/'
'{bucketsId}',
},
[u'name'],
True
)
FOLDERS_SINKS = (
'folders.sinks',
'{+sinkName}',
{
'':
'folders/{foldersId}/sinks/{sinksId}',
},
[u'sinkName'],
True
)
ORGANIZATIONS = (
'organizations',
'organizations/{organizationsId}',
{},
[u'organizationsId'],
True
)
ORGANIZATIONS_EXCLUSIONS = (
'organizations.exclusions',
'{+name}',
{
'':
'organizations/{organizationsId}/exclusions/{exclusionsId}',
},
[u'name'],
True
)
ORGANIZATIONS_LOCATIONS = (
'organizations.locations',
'organizations/{organizationsId}/locations/{locationsId}',
{},
[u'organizationsId', u'locationsId'],
True
)
ORGANIZATIONS_LOCATIONS_BUCKETS = (
'organizations.locations.buckets',
'{+name}',
{
'':
'organizations/{organizationsId}/locations/{locationsId}/'
'buckets/{bucketsId}',
},
[u'name'],
True
)
ORGANIZATIONS_SINKS = (
'organizations.sinks',
'{+sinkName}',
{
'':
'organizations/{organizationsId}/sinks/{sinksId}',
},
[u'sinkName'],
True
)
PROJECTS = (
'projects',
'projects/{projectsId}',
{},
[u'projectsId'],
True
)
PROJECTS_EXCLUSIONS = (
'projects.exclusions',
'{+name}',
{
'':
'projects/{projectsId}/exclusions/{exclusionsId}',
},
[u'name'],
True
)
PROJECTS_LOCATIONS = (
'projects.locations',
'projects/{projectsId}/locations/{locationsId}',
{},
[u'projectsId', u'locationsId'],
True
)
PROJECTS_LOCATIONS_BUCKETS = (
'projects.locations.buckets',
'{+name}',
{
'':
'projects/{projectsId}/locations/{locationsId}/buckets/'
'{bucketsId}',
},
[u'name'],
True
)
PROJECTS_METRICS = (
'projects.metrics',
'{+metricName}',
{
'':
'projects/{projectsId}/metrics/{metricsId}',
},
[u'metricName'],
True
)
PROJECTS_SINKS = (
'projects.sinks',
'{+sinkName}',
{
'':
'projects/{projectsId}/sinks/{sinksId}',
},
[u'sinkName'],
True
)
SINKS = (
'sinks',
'{+sinkName}',
{
'':
'{v2Id}/{v2Id1}/sinks/{sinksId}',
},
[u'sinkName'],
True
)
def __init__(self, collection_name, path, flat_paths, params,
enable_uri_parsing):
self.collection_name = collection_name
self.path = path
self.flat_paths = flat_paths
self.params = params
self.enable_uri_parsing = enable_uri_parsing
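# Attribute access sketch (values taken from the tuples defined above):
#
#   Collections.PROJECTS_SINKS.collection_name  -> 'projects.sinks'
#   Collections.PROJECTS_SINKS.path             -> '{+sinkName}'
#   Collections.PROJECTS_SINKS.flat_paths['']   -> 'projects/{projectsId}/sinks/{sinksId}'
#   Collections.PROJECTS_SINKS.params           -> [u'sinkName']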
|
the-stack_0_12006 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Matching functions'''
import numpy as np
import numba
from .exceptions import ParameterError
from .utils import valid_intervals
__all__ = ['match_intervals', 'match_events']
@numba.jit(nopython=True, cache=True)
def __jaccard(int_a, int_b): # pragma: no cover
'''Jaccard similarity between two intervals
Parameters
----------
int_a, int_b : np.ndarrays, shape=(2,)
Returns
-------
Jaccard similarity between intervals
'''
ends = [int_a[1], int_b[1]]
if ends[1] < ends[0]:
ends.reverse()
starts = [int_a[0], int_b[0]]
if starts[1] < starts[0]:
starts.reverse()
intersection = ends[0] - starts[1]
if intersection < 0:
intersection = 0.
union = ends[1] - starts[0]
if union > 0:
return intersection / union
return 0.0
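# Worked example for __jaccard (interval values are illustrative):
#   int_a = [0, 3], int_b = [2, 5]
#   intersection = min(3, 5) - max(0, 2) = 1
#   union        = max(3, 5) - min(0, 2) = 5
#   __jaccard(int_a, int_b) -> 1/5 = 0.2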
@numba.jit(nopython=True, cache=True)
def __match_interval_overlaps(query, intervals_to, candidates): # pragma: no cover
'''Find the best Jaccard match from query to candidates'''
best_score = -1
best_idx = -1
for idx in candidates:
score = __jaccard(query, intervals_to[idx])
if score > best_score:
best_score, best_idx = score, idx
return best_idx
@numba.jit(nopython=True, cache=True)
def __match_intervals(intervals_from, intervals_to, strict=True): # pragma: no cover
'''Numba-accelerated interval matching algorithm.
'''
# sort index of the interval starts
start_index = np.argsort(intervals_to[:, 0])
# sort index of the interval ends
end_index = np.argsort(intervals_to[:, 1])
# and sorted values of starts
start_sorted = intervals_to[start_index, 0]
# and ends
end_sorted = intervals_to[end_index, 1]
search_ends = np.searchsorted(start_sorted, intervals_from[:, 1], side='right')
search_starts = np.searchsorted(end_sorted, intervals_from[:, 0], side='left')
output = np.empty(len(intervals_from), dtype=numba.uint32)
for i in range(len(intervals_from)):
query = intervals_from[i]
# Find the intervals that start after our query ends
after_query = search_ends[i]
# And the intervals that end after our query begins
before_query = search_starts[i]
# Candidates for overlapping have to (end after we start) and (begin before we end)
candidates = set(start_index[:after_query]) & set(end_index[before_query:])
# Proceed as before
if len(candidates) > 0:
output[i] = __match_interval_overlaps(query, intervals_to, candidates)
elif strict:
# Numba only lets us use compile-time constants in exception messages
raise ParameterError
else:
            # Find the closest disjoint interval:
            #   distance to the next interval:     start_sorted[search_ends[i] + 1] - query[1]
            #   distance to the previous interval: query[0] - end_sorted[search_starts[i] - 1]
dist_before = np.inf
dist_after = np.inf
if search_starts[i] > 0:
dist_before = query[0] - end_sorted[search_starts[i]-1]
if search_ends[i] + 1 < len(intervals_to):
dist_after = start_sorted[search_ends[i]+1] - query[1]
if dist_before < dist_after:
output[i] = end_index[search_starts[i]-1]
else:
output[i] = start_index[search_ends[i]+1]
return output
def match_intervals(intervals_from, intervals_to, strict=True):
'''Match one set of time intervals to another.
This can be useful for tasks such as mapping beat timings
to segments.
Each element `[a, b]` of `intervals_from` is matched to the
element `[c, d]` of `intervals_to` which maximizes the
Jaccard similarity between the intervals:
`max(0, |min(b, d) - max(a, c)|) / |max(d, b) - min(a, c)|`
In `strict=True` mode, if there is no interval with positive
intersection with `[a,b]`, an exception is thrown.
In `strict=False` mode, any interval `[a, b]` that has no
intersection with any element of `intervals_to` is instead
matched to the interval `[c, d]` which minimizes
`min(|b - c|, |a - d|)`
that is, the disjoint interval `[c, d]` with a boundary closest
to `[a, b]`.
.. note:: An element of `intervals_to` may be matched to multiple
entries of `intervals_from`.
Parameters
----------
intervals_from : np.ndarray [shape=(n, 2)]
The time range for source intervals.
The `i` th interval spans time `intervals_from[i, 0]`
to `intervals_from[i, 1]`.
`intervals_from[0, 0]` should be 0, `intervals_from[-1, 1]`
should be the track duration.
intervals_to : np.ndarray [shape=(m, 2)]
Analogous to `intervals_from`.
strict : bool
If `True`, intervals can only match if they intersect.
If `False`, disjoint intervals can match.
Returns
-------
interval_mapping : np.ndarray [shape=(n,)]
For each interval in `intervals_from`, the
corresponding interval in `intervals_to`.
See Also
--------
match_events
Raises
------
ParameterError
If either array of input intervals is not the correct shape
If `strict=True` and some element of `intervals_from` is disjoint from
every element of `intervals_to`.
Examples
--------
>>> ints_from = np.array([[3, 5], [1, 4], [4, 5]])
>>> ints_to = np.array([[0, 2], [1, 3], [4, 5], [6, 7]])
>>> librosa.util.match_intervals(ints_from, ints_to)
array([2, 1, 2], dtype=uint32)
>>> # [3, 5] => [4, 5] (ints_to[2])
>>> # [1, 4] => [1, 3] (ints_to[1])
>>> # [4, 5] => [4, 5] (ints_to[2])
The reverse matching of the above is not possible in `strict` mode
because `[6, 7]` is disjoint from all intervals in `ints_from`.
With `strict=False`, we get the following:
>>> librosa.util.match_intervals(ints_to, ints_from, strict=False)
array([1, 1, 2, 2], dtype=uint32)
>>> # [0, 2] => [1, 4] (ints_from[1])
>>> # [1, 3] => [1, 4] (ints_from[1])
>>> # [4, 5] => [4, 5] (ints_from[2])
>>> # [6, 7] => [4, 5] (ints_from[2])
'''
if len(intervals_from) == 0 or len(intervals_to) == 0:
raise ParameterError('Attempting to match empty interval list')
# Verify that the input intervals has correct shape and size
valid_intervals(intervals_from)
valid_intervals(intervals_to)
try:
return __match_intervals(intervals_from, intervals_to, strict=strict)
except ParameterError as exc:
raise ParameterError('Unable to match intervals with strict={}'.format(strict)) from exc
def match_events(events_from, events_to, left=True, right=True):
'''Match one set of events to another.
This is useful for tasks such as matching beats to the nearest
detected onsets, or frame-aligned events to the nearest zero-crossing.
.. note:: A target event may be matched to multiple source events.
Examples
--------
>>> # Sources are multiples of 7
>>> s_from = np.arange(0, 100, 7)
>>> s_from
array([ 0, 7, 14, 21, 28, 35, 42, 49, 56, 63, 70, 77, 84, 91,
98])
>>> # Targets are multiples of 10
>>> s_to = np.arange(0, 100, 10)
>>> s_to
array([ 0, 10, 20, 30, 40, 50, 60, 70, 80, 90])
>>> # Find the matching
>>> idx = librosa.util.match_events(s_from, s_to)
>>> idx
array([0, 1, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 8, 9, 9])
>>> # Print each source value to its matching target
>>> zip(s_from, s_to[idx])
[(0, 0), (7, 10), (14, 10), (21, 20), (28, 30), (35, 30),
(42, 40), (49, 50), (56, 60), (63, 60), (70, 70), (77, 80),
(84, 80), (91, 90), (98, 90)]
Parameters
----------
events_from : ndarray [shape=(n,)]
Array of events (eg, times, sample or frame indices) to match from.
events_to : ndarray [shape=(m,)]
Array of events (eg, times, sample or frame indices) to
match against.
left : bool
right : bool
If `False`, then matched events cannot be to the left (or right)
of source events.
Returns
-------
event_mapping : np.ndarray [shape=(n,)]
For each event in `events_from`, the corresponding event
index in `events_to`.
`event_mapping[i] == arg min |events_from[i] - events_to[:]|`
See Also
--------
match_intervals
Raises
------
ParameterError
If either array of input events is not the correct shape
'''
if len(events_from) == 0 or len(events_to) == 0:
raise ParameterError('Attempting to match empty event list')
# If we can't match left or right, then only strict equivalence
# counts as a match.
if not (left or right) and not np.all(np.in1d(events_from, events_to)):
raise ParameterError('Cannot match events with left=right=False '
'and events_from is not contained '
'in events_to')
# If we can't match to the left, then there should be at least one
# target event greater-equal to every source event
if (not left) and max(events_to) < max(events_from):
raise ParameterError('Cannot match events with left=False '
'and max(events_to) < max(events_from)')
# If we can't match to the right, then there should be at least one
# target event less-equal to every source event
if (not right) and min(events_to) > min(events_from):
raise ParameterError('Cannot match events with right=False '
'and min(events_to) > min(events_from)')
# array of matched items
    output = np.empty_like(events_from, dtype=int)
return __match_events_helper(output, events_from, events_to, left, right)
@numba.jit(nopython=True, cache=True)
def __match_events_helper(output, events_from, events_to, left=True, right=True): # pragma: no cover
# mock dictionary for events
from_idx = np.argsort(events_from)
sorted_from = events_from[from_idx]
to_idx = np.argsort(events_to)
sorted_to = events_to[to_idx]
# find the matching indices
matching_indices = np.searchsorted(sorted_to, sorted_from)
# iterate over indices in matching_indices
for ind, middle_ind in enumerate(matching_indices):
left_flag = False
right_flag = False
left_ind = -1
right_ind = len(matching_indices)
left_diff = 0
right_diff = 0
mid_diff = 0
middle_ind = matching_indices[ind]
sorted_from_num = sorted_from[ind]
# Prevent oob from chosen index
if middle_ind == len(sorted_to):
middle_ind -= 1
# Permitted to look to the left
if left and middle_ind > 0:
left_ind = middle_ind - 1
left_flag = True
# Permitted to look to right
if right and middle_ind < len(sorted_to) - 1:
right_ind = middle_ind + 1
right_flag = True
mid_diff = abs(sorted_to[middle_ind] - sorted_from_num)
if left and left_flag:
left_diff = abs(sorted_to[left_ind] - sorted_from_num)
if right and right_flag:
right_diff = abs(sorted_to[right_ind] - sorted_from_num)
if left_flag and (not right and (sorted_to[middle_ind] > sorted_from_num) or
(not right_flag and left_diff < mid_diff) or
(left_diff < right_diff and left_diff < mid_diff)):
output[ind] = to_idx[left_ind]
# Check if right should be chosen
elif right_flag and (right_diff < mid_diff):
output[ind] = to_idx[right_ind]
# Selected index wins
else:
output[ind] = to_idx[middle_ind]
# Undo sorting
solutions = np.empty_like(output)
solutions[from_idx] = output
return solutions
|
the-stack_0_12008 | """
The multigrid module provides a framework for solving elliptic
problems. A multigrid object is just a list of grids, from the finest
mesh down (by factors of two) to a single interior zone (each grid has
the same number of guardcells).
The main multigrid class is setup to solve a constant-coefficient
Helmholtz equation::
(alpha - beta L) phi = f
where L is the Laplacian and alpha and beta are constants. If alpha =
0 and beta = -1, then this is the Poisson equation.
We support Dirichlet or Neumann BCs, or a periodic domain.
The general usage is as follows::
a = multigrid.CellCenterMG2d(nx, ny, verbose=1, alpha=alpha, beta=beta)
this creates the multigrid object a, with a finest grid of nx by ny
zones and the default boundary condition types. alpha and beta are
the coefficients of the Helmholtz equation. Setting verbose = 1
causing debugging information to be output, so you can see the
residual errors in each of the V-cycles.
Initialization is done as::
a.init_zeros()
this initializes the solution vector with zeros (this is not necessary
if you just created the multigrid object, but it can be used to reset
the solution between runs on the same object).
Next::
    a.init_RHS(numpy.zeros((nx, ny), numpy.float64))
this initializes the RHS on the finest grid to 0 (Laplace's equation).
Any RHS can be set by passing through an array of (nx, ny) values here.
Then to solve, you just do::
a.solve(rtol = 1.e-10)
where rtol is the desired tolerance (residual norm / source norm)
to access the final solution, use the get_solution method::
v = a.get_solution()
For convenience, the grid information on the solution level is available as
attributes to the class,
a.ilo, a.ihi, a.jlo, a.jhi are the indices bounding the interior
of the solution array (i.e. excluding the ghost cells).
a.x and a.y are the coordinate arrays
a.dx and a.dy are the grid spacings
"""
from __future__ import print_function
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import mesh.boundary as bnd
import mesh.patch as patch
from util import msg
class CellCenterMG2d(object):
"""
The main multigrid class for cell-centered data.
We require that nx = ny be a power of 2 and dx = dy, for
simplicity
"""
def __init__(self, nx, ny, ng=1,
xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0,
xl_BC_type="dirichlet", xr_BC_type="dirichlet",
yl_BC_type="dirichlet", yr_BC_type="dirichlet",
xl_BC=None, xr_BC=None,
yl_BC=None, yr_BC=None,
alpha=0.0, beta=-1.0,
nsmooth=10, nsmooth_bottom=50,
verbose=0,
aux_field=None, aux_bc=None,
true_function=None, vis=0, vis_title=""):
"""
Create the CellCenterMG2d object. Note that this requires a
grid to be a power of 2 in size and square.
Parameters
----------
nx : int
number of cells in x-direction
ny : int
number of cells in y-direction.
xmin : float, optional
minimum physical coordinate in x-direction
xmax : float, optional
maximum physical coordinate in x-direction
ymin : float, optional
minimum physical coordinate in y-direction
ymax : float, optional
maximum physical coordinate in y-direction
xl_BC_type : {'neumann', 'dirichlet', 'periodic'}, optional
boundary condition to enforce on lower x face
xr_BC_type : {'neumann', 'dirichlet', 'periodic'}, optional
boundary condition to enforce on upper x face
yl_BC_type : {'neumann', 'dirichlet', 'periodic'}, optional
boundary condition to enforce on lower y face
yr_BC_type : {'neumann', 'dirichlet', 'periodic'}, optional
boundary condition to enforce on upper y face
xl_BC : function, optional
function (of y) to call to get -x boundary values
(homogeneous assumed otherwise)
xr_BC : function, optional
function (of y) to call to get +x boundary values
(homogeneous assumed otherwise)
yl_BC : function, optional
function (of x) to call to get -y boundary values
(homogeneous assumed otherwise)
yr_BC : function, optional
function (of x) to call to get +y boundary values
(homogeneous assumed otherwise)
alpha : float, optional
coefficient in Helmholtz equation (alpha - beta L) phi = f
beta : float, optional
coefficient in Helmholtz equation (alpha - beta L) phi = f
nsmooth : int, optional
number of smoothing iterations to be done at each intermediate
level in the V-cycle (up and down)
nsmooth_bottom : int, optional
number of smoothing iterations to be done during the bottom
solve
verbose : int, optional
increase verbosity during the solve (for verbose=1)
aux_field : list of str, optional
extra fields to define and carry at each level.
Useful for subclassing.
aux_bc : list of BC objects, optional
the boundary conditions corresponding to the aux fields
true_function : function, optional
a function (of x,y) that provides the exact solution to
the elliptic problem we are solving. This is used only
for visualization purposes
vis : int, optional
output a detailed visualization of every smoothing step
all throughout the V-cycle (if vis=1)
vis_title : string, optional
a descriptive title to write on the visualization plots
Returns
-------
out: CellCenterMG2d object
"""
if nx != ny:
raise ValueError("ERROR: multigrid currently requires nx = ny")
self.nx = nx
self.ny = ny
self.ng = ng
self.xmin = xmin
self.xmax = xmax
self.ymin = ymin
self.ymax = ymax
if (xmax-xmin) != (ymax-ymin):
raise ValueError("ERROR: multigrid currently requires a square domain")
self.alpha = alpha
self.beta = beta
self.nsmooth = nsmooth
self.nsmooth_bottom = nsmooth_bottom
self.max_cycles = 100
self.verbose = verbose
# for visualization purposes, we can set a function name that
# provides the true solution to our elliptic problem.
if true_function is not None:
self.true_function = true_function
# a small number used in computing the error, so we don't divide by 0
self.small = 1.e-16
# keep track of whether we've initialized the RHS
self.initialized_rhs = 0
        # assume that self.nx = 2^nlevels and that nx = ny
# this defines nlevels such that we end exactly on a 2x2 grid
self.nlevels = int(math.log(self.nx)/math.log(2.0))
# a multigrid object will be a list of grids
self.grids = []
# create the grids. Here, self.grids[0] will be the coarsest
        # grid and self.grids[nlevels-1] will be the finest grid
# we store the solution, v, the rhs, f.
# create the boundary condition object
bc = bnd.BC(xlb=xl_BC_type, xrb=xr_BC_type,
ylb=yl_BC_type, yrb=yr_BC_type)
nx_t = ny_t = 2
for i in range(self.nlevels):
# create the grid
my_grid = patch.Grid2d(nx_t, ny_t, ng=self.ng,
xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
# add a CellCenterData2d object for this level to our list
self.grids.append(patch.CellCenterData2d(my_grid, dtype=np.float64))
# create the phi BC object -- this only applies for the finest
# level. On the coarser levels, phi represents the residual,
# which has homogeneous BCs
bc_p = bnd.BC(xlb=xl_BC_type, xrb=xr_BC_type,
ylb=yl_BC_type, yrb=yr_BC_type,
xl_func=xl_BC, xr_func=xr_BC,
yl_func=yl_BC, yr_func=yr_BC, grid=my_grid)
if i == self.nlevels-1:
self.grids[i].register_var("v", bc_p)
else:
self.grids[i].register_var("v", bc)
self.grids[i].register_var("f", bc)
self.grids[i].register_var("r", bc)
if aux_field is not None:
for f, b in zip(aux_field, aux_bc):
self.grids[i].register_var(f, b)
self.grids[i].create()
if self.verbose:
print(self.grids[i])
nx_t = nx_t*2
ny_t = ny_t*2
# provide coordinate and indexing information for the solution mesh
soln_grid = self.grids[self.nlevels-1].grid
self.ilo = soln_grid.ilo
self.ihi = soln_grid.ihi
self.jlo = soln_grid.jlo
self.jhi = soln_grid.jhi
self.x = soln_grid.x
self.dx = soln_grid.dx
self.x2d = soln_grid.x2d
self.y = soln_grid.y
self.dy = soln_grid.dy # note, dy = dx is assumed
self.y2d = soln_grid.y2d
self.soln_grid = soln_grid
# store the source norm
self.source_norm = 0.0
# after solving, keep track of the number of cycles taken, the
# relative error from the previous cycle, and the residual error
# (normalized to the source norm)
self.num_cycles = 0
self.residual_error = 1.e33
self.relative_error = 1.e33
# keep track of where we are in the V
self.current_cycle = -1
self.current_level = -1
self.up_or_down = ""
# for visualization -- what frame are we outputting?
self.vis = vis
self.vis_title = vis_title
self.frame = 0
# these draw functions are for visualization purposes and are
# not ordinarily used, except for plotting the progression of the
# solution within the V
def _draw_V(self):
""" draw the V-cycle on our optional visualization """
xdown = np.linspace(0.0, 0.5, self.nlevels)
xup = np.linspace(0.5, 1.0, self.nlevels)
ydown = np.linspace(1.0, 0.0, self.nlevels)
yup = np.linspace(0.0, 1.0, self.nlevels)
plt.plot(xdown, ydown, lw=2, color="k")
plt.plot(xup, yup, lw=2, color="k")
plt.scatter(xdown, ydown, marker="o", color="k", s=40)
plt.scatter(xup, yup, marker="o", color="k", s=40)
if self.up_or_down == "down":
plt.scatter(xdown[self.nlevels-self.current_level-1],
ydown[self.nlevels-self.current_level-1],
marker="o", color="r", zorder=100, s=38)
else:
plt.scatter(xup[self.current_level], yup[self.current_level],
marker="o", color="r", zorder=100, s=38)
plt.text(0.7, 0.1, "V-cycle %d" % (self.current_cycle))
plt.axis("off")
def _draw_solution(self):
""" plot the current solution on our optional visualization """
myg = self.grids[self.current_level].grid
v = self.grids[self.current_level].get_var("v")
cm = "viridis"
plt.imshow(np.transpose(v[myg.ilo:myg.ihi+1, myg.jlo:myg.jhi+1]),
interpolation="nearest", origin="lower",
extent=[self.xmin, self.xmax, self.ymin, self.ymax], cmap=cm)
#plt.xlabel("x")
plt.ylabel("y")
if self.current_level == self.nlevels-1:
plt.title(r"solving $L\phi = f$")
else:
plt.title(r"solving $Le = r$")
formatter = matplotlib.ticker.ScalarFormatter(useMathText=True)
cb = plt.colorbar(format=formatter, shrink=0.5)
cb.ax.yaxis.offsetText.set_fontsize("small")
cl = plt.getp(cb.ax, 'ymajorticklabels')
plt.setp(cl, fontsize="small")
def _draw_main_solution(self):
"""
plot the solution at the finest level on our optional
visualization
"""
myg = self.grids[self.nlevels-1].grid
v = self.grids[self.nlevels-1].get_var("v")
cm = "viridis"
plt.imshow(np.transpose(v[myg.ilo:myg.ihi+1, myg.jlo:myg.jhi+1]),
interpolation="nearest", origin="lower",
extent=[self.xmin, self.xmax, self.ymin, self.ymax], cmap=cm)
plt.xlabel("x")
plt.ylabel("y")
plt.title(r"current fine grid solution")
formatter = matplotlib.ticker.ScalarFormatter(useMathText=True)
cb = plt.colorbar(format=formatter, shrink=0.5)
cb.ax.yaxis.offsetText.set_fontsize("small")
cl = plt.getp(cb.ax, 'ymajorticklabels')
plt.setp(cl, fontsize="small")
def _draw_main_error(self):
"""
plot the error with respect to the true solution on our optional
visualization
"""
myg = self.grids[self.nlevels-1].grid
v = self.grids[self.nlevels-1].get_var("v")
e = v - self.true_function(myg.x2d, myg.y2d)
cmap = "viridis"
plt.imshow(np.transpose(e[myg.ilo:myg.ihi+1, myg.jlo:myg.jhi+1]),
interpolation="nearest", origin="lower",
extent=[self.xmin, self.xmax, self.ymin, self.ymax], cmap=cmap)
plt.xlabel("x")
plt.ylabel("y")
plt.title(r"current fine grid error")
formatter = matplotlib.ticker.ScalarFormatter(useMathText=True)
cb = plt.colorbar(format=formatter, shrink=0.5)
cb.ax.yaxis.offsetText.set_fontsize("small")
cl = plt.getp(cb.ax, 'ymajorticklabels')
plt.setp(cl, fontsize="small")
def grid_info(self, level, indent=0):
"""
Report simple grid information
"""
print("{}level: {}, grid: {} x {}".format(
indent*" ", level, self.grids[level].grid.nx, self.grids[level].grid.ny))
def get_solution(self, grid=None):
"""
Return the solution after doing the MG solve
If a grid object is passed in, then the solution is put on that
grid -- not the passed in grid must have the same dx and dy
Returns
-------
out : ndarray
"""
v = self.grids[self.nlevels-1].get_var("v")
if grid is None:
return v.copy()
else:
myg = self.soln_grid
assert grid.dx == myg.dx and grid.dy == myg.dy
sol = grid.scratch_array()
sol.v(buf=1)[:, :] = v.v(buf=1)
return sol
def get_solution_gradient(self, grid=None):
"""
Return the gradient of the solution after doing the MG solve. The
x- and y-components are returned in separate arrays.
If a grid object is passed in, then the gradient is computed on that
grid. Note: the passed-in grid must have the same dx, dy
Returns
-------
out : ndarray, ndarray
"""
myg = self.soln_grid
if grid is None:
og = self.soln_grid
else:
og = grid
assert og.dx == myg.dx and og.dy == myg.dy
v = self.grids[self.nlevels-1].get_var("v")
gx = og.scratch_array()
gy = og.scratch_array()
gx.v()[:, :] = 0.5*(v.ip(1) - v.ip(-1))/myg.dx
gy.v()[:, :] = 0.5*(v.jp(1) - v.jp(-1))/myg.dy
return gx, gy
def get_solution_object(self):
"""
Return the full solution data object at the finest resolution
after doing the MG solve
Returns
-------
out : CellCenterData2d object
"""
return self.grids[self.nlevels-1]
def init_solution(self, data):
"""
Initialize the solution to the elliptic problem by passing in
a value for all defined zones
Parameters
----------
data : ndarray
An array (of the same size as the finest MG level) with the
values to initialize the solution to the elliptic problem.
"""
v = self.grids[self.nlevels-1].get_var("v")
v[:, :] = data.copy()
def init_zeros(self):
"""
Set the initial solution to zero
"""
v = self.grids[self.nlevels-1].get_var("v")
v[:, :] = 0.0
def init_RHS(self, data):
"""
Initialize the right hand side, f, of the Helmholtz equation
(alpha - beta L) phi = f
Parameters
----------
data : ndarray
An array (of the same size as the finest MG level) with the
values to initialize the solution to the elliptic problem.
"""
f = self.grids[self.nlevels-1].get_var("f")
f[:, :] = data.copy()
# store the source norm
self.source_norm = f.norm()
if self.verbose:
print("Source norm = ", self.source_norm)
self.initialized_rhs = 1
def _compute_residual(self, level):
""" compute the residual and store it in the r variable"""
v = self.grids[level].get_var("v")
f = self.grids[level].get_var("f")
r = self.grids[level].get_var("r")
myg = self.grids[level].grid
# compute the residual
# r = f - alpha phi + beta L phi
r.v()[:, :] = f.v()[:, :] - self.alpha*v.v()[:, :] + \
self.beta*((v.ip(-1) + v.ip(1) - 2*v.v())/myg.dx**2 +
(v.jp(-1) + v.jp(1) - 2*v.v())/myg.dy**2)
def smooth(self, level, nsmooth):
"""
Use red-black Gauss-Seidel iterations to smooth the solution
at a given level. This is used at each stage of the V-cycle
(up and down) in the MG solution, but it can also be called
directly to solve the elliptic problem (although it will take
many more iterations).
Parameters
----------
level : int
The level in the MG hierarchy to smooth the solution
nsmooth : int
The number of r-b Gauss-Seidel smoothing iterations to perform
"""
v = self.grids[level].get_var("v")
f = self.grids[level].get_var("f")
myg = self.grids[level].grid
self.grids[level].fill_BC("v")
xcoeff = self.beta/myg.dx**2
ycoeff = self.beta/myg.dy**2
# do red-black G-S
for i in range(nsmooth):
# do the red black updating in four decoupled groups
#
#
# | | |
# --+-------+-------+--
# | | |
# | 4 | 3 |
# | | |
# --+-------+-------+--
# | | |
# jlo | 1 | 2 |
# | | |
# --+-------+-------+--
# | ilo | |
#
# groups 1 and 3 are done together, then we need to
# fill ghost cells, and then groups 2 and 4
for n, (ix, iy) in enumerate([(0, 0), (1, 1), (1, 0), (0, 1)]):
v.ip_jp(ix, iy, s=2)[:, :] = (f.ip_jp(ix, iy, s=2) +
xcoeff*(v.ip_jp(1+ix, iy, s=2) + v.ip_jp(-1+ix, iy, s=2)) +
ycoeff*(v.ip_jp(ix, 1+iy, s=2) + v.ip_jp(ix, -1+iy, s=2))) / \
(self.alpha + 2.0*xcoeff + 2.0*ycoeff)
if n == 1 or n == 3:
self.grids[level].fill_BC("v")
if self.vis == 1:
plt.clf()
plt.subplot(221)
self._draw_solution()
plt.subplot(222)
self._draw_V()
plt.subplot(223)
self._draw_main_solution()
plt.subplot(224)
self._draw_main_error()
plt.suptitle(self.vis_title, fontsize=18)
plt.pause(0.001)
plt.draw()
plt.savefig("mg_%4.4d.png" % (self.frame))
self.frame += 1
def solve(self, rtol=1.e-11):
"""
The main driver for the multigrid solution of the Helmholtz
equation. This controls the V-cycles, smoothing at each
step of the way and uses simple smoothing at the coarsest
level to perform the bottom solve.
Parameters
----------
rtol : float
The relative tolerance (residual norm / source norm) to
solve to. Note that if the source norm is 0 (e.g. the
righthand side of our equation is 0), then we just use
the norm of the residual.
"""
# start by making sure that we've initialized the RHS
if not self.initialized_rhs:
msg.fail("ERROR: RHS not initialized")
if self.verbose:
print("source norm = ", self.source_norm)
old_phi = self.grids[self.nlevels-1].get_var("v").copy()
residual_error = 1.e33
cycle = 1
# V-cycles until we achieve the L2 norm of the residual < rtol
while residual_error > rtol and cycle <= self.max_cycles:
self.current_cycle = cycle
# zero out the solution on all but the finest grid
for level in range(self.nlevels-1):
self.grids[level].zero("v")
if self.verbose:
print("<<< beginning V-cycle (cycle {}) >>>\n".format(cycle))
# do V-cycles through the entire hierarchy
level = self.nlevels-1
self.v_cycle(level)
# compute the error with respect to the previous solution
# this is for diagnostic purposes only -- it is not used to
# determine convergence
soln = self.grids[self.nlevels-1]
diff = (soln.get_var("v") - old_phi)/(soln.get_var("v") + self.small)
relative_error = soln.grid.norm(diff)
old_phi = soln.get_var("v").copy()
# compute the residual error, relative to the source norm
self._compute_residual(self.nlevels-1)
fp = self.grids[level]
r = fp.get_var("r")
if self.source_norm != 0.0:
residual_error = r.norm()/self.source_norm
else:
residual_error = r.norm()
if self.verbose:
print("cycle {}: relative err = {}, residual err = {}\n".format(
cycle, relative_error, residual_error))
cycle += 1
self.num_cycles = cycle-1
self.relative_error = relative_error
self.residual_error = residual_error
fp.fill_BC("v")
def v_cycle(self, level):
"""
Perform a V-cycle for a single 2-level solve. This is applied
recursively do V-cycle through the entire hierarchy.
"""
if level > 0:
self.current_level = level
self.up_or_down = "down"
# pointers to the fine and coarse data
fp = self.grids[level]
cp = self.grids[level-1]
if self.verbose:
self._compute_residual(level)
self.grid_info(level, indent=2)
print(" before G-S, residual L2: {}".format(fp.get_var("r").norm()))
# smooth on the current level
self.smooth(level, self.nsmooth)
# compute the residual
self._compute_residual(level)
if self.verbose:
print(" after G-S, residual L2: {}\n".format(fp.get_var("r").norm()))
# restrict the residual down to the RHS of the coarser level
f_coarse = cp.get_var("f")
f_coarse.v()[:, :] = fp.restrict("r").v()
# solve the coarse problem
self.v_cycle(level-1)
# ascending part
self.current_level = level
self.up_or_down = "up"
fp = self.grids[level]
cp = self.grids[level-1]
# prolong the error up from the coarse grid
e = cp.prolong("v")
# correct the solution on the current grid
v = fp.get_var("v")
v.v()[:, :] += e.v()
fp.fill_BC("v")
if self.verbose:
self._compute_residual(level)
self.grid_info(level, indent=2)
print(" before G-S, residual L2: {}".format(fp.get_var("r").norm()))
# smooth
self.smooth(level, self.nsmooth)
if self.verbose:
self._compute_residual(level)
print(" after G-S, residual L2: {}\n".format(fp.get_var("r").norm()))
else:
# bottom solve: solve the discrete coarse problem. We
# could use any number of different matrix solvers here
# (like CG), but since we are 2x2 by design at this point,
# we will just smooth
if self.verbose:
print(" bottom solve:")
self.current_level = level
bp = self.grids[level]
if self.verbose:
self.grid_info(level, indent=2)
print("")
self.smooth(level, self.nsmooth_bottom)
bp.fill_BC("v")
|
the-stack_0_12011 | #===============================================================================
# Copyright 2017-2019 Intel Corporation
# All Rights Reserved.
#
# If this software was obtained under the Intel Simplified Software License,
# the following terms apply:
#
# The source code, information and material ("Material") contained herein is
# owned by Intel Corporation or its suppliers or licensors, and title to such
# Material remains with Intel Corporation or its suppliers or licensors. The
# Material contains proprietary information of Intel or its suppliers and
# licensors. The Material is protected by worldwide copyright laws and treaty
# provisions. No part of the Material may be used, copied, reproduced,
# modified, published, uploaded, posted, transmitted, distributed or disclosed
# in any way without Intel's prior express written permission. No license under
# any patent, copyright or other intellectual property rights in the Material
# is granted to or conferred upon you, either expressly, by implication,
# inducement, estoppel or otherwise. Any license under such intellectual
# property rights must be express and approved by Intel in writing.
#
# Unless otherwise agreed by Intel in writing, you may not remove or alter this
# notice or any other notice embedded in Materials by Intel or Intel's
# suppliers or licensors in any way.
#
#
# If this software was obtained under the Apache License, Version 2.0 (the
# "License"), the following terms apply:
#
# You may not use this file except in compliance with the License. You may
# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
#
# Intel(R) Integrated Performance Primitives (Intel(R) IPP) Cryptography
#
import re
import sys
import os
import hashlib
Header = sys.argv[1] ## Intel(R) IPP Crypto dispatcher will be generated for functions in Header
OutDir = sys.argv[2] ## Output folder for generated files
cpulist = sys.argv[3] ## Actual CPU list: semicolon separated string
cpulist = cpulist.split(';')
headerID= False ## Header ID define to avoid multiple include like: #if !defined( __IPPCP_H__ )
from gen_disp_common import readNextFunction
HDR= open( Header, 'r' )
h= HDR.readlines()
HDR.close()
## keep filename only
(incdir, Header)= os.path.split(Header)
## original header name to declare external functions as internal for dispatcher
OrgH= Header
isFunctionFound = True
curLine = 0
FunName = ""
FunArg = ""
while (isFunctionFound == True):
result = readNextFunction(h, curLine, headerID)
curLine = result['curLine']
FunName = result['FunName']
FunArg = result['FunArg']
isFunctionFound = result['success']
if (isFunctionFound == True):
##################################################
## create dispatcher files: C file with inline asm
##################################################
filename = "jmp_{}_{}".format(FunName, hashlib.sha512(FunName.encode('utf-8')).hexdigest()[:8])
DISP= open( os.sep.join([OutDir, filename + ".asm"]), 'w' )
for cpu in cpulist:
DISP.write("EXTRN "+cpu+"_"+FunName+":PROC\n")
DISP.write("EXTRN ippcpJumpIndexForMergedLibs:DWORD\n")
DISP.write("EXTRN ippcpSafeInit:PROC\n\n")
DISP.write("_DATA SEGMENT\n\n")
DISP.write(" DQ in_"+FunName+"\n")
DISP.write(FunName+"_arraddr")
for cpu in cpulist:
DISP.write(" DQ "+cpu+"_"+FunName+"\n")
DISP.write("""
_DATA ENDS
_TEXT SEGMENT
in_{FunName} PROC PRIVATE
call ippcpSafeInit
ALIGN 16
{FunName} PROC PUBLIC
movsxd rax, DWORD PTR ippcpJumpIndexForMergedLibs
lea r10, {FunName}_arraddr
jmp qword ptr [r10+rax*8]
{FunName} ENDP
in_{FunName} ENDP
_TEXT ENDS
END
""".format(FunName=FunName))
DISP.close()
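# Invocation sketch (script name, paths and CPU list are illustrative; the CPU
# list is just the semicolon-separated set of optimized-code prefixes to
# dispatch between):
#
#   python gen_disp.py include/ippcp.h build/dispatcher "m7;n8;y8;e9;l9;n0;k0"
#
# One jmp_<function>_<hash>.asm stub is written to the output folder for every
# dispatched function found in the header.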
|
the-stack_0_12012 | """Tests for certbot_dns_joker.dns_joker."""
import unittest
try:
import mock
except ImportError: # pragma: no cover
from unittest import mock # type: ignore
from requests.exceptions import HTTPError
import urllib.parse
import requests
import requests_mock
from certbot.compat import os
from certbot.errors import PluginError
from certbot.plugins import dns_test_common
from certbot.plugins.dns_test_common import DOMAIN
from certbot.tests import util as test_util
FAKE_USERNAME = 'fake_username'
FAKE_PASSWORD = 'fake_password'
MOCK_ENDPOINT = 'mock://endpoint'
class AuthenticatorTest(test_util.TempDirTestCase,
dns_test_common.BaseAuthenticatorTest):
def setUp(self):
super(AuthenticatorTest, self).setUp()
from certbot_dns_joker.dns_joker import Authenticator
path = os.path.join(self.tempdir, 'file.ini')
dns_test_common.write({
# 'certbot_dns_joker:dns_joker_username': FAKE_USERNAME,
# 'certbot_dns_joker:dns_joker_password': FAKE_PASSWORD,
'joker_username': FAKE_USERNAME,
'joker_password': FAKE_PASSWORD,
}, path)
self.config = mock.MagicMock(joker_credentials=path,
joker_propagation_seconds=0) # don't wait during tests
# self.auth = Authenticator(self.config, "certbot_dns_joker:dns_joker")
self.auth = Authenticator(self.config, "joker")
self.mock_client = mock.MagicMock()
# _get_joker_client | pylint: disable=protected-access
self.auth._get_joker_client = mock.MagicMock(return_value=self.mock_client)
def test_perform(self):
self.auth.perform([self.achall])
expected = [
mock.call.add_txt_record(
DOMAIN, "_acme-challenge." + DOMAIN, mock.ANY
)
]
self.assertEqual(expected, self.mock_client.mock_calls)
def test_cleanup(self):
# _attempt_cleanup | pylint: disable=protected-access
self.auth._attempt_cleanup = True
self.auth.cleanup([self.achall])
expected = [
mock.call.del_txt_record(
DOMAIN, "_acme-challenge." + DOMAIN, mock.ANY
)
]
self.assertEqual(expected, self.mock_client.mock_calls)
class JokerClientTest(unittest.TestCase):
record_name = "_acme-challenge." + DOMAIN
record_content = "bar"
record_ttl = 42
def setUp(self):
from certbot_dns_joker.dns_joker import _JokerClient
self.client = _JokerClient(FAKE_USERNAME, FAKE_PASSWORD, DOMAIN,
self.record_ttl, endpoint=MOCK_ENDPOINT)
self.adapter = requests_mock.Adapter()
self.client.session.mount('mock://', self.adapter)
def _register_response(self, response='good', subdomain=None, additional_matcher=None, **kwargs):
def add_matcher(request):
data = urllib.parse.parse_qs(request.text)
add_result = True
if additional_matcher is not None:
add_result = additional_matcher(request)
def submatch(label):
if subdomain:
print(f'checking label:{label} subdomain:{subdomain}')
return len(label) > len(subdomain) and label[-len(subdomain)-1:] == '.' + subdomain
else:
return True
# The error message is unhelpful (NoMockAddress) if this fails.
return (
("username" in data and data["username"] == [FAKE_USERNAME]) and
("password" in data and data["password"] == [FAKE_PASSWORD]) and
("zone" in data and data["zone"] == [DOMAIN]) and
("label" in data and submatch(data["label"][0])) and
add_result
)
self.adapter.register_uri(
requests_mock.ANY,
MOCK_ENDPOINT,
text=response,
status_code=200 if response == 'good' else 400,
additional_matcher=add_matcher,
**kwargs
)
def test_add_txt_record(self):
self._register_response()
self.client.add_txt_record(
DOMAIN, self.record_name, self.record_content
)
def test_add_txt_record_fail_to_authenticate(self):
self._register_response(response='badauth')
with self.assertRaises(PluginError) as context:
self.client.add_txt_record(
DOMAIN, self.record_name, self.record_content
)
def test_add_txt_record_fail_to_find_domain(self):
self._register_response(response='nohost')
with self.assertRaises(PluginError) as context:
self.client.add_txt_record(
DOMAIN, self.record_name, self.record_content
)
def test_add_txt_record_subdomain(self):
self._register_response(subdomain='sub')
self.client.add_txt_record(
'sub.' + DOMAIN, 'challenge.sub.' + DOMAIN, self.record_content
)
def test_del_txt_record(self):
self._register_response()
self.client.del_txt_record(
DOMAIN, self.record_name, self.record_content
)
if __name__ == "__main__":
unittest.main() # pragma: no cover
|
the-stack_0_12015 | from stix2 import MemoryStore, Filter
import json
from itertools import chain
def query_all(srcs, filters):
"""return the union of a query across multiple memorystores"""
return list(chain.from_iterable(
src.query(filters) for src in srcs
))
def get_related(srcs, src_type, rel_type, target_type, reverse=False):
"""build relationship mappings
params:
srcs: memorystores for enterprise, mobile and pre-attack, in an array
src_type: source type for the relationships, e.g "attack-pattern"
rel_type: relationship type for the relationships, e.g "uses"
target_type: target type for the relationship, e.g "intrusion-set"
reverse: build reverse mapping of target to source
"""
relationships = query_all(srcs, [
Filter('type', '=', 'relationship'),
Filter('relationship_type', '=', rel_type),
Filter('revoked', '=', False)
])
# stix_id => [ ids of objects with relationships with stix_id ]
id_to_related = {}
# build the dict
for relationship in relationships:
if (src_type in relationship.source_ref and target_type in relationship.target_ref):
if (relationship.source_ref in id_to_related and not reverse) or (relationship.target_ref in id_to_related and reverse):
if not reverse:
id_to_related[relationship.source_ref].append({
"relationship": relationship,
"id": relationship.target_ref
})
else:
id_to_related[relationship.target_ref].append({
"relationship": relationship,
"id": relationship.source_ref
})
else:
if not reverse:
id_to_related[relationship.source_ref] = [{
"relationship": relationship,
"id": relationship.target_ref
}]
else:
id_to_related[relationship.target_ref] = [{
"relationship": relationship,
"id": relationship.source_ref
}]
# all objects of target type
if not reverse:
targets = query_all(srcs, [
Filter('type', '=', target_type),
Filter('revoked', '=', False)
])
else:
targets = query_all(srcs, [
Filter('type', '=', src_type),
Filter('revoked', '=', False)
])
id_to_target = {}
# build the dict
for target in targets:
id_to_target[target.id] = target
output = {}
for stix_id in id_to_related:
value = []
for related in id_to_related[stix_id]:
if not related["id"] in id_to_target:
continue # targetting a revoked object
value.append({
"object": json.loads(id_to_target[related["id"]].serialize()),
"relationship": json.loads(related["relationship"].serialize())
})
output[stix_id] = value
return output
# tool:group
def tools_used_by_groups(srcs):
"""returns group_id => {tool, relationship} for each tool used by the
group. srcs should be an array of memorystores for enterprise,
mobile and pre
"""
return get_related(srcs, "intrusion-set", "uses", "tool")
def groups_using_tool(srcs):
"""returns tool_id => {group, relationship} for each group using the tool.
srcs should be an array of memorystores for enterprise, mobile and pre
"""
return get_related(srcs, "intrusion-set", "uses", "tool", reverse=True)
# malware:group
def malware_used_by_groups(srcs):
"""returns group_id => {malware, relationship} for each malware used by
    the group. srcs should be an array of memorystores for enterprise,
mobile and pre
"""
return get_related(srcs, "intrusion-set", "uses", "malware")
def groups_using_malware(srcs):
"""returns malware_id => {group, relationship} for each group using
the malware. srcs should be an array of memorystores for enterprise,
mobile and pre
"""
return get_related(srcs, "intrusion-set", "uses", "malware", reverse=True)
# technique:group
def techniques_used_by_groups(srcs):
"""returns group_id => {technique, relationship} for each technique used
by the group. srcs should be an array of memorystores for enterprise,
mobile and pre
"""
return get_related(srcs, "intrusion-set", "uses", "attack-pattern")
def groups_using_technique(srcs):
"""returns technique_id => {group, relationship} for each group using the
technique. srcs should be an array of memorystores for enterprise,
mobile and pre
"""
return get_related(srcs, "intrusion-set", "uses", "attack-pattern", reverse=True)
# technique:malware
def techniques_used_by_malware(srcs):
"""return malware => {technique, relationship} for each technique
used by the malware. srcs should be an array of memorystores for
enterprise, mobile and pre
"""
return get_related(srcs, "malware", "uses", "attack-pattern")
def malware_using_technique(srcs):
"""return technique_id => {malware, relationship} for each malware using
the technique. srcs should be an array of memorystores for enterprise,
mobile and pre
"""
return get_related(srcs, "malware", "uses", "attack-pattern", reverse=True)
# technique:tool
def techniques_used_by_tools(srcs):
"""return tool_id => {technique, relationship} for each technique used
by the tool. srcs should be an array of memorystores for enterprise,
mobile and pre
"""
return get_related(srcs, "tool", "uses", "attack-pattern")
def tools_using_technique(srcs):
"""return technique_id => {tool, relationship} for each tool using the
technique. srcs should be an array of memorystores for enterprise,
mobile and pre
"""
return get_related(srcs, "tool", "uses", "attack-pattern", reverse=True)
# technique:mitigation
def mitigation_mitigates_techniques(srcs):
"""return mitigation_id => {technique, relationship} for each technique
mitigated by the mitigation. srcs should be an array of memorystores
for enterprise, mobile and pre
"""
return get_related(srcs, "course-of-action", "mitigates", "attack-pattern", reverse=False)
def technique_mitigated_by_mitigation(srcs):
"""return technique_id => {mitigation, relationship} for each mitigation
of the technique. srcs should be an array of memorystores for
enterprise, mobile and pre
"""
return get_related(srcs, "course-of-action", "mitigates", "attack-pattern", reverse=True)
# technique:technique
def technique_related_to_technique(srcs):
"""return technique_id => {technique, relationship} for each technique
related to the technique. srcs should be an array of memorystores for
enterprise, mobile and pre
"""
return get_related(srcs, "attack-pattern", "related-to", "attack-pattern")
# technique:subtechnique
def subtechniques_of(srcs):
""" return technique_id => {subtechnique, relationship} for each subtechnique
of the technique. srcs should be an array of memorystores for enterprise,
mobile and pre
"""
return get_related(srcs, "attack-pattern", "subtechnique-of", "attack-pattern", reverse=True)
def parent_technique_of(srcs):
""" return subtechnique_id => {technique, relationship} describing the parent technique
of the subtechnique. srcs should be an array of memorystores for enterprise,
mobile and pre
"""
return get_related(srcs, "attack-pattern", "subtechnique-of", "attack-pattern")
def load(url):
"""Load stix data from file"""
src = MemoryStore()
src.load_from_file(url)
return src
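# Usage sketch (bundle filenames are illustrative; they must point to local
# ATT&CK STIX bundles):
#
#   srcs = [load("enterprise-attack.json"), load("mobile-attack.json"), load("pre-attack.json")]
#   group_to_techniques = techniques_used_by_groups(srcs)
#   for group_id, related in group_to_techniques.items():
#       pass  # each entry pairs a technique "object" with its "uses" "relationship"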
|
the-stack_0_12017 | # resources/srx/addrbook_finder.py
import netaddr
#from .zone import Zone
#from .addrbook import ZoneAddrBook
class AddrBookFinderResults(object):
"""
Helper-class to hold the results of a :ZoneAddrFind.find(): invocation
"""
def __init__(self, ab, find, results):
self._ab = ab
self._find = find
self._results = results
self.sets = []
@property
def lpm(self):
"""
The longest-prefix-matching address is the last one in the results
list. This fact is a result of the :ZoneAddrFinder.find(): sorted call
"""
return self._results[-1][0]
@property
def items(self):
"""
Return a list of the matching address items and sets
"""
return self.addrs + self.sets
@property
def addrs(self):
"""
Return a list of the matching address items
"""
# return a list of names
return [x[0] for x in self._results]
@property
def matching(self):
"""
        Returns the string value of the original queried address presented to
the find() method
"""
return self._find
def __repr__(self):
"""
Provides the matching value and the zone name associated with this
results
"""
return "%s(%s in %s)" % (
self.__class__.__name__, self._find, self._ab.name)
class AddrBookFinder(object):
# -------------------------------------------------------------------------
# CONSTRUCTOR
# -------------------------------------------------------------------------
def __init__(self, addr_book):
"""
addr_book
Either a ZoneAddrBook or SharedAddrBook instance
"""
self._ab = addr_book
self._index = None
def __repr__(self):
return "AddrBookFinder(%s)" % self._ab.name
def compile(self):
"""
Compile a list of netaddr objects against the catalog of address items
"""
# create a tuple of (addr-name, netaddr) for each of the items in the
# address-book
self._index = [(name, netaddr.IPNetwork(addr['ip_prefix']))
for name, addr in self._ab.addr.catalog.items()]
def find(self, addr, sets=True):
"""
Given an ip or ip_prefix locate the matching address book address
and address-set items.
"""
        # if the caller hasn't explicitly invoked :compile(): to create the
# netaddr objects, then do that now.
if self._index is None:
self.compile()
# convert the provided :addr: into a netaddr object and then
# to a subnet match to find address entries. the matching
# values will be sorted with longest prefix matching to be
# last in the list
ip = netaddr.IPNetwork(addr).ip
        # is ip in the subnet?
        in_net = lambda i: ip & i[1].netmask == i[1].network
        # sort matching entries by prefix length so the longest-prefix match
        # ends up last (Python 3 sorted() takes key=, not cmp=)
        r = sorted(
            filter(in_net, self._index),
            key=lambda entry: entry[1].prefixlen)  # find/sort
        if not r:
            return None
# now that we have some matching entries, we should find which
# address-set items uses the items
results = AddrBookFinderResults(self._ab, addr, r)
if sets is True:
results.sets = self.find_sets(results)
# return the results object
return results
def find_sets(self, r):
"""
Given a :AddrBookFinderResults: object, which contains the list of
matching address items, locate the list of address-set objects that
use those items
"""
        catalog = self._ab.set.catalog
        # use any() rather than filter(): in Python 3 a filter object is
        # always truthy, which would wrongly select every set
        sets = [k for k, v in catalog.items()
                if any(a in v['addr_list'] for a in r.addrs)]
        subsets = [k for k, v in catalog.items()
                   if any(s in v['set_list'] for s in sets)]
return sets + subsets
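# Usage sketch (illustrative only): `book` stands in for a ZoneAddrBook or
# SharedAddrBook instance obtained elsewhere; the address literal is made up.
# finder = AddrBookFinder(book)
# finder.compile()                      # optional; find() compiles lazily
# result = finder.find("192.168.10.21")
# print(result.lpm, result.sets)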
|
the-stack_0_12018 | # dataset settings
dataset_type = "PascalContextDataset"
data_root = "data/VOCdevkit/VOC2010/"
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True
)
img_scale = (512, 512)
crop_size = (512, 512)
max_ratio = 8
train_pipeline = [
dict(type="LoadImageFromFile"),
dict(type="LoadAnnotations"),
dict(type="Resize", img_scale=img_scale, ratio_range=(0.5, 2.0)),
dict(type="RandomCrop", crop_size=crop_size, cat_max_ratio=0.75),
dict(type="RandomFlip", prob=0.5),
dict(type="PhotoMetricDistortion"),
dict(type="Normalize", **img_norm_cfg),
dict(type="Pad", size=crop_size, pad_val=0, seg_pad_val=255),
dict(type="DefaultFormatBundle"),
dict(type="Collect", keys=["img", "gt_semantic_seg"]),
]
val_pipeline = [
dict(type="LoadImageFromFile"),
dict(
type="MultiScaleFlipAug",
img_scale=(512 * max_ratio, 512),
flip=False,
transforms=[
dict(type="Resize", keep_ratio=True),
dict(type="RandomFlip"),
dict(type="Normalize", **img_norm_cfg),
dict(type="ImageToTensor", keys=["img"]),
dict(type="Collect", keys=["img"]),
],
),
]
test_pipeline = [
dict(type="LoadImageFromFile"),
dict(
type="MultiScaleFlipAug",
img_scale=(512 * max_ratio, 512),
flip=False,
transforms=[
dict(type="Resize", keep_ratio=True),
dict(type="RandomFlip"),
dict(type="Normalize", **img_norm_cfg),
dict(type="ImageToTensor", keys=["img"]),
dict(type="Collect", keys=["img"]),
],
),
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=4,
train=dict(
type=dataset_type,
data_root=data_root,
img_dir="JPEGImages",
ann_dir="SegmentationClassContext",
split="ImageSets/SegmentationContext/train.txt",
pipeline=train_pipeline,
),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir="JPEGImages",
ann_dir="SegmentationClassContext",
split="ImageSets/SegmentationContext/val.txt",
pipeline=val_pipeline,
),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir="JPEGImages",
ann_dir="SegmentationClassContext",
split="ImageSets/SegmentationContext/val.txt",
pipeline=test_pipeline,
),
)
|
the-stack_0_12019 | # encoding: utf8
from __future__ import unicode_literals
from django.db import models, migrations
import autoslug.fields
import cities_light.models
class Migration(migrations.Migration):
dependencies = [
('cities_light', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='City',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name_ascii', models.CharField(db_index=True, max_length=200, blank=True)),
('slug', autoslug.fields.AutoSlugField(editable=False)),
('geoname_id', models.IntegerField(unique=True, null=True, blank=True)),
('alternate_names', models.TextField(default='', null=True, blank=True)),
('name', models.CharField(max_length=200, db_index=True)),
('display_name', models.CharField(max_length=200)),
('search_names', cities_light.models.ToSearchTextField(default='', max_length=4000, db_index=True, blank=True)),
('latitude', models.DecimalField(null=True, max_digits=8, decimal_places=5, blank=True)),
('longitude', models.DecimalField(null=True, max_digits=8, decimal_places=5, blank=True)),
('region', models.ForeignKey(to_field='id', blank=True, to='cities_light.Region', null=True)),
('country', models.ForeignKey(to='cities_light.Country', to_field='id')),
('population', models.BigIntegerField(db_index=True, null=True, blank=True)),
('feature_code', models.CharField(db_index=True, max_length=10, null=True, blank=True)),
],
options={
'ordering': ['name'],
'unique_together': set([('region', 'name'), ('region', 'slug')]),
'abstract': False,
'verbose_name_plural': 'cities',
},
bases=(models.Model,),
),
]
|
the-stack_0_12022 | #############################################################
## ##
## Copyright (c) 2003-2017 by The University of Queensland ##
## Centre for Geoscience Computing ##
## http://earth.uq.edu.au/centre-geoscience-computing ##
## ##
## Primary Business: Brisbane, Queensland, Australia ##
## Licensed under the Open Software License version 3.0 ##
## http://www.apache.org/licenses/LICENSE-2.0 ##
## ##
#############################################################
"""
Defines the L{ImageFormat} class and functions for mapping a
file name extension to an associated C{ImageFormat} object.
"""
import os
import os.path
class ImageFormat(object):
"""
Class representing an image format.
"""
def __init__(self, name):
"""
Constructor.
@type name: str
@param name: Name assigned to this image format.
"""
self.name = name
def getName(self):
"""
Returns the name associated with this image format.
@rtype: str
"""
return self.name
def __str__(self):
return self.getName()
PNG = ImageFormat("PNG")
PNM = ImageFormat("PNM")
_nameFormatDict = dict()
_nameFormatDict[str.upper(str(PNG))] = PNG
_nameFormatDict[str.upper(str(PNM))] = PNM
def _getDelimitedFormatNameString():
return ", ".join(map(str,list(_nameFormatDict.keys())))
def getFormatFromName(formatName, ext=None):
"""
Returns the C{{ImageFormat}} object which corresponds
to a specified image-format name (string).
@type formatName: str
@param formatName: The name of an image format, one of: {0:s}
@type ext: str
@param ext: File name extension for error message string.
""".format(_getDelimitedFormatNameString())
if str.upper(formatName) in _nameFormatDict:
return _nameFormatDict[str.upper(formatName)]
raise \
ValueError(
(
"No image format found which matched extension '{0:s}';" +
" valid image file formats are: {1:s}"
).format(ext, _getDelimitedFormatNameString())
)
def getFormatFromExtension(fileName):
"""
Returns the C{ImageFormat} object which corresponds
to a specified file name. Uses the C{fileName} extension
to try and deduce the corresponding C{ImageFormat} object.
@type fileName: str
@param fileName: A file name.
@rtype: C{ImageFormat}
@return: An C{ImageFormat} object corresponding to the
specified file name (and corresponding file name extension).
"""
(base, ext) = os.path.splitext(fileName)
if (len(ext) > 0):
formatName = str.lstrip(ext, ".")
else:
raise ValueError(
"Could not determine image format from file "
+
"name " + fileName + ", no extension."
)
return getFormatFromName(formatName, ext)
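# Examples (sketch): getFormatFromExtension("snapshot.png") returns the PNG
# format object and getFormatFromExtension("frame.pnm") returns PNM; an
# unsupported extension such as "frame.jpg" raises ValueError listing the
# valid formats.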
|
the-stack_0_12023 | from InquirerPy.utils import color_print
import sys, psutil, time, cursor, valclient, ctypes, traceback, os, subprocess
from .utilities.killable_thread import Thread
from .utilities.config.app_config import Config
from .utilities.config.modify_config import Config_Editor
from .utilities.processes import Processes
from .utilities.rcs import Riot_Client_Services
from .utilities.systray import Systray
from .utilities.version_checker import Checker
from .utilities.logging import Logger
from .utilities.program_data import Program_Data
from .localization.localization import Localizer
from .presence.presence import Presence
from .webserver import server
# weird console window management stuff
kernel32 = ctypes.WinDLL('kernel32')
user32 = ctypes.WinDLL('user32')
hWnd = kernel32.GetConsoleWindow()
kernel32.SetConsoleMode(kernel32.GetStdHandle(-10), (0x4|0x80|0x20|0x2|0x10|0x1|0x00|0x100)) #disable inputs to console
kernel32.SetConsoleMode(kernel32.GetStdHandle(-11), 7) #allow for ANSI sequences
class Startup:
def __init__(self):
if not Processes.is_program_already_running():
cursor.hide()
Logger.create_logger()
Program_Data.update_file_location()
self.config = Config.fetch_config()
if "locale" in self.config.keys():
if self.config["locale"][0] == "":
config = Localizer.prompt_locale(self.config)
Config.modify_config(config)
Systray.restart()
self.installs = Program_Data.fetch_installs()
Localizer.set_locale(self.config)
self.config = Config.check_config()
Localizer.config = self.config
Logger.debug(self.config)
self.client = None
if Localizer.get_config_value("region",0) == "": # try to autodetect region on first launch
self.check_region()
ctypes.windll.kernel32.SetConsoleTitleW(f"valorant-rpc {Localizer.get_config_value('version')}")
color_print([("Red", Localizer.get_localized_text("prints","startup","wait_for_rpc"))])
try:
self.presence = Presence(self.config)
Startup.clear_line()
except Exception as e:
traceback.print_exc()
color_print([("Cyan",f"{Localizer.get_localized_text('prints','startup','discord_not_detected')} ({e})")])
if not Processes.are_processes_running():
color_print([("Red", Localizer.get_localized_text("prints","startup","starting_valorant"))])
self.start_game()
os._exit(1)
self.run()
def run(self):
self.presence.update_presence("startup")
Checker.check_version(self.config)
if not Processes.are_processes_running():
color_print([("Red", Localizer.get_localized_text("prints","startup","starting_valorant"))])
self.start_game()
self.setup_client()
self.systray = Systray(self.client,self.config)
self.dispatch_systray()
if self.client.fetch_presence() is None:
self.wait_for_presence()
self.check_run_cli()
self.dispatch_presence()
self.dispatch_webserver()
color_print([("LimeGreen",f"{Localizer.get_localized_text('prints','startup','startup_successful')}\n")])
time.sleep(5)
user32.ShowWindow(hWnd, 0) #hide window
self.systray_thread.join()
self.presence_thread.stop()
def dispatch_webserver(self):
server.client = self.client
server.config = self.config
self.webserver_thread = Thread(target=server.start,daemon=True)
self.webserver_thread.start()
def dispatch_presence(self):
self.presence_thread = Thread(target=self.presence.main_loop,daemon=True)
self.presence_thread.start()
def dispatch_systray(self):
self.systray_thread = Thread(target=self.systray.run)
self.systray_thread.start()
def setup_client(self):
self.client = valclient.Client(region=Localizer.get_config_value("region",0))
self.client.activate()
self.presence.client = self.client
def wait_for_presence(self):
presence_timeout = Localizer.get_config_value("startup","presence_timeout")
presence_timer = 0
print()
while self.client.fetch_presence() is None:
Startup.clear_line()
color_print([("Cyan", "["),("White",f"{presence_timer}"),("Cyan", f"] {Localizer.get_localized_text('prints','startup','waiting_for_presence')}")])
presence_timer += 1
if presence_timer >= presence_timeout:
self.systray.exit()
os._exit(1)
time.sleep(1)
Startup.clear_line()
Startup.clear_line()
def start_game(self):
path = Riot_Client_Services.get_rcs_path()
launch_timeout = Localizer.get_config_value("startup","game_launch_timeout")
launch_timer = 0
psutil.subprocess.Popen([path, "--launch-product=valorant", "--launch-patchline=live"])
print()
while not Processes.are_processes_running():
Startup.clear_line()
color_print([("Cyan", "["),("White",f"{launch_timer}"),("Cyan", f"] {Localizer.get_localized_text('prints','startup','waiting_for_valorant')}")])
launch_timer += 1
if launch_timer >= launch_timeout:
self.systray.exit()
os._exit(1)
time.sleep(1)
Startup.clear_line()
def check_run_cli(self):
if Localizer.get_config_value("startup","auto_launch_skincli"):
skincli_path = self.installs.get("valorant-skin-cli")
if skincli_path is not None:
subprocess.Popen(f"start {skincli_path}", shell=True)
def check_region(self):
color_print([("Red bold",Localizer.get_localized_text("prints","startup","autodetect_region"))])
client = valclient.Client(region="na")
client.activate()
sessions = client.riotclient_session_fetch_sessions()
for _,session in sessions.items():
if session["productId"] == "valorant":
launch_args = session["launchConfiguration"]["arguments"]
for arg in launch_args:
if "-ares-deployment" in arg:
region = arg.replace("-ares-deployment=","")
self.config[Localizer.get_config_key("region")][0] = region
Config.modify_config(self.config)
color_print([("LimeGreen",f"{Localizer.get_localized_text('prints','startup','autodetected_region')} {Localizer.get_config_value('region',0)}")])
time.sleep(5)
Systray.restart()
@staticmethod
def clear_line():
sys.stdout.write("\033[F") # move cursor up one line
sys.stdout.write("\r\033[K") |
the-stack_0_12024 | # Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import dns.exception
import dns.immutable
import dns.rdata
_pows = tuple(10**i for i in range(0, 11))
# default values are in centimeters
_default_size = 100.0
_default_hprec = 1000000.0
_default_vprec = 1000.0
# for use by from_wire()
_MAX_LATITUDE = 0x80000000 + 90 * 3600000
_MIN_LATITUDE = 0x80000000 - 90 * 3600000
_MAX_LONGITUDE = 0x80000000 + 180 * 3600000
_MIN_LONGITUDE = 0x80000000 - 180 * 3600000
def _exponent_of(what, desc):
if what == 0:
return 0
exp = None
for (i, pow) in enumerate(_pows):
if what < pow:
exp = i - 1
break
if exp is None or exp < 0:
raise dns.exception.SyntaxError("%s value out of bounds" % desc)
return exp
def _float_to_tuple(what):
if what < 0:
sign = -1
what *= -1
else:
sign = 1
what = round(what * 3600000) # pylint: disable=round-builtin
degrees = int(what // 3600000)
what -= degrees * 3600000
minutes = int(what // 60000)
what -= minutes * 60000
seconds = int(what // 1000)
what -= int(seconds * 1000)
what = int(what)
return (degrees, minutes, seconds, what, sign)
def _tuple_to_float(what):
value = float(what[0])
value += float(what[1]) / 60.0
value += float(what[2]) / 3600.0
value += float(what[3]) / 3600000.0
return float(what[4]) * value
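# Example (sketch): _float_to_tuple(37.7794) -> (37, 46, 45, 840, 1), i.e.
# degrees, minutes, seconds, milliseconds and sign; _tuple_to_float() maps
# that tuple back to ~37.7794.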
def _encode_size(what, desc):
what = int(what)
exponent = _exponent_of(what, desc) & 0xF
base = what // pow(10, exponent) & 0xF
return base * 16 + exponent
def _decode_size(what, desc):
exponent = what & 0x0F
if exponent > 9:
raise dns.exception.FormError("bad %s exponent" % desc)
base = (what & 0xF0) >> 4
if base > 9:
raise dns.exception.FormError("bad %s base" % desc)
return base * pow(10, exponent)
def _check_coordinate_list(value, low, high):
if value[0] < low or value[0] > high:
raise ValueError(f'not in range [{low}, {high}]')
if value[1] < 0 or value[1] > 59:
raise ValueError('bad minutes value')
if value[2] < 0 or value[2] > 59:
raise ValueError('bad seconds value')
if value[3] < 0 or value[3] > 999:
raise ValueError('bad milliseconds value')
if value[4] != 1 and value[4] != -1:
raise ValueError('bad hemisphere value')
@dns.immutable.immutable
class LOC(dns.rdata.Rdata):
"""LOC record"""
# see: RFC 1876
__slots__ = ['latitude', 'longitude', 'altitude', 'size',
'horizontal_precision', 'vertical_precision']
def __init__(self, rdclass, rdtype, latitude, longitude, altitude,
size=_default_size, hprec=_default_hprec,
vprec=_default_vprec):
"""Initialize a LOC record instance.
The parameters I{latitude} and I{longitude} may be either a 4-tuple
of integers specifying (degrees, minutes, seconds, milliseconds),
or they may be floating point values specifying the number of
degrees. The other parameters are floats. Size, horizontal precision,
and vertical precision are specified in centimeters."""
super().__init__(rdclass, rdtype)
if isinstance(latitude, int):
latitude = float(latitude)
if isinstance(latitude, float):
latitude = _float_to_tuple(latitude)
_check_coordinate_list(latitude, -90, 90)
self.latitude = tuple(latitude)
if isinstance(longitude, int):
longitude = float(longitude)
if isinstance(longitude, float):
longitude = _float_to_tuple(longitude)
_check_coordinate_list(longitude, -180, 180)
self.longitude = tuple(longitude)
self.altitude = float(altitude)
self.size = float(size)
self.horizontal_precision = float(hprec)
self.vertical_precision = float(vprec)
def to_text(self, origin=None, relativize=True, **kw):
if self.latitude[4] > 0:
lat_hemisphere = 'N'
else:
lat_hemisphere = 'S'
if self.longitude[4] > 0:
long_hemisphere = 'E'
else:
long_hemisphere = 'W'
text = "%d %d %d.%03d %s %d %d %d.%03d %s %0.2fm" % (
self.latitude[0], self.latitude[1],
self.latitude[2], self.latitude[3], lat_hemisphere,
self.longitude[0], self.longitude[1], self.longitude[2],
self.longitude[3], long_hemisphere,
self.altitude / 100.0
)
# do not print default values
if self.size != _default_size or \
self.horizontal_precision != _default_hprec or \
self.vertical_precision != _default_vprec:
text += " {:0.2f}m {:0.2f}m {:0.2f}m".format(
self.size / 100.0, self.horizontal_precision / 100.0,
self.vertical_precision / 100.0
)
return text
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True,
relativize_to=None):
latitude = [0, 0, 0, 0, 1]
longitude = [0, 0, 0, 0, 1]
size = _default_size
hprec = _default_hprec
vprec = _default_vprec
latitude[0] = tok.get_int()
t = tok.get_string()
if t.isdigit():
latitude[1] = int(t)
t = tok.get_string()
if '.' in t:
(seconds, milliseconds) = t.split('.')
if not seconds.isdigit():
raise dns.exception.SyntaxError(
'bad latitude seconds value')
latitude[2] = int(seconds)
l = len(milliseconds)
if l == 0 or l > 3 or not milliseconds.isdigit():
raise dns.exception.SyntaxError(
'bad latitude milliseconds value')
if l == 1:
m = 100
elif l == 2:
m = 10
else:
m = 1
latitude[3] = m * int(milliseconds)
t = tok.get_string()
elif t.isdigit():
latitude[2] = int(t)
t = tok.get_string()
if t == 'S':
latitude[4] = -1
elif t != 'N':
raise dns.exception.SyntaxError('bad latitude hemisphere value')
longitude[0] = tok.get_int()
t = tok.get_string()
if t.isdigit():
longitude[1] = int(t)
t = tok.get_string()
if '.' in t:
(seconds, milliseconds) = t.split('.')
if not seconds.isdigit():
raise dns.exception.SyntaxError(
'bad longitude seconds value')
longitude[2] = int(seconds)
l = len(milliseconds)
if l == 0 or l > 3 or not milliseconds.isdigit():
raise dns.exception.SyntaxError(
'bad longitude milliseconds value')
if l == 1:
m = 100
elif l == 2:
m = 10
else:
m = 1
longitude[3] = m * int(milliseconds)
t = tok.get_string()
elif t.isdigit():
longitude[2] = int(t)
t = tok.get_string()
if t == 'W':
longitude[4] = -1
elif t != 'E':
raise dns.exception.SyntaxError('bad longitude hemisphere value')
t = tok.get_string()
if t[-1] == 'm':
t = t[0: -1]
altitude = float(t) * 100.0 # m -> cm
tokens = tok.get_remaining(max_tokens=3)
if len(tokens) >= 1:
value = tokens[0].unescape().value
if value[-1] == 'm':
value = value[0: -1]
size = float(value) * 100.0 # m -> cm
if len(tokens) >= 2:
value = tokens[1].unescape().value
if value[-1] == 'm':
value = value[0: -1]
hprec = float(value) * 100.0 # m -> cm
if len(tokens) >= 3:
value = tokens[2].unescape().value
if value[-1] == 'm':
value = value[0: -1]
vprec = float(value) * 100.0 # m -> cm
# Try encoding these now so we raise if they are bad
_encode_size(size, "size")
_encode_size(hprec, "horizontal precision")
_encode_size(vprec, "vertical precision")
return cls(rdclass, rdtype, latitude, longitude, altitude,
size, hprec, vprec)
def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
milliseconds = (self.latitude[0] * 3600000 +
self.latitude[1] * 60000 +
self.latitude[2] * 1000 +
self.latitude[3]) * self.latitude[4]
latitude = 0x80000000 + milliseconds
milliseconds = (self.longitude[0] * 3600000 +
self.longitude[1] * 60000 +
self.longitude[2] * 1000 +
self.longitude[3]) * self.longitude[4]
longitude = 0x80000000 + milliseconds
altitude = int(self.altitude) + 10000000
size = _encode_size(self.size, "size")
hprec = _encode_size(self.horizontal_precision, "horizontal precision")
vprec = _encode_size(self.vertical_precision, "vertical precision")
wire = struct.pack("!BBBBIII", 0, size, hprec, vprec, latitude,
longitude, altitude)
file.write(wire)
@classmethod
def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
(version, size, hprec, vprec, latitude, longitude, altitude) = \
parser.get_struct("!BBBBIII")
if version != 0:
raise dns.exception.FormError("LOC version not zero")
if latitude < _MIN_LATITUDE or latitude > _MAX_LATITUDE:
raise dns.exception.FormError("bad latitude")
if latitude > 0x80000000:
latitude = (latitude - 0x80000000) / 3600000
else:
latitude = -1 * (0x80000000 - latitude) / 3600000
if longitude < _MIN_LONGITUDE or longitude > _MAX_LONGITUDE:
raise dns.exception.FormError("bad longitude")
if longitude > 0x80000000:
longitude = (longitude - 0x80000000) / 3600000
else:
longitude = -1 * (0x80000000 - longitude) / 3600000
altitude = float(altitude) - 10000000.0
size = _decode_size(size, "size")
hprec = _decode_size(hprec, "horizontal precision")
vprec = _decode_size(vprec, "vertical precision")
return cls(rdclass, rdtype, latitude, longitude, altitude,
size, hprec, vprec)
@property
def float_latitude(self):
"latitude as a floating point value"
return _tuple_to_float(self.latitude)
@property
def float_longitude(self):
"longitude as a floating point value"
return _tuple_to_float(self.longitude)
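    # Usage sketch (comment only; assumes the surrounding dnspython package
    # and its zone-file text parser): the coordinates below are illustrative.
    # rdata = dns.rdata.from_text("IN", "LOC",
    #                             "37 46 46.000 N 122 23 35.000 W 26.00m")
    # rdata.float_latitude  -> ~37.779
    # rdata.float_longitude -> ~-122.393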
|
the-stack_0_12025 | from backend.blockchain.block import Block
from backend.wallet.transactions import Transaction
from backend.wallet.wallet import Wallet
from backend.config import MINING_REWARD_INPUT
class Blockchain:
def __init__(self):
self.chain = [Block.genesis()]
def add_block(self, data):
self.chain.append(Block.mine_block(self.chain[-1], data))
def __repr__(self):
return f'Blockchain: {self.chain}'
def replace_chain(self, chain):
if len(chain) <= len(self.chain):
raise Exception('Cannot replace. The incoming chain must be longer')
try:
Blockchain.is_valid_chain(chain)
except Exception as e:
raise Exception(f'Cannot replace. The incoming chain is invalid: {e}')
self.chain = chain
def to_json(self):
return list(map(lambda block: block.to_json(), self.chain))
@staticmethod
def from_json(chain_json):
blockchain = Blockchain()
blockchain.chain = list(
map(lambda block_json: Block.from_json(block_json), chain_json)
)
return blockchain
@staticmethod
def is_valid_chain(chain):
if chain[0] != Block.genesis():
raise Exception('The genesis block must be valid')
for i in range(1, len(chain)):
block = chain[i]
last_block = chain[i-1]
Block.is_valid_block(last_block, block)
Blockchain.is_valid_transaction_chain(chain)
@staticmethod
def is_valid_transaction_chain(chain):
transaction_ids = set()
for i in range(len(chain)):
block = chain[i]
has_mining_reward = False
for transaction_json in block.data:
transaction = Transaction.from_json(transaction_json)
if transaction.id in transaction_ids:
raise Exception(f'Transaction {transaction.id} is not unique')
transaction_ids.add(transaction.id)
if transaction.input == MINING_REWARD_INPUT:
if has_mining_reward:
raise Exception(
'There can be only one mining reward per block. '\
f'Check block with hash: {block.hash}'
)
has_mining_reward = True
else:
historic_blockchain = Blockchain()
historic_blockchain.chain = chain[0:i]
historic_balance = Wallet.calculate_balance(
historic_blockchain,
transaction.input['address']
)
if historic_balance != transaction.input['amount']:
raise Exception(f'Transaction {transaction.id} has an invalid input amount')
transaction.is_valid_transaction(transaction)
def main():
blockchain = Blockchain()
blockchain.add_block('one')
blockchain.add_block('two')
print(blockchain)
print(f'blockchain.py ___name__: {__name__}')
if __name__ == '__main__':
main()
|
the-stack_0_12026 | # coding: utf8
# !/usr/bin/env python
import hunspell
import pandas as pd
from math import log
import matplotlib.pyplot as plt
import seaborn as sns
import codecs
import pickle
import re
import unicodedata
from ast import literal_eval
def getScriptPath():
return "/home/alexis/Documents/EPFL/MS3/Project/python"
def getIdxOfWord(ws, w):
"""Return index of word in sentence"""
try:
wIdx = ws.index(w)
except:
wIdx = -1
return wIdx
def stem(stemmer, word):
"""
Computes a possible stem for a given word
:param word: string
The word to be stemmed
:return: string
The last possible stem in list, or the word itself if no stem found
"""
wstem = stemmer.stem(word)
if len(wstem) > 0: # and wstem[-1] not in stopwords
return unicode(wstem[-1], 'utf8')
else:
return word
def storeCount(array, key):
"""Increments value for key in store by one, or sets to 1 if key nonexistent."""
if key in array:
array[key] += 1
else:
array[key] = 1
def storeIncrement(store, key, incr):
"""
Increment value for key in store by given increment.
:param incr: float
"""
if key in store:
store[key] += incr
else:
store[key] = incr
def idxForMaxKeyValPair(array):
maxV = array[0][1]
i = 0
maxVIdx = 0
for k, v in array:
if v > maxV:
maxV = v
maxVIdx = i
i += 1
return maxVIdx
def keyForMaxValue(_dict):
maxK = ''
maxV = 0
for k, v in _dict.iteritems():
if v > maxV:
maxV = v
maxK = k
return maxK
def sortUsingList(tosort, reflist):
"""
Sorts tosort by order of reflist.
Example: tosort: ['a', 'b', 'c'], reflist: [1, 3, 2]
Return: ['a', 'c', 'b']
:param tosort:
:param reflist:
:return:
"""
return [x for (y, x) in sorted(zip(reflist, tosort))]
def sortNTopByVal(tosort, top, descending=False):
"""
Sort dictionary by descending values and return top elements.
Return list of tuples.
"""
return sorted([(k, v) for k, v in tosort.items()], key=lambda x: x[1], reverse=descending)[:top]
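# Example (sketch): sortNTopByVal({'a': 3, 'b': 1, 'c': 2}, top=2, descending=True)
# returns [('a', 3), ('c', 2)].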
def buildSentsByChar(chars, sents):
"""
NOT NEEDED ANY MORE
Build map of chars to list of indices where characters occur in sents.
"""
    # build a fresh list per key; dict.fromkeys(chars, list()) would make
    # every character share the same list object
    char_sent_map = {char: [] for char in chars}
for ix, sent in enumerate(sents):
for char, ix_lst in char_sent_map.iteritems():
if char in sent['nostop']:
ix_lst.append(ix)
return char_sent_map
def writeData(bookfile, char_list, wsent, sentences):
"""
Write data relevant to book to pickle files
"""
file_prefix = '../books-txt/predicted-data/'
name_prefix = bookfile.split('/')[-1][:-4] # TODO get without .txt
# write list to file, one element per line
with codecs.open(file_prefix + name_prefix + '-chars.p', mode='wb') as f:
pickle.dump(char_list, f)
# write characters sentences dict to file in json format
with codecs.open(file_prefix + name_prefix + '-charsents.p', mode='wb') as f:
pickle.dump(wsent, f)
# write sentences dict to file in json format
with codecs.open(file_prefix + name_prefix + '-sents.p', mode='wb') as f:
pickle.dump(sentences, f)
def getSurroundings(array, idx, window=2):
"""
Return words +-2 from idx
"""
surroundings = []
if idx > 1:
surroundings.append(array[idx - 2])
else:
surroundings.append('---')
if idx > 0:
surroundings.append(array[idx - 1])
else:
surroundings.append('---')
if idx < len(array) - 1:
surroundings.append(array[idx + 1])
else:
surroundings.append('---')
if idx < len(array) - 2:
surroundings.append(array[idx + 2])
else:
surroundings.append('---')
return surroundings
def getWindow(lst, index, window):
"""
:param lst: Some list
    :param index: index at center of window
:param window: window size -> +- window on each side
Total size of 2*window+1
"""
min_idx = index-window if index-window >= 0 else 0
max_idx = index+window if index+window < len(lst) else len(lst)-1
return range(min_idx, max_idx+1)
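# Example (sketch): getWindow([10, 20, 30, 40, 50], index=1, window=2) clamps
# at the list edges and returns the indices [0, 1, 2, 3] (as a range object).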
def removeAccents(in_str):
encoding = "utf-8"
if(is_ascii(in_str)):
in_str = in_str.decode(encoding)
in_str = unicodedata.normalize('NFKD', in_str)
in_str = in_str.encode('ASCII', 'ignore')
return in_str
def is_ascii(mystr):
try:
mystr.decode('ascii')
return True
except UnicodeDecodeError:
return False
def camelSplit(name):
"""
Returns the string split if written in Camel case
"""
return re.sub('(?!^)([A-Z][a-z]+)', r' \1', name).split()
def objFromByte(r):
try:
return literal_eval(r.content.decode('utf-8'))
except ValueError:
return None
|
the-stack_0_12027 | #!/usr/bin/env python3
#
# This file is part of GreatFET
from __future__ import print_function
import ast
import argparse
from greatfet.utils import GreatFETArgumentParser, log_silent, log_verbose
def int_auto_base(s):
"""
Allows the user to pass an integer argument on the command line e.g. in decimal, or in hex with 0x notation.
Used with argparse like `type=int_auto_base`, since argparse's `type` argument accepts any function.
"""
# base=0 means autodetect the base from the prefix (if any).
return int(s, base=0)
def main():
# Set up a simple argument parser.
parser = GreatFETArgumentParser(description="""Utility for chipcon debugging via GreatFET
(See /firmware/common/swra.c for pin mappings)""",
verbose_by_default=True)
parser.add_argument('--chip-id', action='store_true', # Short options (one dash) should always be one letter
help="Print the chip ID of the connected device.")
parser.add_argument('-a', '--address', dest='address', metavar='<n>', type=int_auto_base,
help="Starting address (default: 0)", default=0)
parser.add_argument('-l', '--length', dest='length', metavar='<n>', type=int_auto_base,
help="Length of data to read")
parser.add_argument('-r', '--read', metavar='<filename>', type=argparse.FileType('wb'),
help="Read data into file")
parser.add_argument('--no-erase', dest='erase', default=True, action='store_false',
help="Do not erase the flash before performing a write operation")
parser.add_argument('--no-verify', dest='verify', action='store_false', default=True,
help="Do not verify the flash after performing a write operation")
parser.add_argument('-E', '--mass-erase', action='store_true', help="Erase the entire flash memory")
parser.add_argument('-w', '--write', metavar='<filename>', type=argparse.FileType('rb'),
help="Write data from file")
args = parser.parse_args()
log_function = log_verbose if args.verbose else log_silent
device = parser.find_specified_device()
chipcon = device.create_programmer('chipcon')
chipcon.debug_init()
if args.chip_id:
chip_id(chipcon)
if args.read:
if not args.length:
parser.error("argument -s/--length: expected one argument")
read_flash(chipcon, args.read, args.address, args.length, log_function)
if args.mass_erase:
mass_erase_flash(chipcon, log_function)
if args.write:
program_flash(chipcon, args.write, args.address, args.erase, args.verify, log_function)
def chip_id(programmer):
print("Chip ID:", programmer.get_chip_id())
def read_flash(programmer, out_file, start_address, length, log_function):
log_function("Reading {} bytes starting at address {:02x}...".format(length, start_address))
data = programmer.read_flash(start_address=start_address, length=length)
out_file.write(data)
def mass_erase_flash(programmer, log_function):
log_function("Erasing entire flash...")
programmer.mass_erase_flash()
def program_flash(programmer, in_file, start_address, erase, verify, log_function):
log_function("Writing data to flash...")
image_array = in_file.read()
programmer.program_flash(image_array, erase=erase, verify=verify, start=start_address)
if __name__ == '__main__':
main()
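# Example invocations (sketch; the script/entry-point name is illustrative,
# only the flags are taken from the parser above):
#   python chipcon.py --chip-id
#   python chipcon.py -a 0x0000 -l 0x8000 -r dump.bin
#   python chipcon.py -w firmware.bin --no-verify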
|
the-stack_0_12028 | # Copyright 2019 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ros2cli.node.strategy import add_arguments
from ros2cli.node.strategy import NodeStrategy
from ros2component.api import container_node_name_completer
from ros2component.api import find_container_node_names
from ros2component.api import get_components_in_container
from ros2component.api import get_components_in_containers
from ros2component.verb import VerbExtension
from ros2node.api import get_node_names
class ListVerb(VerbExtension):
"""Output a list of running containers and components."""
def add_arguments(self, parser, cli_name):
add_arguments(parser)
argument = parser.add_argument(
'container_node_name', nargs='?', default=None,
help='Name of the container node to list components from')
argument.completer = container_node_name_completer
parser.add_argument(
'--containers-only', action='store_true',
help='List found containers nodes only')
def main(self, *, args):
with NodeStrategy(args) as node:
container_node_names = find_container_node_names(
node=node, node_names=get_node_names(node=node)
)
if args.container_node_name is not None:
if args.container_node_name not in [n.full_name for n in container_node_names]:
return "Unable to find container node '" + args.container_node_name + "'"
if not args.containers_only:
ok, outcome = get_components_in_container(
node=node, remote_container_node_name=args.container_node_name
)
if not ok:
return f'{outcome} when listing components in {args.container_node_name}'
if any(outcome):
print(*[
f'{component.uid} {component.name}' for component in outcome
], sep='\n')
else:
results = get_components_in_containers(node=node, remote_containers_node_names=[
n.full_name for n in container_node_names
])
for container_node_name, (ok, outcome) in results.items():
print(container_node_name)
if not args.containers_only:
if not ok:
print(f'{outcome} when listing components')
continue
if any(outcome):
print(*[
f' {component.uid} {component.name}' for component in outcome
], sep='\n')
|
the-stack_0_12030 | import torch
import torch.nn as nn
import torch.nn.functional as F
class ContrastiveLoss(nn.Module):
"""
Contrastive loss function.
Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
"""
def __init__(self, margin: float = 2.0):
super(ContrastiveLoss, self).__init__()
self.margin = margin
self.eps = 1e-9
def forward(self, output1: torch.Tensor, output2: torch.Tensor, label: torch.Tensor):
euclidean_distance = F.pairwise_distance(output1, output2)
losses = 0.5 * (label.float() * euclidean_distance
+ (1 + (-1 * label)).float() * F.relu(self.margin
- (euclidean_distance + self.eps).sqrt()).pow(2))
loss_contrastive = torch.mean(losses)
return loss_contrastive
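# Minimal usage sketch (tensors below are assumed/random, not from the module).
# With this formulation label == 1 marks similar pairs and label == 0
# dissimilar ones, since the first term penalises the distance directly.
# criterion = ContrastiveLoss(margin=2.0)
# emb_a = torch.randn(8, 128, requires_grad=True)
# emb_b = torch.randn(8, 128, requires_grad=True)
# labels = torch.randint(0, 2, (8,))
# loss = criterion(emb_a, emb_b, labels)
# loss.backward()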
|
the-stack_0_12031 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import jmespath
from chart.tests.helm_template_generator import render_chart
class ResourceQuotaTest(unittest.TestCase):
def test_resource_quota_template(self):
docs = render_chart(
values={
"quotas": {
"configmaps": "10",
"persistentvolumeclaims": "4",
"pods": "4",
"replicationcontrollers": "20",
"secrets": "10",
"services": "10",
}
},
show_only=["templates/resourcequota.yaml"],
)
assert "ResourceQuota" == jmespath.search("kind", docs[0])
assert "20" == jmespath.search("spec.hard.replicationcontrollers", docs[0])
def test_resource_quota_are_not_added_by_default(self):
docs = render_chart(
show_only=["templates/resourcequota.yaml"],
)
assert docs == []
|
the-stack_0_12033 | # -*- coding: utf-8 -*-
# Copyright (C) 2012 Anaconda, Inc
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import defaultdict
from logging import DEBUG, getLogger
from ._vendor.auxlib.decorators import memoize
from ._vendor.toolz import concat, groupby
from .base.constants import ChannelPriority, MAX_CHANNEL_PRIORITY
from .base.context import context
from .common.compat import iteritems, iterkeys, itervalues, odict, on_win, text_type
from .common.io import time_recorder
from .common.logic import Clauses, get_sat_solver_cls, minimal_unsatisfiable_subset
from .common.toposort import toposort
from .exceptions import InvalidSpec, ResolvePackageNotFound, UnsatisfiableError
from .models.channel import Channel, MultiChannel
from .models.enums import NoarchType
from .models.match_spec import MatchSpec
from .models.records import PackageRecord
from .models.version import VersionOrder
log = getLogger(__name__)
stdoutlog = getLogger('conda.stdoutlog')
# used in conda build
Unsatisfiable = UnsatisfiableError
ResolvePackageNotFound = ResolvePackageNotFound
get_sat_solver_cls = memoize(get_sat_solver_cls)
def dashlist(iterable, indent=2):
return ''.join('\n' + ' ' * indent + '- ' + str(x) for x in iterable)
class Resolve(object):
def __init__(self, index, sort=False, processed=False, channels=()):
self.index = index
self.channels = channels
self._channel_priorities_map = self._make_channel_priorities(channels) if channels else {}
self._channel_priority = context.channel_priority
self._solver_ignore_timestamps = context.solver_ignore_timestamps
groups = groupby("name", itervalues(index))
trackers = defaultdict(list)
for name in groups:
unmanageable_precs = [prec for prec in groups[name] if prec.is_unmanageable]
if unmanageable_precs:
log.debug("restricting to unmanageable packages: %s", name)
groups[name] = unmanageable_precs
tf_precs = (prec for prec in groups[name] if prec.track_features)
for prec in tf_precs:
for feature_name in prec.track_features:
trackers[feature_name].append(prec)
self.groups = groups # Dict[package_name, List[PackageRecord]]
self.trackers = trackers # Dict[track_feature, List[PackageRecord]]
self._cached_find_matches = {} # Dict[MatchSpec, Set[PackageRecord]]
self.ms_depends_ = {} # Dict[PackageRecord, List[MatchSpec]]
self._reduced_index_cache = {}
self._strict_channel_cache = {}
if sort:
for group in itervalues(groups):
group.sort(key=self.version_key, reverse=True)
def default_filter(self, features=None, filter=None):
# TODO: fix this import; this is bad
from .core.subdir_data import make_feature_record
if filter is None:
filter = {}
else:
filter.clear()
filter.update({make_feature_record(fstr): False for fstr in iterkeys(self.trackers)})
if features:
filter.update({make_feature_record(fstr): True for fstr in features})
return filter
def valid(self, spec_or_prec, filter, optional=True):
"""Tests if a package, MatchSpec, or a list of both has satisfiable
dependencies, assuming cyclic dependencies are always valid.
Args:
spec_or_prec: a package record, a MatchSpec, or an iterable of these.
filter: a dictionary of (fkey,valid) pairs, used to consider a subset
of dependencies, and to eliminate repeated searches.
optional: if True (default), do not enforce optional specifications
when considering validity. If False, enforce them.
Returns:
True if the full set of dependencies can be satisfied; False otherwise.
If filter is supplied and update is True, it will be updated with the
search results.
"""
def v_(spec):
return v_ms_(spec) if isinstance(spec, MatchSpec) else v_fkey_(spec)
def v_ms_(ms):
return (optional and ms.optional
or any(v_fkey_(fkey) for fkey in self.find_matches(ms)))
def v_fkey_(prec):
val = filter.get(prec)
if val is None:
filter[prec] = True
try:
depends = self.ms_depends(prec)
except InvalidSpec as e:
val = filter[prec] = False
else:
val = filter[prec] = all(v_ms_(ms) for ms in depends)
return val
result = v_(spec_or_prec)
return result
def valid2(self, spec_or_prec, filter_out, optional=True):
def is_valid(_spec_or_prec):
if isinstance(_spec_or_prec, MatchSpec):
return is_valid_spec(_spec_or_prec)
else:
return is_valid_prec(_spec_or_prec)
def is_valid_spec(_spec):
return optional and _spec.optional or any(
is_valid_prec(_prec) for _prec in self.find_matches(_spec)
)
def is_valid_prec(prec):
val = filter_out.get(prec)
if val is None:
filter_out[prec] = False
try:
has_valid_deps = all(is_valid_spec(ms) for ms in self.ms_depends(prec))
except InvalidSpec as e:
val = filter_out[prec] = "invalid dep specs"
else:
val = filter_out[prec] = False if has_valid_deps else "invalid depends specs"
return not val
return is_valid(spec_or_prec)
def invalid_chains(self, spec, filter, optional=True):
"""Constructs a set of 'dependency chains' for invalid specs.
A dependency chain is a tuple of MatchSpec objects, starting with
the requested spec, proceeding down the dependency tree, ending at
a specification that cannot be satisfied. Uses self.valid_ as a
filter, both to prevent chains and to allow other routines to
prune the list of valid packages with additional criteria.
Args:
spec: a package key or MatchSpec
filter: a dictionary of (prec, valid) pairs to be used when
testing for package validity.
optional: if True (default), do not enforce optional specifications
when considering validity. If False, enforce them.
Returns:
A generator of tuples, empty if the MatchSpec is valid.
"""
def chains_(spec, names):
if spec.name in names:
return
names.add(spec.name)
if self.valid(spec, filter, optional):
return
precs = self.find_matches(spec)
found = False
for prec in precs:
for m2 in self.ms_depends(prec):
for x in chains_(m2, names):
found = True
yield (spec,) + x
if not found:
yield (spec,)
return chains_(spec, set())
def invalid_chains2(self, spec, filter_out, optional=True):
def chains_(spec, names):
if spec.name in names:
return
names.add(spec.name)
if self.valid2(spec, filter_out, optional):
return
precs = self.find_matches(spec)
found = False
for prec in precs:
for m2 in self.ms_depends(prec):
for x in chains_(m2, names):
found = True
yield (spec,) + x
if not found:
yield (spec,)
return chains_(spec, set())
def verify_specs(self, specs):
"""Perform a quick verification that specs and dependencies are reasonable.
Args:
specs: An iterable of strings or MatchSpec objects to be tested.
Returns:
Nothing, but if there is a conflict, an error is thrown.
Note that this does not attempt to resolve circular dependencies.
"""
non_tf_specs = []
bad_deps = []
feature_names = set()
for ms in specs:
_feature_names = ms.get_exact_value('track_features')
if _feature_names:
feature_names.update(_feature_names)
else:
non_tf_specs.append(ms)
filter = self.default_filter(feature_names)
for ms in non_tf_specs:
bad_deps.extend(self.invalid_chains(ms, filter.copy()))
if bad_deps:
raise ResolvePackageNotFound(bad_deps)
return non_tf_specs, feature_names
def find_conflicts(self, specs):
"""Perform a deeper analysis on conflicting specifications, by attempting
to find the common dependencies that might be the cause of conflicts.
Args:
specs: An iterable of strings or MatchSpec objects to be tested.
It is assumed that the specs conflict.
Returns:
Nothing, because it always raises an UnsatisfiableError.
Strategy:
If we're here, we know that the specs conflict. This could be because:
- One spec conflicts with another; e.g.
['numpy 1.5*', 'numpy >=1.6']
- One spec conflicts with a dependency of another; e.g.
['numpy 1.5*', 'scipy 0.12.0b1']
- Each spec depends on *the same package* but in a different way; e.g.,
['A', 'B'] where A depends on numpy 1.5, and B on numpy 1.6.
Technically, all three of these cases can be boiled down to the last
one if we treat the spec itself as one of the "dependencies". There
might be more complex reasons for a conflict, but this code only
considers the ones above.
The purpose of this code, then, is to identify packages (like numpy
above) that all of the specs depend on *but in different ways*. We
then identify the dependency chains that lead to those packages.
"""
sdeps = {}
# For each spec, assemble a dictionary of dependencies, with package
# name as key, and all of the matching packages as values.
for ms in specs:
rec = sdeps.setdefault(ms, {})
slist = [ms]
while slist:
ms2 = slist.pop()
deps = rec.setdefault(ms2.name, set())
for fkey in self.find_matches(ms2):
if fkey not in deps:
deps.add(fkey)
slist.extend(ms3 for ms3 in self.ms_depends(fkey) if ms3.name != ms.name)
# Find the list of dependencies they have in common. And for each of
# *those*, find the individual packages that they all share. Those need
# to be removed as conflict candidates.
commkeys = set.intersection(*(set(s.keys()) for s in sdeps.values()))
commkeys = {k: set.intersection(*(v[k] for v in sdeps.values())) for k in commkeys}
# and find the dependency chains that lead to them.
bad_deps = []
for ms, sdep in iteritems(sdeps):
filter = {}
for mn, v in sdep.items():
if mn != ms.name and mn in commkeys:
# Mark this package's "unique" dependencies as invalid
for fkey in v - commkeys[mn]:
filter[fkey] = False
# Find the dependencies that lead to those invalid choices
ndeps = set(self.invalid_chains(ms, filter, False))
# This may produce some additional invalid chains that we
# don't care about. Select only those that terminate in our
# predetermined set of "common" keys.
ndeps = [nd for nd in ndeps if nd[-1].name in commkeys]
if ndeps:
bad_deps.extend(ndeps)
else:
# This means the package *itself* was the common conflict.
bad_deps.append((ms,))
raise UnsatisfiableError(bad_deps)
def _get_strict_channel(self, package_name):
try:
channel_name = self._strict_channel_cache[package_name]
except KeyError:
all_channel_names = set(prec.channel.name for prec in self.groups[package_name])
by_cp = {self._channel_priorities_map.get(cn, 1): cn for cn in all_channel_names}
highest_priority = sorted(by_cp)[0] # highest priority is the lowest number
channel_name = self._strict_channel_cache[package_name] = by_cp[highest_priority]
return channel_name
@time_recorder(module_name=__name__)
def get_reduced_index(self, specs):
# TODO: fix this import; this is bad
from .core.subdir_data import make_feature_record
strict_channel_priority = context.channel_priority == ChannelPriority.STRICT
cache_key = strict_channel_priority, frozenset(specs)
if cache_key in self._reduced_index_cache:
return self._reduced_index_cache[cache_key]
if log.isEnabledFor(DEBUG):
log.debug('Retrieving packages for: %s', dashlist(sorted(text_type(s) for s in specs)))
specs, features = self.verify_specs(specs)
filter_out = {prec: False if val else "feature not enabled"
for prec, val in iteritems(self.default_filter(features))}
snames = set()
top_level_spec = None
cp_filter_applied = set() # values are package names
def filter_group(_specs):
# all _specs should be for the same package name
name = next(iter(_specs)).name
group = self.groups.get(name, ())
# implement strict channel priority
if strict_channel_priority and name not in cp_filter_applied:
sole_source_channel_name = self._get_strict_channel(name)
for prec in group:
if prec.channel.name != sole_source_channel_name:
filter_out[prec] = "removed due to strict channel priority"
cp_filter_applied.add(name)
# Prune packages that don't match any of the patterns
# or which have unsatisfiable dependencies
nold = nnew = 0
for prec in group:
if not filter_out.setdefault(prec, False):
nold += 1
if not self.match_any(_specs, prec):
filter_out[prec] = "incompatible with required spec %s" % top_level_spec
continue
unsatisfiable_dep_specs = tuple(
ms for ms in self.ms_depends(prec)
if not any(not filter_out.get(rec, False) for rec in self.find_matches(ms))
)
if unsatisfiable_dep_specs:
filter_out[prec] = "unsatisfiable dependencies %s" % " ".join(
str(s) for s in unsatisfiable_dep_specs
)
continue
filter_out[prec] = False
nnew += 1
reduced = nnew < nold
if reduced:
log.debug('%s: pruned from %d -> %d' % (name, nold, nnew))
if any(ms.optional for ms in _specs):
return reduced
elif nnew == 0:
# Indicates that a conflict was found; we can exit early
return None
# Perform the same filtering steps on any dependencies shared across
# *all* packages in the group. Even if just one of the packages does
# not have a particular dependency, it must be ignored in this pass.
# Otherwise, we might do more filtering than we should---and it is
# better to have extra packages here than missing ones.
if reduced or name not in snames:
snames.add(name)
_dep_specs = groupby(lambda s: s.name, (
dep_spec
for prec in group if not filter_out.get(prec, False)
for dep_spec in self.ms_depends(prec) if not dep_spec.optional
))
_dep_specs.pop("*", None) # discard track_features specs
for deps in itervalues(_dep_specs):
if len(deps) >= nnew:
res = filter_group(set(deps))
if res:
reduced = True
elif res is None:
# Indicates that a conflict was found; we can exit early
return None
return reduced
# Iterate on pruning until no progress is made. We've implemented
# what amounts to "double-elimination" here; packages get one additional
# chance after their first "False" reduction. This catches more instances
# where one package's filter affects another. But we don't have to be
# perfect about this, so performance matters.
for _ in range(2):
snames.clear()
slist = list(specs)
reduced = False
while slist:
s = slist.pop()
top_level_spec = s
reduced = filter_group([s])
if reduced:
slist.append(s)
elif reduced is None:
break
if reduced is None:
# This filter reset means that unsatisfiable indexes leak through.
filter_out = {prec: False if val else "feature not enabled"
for prec, val in iteritems(self.default_filter(features))}
# TODO: raise unsatisfiable exception here
# Messaging to users should be more descriptive.
# 1. Are there no direct matches?
# 2. Are there no matches for first-level dependencies?
# 3. Have the first level dependencies been invalidated?
break
# Determine all valid packages in the dependency graph
reduced_index2 = {prec: prec for prec in (make_feature_record(fstr) for fstr in features)}
processed_specs = set()
specs_queue = set(specs)
while specs_queue:
this_spec = specs_queue.pop()
processed_specs.add(this_spec)
add_these_precs2 = tuple(
prec for prec in self.find_matches(this_spec)
if prec not in reduced_index2 and self.valid2(prec, filter_out)
)
if strict_channel_priority and add_these_precs2:
                strict_channel_name = self._get_strict_channel(add_these_precs2[0].name)
                add_these_precs2 = tuple(
                    prec for prec in add_these_precs2 if prec.channel.name == strict_channel_name
)
reduced_index2.update((prec, prec) for prec in add_these_precs2)
# We do not pull packages into the reduced index due
# to a track_features dependency. Remember, a feature
# specifies a "soft" dependency: it must be in the
# environment, but it is not _pulled_ in. The SAT
# logic doesn't do a perfect job of capturing this
# behavior, but keeping these packages out of the
# reduced index helps. Of course, if _another_
# package pulls it in by dependency, that's fine.
specs_queue.update(
ms for prec in add_these_precs2 for ms in self.ms_depends(prec)
if "track_features" not in ms and ms not in processed_specs
)
self._reduced_index_cache[cache_key] = reduced_index2
return reduced_index2
def match_any(self, mss, prec):
return any(ms.match(prec) for ms in mss)
def find_matches(self, spec):
# type: (MatchSpec) -> Set[PackageRecord]
res = self._cached_find_matches.get(spec, None)
if res is not None:
return res
spec_name = spec.get_exact_value('name')
if spec_name:
candidate_precs = self.groups.get(spec_name, ())
elif spec.get_exact_value('track_features'):
feature_names = spec.get_exact_value('track_features')
candidate_precs = concat(
self.trackers.get(feature_name, ()) for feature_name in feature_names
)
else:
candidate_precs = itervalues(self.index)
res = frozenset(p for p in candidate_precs if spec.match(p))
self._cached_find_matches[spec] = res
return res
def ms_depends(self, prec):
# type: (PackageRecord) -> List[MatchSpec]
deps = self.ms_depends_.get(prec)
if deps is None:
deps = [MatchSpec(d) for d in prec.combined_depends]
deps.extend(MatchSpec(track_features=feat) for feat in prec.features)
self.ms_depends_[prec] = deps
return deps
def version_key(self, prec, vtype=None):
channel = prec.channel
channel_priority = self._channel_priorities_map.get(channel.name, 1) # TODO: ask @mcg1969 why the default value is 1 here # NOQA
valid = 1 if channel_priority < MAX_CHANNEL_PRIORITY else 0
version_comparator = VersionOrder(prec.get('version', ''))
build_number = prec.get('build_number', 0)
build_string = prec.get('build')
ts = prec.get('timestamp', 0)
if self._channel_priority != ChannelPriority.DISABLED:
vkey = [valid, -channel_priority, version_comparator, build_number]
else:
vkey = [valid, version_comparator, -channel_priority, build_number]
if self._solver_ignore_timestamps:
vkey.append(build_string)
else:
vkey.extend((ts, build_string))
return vkey
@staticmethod
def _make_channel_priorities(channels):
priorities_map = odict()
for priority_counter, chn in enumerate(concat(
(Channel(cc) for cc in c._channels) if isinstance(c, MultiChannel) else (c,)
for c in (Channel(c) for c in channels)
)):
channel_name = chn.name
if channel_name in priorities_map:
continue
priorities_map[channel_name] = min(priority_counter, MAX_CHANNEL_PRIORITY - 1)
return priorities_map
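    # Example (sketch): _make_channel_priorities(("conda-forge", "bioconda"))
    # yields an ordered mapping {"conda-forge": 0, "bioconda": 1}; lower
    # numbers mean higher priority when consumed by version_key() above.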
def get_pkgs(self, ms, emptyok=False): # pragma: no cover
# legacy method for conda-build
ms = MatchSpec(ms)
precs = self.find_matches(ms)
if not precs and not emptyok:
raise ResolvePackageNotFound([(ms,)])
return sorted(precs, key=self.version_key)
@staticmethod
def to_sat_name(val):
# val can be a PackageRecord or MatchSpec
if isinstance(val, PackageRecord):
return val.dist_str()
elif isinstance(val, MatchSpec):
return '@s@' + text_type(val) + ('?' if val.optional else '')
else:
raise NotImplementedError()
@staticmethod
def to_feature_metric_id(prec_dist_str, feat):
return '@fm@%s@%s' % (prec_dist_str, feat)
def push_MatchSpec(self, C, spec):
spec = MatchSpec(spec)
sat_name = self.to_sat_name(spec)
m = C.from_name(sat_name)
if m is not None:
# the spec has already been pushed onto the clauses stack
return sat_name
simple = spec._is_single()
nm = spec.get_exact_value('name')
tf = frozenset(_tf for _tf in (
f.strip() for f in spec.get_exact_value('track_features') or ()
) if _tf)
if nm:
tgroup = libs = self.groups.get(nm, [])
elif tf:
assert len(tf) == 1
k = next(iter(tf))
tgroup = libs = self.trackers.get(k, [])
else:
tgroup = libs = self.index.keys()
simple = False
if not simple:
libs = [fkey for fkey in tgroup if spec.match(fkey)]
if len(libs) == len(tgroup):
if spec.optional:
m = True
elif not simple:
ms2 = MatchSpec(track_features=tf) if tf else MatchSpec(nm)
m = C.from_name(self.push_MatchSpec(C, ms2))
if m is None:
sat_names = [self.to_sat_name(prec) for prec in libs]
if spec.optional:
ms2 = MatchSpec(track_features=tf) if tf else MatchSpec(nm)
sat_names.append('!' + self.to_sat_name(ms2))
m = C.Any(sat_names)
C.name_var(m, sat_name)
return sat_name
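    # Rough example of what push_MatchSpec() adds (hypothetical spec and builds):
    # for MatchSpec("foo >=2"), the pushed variable is "@s@" plus the spec text,
    # and it is defined as roughly
    #     @s@foo...  <->  Any(foo-2.0-0, foo-2.1-0, ...)
    # i.e. the spec variable is true iff at least one matching build is true.
    # Optional specs also get the negation of the name-only spec inside the
    # Any(), so they are satisfied when the package is simply absent.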
@time_recorder(module_name=__name__)
def gen_clauses(self):
C = Clauses(sat_solver_cls=get_sat_solver_cls(context.sat_solver))
for name, group in iteritems(self.groups):
group = [self.to_sat_name(prec) for prec in group]
# Create one variable for each package
for sat_name in group:
C.new_var(sat_name)
# Create one variable for the group
m = C.new_var(self.to_sat_name(MatchSpec(name)))
# Exactly one of the package variables, OR
# the negation of the group variable, is true
C.Require(C.ExactlyOne, group + [C.Not(m)])
# If a package is installed, its dependencies must be as well
for prec in itervalues(self.index):
nkey = C.Not(self.to_sat_name(prec))
for ms in self.ms_depends(prec):
C.Require(C.Or, nkey, self.push_MatchSpec(C, ms))
if log.isEnabledFor(DEBUG):
log.debug("gen_clauses returning with clause count: %d", C.get_clause_count())
return C
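    # A rough sketch of the encoding this produces for a tiny hypothetical index
    # with one package "foo" (two builds) that depends on "bar":
    #     variables:  foo-1.0-0, foo-1.1-0, @s@foo, ...
    #     group:      ExactlyOne(foo-1.0-0, foo-1.1-0, Not(@s@foo))
    #                 i.e. @s@foo is true iff exactly one foo build is true
    #     depends:    Or(Not(foo-1.0-0), @s@bar)   for each build and each dep
    # The names above are only illustrative; real variable names come from
    # to_sat_name() and push_MatchSpec().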
def generate_spec_constraints(self, C, specs):
result = [(self.push_MatchSpec(C, ms),) for ms in specs]
if log.isEnabledFor(DEBUG):
log.debug(
"generate_spec_constraints returning with clause count: %d",
C.get_clause_count())
return result
def generate_feature_count(self, C):
result = {self.push_MatchSpec(C, MatchSpec(track_features=name)): 1
for name in iterkeys(self.trackers)}
if log.isEnabledFor(DEBUG):
log.debug(
"generate_feature_count returning with clause count: %d", C.get_clause_count())
return result
def generate_update_count(self, C, specs):
return {'!'+ms.target: 1 for ms in specs if ms.target and C.from_name(ms.target)}
def generate_feature_metric(self, C):
eq = {} # a C.minimize() objective: Dict[varname, coeff]
# Given a pair (prec, feature), assign a "1" score IF:
# - The prec is installed
# - The prec does NOT require the feature
# - At least one package in the group DOES require the feature
# - A package that tracks the feature is installed
for name, group in iteritems(self.groups):
prec_feats = {self.to_sat_name(prec): set(prec.features) for prec in group}
active_feats = set.union(*prec_feats.values()).intersection(self.trackers)
for feat in active_feats:
clause_id_for_feature = self.push_MatchSpec(C, MatchSpec(track_features=feat))
for prec_sat_name, features in prec_feats.items():
if feat not in features:
feature_metric_id = self.to_feature_metric_id(prec_sat_name, feat)
C.name_var(C.And(prec_sat_name, clause_id_for_feature), feature_metric_id)
eq[feature_metric_id] = 1
return eq
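    # Worked mini-example (hypothetical): group "foo" has build foo-a with
    # features {"mkl"} and build foo-b with no features, and "mkl" is tracked.
    # Only foo-b gets a metric variable, roughly
    #     eq["@fm@foo-b@mkl"] = 1   # true when foo-b AND an mkl tracker are installed
    # so minimizing this objective nudges the solver toward the featured build
    # foo-a whenever the mkl feature is active in the environment.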
def generate_removal_count(self, C, specs):
return {'!'+self.push_MatchSpec(C, ms.name): 1 for ms in specs}
def generate_install_count(self, C, specs):
return {self.push_MatchSpec(C, ms.name): 1 for ms in specs if ms.optional}
def generate_package_count(self, C, missing):
return {self.push_MatchSpec(C, nm): 1 for nm in missing}
def generate_version_metrics(self, C, specs, include0=False):
# each of these are weights saying how well packages match the specs
# format for each: a C.minimize() objective: Dict[varname, coeff]
eqc = {} # channel
eqv = {} # version
eqb = {} # build number
eqt = {} # timestamp
sdict = {} # Dict[package_name, PackageRecord]
for s in specs:
s = MatchSpec(s) # needed for testing
sdict.setdefault(s.name, [])
# # TODO: this block is important! can't leave it commented out
# rec = sdict.setdefault(s.name, [])
# if s.target:
# dist = Dist(s.target)
# if dist in self.index:
# if self.index[dist].get('priority', 0) < MAX_CHANNEL_PRIORITY:
# rec.append(dist)
for name, targets in iteritems(sdict):
pkgs = [(self.version_key(p), p) for p in self.groups.get(name, [])]
pkey = None
# keep in mind that pkgs is already sorted according to version_key (a tuple,
# so composite sort key). Later entries in the list are, by definition,
# greater in some way, so simply comparing with != suffices.
for version_key, prec in pkgs:
if targets and any(prec == t for t in targets):
continue
if pkey is None:
ic = iv = ib = it = 0
# valid package, channel priority
elif pkey[0] != version_key[0] or pkey[1] != version_key[1]:
ic += 1
iv = ib = it = 0
# version
elif pkey[2] != version_key[2]:
iv += 1
ib = it = 0
# build number
elif pkey[3] != version_key[3]:
ib += 1
it = 0
elif not self._solver_ignore_timestamps and pkey[4] != version_key[4]:
it += 1
prec_sat_name = self.to_sat_name(prec)
if ic or include0:
eqc[prec_sat_name] = ic
if iv or include0:
eqv[prec_sat_name] = iv
if ib or include0:
eqb[prec_sat_name] = ib
if it or include0:
eqt[prec_sat_name] = it
pkey = version_key
return eqc, eqv, eqb, eqt
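    # Counter mechanics, roughly: walking the sorted group, a change in the
    # channel term bumps ic and resets iv/ib/it, a change in the version term
    # bumps iv and resets ib/it, and so on.  For a hypothetical group ordered
    #     foo 1.9 build 0, foo 1.9 build 1, foo 2.0 build 0
    # the running (ic, iv, ib) values are (0,0,0), (0,0,1), (0,1,0); only the
    # non-zero entries are written into eqc/eqv/eqb/eqt unless include0 is set.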
def dependency_sort(self, must_have):
# type: (Dict[package_name, PackageRecord]) -> List[PackageRecord]
assert isinstance(must_have, dict)
digraph = {} # Dict[package_name, Set[dependent_package_names]]
for package_name, prec in iteritems(must_have):
if prec in self.index:
digraph[package_name] = set(ms.name for ms in self.ms_depends(prec))
# There are currently at least three special cases to be aware of.
# 1. The `toposort()` function, called below, contains special case code to remove
# any circular dependency between python and pip.
# 2. conda/plan.py has special case code for menuinst
# Always link/unlink menuinst first/last on windows in case a subsequent
# package tries to import it to create/remove a shortcut
# 3. On windows, python noarch packages need an implicit dependency on conda added, if
# conda is in the list of packages for the environment. Python noarch packages
# that have entry points use conda's own conda.exe python entry point binary. If conda
# is going to be updated during an operation, the unlink / link order matters.
# See issue #6057.
if on_win and 'conda' in digraph:
for package_name, dist in iteritems(must_have):
                record = self.index.get(dist)
if hasattr(record, 'noarch') and record.noarch == NoarchType.python:
digraph[package_name].add('conda')
sorted_keys = toposort(digraph)
must_have = must_have.copy()
# Take all of the items in the sorted keys
# Don't fail if the key does not exist
result = [must_have.pop(key) for key in sorted_keys if key in must_have]
        # Take any keys that were not sorted
result.extend(must_have.values())
return result
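    # Small hypothetical illustration: if must_have holds records for "app",
    # "lib" and "base", where app depends on lib and lib depends on base, the
    # digraph becomes {"app": {"lib"}, "lib": {"base"}, "base": set()} and
    # toposort() yields dependencies before dependents (["base", "lib", "app"]),
    # which is the link order returned here.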
def environment_is_consistent(self, installed):
log.debug('Checking if the current environment is consistent')
if not installed:
return None, []
sat_name_map = {} # Dict[sat_name, PackageRecord]
specs = []
for prec in installed:
sat_name_map[self.to_sat_name(prec)] = prec
specs.append(MatchSpec('%s %s %s' % (prec.name, prec.version, prec.build)))
r2 = Resolve({prec: prec for prec in installed}, True, True, channels=self.channels)
C = r2.gen_clauses()
constraints = r2.generate_spec_constraints(C, specs)
solution = C.sat(constraints)
return bool(solution)
def get_conflicting_specs(self, specs):
if not specs:
return ()
reduced_index = self.get_reduced_index(specs)
# Check if satisfiable
def mysat(specs, add_if=False):
constraints = r2.generate_spec_constraints(C, specs)
return C.sat(constraints, add_if)
r2 = Resolve(reduced_index, True, True, channels=self.channels)
C = r2.gen_clauses()
solution = mysat(specs, True)
if solution:
return ()
else:
# This first result is just a single unsatisfiable core. There may be several.
unsat_specs = list(minimal_unsatisfiable_subset(specs, sat=mysat))
satisfiable_specs = set(specs) - set(unsat_specs)
# In this loop, we test each unsatisfiable spec individually against the satisfiable
# specs to ensure there are no other unsatisfiable specs in the set.
final_unsat_specs = set()
while unsat_specs:
this_spec = unsat_specs.pop(0)
final_unsat_specs.add(this_spec)
test_specs = satisfiable_specs | {this_spec}
C = r2.gen_clauses() # TODO: wasteful call, but Clauses() needs refactored
solution = mysat(test_specs, True)
if not solution:
these_unsat = minimal_unsatisfiable_subset(test_specs, sat=mysat)
if len(these_unsat) > 1:
unsat_specs.extend(these_unsat)
satisfiable_specs -= set(unsat_specs)
return tuple(final_unsat_specs)
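    # Hypothetical sketch of the flow above: for specs like
    #     (python=2.7, somepkg)        where somepkg requires python>=3.6
    # the full set is unsatisfiable, minimal_unsatisfiable_subset() might return
    # {python=2.7, somepkg} as the first core, and the while-loop then re-tests
    # each core member against the remaining satisfiable specs so that every
    # spec involved in a conflict ends up in the returned tuple.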
def bad_installed(self, installed, new_specs):
log.debug('Checking if the current environment is consistent')
if not installed:
return None, []
sat_name_map = {} # Dict[sat_name, PackageRecord]
specs = []
for prec in installed:
sat_name_map[self.to_sat_name(prec)] = prec
specs.append(MatchSpec('%s %s %s' % (prec.name, prec.version, prec.build)))
new_index = {prec: prec for prec in itervalues(sat_name_map)}
r2 = Resolve(new_index, True, True, channels=self.channels)
C = r2.gen_clauses()
constraints = r2.generate_spec_constraints(C, specs)
solution = C.sat(constraints)
limit = xtra = None
if not solution or xtra:
def get_(name, snames):
if name not in snames:
snames.add(name)
for fn in self.groups.get(name, []):
for ms in self.ms_depends(fn):
get_(ms.name, snames)
# New addition: find the largest set of installed packages that
# are consistent with each other, and include those in the
# list of packages to maintain consistency with
snames = set()
eq_optional_c = r2.generate_removal_count(C, specs)
solution, _ = C.minimize(eq_optional_c, C.sat())
snames.update(sat_name_map[sat_name]['name']
for sat_name in (C.from_index(s) for s in solution)
if sat_name and sat_name[0] != '!' and '@' not in sat_name)
# Existing behavior: keep all specs and their dependencies
for spec in new_specs:
get_(MatchSpec(spec).name, snames)
if len(snames) < len(sat_name_map):
limit = snames
xtra = [rec for sat_name, rec in iteritems(sat_name_map)
if rec['name'] not in snames]
log.debug('Limiting solver to the following packages: %s', ', '.join(limit))
if xtra:
log.debug('Packages to be preserved: %s', xtra)
return limit, xtra
def restore_bad(self, pkgs, preserve):
if preserve:
sdict = {prec.name: prec for prec in pkgs}
pkgs.extend(p for p in preserve if p.name not in sdict)
def install_specs(self, specs, installed, update_deps=True):
specs = list(map(MatchSpec, specs))
snames = {s.name for s in specs}
log.debug('Checking satisfiability of current install')
limit, preserve = self.bad_installed(installed, specs)
for prec in installed:
if prec not in self.index:
continue
name, version, build = prec.name, prec.version, prec.build
schannel = prec.channel.canonical_name
if name in snames or limit is not None and name not in limit:
continue
# If update_deps=True, set the target package in MatchSpec so that
# the solver can minimize the version change. If update_deps=False,
# fix the version and build so that no change is possible.
if update_deps:
# TODO: fix target here
spec = MatchSpec(name=name, target=prec.dist_str())
else:
spec = MatchSpec(name=name, version=version,
build=build, channel=schannel)
specs.append(spec)
return specs, preserve
def install(self, specs, installed=None, update_deps=True, returnall=False):
specs, preserve = self.install_specs(specs, installed or [], update_deps)
pkgs = self.solve(specs, returnall=returnall, _remove=False)
self.restore_bad(pkgs, preserve)
return pkgs
def remove_specs(self, specs, installed):
nspecs = []
# There's an imperfect thing happening here. "specs" nominally contains
# a list of package names or track_feature values to be removed. But
        # because of add_defaults_to_specs it may also contain version constraints
# like "python 2.7*", which are *not* asking for python to be removed.
# We need to separate these two kinds of specs here.
for s in map(MatchSpec, specs):
# Since '@' is an illegal version number, this ensures that all of
# these matches will never match an actual package. Combined with
# optional=True, this has the effect of forcing their removal.
if s._is_single():
nspecs.append(MatchSpec(s, version='@', optional=True))
else:
nspecs.append(MatchSpec(s, optional=True))
snames = set(s.name for s in nspecs if s.name)
limit, _ = self.bad_installed(installed, nspecs)
preserve = []
for prec in installed:
nm, ver = prec.name, prec.version
if nm in snames:
continue
elif limit is not None:
preserve.append(prec)
else:
# TODO: fix target here
nspecs.append(MatchSpec(name=nm,
version='>='+ver if ver else None,
optional=True,
target=prec.dist_str()))
return nspecs, preserve
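    # Illustration of the '@' trick above (hypothetical request): asking to
    # remove "numpy" becomes a spec for name "numpy" with version '@' and
    # optional=True -- no real build can match version '@', and optional=True
    # lets (and, via the removal-count objective, rewards) the solver for
    # leaving numpy out, while the other installed packages are either
    # preserved or given optional "name >=version" specs so they are kept at
    # or above their current versions where possible.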
def remove(self, specs, installed):
specs, preserve = self.remove_specs(specs, installed)
pkgs = self.solve(specs, _remove=True)
self.restore_bad(pkgs, preserve)
return pkgs
@time_recorder(module_name=__name__)
def solve(self, specs, returnall=False, _remove=False):
# type: (List[str], bool) -> List[PackageRecord]
if log.isEnabledFor(DEBUG):
log.debug('Solving for: %s', dashlist(sorted(text_type(s) for s in specs)))
# Find the compliant packages
log.debug("Solve: Getting reduced index of compliant packages")
len0 = len(specs)
specs = tuple(map(MatchSpec, specs))
reduced_index = self.get_reduced_index(specs)
if not reduced_index:
return False if reduced_index is None else ([[]] if returnall else [])
# Check if satisfiable
log.debug("Solve: determining satisfiability")
def mysat(specs, add_if=False):
constraints = r2.generate_spec_constraints(C, specs)
return C.sat(constraints, add_if)
r2 = Resolve(reduced_index, True, True, channels=self.channels)
C = r2.gen_clauses()
solution = mysat(specs, True)
if not solution:
specs = minimal_unsatisfiable_subset(specs, sat=mysat)
self.find_conflicts(specs)
speco = [] # optional packages
specr = [] # requested packages
speca = [] # all other packages
specm = set(r2.groups) # missing from specs
for k, s in enumerate(specs):
if s.name in specm:
specm.remove(s.name)
if not s.optional:
(speca if s.target or k >= len0 else specr).append(s)
elif any(r2.find_matches(s)):
s = MatchSpec(s.name, optional=True, target=s.target)
speco.append(s)
speca.append(s)
speca.extend(MatchSpec(s) for s in specm)
# Removed packages: minimize count
log.debug("Solve: minimize removed packages")
if _remove:
eq_optional_c = r2.generate_removal_count(C, speco)
solution, obj7 = C.minimize(eq_optional_c, solution)
log.debug('Package removal metric: %d', obj7)
# Requested packages: maximize versions
log.debug("Solve: maximize versions of requested packages")
eq_req_c, eq_req_v, eq_req_b, eq_req_t = r2.generate_version_metrics(C, specr)
solution, obj3a = C.minimize(eq_req_c, solution)
solution, obj3 = C.minimize(eq_req_v, solution)
log.debug('Initial package channel/version metric: %d/%d', obj3a, obj3)
# Track features: minimize feature count
log.debug("Solve: minimize track_feature count")
eq_feature_count = r2.generate_feature_count(C)
solution, obj1 = C.minimize(eq_feature_count, solution)
log.debug('Track feature count: %d', obj1)
# Featured packages: minimize number of featureless packages
# installed when a featured alternative is feasible.
# For example, package name foo exists with two built packages. One with
        # 'track_features': 'feat1', and one with 'track_features': 'feat2'.
# The previous "Track features" minimization pass has chosen 'feat1' for the
# environment, but not 'feat2'. In this case, the 'feat2' version of foo is
# considered "featureless."
if not context.featureless_minimization_disabled_feature_flag:
log.debug("Solve: maximize number of packages that have necessary features")
eq_feature_metric = r2.generate_feature_metric(C)
solution, obj2 = C.minimize(eq_feature_metric, solution)
log.debug('Package misfeature count: %d', obj2)
# Requested packages: maximize builds
log.debug("Solve: maximize build numbers of requested packages")
solution, obj4 = C.minimize(eq_req_b, solution)
log.debug('Initial package build metric: %d', obj4)
# Optional installations: minimize count
if not _remove:
log.debug("Solve: minimize number of optional installations")
eq_optional_install = r2.generate_install_count(C, speco)
solution, obj49 = C.minimize(eq_optional_install, solution)
log.debug('Optional package install metric: %d', obj49)
# Dependencies: minimize the number of packages that need upgrading
log.debug("Solve: minimize number of necessary upgrades")
eq_u = r2.generate_update_count(C, speca)
solution, obj50 = C.minimize(eq_u, solution)
log.debug('Dependency update count: %d', obj50)
# Remaining packages: maximize versions, then builds
log.debug("Solve: maximize versions and builds of indirect dependencies")
eq_c, eq_v, eq_b, eq_t = r2.generate_version_metrics(C, speca)
solution, obj5a = C.minimize(eq_c, solution)
solution, obj5 = C.minimize(eq_v, solution)
solution, obj6 = C.minimize(eq_b, solution)
log.debug('Additional package channel/version/build metrics: %d/%d/%d',
obj5a, obj5, obj6)
# Maximize timestamps
log.debug("Solve: maximize timestamps")
eq_t.update(eq_req_t)
solution, obj6t = C.minimize(eq_t, solution)
log.debug('Timestamp metric: %d', obj6t)
# Prune unnecessary packages
log.debug("Solve: prune unnecessary packages")
eq_c = r2.generate_package_count(C, specm)
solution, obj7 = C.minimize(eq_c, solution, trymax=True)
log.debug('Weak dependency count: %d', obj7)
def clean(sol):
return [q for q in (C.from_index(s) for s in sol)
if q and q[0] != '!' and '@' not in q]
log.debug('Looking for alternate solutions')
nsol = 1
psolutions = []
psolution = clean(solution)
psolutions.append(psolution)
while True:
nclause = tuple(C.Not(C.from_name(q)) for q in psolution)
solution = C.sat((nclause,), True)
if solution is None:
break
nsol += 1
if nsol > 10:
log.debug('Too many solutions; terminating')
break
psolution = clean(solution)
psolutions.append(psolution)
if nsol > 1:
psols2 = list(map(set, psolutions))
common = set.intersection(*psols2)
diffs = [sorted(set(sol) - common) for sol in psols2]
if not context.json:
stdoutlog.info(
'\nWarning: %s possible package resolutions '
'(only showing differing packages):%s%s' %
('>10' if nsol > 10 else nsol,
dashlist(', '.join(diff) for diff in diffs),
'\n ... and others' if nsol > 10 else ''))
# def stripfeat(sol):
# return sol.split('[')[0]
new_index = {self.to_sat_name(prec): prec for prec in itervalues(self.index)}
if returnall:
if len(psolutions) > 1:
raise RuntimeError()
# TODO: clean up this mess
# return [sorted(Dist(stripfeat(dname)) for dname in psol) for psol in psolutions]
# return [sorted((new_index[sat_name] for sat_name in psol), key=lambda x: x.name)
# for psol in psolutions]
# return sorted(Dist(stripfeat(dname)) for dname in psolutions[0])
return sorted((new_index[sat_name] for sat_name in psolutions[0]), key=lambda x: x.name)
|
the-stack_0_12034 | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
from enum import Enum
import logging
import optparse
import os
import pdb
import shutil
import sys
import tempfile
import time
from .authproxy import JSONRPCException
from . import coverage
from .test_node import TestNode
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
check_json_precision,
connect_nodes_bi,
disconnect_nodes,
get_datadir_path,
initialize_datadir,
p2p_port,
set_node_times,
sync_blocks,
sync_mempools,
)
class TestStatus(Enum):
PASSED = 1
FAILED = 2
SKIPPED = 3
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
class BitcoinTestFramework():
"""Base class for a stomp test script.
Individual stomp test scripts should subclass this class and override the set_test_params() and run_test() methods.
Individual tests can also override the following methods to customize the test setup:
- add_options()
- setup_chain()
- setup_network()
- setup_nodes()
The __init__() and main() methods should not be overridden.
This class also contains various public and private helper methods."""
def __init__(self):
"""Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
self.setup_clean_chain = False
self.nodes = []
self.mocktime = 0
self.supports_cli = False
self.set_test_params()
assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
def main(self):
"""Main function. This should not be overridden by the subclass test scripts."""
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave stompds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop stompds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
help="Source directory containing stompd/stomp-cli (default: %default)")
parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
help="Directory for caching pregenerated datadirs")
parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_option("--configfile", dest="configfile",
help="Location of the test framework config file")
parser.add_option("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
help="Attach a python debugger if test fails")
parser.add_option("--usecli", dest="usecli", default=False, action="store_true",
help="use bitcoin-cli instead of RPC for all commands")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
PortSeed.n = self.options.port_seed
os.environ['PATH'] = self.options.srcdir + ":" + self.options.srcdir + "/qt:" + os.environ['PATH']
check_json_precision()
self.options.cachedir = os.path.abspath(self.options.cachedir)
# Set up temp directory and start logging
if self.options.tmpdir:
self.options.tmpdir = os.path.abspath(self.options.tmpdir)
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix="test")
self._start_logging()
success = TestStatus.FAILED
try:
if self.options.usecli and not self.supports_cli:
raise SkipTest("--usecli specified but test does not support using CLI")
self.setup_chain()
self.setup_network()
time.sleep(5)
self.run_test()
success = TestStatus.PASSED
except JSONRPCException as e:
self.log.exception("JSONRPC error")
except SkipTest as e:
self.log.warning("Test Skipped: %s" % e.message)
success = TestStatus.SKIPPED
except AssertionError as e:
self.log.exception("Assertion failed")
except KeyError as e:
self.log.exception("Key error")
except Exception as e:
self.log.exception("Unexpected exception caught during testing")
except KeyboardInterrupt as e:
self.log.warning("Exiting after keyboard interrupt")
if success == TestStatus.FAILED and self.options.pdbonfailure:
print("Testcase failed. Attaching python debugger. Enter ? for help")
pdb.set_trace()
if not self.options.noshutdown:
self.log.info("Stopping nodes")
if self.nodes:
self.stop_nodes()
else:
for node in self.nodes:
node.cleanup_on_exit = False
self.log.info("Note: stompds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
self.log.info("Cleaning up")
shutil.rmtree(self.options.tmpdir)
else:
self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
if success == TestStatus.PASSED:
self.log.info("Tests successful")
exit_code = TEST_EXIT_PASSED
elif success == TestStatus.SKIPPED:
self.log.info("Test skipped")
exit_code = TEST_EXIT_SKIPPED
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
exit_code = TEST_EXIT_FAILED
logging.shutdown()
sys.exit(exit_code)
# Methods to override in subclass test scripts.
def set_test_params(self):
"""Tests must this method to change default values for number of nodes, topology, etc"""
raise NotImplementedError
def add_options(self, parser):
"""Override this method to add command-line options to the test"""
pass
def setup_chain(self):
"""Override this method to customize blockchain setup"""
self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean()
else:
self._initialize_chain()
def setup_network(self):
"""Override this method to customize test network topology"""
self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
for i in range(self.num_nodes - 1):
connect_nodes_bi(self.nodes, i, i + 1)
self.sync_all()
def setup_nodes(self):
"""Override this method to customize test node setup"""
extra_args = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
def run_test(self):
"""Tests must override this method to define test logic"""
raise NotImplementedError
# Public helper methods. These can be accessed by the subclass test scripts.
def add_nodes(self, num_nodes, extra_args=None, rpchost=None, timewait=None, binary=None):
"""Instantiate TestNode objects"""
if extra_args is None:
extra_args = [[]] * num_nodes
if binary is None:
binary = [None] * num_nodes
assert_equal(len(extra_args), num_nodes)
assert_equal(len(binary), num_nodes)
for i in range(num_nodes):
self.nodes.append(TestNode(i, self.options.tmpdir, extra_args[i], rpchost, timewait=timewait, binary=binary[i], stderr=None, mocktime=self.mocktime, coverage_dir=self.options.coveragedir, use_cli=self.options.usecli))
def start_node(self, i, *args, **kwargs):
"""Start a stompd"""
node = self.nodes[i]
node.start(*args, **kwargs)
node.wait_for_rpc_connection()
time.sleep(10)
if self.options.coveragedir is not None:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def start_nodes(self, extra_args=None, *args, **kwargs):
"""Start multiple stompds"""
if extra_args is None:
extra_args = [None] * self.num_nodes
assert_equal(len(extra_args), self.num_nodes)
try:
for i, node in enumerate(self.nodes):
node.start(extra_args[i], *args, **kwargs)
for node in self.nodes:
node.wait_for_rpc_connection()
except:
# If one node failed to start, stop the others
self.stop_nodes()
raise
time.sleep(10)
if self.options.coveragedir is not None:
for node in self.nodes:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def stop_node(self, i):
"""Stop a stompd test node"""
self.nodes[i].stop_node()
self.nodes[i].wait_until_stopped()
def stop_nodes(self):
"""Stop multiple stompd test nodes"""
for node in self.nodes:
# Issue RPC to stop nodes
node.stop_node()
for node in self.nodes:
# Wait for nodes to stop
time.sleep(5)
node.wait_until_stopped()
def restart_node(self, i, extra_args=None):
"""Stop and start a test node"""
self.stop_node(i)
self.start_node(i, extra_args)
def assert_start_raises_init_error(self, i, extra_args=None, expected_msg=None, *args, **kwargs):
with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr:
try:
self.start_node(i, extra_args, stderr=log_stderr, *args, **kwargs)
self.stop_node(i)
except Exception as e:
assert 'stompd exited' in str(e) # node must have shutdown
self.nodes[i].running = False
self.nodes[i].process = None
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8')
if expected_msg not in stderr:
raise AssertionError("Expected error \"" + expected_msg + "\" not found in:\n" + stderr)
else:
if expected_msg is None:
assert_msg = "stompd should have exited with an error"
else:
assert_msg = "stompd should have exited with expected error " + expected_msg
raise AssertionError(assert_msg)
def wait_for_node_exit(self, i, timeout):
self.nodes[i].process.wait(timeout)
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
self.sync_all([self.nodes[:2], self.nodes[2:]])
def join_network(self):
"""
Join the (previously split) network halves together.
"""
connect_nodes_bi(self.nodes, 1, 2)
self.sync_all()
def sync_all(self, node_groups=None):
if not node_groups:
node_groups = [self.nodes]
for group in node_groups:
sync_blocks(group)
sync_mempools(group)
def enable_mocktime(self):
"""Enable mocktime for the script.
mocktime may be needed for scripts that use the cached version of the
blockchain. If the cached version of the blockchain is used without
mocktime then the mempools will not sync due to IBD.
        For backward compatibility of the python scripts with previous
        versions of the cache, this helper function sets mocktime to a
        fixed base timestamp (1454124732) + (201 * 10 * 60)."""
self.mocktime = 1454124732 + (201 * 10 * 60)
def disable_mocktime(self):
self.mocktime = 0
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
fh.setLevel(logging.DEBUG)
# Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
# User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
# Format logs the same as stompd's debug.log with microprecision (so log files can be concatenated and sorted)
formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("BitcoinRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self):
"""Initialize a pre-mined blockchain for use by the test.
Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
Afterward, create num_nodes copies from the cache."""
assert self.num_nodes <= MAX_NODES
create_cache = False
for i in range(MAX_NODES):
if not os.path.isdir(get_datadir_path(self.options.cachedir, i)):
create_cache = True
break
if create_cache:
self.log.debug("Creating data directories from cached datadir")
# find and delete old cache directories if any exist
for i in range(MAX_NODES):
if os.path.isdir(get_datadir_path(self.options.cachedir, i)):
shutil.rmtree(get_datadir_path(self.options.cachedir, i))
# Create cache directories, run bitcoinds:
for i in range(MAX_NODES):
datadir = initialize_datadir(self.options.cachedir, i)
args = [os.getenv("BITCOIND", "stompd"), "-spendzeroconfchange=1", "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0"]
if i > 0:
args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
self.nodes.append(TestNode(i, self.options.cachedir, extra_args=[], rpchost=None, timewait=None, binary=None, stderr=None, mocktime=self.mocktime, coverage_dir=None))
self.nodes[i].args = args
self.start_node(i)
# Wait for RPC connections to be ready
for node in self.nodes:
node.wait_for_rpc_connection()
# Create a 200-block-long chain; each of the 4 first nodes
# gets 25 mature blocks and 25 immature.
# Note: To preserve compatibility with older versions of
# initialize_chain, only 4 nodes will generate coins.
#
            # blocks are created with timestamps 60 seconds apart,
            # starting 201 minutes in the past
self.enable_mocktime()
block_time = self.mocktime - (201 * 60)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(self.nodes, block_time)
self.nodes[peer].generate(1)
block_time += 60
# Must sync before next peer starts generating blocks
sync_blocks(self.nodes)
# Shut them down, and clean up cache directories:
self.stop_nodes()
self.nodes = []
self.disable_mocktime()
def cache_path(n, *paths):
return os.path.join(get_datadir_path(self.options.cachedir, n), "regtest", *paths)
for i in range(MAX_NODES):
for entry in os.listdir(cache_path(i)):
if entry not in ['wallet.dat', 'chainstate', 'blocks', 'sporks', 'zerocoin', 'backups']:
os.remove(cache_path(i, entry))
for i in range(self.num_nodes):
from_dir = get_datadir_path(self.options.cachedir, i)
to_dir = get_datadir_path(self.options.tmpdir, i)
shutil.copytree(from_dir, to_dir)
initialize_datadir(self.options.tmpdir, i) # Overwrite port/rpcport in bitcoin.conf
def _initialize_chain_clean(self):
"""Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
for i in range(self.num_nodes):
initialize_datadir(self.options.tmpdir, i)
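# A minimal subclass, roughly as the BitcoinTestFramework docstring describes
# (names below are illustrative, not an actual test in this repo):
#
#     class ExampleTest(BitcoinTestFramework):
#         def set_test_params(self):
#             self.num_nodes = 2
#             self.setup_clean_chain = True
#         def run_test(self):
#             self.nodes[0].generate(10)
#             self.sync_all()
#             assert_equal(self.nodes[1].getblockcount(), 10)
#     if __name__ == '__main__':
#         ExampleTest().main()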
class ComparisonTestFramework(BitcoinTestFramework):
"""Test framework for doing p2p comparison testing
Sets up some stompd binaries:
- 1 binary: test binary
- 2 binaries: 1 test binary, 1 ref binary
- n>2 binaries: 1 test binary, n-1 ref binaries"""
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("BITCOIND", "stompd"),
help="stompd binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("BITCOIND", "stompd"),
help="stompd binary to use for reference nodes (if any)")
def setup_network(self):
extra_args = [['-whitelist=127.0.0.1']] * self.num_nodes
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args,
binary=[self.options.testbinary] +
[self.options.refbinary] * (self.num_nodes - 1))
self.start_nodes()
class SkipTest(Exception):
"""This exception is raised to skip a test"""
def __init__(self, message):
self.message = message
|
the-stack_0_12036 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
def fft_wavenumbers(x, y, shape_dat, shape_pdat):
"""
Compute the wavenumbers.
Parameters
----------
x : 1D array
Coordinates along x direction.
y : 1D array
Coordinates along y direction.
shape_dat : tuple
Shape of the input data.
shape_pdat : tuple
Shape of the pad.
Returns
-------
    u : 2D array
        Wavenumber grid along the x direction.
    v : 2D array
        Wavenumber grid along the y direction.
"""
dx = (np.amax(x) - np.amin(x))/(shape_dat[0] - 1)
dy = (np.amax(y) - np.amin(y))/(shape_dat[1] - 1)
fx = 2*np.pi*np.fft.fftfreq(shape_pdat[0], dx)
fy = 2*np.pi*np.fft.fftfreq(shape_pdat[1], dy)
v,u=np.meshgrid(fy, fx)
return (u,v)
def fft_pad_data(data, mode='edge'):
"""
    Pad the data and compute its 2D discrete Fourier transform.
Parameters
----------
data : 2D array
Input data.
    mode : str, optional
        Padding mode, as accepted by numpy.pad. The default is 'edge'.
Returns
-------
    fpdat : 2D complex array
        The 2D FFT of the padded data.
    mask : 2D boolean array
        The mask used later to unpad the data.
"""
n_points=int(2**(np.ceil(np.log(np.max(data.shape))/np.log(2))))
nx, ny = data.shape
padx = int((n_points - nx)/2)
pady = int((n_points - ny)/2)
padded_data = np.pad(data, ((padx, padx), (pady, pady)),mode)
mask = np.zeros_like(padded_data, dtype=bool)
mask[padx:padx+data.shape[0], pady:pady+data.shape[1]] = True
fpdat = np.fft.fft2(padded_data)
return (fpdat,mask)
def ifft_unpad_data(data_p, mask, shape_dat):
'''
Unpad the extended data to fit the original data shape.
Parameters
----------
data_p : 2D array
Padded data.
mask : boolean
The mask that will be used to unpad the data.
shape_dat : tuple
Shape of the original data.
Returns
-------
data : array
Unpadded data.
'''
ifft_data = np.real(np.fft.ifft2(data_p))
data = ifft_data[mask]
return np.reshape(data, shape_dat)
def butter2d_lp(shape, f, n):
"""
Designs a lowpass 2D Butterworth filter.
Modified from Peirce JW (2009) Generating stimuli for neuroscience using
PsychoPy. Front. Neuroinform. 2:10.
doi:10.3389/neuro.11.010.2008.
Parameters
----------
shape : tuple
Size of the filter.
f : float
Relative cutoff frequency of the filter.
n : int
Order of the filter, the higher n is the sharper the transition is.
Returns
-------
    filt : 2D array
        Filter kernel, centered on zero frequency.
"""
rows, cols = shape
x = np.linspace(-0.5, 0.5, cols)
y = np.linspace(-0.5, 0.5, rows)
radius = np.sqrt((x**2)[np.newaxis] + (y**2)[:, np.newaxis])
filt = 1 / (1.0 + (radius / f)**(2*n))
return (filt)
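# Rough sketch of how these helpers chain together to low-pass filter a grid
# (variable names, cutoff and order below are only illustrative):
#
#     fdata, mask = fft_pad_data(data)             # pad, then 2D FFT
#     filt = butter2d_lp(fdata.shape, f=0.1, n=2)  # centered filter kernel
#     fdata_f = fdata * np.fft.ifftshift(filt)     # move DC back to the corner
#     data_f = ifft_unpad_data(fdata_f, mask, data.shape)
#
# fft_wavenumbers() supplies the (u, v) grids for filters that are defined in
# terms of wavenumber rather than relative frequency.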
def plot_wav(decomp):
"""
Plot the data in DWT domain
Parameters
----------
data : list
Data in wavelet domain.
Returns
-------
None.
"""
plt.figure(figsize=(10,10))
gs = GridSpec(4, 4)
ax = plt.subplot(gs[0, 0])
plt.imshow(decomp[0])
plt.xticks([])
plt.yticks([])
ax = plt.subplot(gs[1,0])
plt.imshow(decomp[1][0])
plt.xticks([])
plt.yticks([])
ax = plt.subplot(gs[0, 1])
plt.imshow(decomp[1][1])
plt.xticks([])
plt.yticks([])
ax = plt.subplot(gs[1, 1])
plt.imshow(decomp[1][2])
plt.xticks([])
plt.yticks([])
ax = plt.subplot(gs[2:,:2])
plt.imshow(decomp[2][0])
plt.xticks([])
plt.yticks([])
ax = plt.subplot(gs[:2,2:])
plt.imshow(decomp[2][1])
plt.xticks([])
plt.yticks([])
ax = plt.subplot(gs[2:,2:])
plt.imshow(decomp[2][2])
plt.xticks([])
plt.yticks([])
plt.tight_layout()
    return
|
the-stack_0_12041 | from pid import PID
from lowpass import LowPassFilter
from yaw_controller import YawController
import rospy
GAS_DENSITY = 2.858
ONE_MPH = 0.44704
class Controller(object):
def __init__(self, vehicle_mass,
fuel_capacity,
brake_deadband,
decel_limit,
accel_limit,
wheel_radius,
wheel_base,
steer_ratio,
max_lat_accel,
max_steer_angle):
self.yaw_controller = YawController(wheel_base,
steer_ratio,
0.1,
max_lat_accel,
max_steer_angle)
kp = 0.3
ki = 0.1
kd = 0.0
mn = 0. # minimum throttle value
mx = 0.2 #max throttle
self.throttle_controller = PID(kp, ki, kd, mn, mx)
tau = 0.5 # 1/(2pi*tau) = cutoff frequency
ts = 0.02 # sample time
self.vel_lpf = LowPassFilter(tau, ts)
self.vehicle_mass = vehicle_mass
self.fuel_capacity = fuel_capacity
self.brake_deadband = brake_deadband
self.decel_limit = decel_limit
self.accel_limit = accel_limit
self.wheel_radius = wheel_radius
self.last_time = rospy.get_time()
def control(self, current_vel, dbw_enabled, linear_vel, angular_vel):
if not dbw_enabled:
self.throttle_controller.reset()
return 0., 0., 0.
current_vel = self.vel_lpf.filt(current_vel)
# rospy.logwarn("Angular vel: {0}".format(angular_vel))
# rospy.logwarn("Current vel: {0}".format(current_vel))
# rospy.logwarn("Target vel: {0}".format(linear_vel))
steering = self.yaw_controller.get_steering(linear_vel, angular_vel, current_vel)
# could add damping --> based on target_ang_vel - current_ang_vel
vel_error = linear_vel - current_vel
self.last_vel = current_vel
current_time = rospy.get_time()
sample_time = current_time - self.last_time
self.last_time = current_time
throttle = self.throttle_controller.step(vel_error, sample_time)
brake = 0
if linear_vel == 0. and current_vel <0.1:
throttle = 0
brake = 700 # N*m to hold car in place if stopped at light; acc ~ 1 m/s^2
elif linear_vel <.1 and vel_error < 0:
throttle = 0
decel = max(vel_error, self.decel_limit)
brake = abs(decel)*self.vehicle_mass*self.wheel_radius # Torque N*m
return throttle, brake, steering
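    # Worked example of the brake-torque formula above (purely illustrative
    # numbers): a requested decel of 1 m/s^2 on a ~1700 kg vehicle with a
    # ~0.24 m wheel radius gives brake ~= 1 * 1700 * 0.24 ~= 408 N*m.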
|
the-stack_0_12042 | import base64
import unittest
import zlib
from os.path import abspath, basename, dirname, join
from robot.utils.asserts import assert_equal, assert_true
from robot.utils.platform import PY2
from robot.result import Keyword, Message, TestCase, TestSuite
from robot.result.executionerrors import ExecutionErrors
from robot.model import Statistics
from robot.reporting.jsmodelbuilders import *
from robot.reporting.stringcache import StringIndex
try:
long
except NameError:
long = int
CURDIR = dirname(abspath(__file__))
def decode_string(string):
string = string if PY2 else string.encode('ASCII')
return zlib.decompress(base64.b64decode(string)).decode('UTF-8')
def remap(model, strings):
if isinstance(model, StringIndex):
if strings[model].startswith('*'):
# Strip the asterisk from a raw string.
return strings[model][1:]
return decode_string(strings[model])
elif isinstance(model, (int, long, type(None))):
return model
elif isinstance(model, tuple):
return tuple(remap(item, strings) for item in model)
else:
raise AssertionError("Item '%s' has invalid type '%s'" % (model, type(model)))
class TestBuildTestSuite(unittest.TestCase):
def test_default_suite(self):
self._verify_suite(TestSuite())
def test_suite_with_values(self):
suite = TestSuite('Name', 'Doc', {'m1': 'v1', 'M2': 'V2'}, None, 'Message',
'20111204 19:00:00.000', '20111204 19:00:42.001')
self._verify_suite(suite, 'Name', 'Doc', ('m1', '<p>v1</p>', 'M2', '<p>V2</p>'),
message='Message', start=0, elapsed=42001)
def test_relative_source(self):
self._verify_suite(TestSuite(source='non-existing'), source='non-existing')
source = join(CURDIR, 'test_jsmodelbuilders.py')
self._verify_suite(TestSuite(source=source), source=source,
relsource=basename(source))
def test_suite_html_formatting(self):
self._verify_suite(TestSuite(name='*xxx*', doc='*bold* <&>',
metadata={'*x*': '*b*', '<': '>'}),
name='*xxx*', doc='<b>bold</b> <&>',
metadata=('*x*', '<p><b>b</b></p>', '<', '<p>></p>'))
def test_default_test(self):
self._verify_test(TestCase())
def test_test_with_values(self):
test = TestCase('Name', '*Doc*', ['t1', 't2'], '1 minute', 'PASS', 'Msg',
'20111204 19:22:22.222', '20111204 19:22:22.333')
test.setup.config(kwname='setup', type='setup')
test.teardown.config(kwname='td', type='teardown')
k1 = self._verify_keyword(test.setup, type=1, kwname='setup')
k2 = self._verify_keyword(test.teardown, type=2, kwname='td')
self._verify_test(test, 'Name', '<b>Doc</b>', ('t1', 't2'),
'1 minute', 1, 'Msg', 0, 111, (k1, k2))
def test_name_escaping(self):
kw = Keyword('quote:"', 'and *url* https://url.com', '*"Doc"*',)
self._verify_keyword(kw, 0, 'quote:"', 'and *url* https://url.com', '<b>"Doc"</b>')
test = TestCase('quote:" and *url* https://url.com', '*"Doc"*',)
self._verify_test(test, 'quote:" and *url* https://url.com', '<b>"Doc"</b>')
suite = TestSuite('quote:" and *url* https://url.com', '*"Doc"*',)
self._verify_suite(suite, 'quote:" and *url* https://url.com', '<b>"Doc"</b>')
def test_default_keyword(self):
self._verify_keyword(Keyword())
def test_keyword_with_values(self):
kw = Keyword('KW Name', 'libname', 'http://doc', ('arg1', 'arg2'),
('${v1}', '${v2}'), ('tag1', 'tag2'), '1 second', 'setup',
'PASS', '20111204 19:42:42.000', '20111204 19:42:42.042')
self._verify_keyword(kw, 1, 'KW Name', 'libname',
'<a href="http://doc">http://doc</a>',
'arg1, arg2', '${v1}, ${v2}', 'tag1, tag2',
'1 second', 1, 0, 42)
def test_default_message(self):
self._verify_message(Message())
self._verify_min_message_level('INFO')
def test_message_with_values(self):
msg = Message('Message', 'DEBUG', timestamp='20111204 22:04:03.210')
self._verify_message(msg, 'Message', 1, 0)
self._verify_min_message_level('DEBUG')
def test_warning_linking(self):
msg = Message('Message', 'WARN', timestamp='20111204 22:04:03.210',
parent=TestCase().body.create_keyword())
self._verify_message(msg, 'Message', 3, 0)
links = self.context._msg_links
assert_equal(len(links), 1)
key = (msg.message, msg.level, msg.timestamp)
assert_equal(remap(links[key], self.context.strings), 't1-k1')
def test_error_linking(self):
msg = Message('ERROR Message', 'ERROR', timestamp='20150609 01:02:03.004',
parent=TestCase().body.create_keyword().body.create_keyword())
self._verify_message(msg, 'ERROR Message', 4, 0)
links = self.context._msg_links
assert_equal(len(links), 1)
key = (msg.message, msg.level, msg.timestamp)
assert_equal(remap(links[key], self.context.strings), 't1-k1-k1')
def test_message_with_html(self):
self._verify_message(Message('<img>'), '<img>')
self._verify_message(Message('<b></b>', html=True), '<b></b>')
def test_nested_structure(self):
suite = TestSuite()
suite.setup.config(kwname='setup', type='setup')
suite.teardown.config(kwname='td', type='teardown')
K1 = self._verify_keyword(suite.setup, type=1, kwname='setup')
K2 = self._verify_keyword(suite.teardown, type=2, kwname='td')
suite.suites = [TestSuite()]
suite.suites[0].tests = [TestCase(tags=['crit', 'xxx'])]
t = self._verify_test(suite.suites[0].tests[0], tags=('crit', 'xxx'))
suite.tests = [TestCase(), TestCase(status='PASS')]
S1 = self._verify_suite(suite.suites[0],
status=0, tests=(t,), stats=(1, 0, 1, 0))
suite.tests[0].body = [Keyword(type=Keyword.FOR_TYPE), Keyword()]
suite.tests[0].body[0].body = [Keyword(type=Keyword.FOR_ITEM_TYPE), Message()]
k = self._verify_keyword(suite.tests[0].body[0].body[0], type=4)
m = self._verify_message(suite.tests[0].body[0].messages[0])
k1 = self._verify_keyword(suite.tests[0].body[0], type=3, body=(k, m))
suite.tests[0].body[1].body = [Message(), Message('msg', level='TRACE')]
m1 = self._verify_message(suite.tests[0].body[1].messages[0])
m2 = self._verify_message(suite.tests[0].body[1].messages[1], 'msg', level=0)
k2 = self._verify_keyword(suite.tests[0].body[1], body=(m1, m2))
T1 = self._verify_test(suite.tests[0], body=(k1, k2))
T2 = self._verify_test(suite.tests[1], status=1)
self._verify_suite(suite, status=0, keywords=(K1, K2), suites=(S1,),
tests=(T1, T2), stats=(3, 1, 2, 0))
self._verify_min_message_level('TRACE')
def test_timestamps(self):
suite = TestSuite(starttime='20111205 00:33:33.333')
suite.setup.config(kwname='s1', starttime='20111205 00:33:33.334')
suite.setup.body.create_message('Message', timestamp='20111205 00:33:33.343')
suite.setup.body.create_message(level='DEBUG', timestamp='20111205 00:33:33.344')
suite.tests.create(starttime='20111205 00:33:34.333')
context = JsBuildingContext()
model = SuiteBuilder(context).build(suite)
self._verify_status(model[5], start=0)
self._verify_status(model[-2][0][8], start=1)
self._verify_mapped(model[-2][0][-1], context.strings,
((8, 10, 2, 'Message'), (8, 11, 1, '')))
self._verify_status(model[-3][0][4], start=1000)
def test_if(self):
test = TestSuite().tests.create()
if_ = test.body.create_if(condition='$x > 0', branch_status='NOT RUN')
else_if = if_.orelse.config(condition='$y > 0', branch_status='PASS')
else_ = else_if.orelse.config()
else_.body.create_keyword('z')
exp_if = (
5, '$x > 0', '', '', '', '', '', '', (3, None, 0), ()
)
exp_else_if = (
6, '$y > 0', '', '', '', '', '', '', (1, None, 0), ()
)
exp_else = (
7, '', '', '', '', '', '', '', (0, None, 0),
((0, 'z', '', '', '', '', '', '', (0, None, 0), ()),)
)
self._verify_test(test, body=(exp_if, exp_else_if, exp_else))
def _verify_status(self, model, status=0, start=None, elapsed=0):
assert_equal(model, (status, start, elapsed))
def _verify_suite(self, suite, name='', doc='', metadata=(), source='',
relsource='', status=2, message='', start=None, elapsed=0,
suites=(), tests=(), keywords=(), stats=(0, 0, 0, 0)):
status = (status, start, elapsed, message) \
if message else (status, start, elapsed)
doc = '<p>%s</p>' % doc if doc else ''
return self._build_and_verify(SuiteBuilder, suite, name, source,
relsource, doc, metadata, status,
suites, tests, keywords, stats)
def _get_status(self, *elements):
return elements if elements[-1] else elements[:-1]
def _verify_test(self, test, name='', doc='', tags=(), timeout='',
status=0, message='', start=None, elapsed=0, body=()):
status = (status, start, elapsed, message) \
if message else (status, start, elapsed)
doc = '<p>%s</p>' % doc if doc else ''
return self._build_and_verify(TestBuilder, test, name, timeout,
doc, tags, status, body)
def _verify_keyword(self, keyword, type=0, kwname='', libname='', doc='',
args='', assign='', tags='', timeout='', status=0,
start=None, elapsed=0, body=()):
status = (status, start, elapsed)
doc = '<p>%s</p>' % doc if doc else ''
return self._build_and_verify(KeywordBuilder, keyword, type, kwname,
libname, timeout, doc, args, assign, tags,
status, body)
def _verify_message(self, msg, message='', level=2, timestamp=None):
return self._build_and_verify(MessageBuilder, msg, 8, timestamp, level, message)
def _verify_min_message_level(self, expected):
assert_equal(self.context.min_level, expected)
def _build_and_verify(self, builder_class, item, *expected):
self.context = JsBuildingContext(log_path=join(CURDIR, 'log.html'))
model = builder_class(self.context).build(item)
self._verify_mapped(model, self.context.strings, expected)
return expected
def _verify_mapped(self, model, strings, expected):
mapped_model = tuple(remap(model, strings))
assert_equal(mapped_model, expected)
class TestSplitting(unittest.TestCase):
def test_test_keywords(self):
suite = self._get_suite_with_tests()
expected, _ = self._build_and_remap(suite)
expected_split = [expected[-3][0][-1], expected[-3][1][-1]]
expected[-3][0][-1], expected[-3][1][-1] = 1, 2
model, context = self._build_and_remap(suite, split_log=True)
assert_equal(context.strings, ('*', '*suite', '*t1', '*t2'))
assert_equal(model, expected)
assert_equal([strings for _, strings in context.split_results],
[('*', '*t1-k1', '*t1-k1-k1', '*t1-k2'), ('*', '*t2-k1')])
assert_equal([self._to_list(remap(*res)) for res in context.split_results],
expected_split)
def _get_suite_with_tests(self):
suite = TestSuite(name='suite')
suite.tests = [TestCase('t1'), TestCase('t2')]
suite.tests[0].body = [Keyword('t1-k1'), Keyword('t1-k2')]
suite.tests[0].body[0].body = [Keyword('t1-k1-k1')]
suite.tests[1].body = [Keyword('t2-k1')]
return suite
def _build_and_remap(self, suite, split_log=False):
context = JsBuildingContext(split_log=split_log)
model = remap(SuiteBuilder(context).build(suite), context.strings)
return self._to_list(model), context
def _to_list(self, model):
return list(self._to_list(item) if isinstance(item, tuple) else item
for item in model)
def test_suite_keywords(self):
suite = self._get_suite_with_keywords()
expected, _ = self._build_and_remap(suite)
expected_split = [expected[-2][0][-1], expected[-2][1][-1]]
expected[-2][0][-1], expected[-2][1][-1] = 1, 2
model, context = self._build_and_remap(suite, split_log=True)
assert_equal(context.strings, ('*', '*root', '*k1', '*k2'))
assert_equal(model, expected)
assert_equal([strings for _, strings in context.split_results],
[('*', '*k1-k2'), ('*',)])
assert_equal([self._to_list(remap(*res)) for res in context.split_results],
expected_split)
def _get_suite_with_keywords(self):
suite = TestSuite(name='root')
suite.setup.config(kwname='k1')
suite.teardown.config(kwname='k2')
suite.setup.body.create_keyword('k1-k2')
return suite
def test_nested_suite_and_test_keywords(self):
suite = self._get_nested_suite_with_tests_and_keywords()
expected, _ = self._build_and_remap(suite)
expected_split = [expected[-4][0][-3][0][-1], expected[-4][0][-3][1][-1],
expected[-4][1][-3][0][-1], expected[-4][1][-2][0][-1],
expected[-2][0][-1], expected[-2][1][-1]]
(expected[-4][0][-3][0][-1], expected[-4][0][-3][1][-1],
expected[-4][1][-3][0][-1], expected[-4][1][-2][0][-1],
expected[-2][0][-1], expected[-2][1][-1]) = 1, 2, 3, 4, 5, 6
model, context = self._build_and_remap(suite, split_log=True)
assert_equal(model, expected)
assert_equal([self._to_list(remap(*res)) for res in context.split_results],
expected_split)
def _get_nested_suite_with_tests_and_keywords(self):
suite = self._get_suite_with_keywords()
sub = TestSuite(name='suite2')
suite.suites = [self._get_suite_with_tests(), sub]
sub.setup.config(kwname='kw')
sub.setup.body.create_keyword('skw').body.create_message('Message')
sub.tests.create('test', doc='tdoc').body.create_keyword('koowee', doc='kdoc')
return suite
def test_message_linking(self):
suite = self._get_suite_with_keywords()
msg1 = suite.setup.body[0].body.create_message(
'Message 1', 'WARN', timestamp='20111204 22:04:03.210'
)
msg2 = suite.tests.create().body.create_keyword().body.create_message(
'Message 2', 'ERROR', timestamp='20111204 22:04:04.210'
)
context = JsBuildingContext(split_log=True)
SuiteBuilder(context).build(suite)
errors = ErrorsBuilder(context).build(ExecutionErrors([msg1, msg2]))
assert_equal(remap(errors, context.strings),
((8, -1000, 3, 'Message 1', 's1-k1-k1'),
(8, 0, 4, 'Message 2', 's1-t1-k1')))
assert_equal(remap(context.link(msg1), context.strings), 's1-k1-k1')
assert_equal(remap(context.link(msg2), context.strings), 's1-t1-k1')
assert_true('*s1-k1-k1' in context.strings)
assert_true('*s1-t1-k1' in context.strings)
for res in context.split_results:
assert_true('*s1-k1-k1' not in res[1])
assert_true('*s1-t1-k1' not in res[1])
class TestPruneInput(unittest.TestCase):
def setUp(self):
self.suite = TestSuite()
self.suite.setup.config(kwname='s')
self.suite.teardown.config(kwname='t')
s1 = self.suite.suites.create()
s1.setup.config(kwname='s1')
tc = s1.tests.create()
tc.setup.config(kwname='tcs')
tc.teardown.config(kwname='tct')
tc.body = [Keyword(), Keyword(), Keyword()]
tc.body[0].body = [Keyword(), Keyword(), Message(), Message(), Message()]
tc.body[0].teardown.config(kwname='kt')
s2 = self.suite.suites.create()
t1 = s2.tests.create()
t2 = s2.tests.create()
t1.body = [Keyword()]
t2.body = [Keyword(), Keyword()]
def test_no_pruning(self):
SuiteBuilder(JsBuildingContext(prune_input=False)).build(self.suite)
assert_equal(self.suite.setup.kwname, 's')
assert_equal(self.suite.teardown.kwname, 't')
assert_equal(self.suite.suites[0].setup.kwname, 's1')
assert_equal(self.suite.suites[0].teardown.kwname, None)
assert_equal(self.suite.suites[0].tests[0].setup.kwname, 'tcs')
assert_equal(self.suite.suites[0].tests[0].teardown.kwname, 'tct')
assert_equal(len(self.suite.suites[0].tests[0].body), 3)
assert_equal(len(self.suite.suites[0].tests[0].body[0].body), 5)
assert_equal(len(self.suite.suites[0].tests[0].body[0].messages), 3)
assert_equal(self.suite.suites[0].tests[0].body[0].teardown.kwname, 'kt')
assert_equal(len(self.suite.suites[1].tests[0].body), 1)
assert_equal(len(self.suite.suites[1].tests[1].body), 2)
def test_prune_suites_from_suite(self):
suite = self.suite
assert_equal(len(suite.suites), 2)
assert_equal(len(suite.tests), 0)
SuiteBuilder(JsBuildingContext(prune_input=True)).build(suite)
assert_equal(len(suite.suites), 0)
assert_equal(len(suite.tests), 0)
def test_prune_test_from_suite(self):
suite = self.suite.suites[0]
assert_equal(len(suite.suites), 0)
assert_equal(len(suite.tests), 1)
SuiteBuilder(JsBuildingContext(prune_input=True)).build(suite)
assert_equal(len(suite.suites), 0)
assert_equal(len(suite.tests), 0)
def test_prune_test(self):
test = self.suite.suites[0].tests[0]
assert_equal(len(test.body), 3)
TestBuilder(JsBuildingContext(prune_input=True)).build(test)
assert_equal(len(test.body), 0)
def test_prune_keyword(self):
kw = self.suite.suites[0].tests[0].body[0]
assert_equal(len(kw.body), 5)
assert_equal(len(kw.messages), 3)
KeywordBuilder(JsBuildingContext(prune_input=True)).build(kw)
assert_equal(len(kw.body), 0)
assert_equal(len(kw.messages), 0)
def test_prune_errors(self):
errors = ExecutionErrors([Message(), Message()])
ErrorsBuilder(JsBuildingContext(prune_input=False)).build(errors)
assert_equal(len(errors), 2)
ErrorsBuilder(JsBuildingContext(prune_input=True)).build(errors)
assert_equal(len(errors), 0)
class TestBuildStatistics(unittest.TestCase):
def test_total_stats(self):
all = self._build_statistics()[0][0]
self._verify_stat(all, 2, 2, 1, 'All Tests', '00:00:33')
def test_tag_stats(self):
stats = self._build_statistics()[1]
comb, t1, t2, t3 = self._build_statistics()[1]
self._verify_stat(t2, 2, 0, 0, 't2', '00:00:22',
doc='doc', links='t:url')
self._verify_stat(comb, 2, 0, 0, 'name', '00:00:22',
info='combined', combined='t1&t2')
self._verify_stat(t1, 2, 2, 0, 't1', '00:00:33')
self._verify_stat(t3, 0, 1, 1, 't3', '00:00:01')
def test_suite_stats(self):
root, sub1, sub2 = self._build_statistics()[2]
self._verify_stat(root, 2, 2, 1, 'root', '00:00:42', name='root', id='s1')
self._verify_stat(sub1, 1, 1, 1, 'root.sub1', '00:00:10', name='sub1', id='s1-s1')
self._verify_stat(sub2, 1, 1, 0, 'root.sub2', '00:00:30', name='sub2', id='s1-s2')
def _build_statistics(self):
return StatisticsBuilder().build(self._get_statistics())
def _get_statistics(self):
return Statistics(self._get_suite(),
suite_stat_level=2,
tag_stat_combine=[('t1&t2', 'name')],
tag_doc=[('t2', 'doc')],
tag_stat_link=[('?2', 'url', '%1')])
def _get_suite(self):
ts = lambda s, ms=0: '20120816 16:09:%02d.%03d' % (s, ms)
suite = TestSuite(name='root', starttime=ts(0), endtime=ts(42))
sub1 = TestSuite(name='sub1', starttime=ts(0), endtime=ts(10))
sub2 = TestSuite(name='sub2')
suite.suites = [sub1, sub2]
sub1.tests = [
TestCase(tags=['t1', 't2'], status='PASS', starttime=ts(0), endtime=ts(1, 500)),
TestCase(tags=['t1', 't3'], status='FAIL', starttime=ts(2), endtime=ts(3, 499)),
TestCase(tags=['t3'], status='SKIP', starttime=ts(3, 560), endtime=ts(3, 560))
]
sub2.tests = [
TestCase(tags=['t1', 't2'], status='PASS', starttime=ts(10), endtime=ts(30))
]
sub2.suites.create(name='below suite stat level')\
.tests.create(tags=['t1'], status='FAIL', starttime=ts(30), endtime=ts(40))
return suite
def _verify_stat(self, stat, pass_, fail, skip, label, elapsed, **attrs):
attrs.update({'pass': pass_, 'fail': fail, 'skip': skip,
'label': label, 'elapsed': elapsed})
assert_equal(stat, attrs)
class TestBuildErrors(unittest.TestCase):
def setUp(self):
msgs = [Message('Error', 'ERROR', timestamp='20111206 14:33:00.000'),
Message('Warning', 'WARN', timestamp='20111206 14:33:00.042')]
self.errors = ExecutionErrors(msgs)
def test_errors(self):
context = JsBuildingContext()
model = ErrorsBuilder(context).build(self.errors)
model = remap(model, context.strings)
assert_equal(model, ((8, 0, 4, 'Error'), (8, 42, 3, 'Warning')))
def test_linking(self):
self.errors.messages.create('Linkable', 'WARN',
timestamp='20111206 14:33:00.001')
context = JsBuildingContext()
msg = TestSuite().tests.create().body.create_keyword().body.create_message(
'Linkable', 'WARN', timestamp='20111206 14:33:00.001'
)
MessageBuilder(context).build(msg)
model = ErrorsBuilder(context).build(self.errors)
model = remap(model, context.strings)
assert_equal(model, ((8, -1, 4, 'Error'),
(8, 41, 3, 'Warning'),
(8, 0, 3, 'Linkable', 's1-t1-k1')))
if __name__ == '__main__':
unittest.main()
|
the-stack_0_12043 | # Copyright (c) 2020 original authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from expertai.nlapi.v1 import constants
from expertai.nlapi.v1.errors import ExpertAiRequestError, MissingParametersError
from expertai.nlapi.v1.object_mapper import ObjectMapper
from expertai.nlapi.v1.request import ExpertAiRequest
from expertai.nlapi.v1.response import ExpertAiResponse
from expertai.nlapi.v1.validate import ExpertAiValidation
class ExpertAiClient:
def __init__(self):
self.response_class = ExpertAiResponse
self._endpoint_path = ""
def urlpath_keywords(self, endpoint_path):
return re.findall(r"\{(\w+)\}", endpoint_path)
def verify_request(self, endpoint_path, **kwargs):
"""
Verify that the user has set all the required parameters.
Some of the endpoint URL paths are parameterised, therefore
the user has to provide a value for each parameter when
calling the endpoint method.
"""
required_params = self.urlpath_keywords(endpoint_path)
if not required_params:
return
params = kwargs.get("params") or {}
missing_params = set(required_params).difference(set(params.keys()))
if required_params and missing_params:
raise MissingParametersError(
"Missing request parameters: {}".format(
",".join(*[missing_params])
)
)
ExpertAiValidation().check_parameters(params=params)
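# Illustrative only -- the endpoint path below is a made-up example, not one of
# the real constants: urlpath_keywords("/analyze/{context}/{language}") returns
# ['context', 'language'], so verify_request() insists that params supplies a
# value for both keys before the URL is formatted.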
def get_method_name_for_endpoint(self, endpoint_path):
return dict(constants.URLS_AND_METHODS).get(endpoint_path)
def create_request(self, endpoint_path, params=None, body=None):
http_method_name = self.get_method_name_for_endpoint(endpoint_path)
if params:
self.verify_request(endpoint_path, params=params)
endpoint_path = endpoint_path.format(**params)
return ExpertAiRequest(
endpoint_path=endpoint_path,
http_method_name=http_method_name,
body=body,
)
def process_response(self, response):
if not response.successful:
raise ExpertAiRequestError(
"Response status code: {}".format(response.status_code)
)
elif response.bad_request:
return ExpertAiRequestError(
response.bad_request_message(response.json)
)
return ObjectMapper().read_json(response.json)
def full_analysis(self, params, body):
request = self.create_request(
endpoint_path=constants.FULL_ANALYSIS_PATH,
params=params,
body=body,
)
response = self.response_class(response=request.send())
return self.process_response(response)
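# Rough usage sketch for the method above. The exact params/body keys are
# assumptions driven by constants.FULL_ANALYSIS_PATH and the expert.ai REST API,
# so treat them as placeholders rather than the verified contract:
# client = ExpertAiClient()
# data_model = client.full_analysis(
#     params={"language": "en", "context": "standard"},
#     body={"document": {"text": "Some text to analyze"}},
# )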
def specific_resource_analysis(self, params, body):
request = self.create_request(
endpoint_path=constants.SPECIFIC_RESOURCE_ANALYSIS_PATH,
params=params,
body=body,
)
response = self.response_class(response=request.send())
return self.process_response(response)
def iptc_media_topics_classification(self, params, body):
request = self.create_request(
endpoint_path=constants.IPTC_MEDIA_TOPICS_CLASSIFICATION_PATH,
params=params,
body=body,
)
response = self.response_class(response=request.send())
return self.process_response(response)
def contexts(self):
request = self.create_request(endpoint_path=constants.CONTEXTS_PATH)
response = self.response_class(response=request.send())
return self.process_response(response)
def contexts_standard(self):
request = self.create_request(
endpoint_path=constants.CONTEXTS_STANDARD_PATH
)
response = self.response_class(response=request.send())
return self.process_response(response)
def iptc_taxonomies_list(self):
request = self.create_request(
endpoint_path=constants.TAXONOMIES_LIST_PATH
)
response = self.response_class(response=request.send())
return self.process_response(response)
def iptc_taxonomies(self):
request = self.create_request(
endpoint_path=constants.IPTC_TAXONOMIES_PATH
)
response = self.response_class(response=request.send())
return self.process_response(response)
|
the-stack_0_12044 | """
Renderer Module
This module defines the PlotlyRenderer class and a single function,
fig_to_plotly, which is intended to be the main way that user's will interact
with the matplotlylib package.
"""
from __future__ import absolute_import
import six
import warnings
import plotly.graph_objs as go
from plotly.matplotlylib.mplexporter import Renderer
from plotly.matplotlylib import mpltools
# Warning format
def warning_on_one_line(msg, category, filename, lineno, file=None, line=None):
return "%s:%s: %s:\n\n%s\n\n" % (filename, lineno, category.__name__, msg)
warnings.formatwarning = warning_on_one_line
class PlotlyRenderer(Renderer):
"""A renderer class inheriting from base for rendering mpl plots in plotly.
A renderer class to be used with an exporter for rendering matplotlib
plots in Plotly. This module defines the PlotlyRenderer class which handles
the creation of the JSON structures that get sent to plotly.
All class attributes available are defined in __init__().
Basic Usage:
# (mpl code) #
fig = gcf()
renderer = PlotlyRenderer(fig)
exporter = Exporter(renderer)
exporter.run(fig) # ... et voila
"""
def __init__(self):
"""Initialize PlotlyRenderer obj.
PlotlyRenderer obj is called on by an Exporter object to draw
matplotlib objects like figures, axes, text, etc.
All class attributes are listed here in the __init__ method.
"""
self.plotly_fig = go.Figure()
self.mpl_fig = None
self.current_mpl_ax = None
self.bar_containers = None
self.current_bars = []
self.axis_ct = 0
self.x_is_mpl_date = False
self.mpl_x_bounds = (0, 1)
self.mpl_y_bounds = (0, 1)
self.msg = "Initialized PlotlyRenderer\n"
def open_figure(self, fig, props):
"""Creates a new figure by beginning to fill out layout dict.
The 'autosize' key is set to false so that the figure will mirror
sizes set by mpl. The 'hovermode' key controls what shows up when you
mouse around a figure in plotly, it's set to show the 'closest' point.
Positional arguments:
fig -- a matplotlib.figure.Figure object.
props.keys(): [
'figwidth',
'figheight',
'dpi'
]
"""
self.msg += "Opening figure\n"
self.mpl_fig = fig
self.plotly_fig["layout"] = go.Layout(
width=int(props["figwidth"] * props["dpi"]),
height=int(props["figheight"] * props["dpi"]),
autosize=False,
hovermode="closest",
)
self.mpl_x_bounds, self.mpl_y_bounds = mpltools.get_axes_bounds(fig)
margin = go.layout.Margin(
l=int(self.mpl_x_bounds[0] * self.plotly_fig["layout"]["width"]),
r=int((1 - self.mpl_x_bounds[1]) * self.plotly_fig["layout"]["width"]),
t=int((1 - self.mpl_y_bounds[1]) * self.plotly_fig["layout"]["height"]),
b=int(self.mpl_y_bounds[0] * self.plotly_fig["layout"]["height"]),
pad=0,
)
self.plotly_fig["layout"]["margin"] = margin
def close_figure(self, fig):
"""Closes figure by cleaning up data and layout dictionaries.
The PlotlyRenderer's job is to create an appropriate set of data and
layout dictionaries. When the figure is closed, some cleanup and
repair is necessary. This method removes inappropriate dictionary
entries, freeing up Plotly to use defaults and best judgements to
complete the entries. This method is called by an Exporter object.
Positional arguments:
fig -- a matplotlib.figure.Figure object.
"""
self.plotly_fig["layout"]["showlegend"] = False
self.msg += "Closing figure\n"
def open_axes(self, ax, props):
"""Setup a new axes object (subplot in plotly).
Plotly stores information about subplots in different 'xaxis' and
'yaxis' objects which are numbered. These are just dictionaries
included in the layout dictionary. This function takes information
from the Exporter, fills in appropriate dictionary entries,
and updates the layout dictionary. PlotlyRenderer keeps track of the
number of plots by incrementing the axis_ct attribute.
Setting the proper plot domain in plotly is a bit tricky. Refer to
the documentation for mpltools.convert_x_domain and
mpltools.convert_y_domain.
Positional arguments:
ax -- an mpl axes object. This will become a subplot in plotly.
props.keys() -- [
'axesbg', (background color for axes obj)
'axesbgalpha', (alpha, or opacity for background)
'bounds', ((x0, y0, width, height) for axes)
'dynamic', (zoom/pan-able?)
'axes', (list: [xaxis, yaxis])
'xscale', (log, linear, or date)
'yscale',
'xlim', (range limits for x)
'ylim',
'xdomain' (xdomain=xlim, unless it's a date)
'ydomain'
]
"""
self.msg += " Opening axes\n"
self.current_mpl_ax = ax
self.bar_containers = [
c
for c in ax.containers # empty is OK
if c.__class__.__name__ == "BarContainer"
]
self.current_bars = []
self.axis_ct += 1
# set defaults in axes
xaxis = go.layout.XAxis(
anchor="y{0}".format(self.axis_ct), zeroline=False, ticks="inside"
)
yaxis = go.layout.YAxis(
anchor="x{0}".format(self.axis_ct), zeroline=False, ticks="inside"
)
# update defaults with things set in mpl
mpl_xaxis, mpl_yaxis = mpltools.prep_xy_axis(
ax=ax, props=props, x_bounds=self.mpl_x_bounds, y_bounds=self.mpl_y_bounds
)
xaxis.update(mpl_xaxis)
yaxis.update(mpl_yaxis)
bottom_spine = mpltools.get_spine_visible(ax, "bottom")
top_spine = mpltools.get_spine_visible(ax, "top")
left_spine = mpltools.get_spine_visible(ax, "left")
right_spine = mpltools.get_spine_visible(ax, "right")
xaxis["mirror"] = mpltools.get_axis_mirror(bottom_spine, top_spine)
yaxis["mirror"] = mpltools.get_axis_mirror(left_spine, right_spine)
xaxis["showline"] = bottom_spine
yaxis["showline"] = top_spine
# put axes in our figure
self.plotly_fig["layout"]["xaxis{0}".format(self.axis_ct)] = xaxis
self.plotly_fig["layout"]["yaxis{0}".format(self.axis_ct)] = yaxis
# let all subsequent dates be handled properly if required
if "type" in dir(xaxis) and xaxis["type"] == "date":
self.x_is_mpl_date = True
def close_axes(self, ax):
"""Close the axes object and clean up.
Bars from bar charts are given to PlotlyRenderer one-by-one,
thus they need to be taken care of at the close of each axes object.
The self.current_bars variable should be empty unless a bar
chart has been created.
Positional arguments:
ax -- an mpl axes object, not required at this time.
"""
self.draw_bars(self.current_bars)
self.msg += " Closing axes\n"
self.x_is_mpl_date = False
def draw_bars(self, bars):
# sort bars according to bar containers
mpl_traces = []
for container in self.bar_containers:
mpl_traces.append(
[
bar_props
for bar_props in self.current_bars
if bar_props["mplobj"] in container
]
)
for trace in mpl_traces:
self.draw_bar(trace)
def draw_bar(self, coll):
"""Draw a collection of similar patches as a bar chart.
After bars are sorted, an appropriate data dictionary must be created
to tell plotly about this data. Just like draw_line or draw_markers,
draw_bar translates patch/path information into something plotly
understands.
Positional arguments:
patch_coll -- a collection of patches to be drawn as a bar chart.
"""
tol = 1e-10
trace = [mpltools.make_bar(**bar_props) for bar_props in coll]
widths = [bar_props["x1"] - bar_props["x0"] for bar_props in trace]
heights = [bar_props["y1"] - bar_props["y0"] for bar_props in trace]
vertical = abs(sum(widths[0] - widths[iii] for iii in range(len(widths)))) < tol
horizontal = (
abs(sum(heights[0] - heights[iii] for iii in range(len(heights)))) < tol
)
if vertical and horizontal:
# Check for monotonic x. Can't both be true!
x_zeros = [bar_props["x0"] for bar_props in trace]
if all(
(x_zeros[iii + 1] > x_zeros[iii] for iii in range(len(x_zeros[:-1])))
):
orientation = "v"
else:
orientation = "h"
elif vertical:
orientation = "v"
else:
orientation = "h"
if orientation == "v":
self.msg += " Attempting to draw a vertical bar chart\n"
old_heights = [bar_props["y1"] for bar_props in trace]
for bar in trace:
bar["y0"], bar["y1"] = 0, bar["y1"] - bar["y0"]
new_heights = [bar_props["y1"] for bar_props in trace]
# check if we're stacked or not...
for old, new in zip(old_heights, new_heights):
if abs(old - new) > tol:
self.plotly_fig["layout"]["barmode"] = "stack"
self.plotly_fig["layout"]["hovermode"] = "x"
x = [bar["x0"] + (bar["x1"] - bar["x0"]) / 2 for bar in trace]
y = [bar["y1"] for bar in trace]
bar_gap = mpltools.get_bar_gap(
[bar["x0"] for bar in trace], [bar["x1"] for bar in trace]
)
if self.x_is_mpl_date:
x = [bar["x0"] for bar in trace]
formatter = (
self.current_mpl_ax.get_xaxis()
.get_major_formatter()
.__class__.__name__
)
x = mpltools.mpl_dates_to_datestrings(x, formatter)
else:
self.msg += " Attempting to draw a horizontal bar chart\n"
old_rights = [bar_props["x1"] for bar_props in trace]
for bar in trace:
bar["x0"], bar["x1"] = 0, bar["x1"] - bar["x0"]
new_rights = [bar_props["x1"] for bar_props in trace]
# check if we're stacked or not...
for old, new in zip(old_rights, new_rights):
if abs(old - new) > tol:
self.plotly_fig["layout"]["barmode"] = "stack"
self.plotly_fig["layout"]["hovermode"] = "y"
x = [bar["x1"] for bar in trace]
y = [bar["y0"] + (bar["y1"] - bar["y0"]) / 2 for bar in trace]
bar_gap = mpltools.get_bar_gap(
[bar["y0"] for bar in trace], [bar["y1"] for bar in trace]
)
bar = go.Bar(
orientation=orientation,
x=x,
y=y,
xaxis="x{0}".format(self.axis_ct),
yaxis="y{0}".format(self.axis_ct),
opacity=trace[0]["alpha"], # TODO: get all alphas if array?
marker=go.bar.Marker(
color=trace[0]["facecolor"], # TODO: get all
line=dict(width=trace[0]["edgewidth"]),
),
) # TODO ditto
if len(bar["x"]) > 1:
self.msg += " Heck yeah, I drew that bar chart\n"
self.plotly_fig.add_trace(bar),
if bar_gap is not None:
self.plotly_fig["layout"]["bargap"] = bar_gap
else:
self.msg += " Bar chart not drawn\n"
warnings.warn(
"found box chart data with length <= 1, "
"assuming data redundancy, not plotting."
)
def draw_marked_line(self, **props):
"""Create a data dict for a line obj.
This will draw 'lines', 'markers', or 'lines+markers'.
props.keys() -- [
'coordinates', ('data', 'axes', 'figure', or 'display')
'data', (a list of xy pairs)
'mplobj', (the matplotlib.lines.Line2D obj being rendered)
'label', (the name of the Line2D obj being rendered)
'linestyle', (linestyle dict, can be None, see below)
'markerstyle', (markerstyle dict, can be None, see below)
]
props['linestyle'].keys() -- [
'alpha', (opacity of Line2D obj)
'color', (color of the line if it exists, not the marker)
'linewidth',
'dasharray', (code for linestyle, see DASH_MAP in mpltools.py)
'zorder', (viewing precedence when stacked with other objects)
]
props['markerstyle'].keys() -- [
'alpha', (opacity of Line2D obj)
'marker', (the mpl marker symbol, see SYMBOL_MAP in mpltools.py)
'facecolor', (color of the marker face)
'edgecolor', (color of the marker edge)
'edgewidth', (width of marker edge)
'markerpath', (an SVG path for drawing the specified marker)
'zorder', (viewing precedence when stacked with other objects)
]
"""
self.msg += " Attempting to draw a line "
line, marker = {}, {}
if props["linestyle"] and props["markerstyle"]:
self.msg += "... with both lines+markers\n"
mode = "lines+markers"
elif props["linestyle"]:
self.msg += "... with just lines\n"
mode = "lines"
elif props["markerstyle"]:
self.msg += "... with just markers\n"
mode = "markers"
if props["linestyle"]:
color = mpltools.merge_color_and_opacity(
props["linestyle"]["color"], props["linestyle"]["alpha"]
)
# print(mpltools.convert_dash(props['linestyle']['dasharray']))
line = go.scatter.Line(
color=color,
width=props["linestyle"]["linewidth"],
dash=mpltools.convert_dash(props["linestyle"]["dasharray"]),
)
if props["markerstyle"]:
marker = go.scatter.Marker(
opacity=props["markerstyle"]["alpha"],
color=props["markerstyle"]["facecolor"],
symbol=mpltools.convert_symbol(props["markerstyle"]["marker"]),
size=props["markerstyle"]["markersize"],
line=dict(
color=props["markerstyle"]["edgecolor"],
width=props["markerstyle"]["edgewidth"],
),
)
if props["coordinates"] == "data":
marked_line = go.Scatter(
mode=mode,
name=(
str(props["label"])
if isinstance(props["label"], six.string_types)
else props["label"]
),
x=[xy_pair[0] for xy_pair in props["data"]],
y=[xy_pair[1] for xy_pair in props["data"]],
xaxis="x{0}".format(self.axis_ct),
yaxis="y{0}".format(self.axis_ct),
line=line,
marker=marker,
)
if self.x_is_mpl_date:
formatter = (
self.current_mpl_ax.get_xaxis()
.get_major_formatter()
.__class__.__name__
)
marked_line["x"] = mpltools.mpl_dates_to_datestrings(
marked_line["x"], formatter
)
self.plotly_fig.add_trace(marked_line),
self.msg += " Heck yeah, I drew that line\n"
else:
self.msg += " Line didn't have 'data' coordinates, " "not drawing\n"
warnings.warn(
"Bummer! Plotly can currently only draw Line2D "
"objects from matplotlib that are in 'data' "
"coordinates!"
)
def draw_image(self, **props):
"""Draw image.
Not implemented yet!
"""
self.msg += " Attempting to draw image\n"
self.msg += " Not drawing image\n"
warnings.warn(
"Aw. Snap! You're gonna have to hold off on "
"the selfies for now. Plotly can't import "
"images from matplotlib yet!"
)
def draw_path_collection(self, **props):
"""Add a path collection to data list as a scatter plot.
Current implementation defaults such collections as scatter plots.
Matplotlib supports collections that have many of the same parameters
in common like color, size, path, etc. However, they needn't all be
the same. Plotly does not currently support such functionality and
therefore, the style for the first object is taken and used to define
the remaining paths in the collection.
props.keys() -- [
'paths', (structure: [vertices, path_code])
'path_coordinates', ('data', 'axes', 'figure', or 'display')
'path_transforms', (mpl transform, including Affine2D matrix)
'offsets', (offset from axes, helpful if in 'data')
'offset_coordinates', ('data', 'axes', 'figure', or 'display')
'offset_order',
'styles', (style dict, see below)
'mplobj' (the collection obj being drawn)
]
props['styles'].keys() -- [
'linewidth', (one or more linewidths)
'facecolor', (one or more facecolors for path)
'edgecolor', (one or more edgecolors for path)
'alpha', (one or more opacites for path)
'zorder', (precedence when stacked)
]
"""
self.msg += " Attempting to draw a path collection\n"
if props["offset_coordinates"] is "data":
markerstyle = mpltools.get_markerstyle_from_collection(props)
scatter_props = {
"coordinates": "data",
"data": props["offsets"],
"label": None,
"markerstyle": markerstyle,
"linestyle": None,
}
self.msg += " Drawing path collection as markers\n"
self.draw_marked_line(**scatter_props)
else:
self.msg += " Path collection not linked to 'data', " "not drawing\n"
warnings.warn(
"Dang! That path collection is out of this "
"world. I totally don't know what to do with "
"it yet! Plotly can only import path "
"collections linked to 'data' coordinates"
)
def draw_path(self, **props):
"""Draw path, currently only attempts to draw bar charts.
This function attempts to sort a given path into a collection of
horizontal or vertical bar charts. Most of the actual code takes
place in functions from mpltools.py.
props.keys() -- [
'data', (a list of verticies for the path)
'coordinates', ('data', 'axes', 'figure', or 'display')
'pathcodes', (code for the path, structure: ['M', 'L', 'Z', etc.])
'style', (style dict, see below)
'mplobj' (the mpl path object)
]
props['style'].keys() -- [
'alpha', (opacity of path obj)
'edgecolor',
'facecolor',
'edgewidth',
'dasharray', (style for path's enclosing line)
'zorder' (precedence of obj when stacked)
]
"""
self.msg += " Attempting to draw a path\n"
is_bar = mpltools.is_bar(self.current_mpl_ax.containers, **props)
if is_bar:
self.current_bars += [props]
else:
self.msg += " This path isn't a bar, not drawing\n"
warnings.warn(
"I found a path object that I don't think is part "
"of a bar chart. Ignoring."
)
def draw_text(self, **props):
"""Create an annotation dict for a text obj.
Currently, plotly uses either 'page' or 'data' to reference
annotation locations. These refer to 'display' and 'data',
respectively for the 'coordinates' key used in the Exporter.
Appropriate measures are taken to transform text locations to
reference one of these two options.
props.keys() -- [
'text', (actual content string, not the text obj)
'position', (an x, y pair, not an mpl Bbox)
'coordinates', ('data', 'axes', 'figure', 'display')
'text_type', ('title', 'xlabel', or 'ylabel')
'style', (style dict, see below)
'mplobj' (actual mpl text object)
]
props['style'].keys() -- [
'alpha', (opacity of text)
'fontsize', (size in points of text)
'color', (hex color)
'halign', (horizontal alignment, 'left', 'center', or 'right')
'valign', (vertical alignment, 'baseline', 'center', or 'top')
'rotation',
'zorder', (precedence of text when stacked with other objs)
]
"""
self.msg += " Attempting to draw an mpl text object\n"
if not mpltools.check_corners(props["mplobj"], self.mpl_fig):
warnings.warn(
"Looks like the annotation(s) you are trying \n"
"to draw lies/lay outside the given figure size.\n\n"
"Therefore, the resulting Plotly figure may not be \n"
"large enough to view the full text. To adjust \n"
"the size of the figure, use the 'width' and \n"
"'height' keys in the Layout object. Alternatively,\n"
"use the Margin object to adjust the figure's margins."
)
align = props["mplobj"]._multialignment
if not align:
align = props["style"]["halign"] # mpl default
if "annotations" not in self.plotly_fig["layout"]:
self.plotly_fig["layout"]["annotations"] = []
if props["text_type"] == "xlabel":
self.msg += " Text object is an xlabel\n"
self.draw_xlabel(**props)
elif props["text_type"] == "ylabel":
self.msg += " Text object is a ylabel\n"
self.draw_ylabel(**props)
elif props["text_type"] == "title":
self.msg += " Text object is a title\n"
self.draw_title(**props)
else: # just a regular text annotation...
self.msg += " Text object is a normal annotation\n"
if props["coordinates"] is not "data":
self.msg += (
" Text object isn't linked to 'data' " "coordinates\n"
)
x_px, y_px = (
props["mplobj"].get_transform().transform(props["position"])
)
x, y = mpltools.display_to_paper(x_px, y_px, self.plotly_fig["layout"])
xref = "paper"
yref = "paper"
xanchor = props["style"]["halign"] # no difference here!
yanchor = mpltools.convert_va(props["style"]["valign"])
else:
self.msg += " Text object is linked to 'data' " "coordinates\n"
x, y = props["position"]
axis_ct = self.axis_ct
xaxis = self.plotly_fig["layout"]["xaxis{0}".format(axis_ct)]
yaxis = self.plotly_fig["layout"]["yaxis{0}".format(axis_ct)]
if (
xaxis["range"][0] < x < xaxis["range"][1]
and yaxis["range"][0] < y < yaxis["range"][1]
):
xref = "x{0}".format(self.axis_ct)
yref = "y{0}".format(self.axis_ct)
else:
self.msg += (
" Text object is outside "
"plotting area, making 'paper' reference.\n"
)
x_px, y_px = (
props["mplobj"].get_transform().transform(props["position"])
)
x, y = mpltools.display_to_paper(
x_px, y_px, self.plotly_fig["layout"]
)
xref = "paper"
yref = "paper"
xanchor = props["style"]["halign"] # no difference here!
yanchor = mpltools.convert_va(props["style"]["valign"])
annotation = go.layout.Annotation(
text=(
str(props["text"])
if isinstance(props["text"], six.string_types)
else props["text"]
),
opacity=props["style"]["alpha"],
x=x,
y=y,
xref=xref,
yref=yref,
align=align,
xanchor=xanchor,
yanchor=yanchor,
showarrow=False, # change this later?
font=go.layout.annotation.Font(
color=props["style"]["color"], size=props["style"]["fontsize"]
),
)
self.plotly_fig["layout"]["annotations"] += (annotation,)
self.msg += " Heck, yeah I drew that annotation\n"
def draw_title(self, **props):
"""Add a title to the current subplot in layout dictionary.
If there exists more than a single plot in the figure, titles revert
to 'page'-referenced annotations.
props.keys() -- [
'text', (actual content string, not the text obj)
'position', (an x, y pair, not an mpl Bbox)
'coordinates', ('data', 'axes', 'figure', 'display')
'text_type', ('title', 'xlabel', or 'ylabel')
'style', (style dict, see below)
'mplobj' (actual mpl text object)
]
props['style'].keys() -- [
'alpha', (opacity of text)
'fontsize', (size in points of text)
'color', (hex color)
'halign', (horizontal alignment, 'left', 'center', or 'right')
'valign', (vertical alignment, 'baseline', 'center', or 'top')
'rotation',
'zorder', (precedence of text when stacked with other objs)
]
"""
self.msg += " Attempting to draw a title\n"
if len(self.mpl_fig.axes) > 1:
self.msg += (
" More than one subplot, adding title as " "annotation\n"
)
x_px, y_px = props["mplobj"].get_transform().transform(props["position"])
x, y = mpltools.display_to_paper(x_px, y_px, self.plotly_fig["layout"])
annotation = go.layout.Annotation(
text=props["text"],
font=go.layout.annotation.Font(
color=props["style"]["color"], size=props["style"]["fontsize"]
),
xref="paper",
yref="paper",
x=x,
y=y,
xanchor="center",
yanchor="bottom",
showarrow=False, # no arrow for a title!
)
self.plotly_fig["layout"]["annotations"] += (annotation,)
else:
self.msg += (
" Only one subplot found, adding as a " "plotly title\n"
)
self.plotly_fig["layout"]["title"] = props["text"]
titlefont = dict(
size=props["style"]["fontsize"], color=props["style"]["color"]
)
self.plotly_fig["layout"]["titlefont"] = titlefont
def draw_xlabel(self, **props):
"""Add an xaxis label to the current subplot in layout dictionary.
props.keys() -- [
'text', (actual content string, not the text obj)
'position', (an x, y pair, not an mpl Bbox)
'coordinates', ('data', 'axes', 'figure', 'display')
'text_type', ('title', 'xlabel', or 'ylabel')
'style', (style dict, see below)
'mplobj' (actual mpl text object)
]
props['style'].keys() -- [
'alpha', (opacity of text)
'fontsize', (size in points of text)
'color', (hex color)
'halign', (horizontal alignment, 'left', 'center', or 'right')
'valign', (vertical alignment, 'baseline', 'center', or 'top')
'rotation',
'zorder', (precedence of text when stacked with other objs)
]
"""
self.msg += " Adding xlabel\n"
axis_key = "xaxis{0}".format(self.axis_ct)
self.plotly_fig["layout"][axis_key]["title"] = str(props["text"])
titlefont = dict(size=props["style"]["fontsize"], color=props["style"]["color"])
self.plotly_fig["layout"][axis_key]["titlefont"] = titlefont
def draw_ylabel(self, **props):
"""Add a yaxis label to the current subplot in layout dictionary.
props.keys() -- [
'text', (actual content string, not the text obj)
'position', (an x, y pair, not an mpl Bbox)
'coordinates', ('data', 'axes', 'figure', 'display')
'text_type', ('title', 'xlabel', or 'ylabel')
'style', (style dict, see below)
'mplobj' (actual mpl text object)
]
props['style'].keys() -- [
'alpha', (opacity of text)
'fontsize', (size in points of text)
'color', (hex color)
'halign', (horizontal alignment, 'left', 'center', or 'right')
'valign', (vertical alignment, 'baseline', 'center', or 'top')
'rotation',
'zorder', (precedence of text when stacked with other objs)
]
"""
self.msg += " Adding ylabel\n"
axis_key = "yaxis{0}".format(self.axis_ct)
self.plotly_fig["layout"][axis_key]["title"] = props["text"]
titlefont = dict(size=props["style"]["fontsize"], color=props["style"]["color"])
self.plotly_fig["layout"][axis_key]["titlefont"] = titlefont
def resize(self):
"""Revert figure layout to allow plotly to resize.
By default, PlotlyRenderer tries its hardest to precisely mimic an
mpl figure. However, plotly is pretty good with aesthetics. By
running PlotlyRenderer.resize(), layout parameters are deleted. This
lets plotly choose them instead of mpl.
"""
self.msg += "Resizing figure, deleting keys from layout\n"
for key in ["width", "height", "autosize", "margin"]:
try:
del self.plotly_fig["layout"][key]
except (KeyError, AttributeError):
pass
def strip_style(self):
self.msg += "Stripping mpl style is no longer supported\n"
|
the-stack_0_12046 | #!/usr/bin/env python
# -*- encoding:utf-8 -*-
"""
gh_lists.py MILESTONE
Functions for Github API requests.
"""
from __future__ import print_function, division, absolute_import
import os
import re
import sys
import json
import collections
import argparse
from urllib2 import urlopen
Issue = collections.namedtuple('Issue', ('id', 'title', 'url'))
def main():
p = argparse.ArgumentParser(usage=__doc__.lstrip())
p.add_argument('--project', default='holgern/pyedflib')
p.add_argument('milestone')
args = p.parse_args()
getter = CachedGet('gh_cache.json')
try:
milestones = get_milestones(getter, args.project)
if args.milestone not in milestones:
msg = "Milestone {0} not available. Available milestones: {1}"
msg = msg.format(args.milestone, u", ".join(sorted(milestones)))
p.error(msg)
issues = get_issues(getter, args.project, args.milestone)
issues.sort()
finally:
getter.save()
prs = [x for x in issues if u'/pull/' in x.url]
issues = [x for x in issues if x not in prs]
def print_list(title, items):
print()
print(title)
print("-"*len(title))
print()
for issue in items:
msg = u"- `#{0} <{1}>`__: {2}"
title = re.sub(u"\s+", u" ", issue.title.strip())
if len(title) > 60:
remainder = re.sub(u"\s.*$", u"...", title[60:])
if len(remainder) > 20:
remainder = title[:80] + u"..."
else:
title = title[:60] + remainder
msg = msg.format(issue.id, issue.url, title)
print(msg)
print()
msg = u"Issues closed for {0}".format(args.milestone)
print_list(msg, issues)
msg = u"Pull requests for {0}".format(args.milestone)
print_list(msg, prs)
return 0
def get_milestones(getter, project):
url = "https://api.github.com/repos/{project}/milestones".format(project=project)
raw_data, info = getter.get(url)
data = json.loads(raw_data)
milestones = {}
for ms in data:
milestones[ms[u'title']] = ms[u'number']
return milestones
def get_issues(getter, project, milestone):
milestones = get_milestones(getter, project)
mid = milestones[milestone]
url = "https://api.github.com/repos/{project}/issues?milestone={mid}&state=closed&sort=created&direction=asc"
url = url.format(project=project, mid=mid)
raw_datas = []
while True:
raw_data, info = getter.get(url)
raw_datas.append(raw_data)
if 'link' not in info:
break
m = re.search('<(.*?)>; rel="next"', info['link'])
if m:
url = m.group(1)
continue
break
issues = []
for raw_data in raw_datas:
data = json.loads(raw_data)
for issue_data in data:
issues.append(Issue(issue_data[u'number'],
issue_data[u'title'],
issue_data[u'html_url']))
return issues
class CachedGet(object):
def __init__(self, filename):
self.filename = filename
if os.path.isfile(filename):
print("[gh_lists] using {0} as cache (remove it if you want fresh data)".format(filename),
file=sys.stderr)
with open(filename, 'rb') as f:
self.cache = json.load(f)
else:
self.cache = {}
def get(self, url):
url = unicode(url)
if url not in self.cache:
print("[gh_lists] get:", url, file=sys.stderr)
req = urlopen(url)
if req.getcode() != 200:
raise RuntimeError()
data = req.read()
info = dict(req.info())
self.cache[url] = (data, info)
req.close()
else:
print("[gh_lists] get (cached):", url, file=sys.stderr)
return self.cache[url]
def save(self):
tmp = self.filename + ".new"
with open(tmp, 'wb') as f:
json.dump(self.cache, f)
os.rename(tmp, self.filename)
if __name__ == "__main__":
sys.exit(main())
|
the-stack_0_12049 | import os
import numpy as np
import music21
import math
# Parameter n_pitch: pitch range goes from 0 to 127
MAX_PITCH = 128
# Parameter d_[duration]_[dots]:
SIGN_DUR = "d"
# Parameter v_[velocity]: volume of the following notes, ranges from 4, 8, 12, ... up to 128 (in steps of 4)
SIGN_VELO = "v"
MIN_VELO = 0
MAX_VELO = 128
# Parameter t_[tempo]: tempo of the following notes, ranges from 24, 28, 32, ... up to 160 (in steps of 4)
SIGN_TEMP0 = "t"
MIN_TEMP0 = 24
MAX_TEMPO = 128
# character marking the end of the piece (End Of File)
SIGN_EOF = "\n"
# new note
SIGN_NOTE = "n"
# character for the wait time
SIGN_WAIT = "w"
# triple-dotted breve and triple-dotted 32nd note
THREE_DOTTED_BREVE = 15
THREE_DOTTED_32ND = 0.21875
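# Sketch of the token stream these constants produce (the concrete values are
# illustrative, not taken from a real file): a single time step might encode as
#   "t_120 v_100 d_quarter_0 n_60 n_64 w_1"
# i.e. tempo, velocity and duration tokens apply to the notes that follow them,
# "n_<pitch>" adds a note, and "w_<k>" advances time by k steps (sixteenth notes
# at the default sample_freq of 4); SIGN_EOF terminates one version of a piece.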
def load_midi(data_path, sample_freq=4, piano_range=(33, 93), transpo_range=10, stretching_range=10):
text = ""
vocab = set()
if os.path.isfile(data_path):
# the given path is a single MIDI file
file_extension = os.path.splitext(data_path)[1]
if file_extension == ".midi" or file_extension == ".mid":
text = parse_midi(file_path=data_path, piano_range=piano_range, sample_freq=sample_freq,
transpo_range=transpo_range, stretching_range=stretching_range)
vocab = set(text.split(" "))
else:
# load each file individually
for file in os.listdir(data_path):
file_path = os.path.join(data_path, file)
file_extension = os.path.splitext(file_path)[1]
# check that file_path is not another directory and that the file extension matches (.mid or .midi)
if os.path.isfile(file_path) and (file_extension == ".midi" or file_extension == ".mid"):
encoded_midi = parse_midi(file_path=file_path, piano_range=piano_range, sample_freq=sample_freq,
transpo_range=transpo_range, stretching_range=stretching_range)
if len(encoded_midi) > 0:
words = set(encoded_midi.split(" "))
vocab = vocab | words
text += encoded_midi + " "
# remove the trailing space
text = text[:-1]
return text, vocab
def parse_midi(file_path: str, piano_range, sample_freq, transpo_range, stretching_range):
midi_file_path = None
print(f"> Parse MIDI-File: {file_path}")
# a single file including its path may also be passed as the parameter:
midi_dir = os.path.dirname(file_path)
midi_name = os.path.basename(file_path).split(".")[0]
# if a txt file for this MIDI file already exists, load it
midi_txt_name = os.path.join(midi_dir, midi_name + ".txt")
if (os.path.isfile(midi_txt_name)):
midi_file_path = open(midi_txt_name, "r")
encoded_midi = midi_file_path.read()
else:
# load the MIDI file with music21
midi = music21.midi.MidiFile()
midi.open(file_path)
midi.read()
midi.close()
# convert the MIDI file into a list of notes and chords
encoded_midi = midi_to_encoded(midifile=midi, piano_range=piano_range, sample_freq=sample_freq,
transpo_range=transpo_range, stretching_range=stretching_range)
if len(encoded_midi) > 0:
# create a new txt file
midi_file_path = open(midi_txt_name, "w+")
midi_file_path.write(encoded_midi)
midi_file_path.flush()
if midi_file_path: midi_file_path.close()
return encoded_midi
def midi_to_encoded(midifile, piano_range, sample_freq, transpo_range, stretching_range):
try:
stream = music21.midi.translate.midiFileToStream(midifile)
except:
return []
piano_roll = midi_to_piano_roll(midi_stream=stream, sample_freq=sample_freq, piano_range=piano_range,
transpo_range=transpo_range, stretching_range=stretching_range)
encoded = piano_roll_to_encoded(piano_roll)
return " ".join(encoded)
def piano_roll_to_encoded(piano_roll):
# convert the piano_roll into a list of strings representing the notes
encoded = {}
counter = 0
for version in piano_roll:
# set the last tempo, velocity and duration to -1
_tempo = -1
_velo = -1
_duration = -1.0
version_encoded = []
for i in range(len(version)):
# the tempo is stored in the last slot of each time step
tempo = version[i, -1][0]
# a new tempo token is added
if tempo != 0 and tempo != _tempo:
version_encoded.append(SIGN_TEMP0 + "_" + str(int(tempo)))
_tempo = tempo
# continue with the current time step
for next_step in range(len(version[i]) -1):
duration = version[i, next_step][0]
velo = int(version[i, next_step][1])
# new velocity
if velo != 0 and velo != _velo:
version_encoded.append(SIGN_VELO + "_" + str(velo))
_velo = velo
# new duration
if duration != 0 and duration != _duration:
duration_tuple = music21.duration.durationTupleFromQuarterLength(duration)
version_encoded.append(SIGN_DUR + "_" + duration_tuple.type + "_" + str(duration_tuple.dots))
_duration = duration
# a new note token is added
if velo != 0 and duration != 0:
version_encoded.append(SIGN_NOTE + "_" + str(next_step))
# end of this time step
if (len(version_encoded) > 0) and version_encoded[-1][0] == SIGN_WAIT:
# increment the wait time by 1
version_encoded[-1] = "w_" + str(int(version_encoded[-1].split("_")[1]) + 1)
else:
version_encoded.append("w_1")
# mark the end of the piece
version_encoded.append(SIGN_EOF)
# check whether this version of the MIDI file has already been added
version_encoded_str = " ".join(version_encoded)
if version_encoded_str not in encoded:
encoded[version_encoded_str] = counter
counter += 1
return encoded.keys()
def write(encoded_midi, path):
# creates a MIDI file from the given encoded MIDI data
midi = encoded_to_midi(encoded_midi)
midi.open(path, "wb")
midi.write()
midi.close()
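# Usage sketch (paths are hypothetical): encode a folder of MIDI files, then
# write the first encoded version back out as a .mid file.
# text, vocab = load_midi("midi_folder/")
# write(text.split(SIGN_EOF)[0], "reconstructed.mid")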
def encoded_to_midi(note_encoded, ts_duration=0.25):
notes = []
velo = 100
duration = "16th"
dots = 0
ts = 0
for note in note_encoded.split(" "):
if len(note) == 0:
continue
elif note[0] == SIGN_WAIT:
wait_counter = int(note.split("_")[1])
ts += wait_counter
elif note[0] == SIGN_NOTE:
pitch = int(note.split("_")[1])
note = music21.note.Note(pitch)
note.duration = music21.duration.Duration(type=duration, dots=dots)
note.offset = ts * ts_duration
note.volume.velocity = velo
notes.append(note)
elif note[0] == SIGN_DUR:
duration = note.split("_")[1]
dots = int(note.split("_")[2])
elif note[0] == SIGN_VELO:
velo = int(note.split("_")[1])
elif note[0] == SIGN_TEMP0:
if note.split("_")[1] != "":
tempo = int(note.split("_")[1])
if tempo > 0:
mark = music21.tempo.MetronomeMark(number=tempo)
mark.offset = ts * ts_duration
notes.append(mark)
piano = music21.instrument.fromString("Piano")
notes.insert(0, piano)
piano_stream = music21.stream.Stream(notes)
main_stream = music21.stream.Stream([piano_stream])
midi_file = music21.midi.translate.streamToMidiFile(main_stream)
return midi_file
def midi_parse_notes(midi_stream, sample_freq):
note_filter = music21.stream.filters.ClassFilter('Note')
events = []
notes_list = midi_stream.recurse().addFilter(note_filter)
for note in notes_list:
pitch = note.pitch.midi
dur = note.duration.quarterLength
velo = note.volume.velocity
# round down
offset = math.floor(note.offset * sample_freq)
events.append((pitch, dur, velo, offset))
return events
def midi_parse_chords(midi_stream, sample_freq):
chord_filter = music21.stream.filters.ClassFilter('Chord')
events = []
chords_list = midi_stream.recurse().addFilter(chord_filter)
for chord in chords_list:
pitches_in_chord = chord.pitches
for p in pitches_in_chord:
pitch = p.midi
dur = chord.duration.quarterLength
velo = chord.volume.velocity
offset = math.floor(chord.offset * sample_freq)
events.append((pitch, dur, velo, offset))
return events
def midi_parse_metronome(midi_stream, sample_freq):
metro_filter = music21.stream.filters.ClassFilter('MetronomeMark')
events = []
metro_list = midi_stream.recurse().addFilter(metro_filter)
for metro in metro_list:
time = int(metro.number)
offset = math.floor(metro.offset * sample_freq)
events.append((time, offset))
return events
def midi_to_notes(midi_stream, sample_freq, transpo_range):
notes = []
notes += midi_parse_notes(midi_stream=midi_stream, sample_freq=sample_freq)
notes += midi_parse_chords(midi_stream=midi_stream, sample_freq=sample_freq)
# transpose all notes into the desired range
transposed_notes = transpose_notes(notes, transpo_range)
return transposed_notes
def transpose_notes(notes, transpo_range):
transpos = []
first_key = -math.floor(transpo_range/2)
last_key = math.ceil(transpo_range/2)
for key in range(first_key, last_key):
notes_in_key = []
for n in notes:
pitch, dur, velo, offset = n
new_pitch = pitch + key
notes_in_key.append((new_pitch, dur, velo, offset))
transpos.append(notes_in_key)
return transpos
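# Example: with the default transpo_range of 10, keys run from -5 to +4
# semitones (range(-5, 5)), so transpose_notes returns 10 shifted copies of the
# note list, one per key.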
def midi_to_piano_roll(midi_stream, sample_freq, piano_range, transpo_range, stretching_range):
# compute the number of time steps in the piano roll
time_steps = math.floor(midi_stream.duration.quarterLength * sample_freq) + 1
# MIDI file --> list of (pitch, duration, velocity, offset)
transpos = midi_to_notes(midi_stream=midi_stream, sample_freq=sample_freq, transpo_range=transpo_range)
time_events = midi_parse_metronome(midi_stream=midi_stream, sample_freq=sample_freq)
time_stretches = stretch_time(time_events=time_events, stretching_range=stretching_range)
piano_roll_notes = notes_to_piano_roll(transpositions=transpos, time_stretches=time_stretches,
time_steps=time_steps, piano_range=piano_range)
return piano_roll_notes
def notes_to_piano_roll(transpositions, time_stretches, time_steps, piano_range):
performances = []
min_pitch, max_pitch = piano_range
for t in range(len(transpositions)):
for s in range(len(time_stretches)):
# new piano roll with the computed size
# an extra dimension holds the duration and velocity of each cell
piano_roll = np.zeros((time_steps, MAX_PITCH + 1, 2))
for note in transpositions[t]:
pitch, dur, velo, offset = note
if dur == 0.0:
continue
pitch = clamp_pitch(pitch=pitch, max=max_pitch, min=min_pitch)
piano_roll[offset, pitch][0] = clamp_duration(dur)
piano_roll[offset, pitch][1] = discretize_value(val=velo, bins=32, range_=(MIN_VELO, MAX_VELO))
for time_events in time_stretches[s]:
time, offset = time_events
piano_roll[offset, -1][0] = discretize_value(val=time, bins=100, range_=(MIN_TEMP0, MAX_TEMPO))
performances.append(piano_roll)
return performances
def stretch_time(time_events, stretching_range):
stretches = []
slower_time = -math.floor(stretching_range/2)
faster_time = math.ceil(stretching_range/2)
for stretch_time in range(slower_time, faster_time):
time_events_in_stretch = []
for e in time_events:
time, offset = e
s_time = time + 0.05 * stretch_time * MAX_TEMPO
time_events_in_stretch.append((s_time, offset))
stretches.append(time_events_in_stretch)
return stretches
def discretize_value(val, bins, range_):
min_val, max_val = range_
val = int(max(min_val, val))
val = int(min(val, max_val))
bin_size = (max_val/bins)
return math.floor(val/bin_size) * bin_size
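# Worked example (velocity case): discretize_value(77, bins=32, range_=(0, 128))
# clamps 77 into [0, 128], uses bin_size = 128 / 32 = 4.0 and returns
# floor(77 / 4.0) * 4.0 = 76.0.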
def clamp_pitch(pitch, max, min):
while pitch < min:
pitch += 12
while pitch >= max:
pitch -= 12
return pitch
def clamp_duration(dur, max=THREE_DOTTED_BREVE, min=THREE_DOTTED_32ND):
# if the given duration (dur) is greater than the maximum (triple-dotted breve)
if dur > max:
dur = max
# if the duration is less than the minimum (triple-dotted 32nd)
if dur < min:
dur = min
dur_tuple = music21.duration.durationTupleFromQuarterLength(dur)
if dur_tuple.type == "inexpressible":
duration_clos_type = music21.duration.quarterLengthToClosestType(dur)[0]
dur = music21.duration.typeToDuration[duration_clos_type]
return dur
|
the-stack_0_12051 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import time
class account_voucher_line(osv.osv):
def _supplier_invoice_number(self, cursor, user, ids, name, arg, context=None):
res = {}
cursor.execute("""SELECT vl.id, i.supplier_invoice_number
FROM account_voucher_line vl
inner join account_move_line ml on vl.move_line_id = ml.id
left outer join account_invoice i on ml.move_id = i.move_id
WHERE vl.id IN %s""",(tuple(ids),))
for line_id, supplier_invoice_number in cursor.fetchall():
res[line_id] = supplier_invoice_number
return res
_inherit = 'account.voucher.line'
_columns = {
'supplier_invoice_number': fields.function(_supplier_invoice_number, string='Supplier Invoice Number', type='char'),
}
account_voucher_line()
class account_voucher(osv.osv):
_inherit = 'account.voucher'
_columns = {
'date_cheque':fields.date('Cheque Date', readonly=True, select=True, states={'draft':[('readonly',False)]}),
'number_cheque':fields.char('Cheque No.', size=64),
}
_defaults = {
'date_cheque': lambda *a: time.strftime('%Y-%m-%d'),
}
account_voucher()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
the-stack_0_12053 | #@+leo-ver=5-thin
#@+node:tbrown.20091029123555.5319: * @file ../plugins/attrib_edit.py
#@+<< docstring >>
#@+node:tbrown.20091009210724.10972: ** << docstring >>
r""" Edits user attributes in a Qt frame.
This plugin creates a frame for editing attributes similar to::
Name: Fred Blogs
Home: 555-555-5555
Work: 555-555-5556
``attrib_edit`` is also intended to provide attribute editing for
other plugins, see below.
The editor panel appears in the Log pane in its own tab. If the free_layout
system is active you can move it into its own pane (e.g. below the body text)
by right clicking the pane dividers.
The attributes can be stored in different ways, three modes are implemented
currently:
v.u mode
These attributes are stored in the "unknownAttributes" (uA) data for
each node, accessed via v.u.
Field:
Attributes are lines starting (no whitespace) with "AttributeName:" in
the body text.
@Child
Attributes are the head strings of child nodes when the head string
starts with '@AttributeName' where the first letter (second character)
must be capitalized.
The plugin defines the following commands, available either in the
plugin's sub-menu in the Plugins menu, or as ``Alt-X attrib-edit-*``.
attrib-edit-modes
Select which attribute setting / getting modes to use. More than one mode
can be used at the same time.
You can also control which modes are active by listing them
with the @data attrib_edit_active_modes setting. For example::
Field:
@Child
# v.u mode
would cause only the "Field:" and "@Child" modes to be active be default.
attrib-edit-manage
Select which attributes, from all attributes seen so
far in this outline, to include on the current node.
attrib-edit-scan
Scan the entire outline for attributes so ``attrib-edit-manage``
has the complete list.
attrib-edit-create
Create a new attribute on the current node. If Field: or \@Child modes
are active, they simply remind you how to create an attribute in the log pane.
If the "v.u mode" mode is active, you're prompted for a path for the attribute.
For example::
addressbook First
to store the attribute in v.u['addressbook']['_edit']['First']
As a convenience, entering a path like::
todo metadata created|creator|revised
would create::
v.u.['todo']['metadata']['_edit']['created']
v.u.['todo']['metadata']['_edit']['creator']
v.u.['todo']['metadata']['_edit']['revised']
**Technical details**
See the source for complete documentation for use with other
plugins. Here are some points of interest:
- In addition to ``v.u['addressbook']['_edit']['first']``, paths
like ``v.u['addressbook']['_edit']['_int']['age']`` may be used
to identify type, although currently there's no difference in
the edit widget.
- In the future the plugin may allow other plugins to register
to provide attribute path information, instead of just
scanning for ['_edit'] entries in v.u.
- Currently there's no sorting of the attributes in "v.u mode", which is
a problem for some applications. It's unclear where the
desired order would be stored, without even more repetition
in v.u. When other plugins can register to manipulate the
attribute list each plugin could address this, with unordered
presentation in the absence of the client plugin.
"""
#@-<< docstring >>
# Written by TNB.
from leo.core import leoGlobals as g
from leo.core.leoQt import isQt6, QtConst, QtCore, QtWidgets
from leo.core.leoQt import DialogCode, Orientation
#
# Fail fast, right after all imports.
g.assertUi('qt') # May raise g.UiTypeException, caught by the plugins manager.
#@+others
#@+node:tbrown.20091009210724.10975: ** init
def init():
"""Return True if the plugin has loaded successfully."""
if g.app.gui.guiName() != "qt":
print('attrib_edit.py plugin not loading because gui is not Qt')
return False
g.registerHandler('after-create-leo-frame', onCreate)
g.plugin_signon(__name__)
return True
#@+node:tbrown.20091009210724.10976: ** onCreate
def onCreate(tag, key):
c = key.get('c')
attrib_edit_Controller(c)
#@+node:tbrown.20091103080354.1400: ** class AttributeGetter
class AttributeGetter:
implementations = []
typeMap = {
'_int': int,
'_float': float,
'_bool': bool,
}
@classmethod
def register(cls, subclass):
cls.implementations.append(subclass)
def __init__(self, c):
self.c = c
def name(self):
return "ABSTRACT VIRTUAL BASE CLASS"
def getAttribs(self, v):
raise NotImplementedError
def setAttrib(self, v, path, value):
raise NotImplementedError
def delAttrib(self, v, path):
raise NotImplementedError
def helpCreate(self):
"""either a string telling user how to add an attribute, or
True if the Getter needs to help the user create an attribute"""
return "ABSTRACT VIRTUAL BASE CLASS"
def longDescrip(self, path):
"""give the long description of the attribute on path 'path'.
ASSUMES: path determines name
E.g. attribute named 'count' might be described as 'address.people.count'
"""
raise NotImplementedError
#@+node:tbrown.20091103080354.1402: ** class AttributeGetterUA
class AttributeGetterUA(AttributeGetter):
#@+others
#@+node:tbrown.20091103080354.1409: *3* recSearch
def recSearch(self, d, path, ans):
"""recursive search of tree of dicts for values whose
key path is like [*][*][*]['_edit'][*] or
[*][*][*]['_edit']['_int'][*]
Modifies list ans
"""
for k in d:
if isinstance(d[k], dict):
if k not in ('_edit', '_view'):
self.recSearch(d[k], path + [k], ans)
else:
# k == '_edit' or '_view'
for ek in d[k]:
if ek in self.typeMap:
# ek is '_int' or similar
type_ = self.typeMap[ek]
for ekt in d[k][ek]:
ans.append((self,
ekt, d[k][ek][ekt], tuple(path + ['_edit', ek, ekt]),
type_, k != '_edit'))
else:
ans.append((self,
ek, d[k][ek], tuple(path + ['_edit', ek]), str, k != '_edit'))
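# For example, v.u == {'addressbook': {'_edit': {'First': 'Fred'}}} appends
# (self, 'First', 'Fred', ('addressbook', '_edit', 'First'), str, False) to ans.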
#@+node:tbrown.20091103080354.1410: *3* getAttribs
def getAttribs(self, v):
"""Return a list of tuples describing editable uAs.
(class, name, value, path, type, readonly)
e.g.
(AttributeGetterUA, 'created', '2009-09-23', ('stickynotes','_edit','created'), str, False),
(AttributeGetterUA, 'cars', 2, ('inventory','_edit','_int','cars'), int, False)
Changes should be written back to
v.uA['stickynotes']['_edit']['created'] and
v.uA['inventory']['_edit']['_int']['cars'] respectively
"""
ans = []
d = v.u
self.recSearch(d, [], ans)
return ans
#@+node:tbrown.20091103080354.1430: *3* setAttrib
def setAttrib(self, v, path, value):
"""copy value into dict a on path,
e.g. a['one']['more']['level'] = value
"""
a = v.u
for i in path[:-1]:
a = a.setdefault(i, {})
a[path[-1]] = value
#@+node:tbrown.20091103080354.1438: *3* delAttrib
def delAttrib(self, v, path):
a = v.u
for i in path[:-1]:
try:
a = a[i]
except KeyError:
return
try:
del a[path[-1]]
except KeyError:
pass
#@+node:tbrown.20091103080354.1411: *3* name
def name(self):
return "v.u mode"
#@+node:tbrown.20091103080354.1431: *3* helpCreate
def helpCreate(self):
"""does the Getter need to help the user create an attribute?"""
return True
#@+node:tbrown.20091103080354.1432: *3* createAttrib
def createAttrib(self, v, gui_parent=None):
path, ok = QtWidgets.QInputDialog.getText(gui_parent,
"Enter attribute path",
"Enter path to attribute (space separated words)")
ns = str(path).split()
if not ok or not ns:
g.es("Cancelled")
return
#FIXME type_ = {True: '_view', False: '_edit'}[readonly]
type_ = '_edit'
if '|' in ns[-1]:
nslist = [ns[:-1] + [i.strip()] for i in ns[-1].split('|')]
else:
nslist = [ns]
for ns in nslist:
if type_ not in ns:
ns.insert(-1, type_)
self.setAttrib(v, ns, '')
#FIXME self.attrPaths.add(tuple(ns))
#@+node:tbrown.20091103080354.1433: *3* longDescrip
def longDescrip(self, path):
return '.'.join([j for j in path if j not in ('_edit', '_view')])
#@-others
AttributeGetter.register(AttributeGetterUA)
#@+node:tbrown.20091103080354.1420: ** class AttributeGetterAt
class AttributeGetterAt(AttributeGetter):
#@+others
#@+node:tbrown.20091103080354.1422: *3* getAttribs
def getAttribs(self, v):
"""Return a list of tuples describing editable uAs.
(class, name, value, path, type, readonly)
e.g.
(AttributeGetterUA, 'created', '2009-09-23', ('stickynotes','_edit','created'), str, False),
(AttributeGetterUA, 'cars', 2, ('inventory','_edit','_int','cars'), int, False)
Changes should be written back to
v.uA['stickynotes']['_edit']['created'] and
v.uA['inventory']['_edit']['_int']['cars'] respectively
"""
ans = []
for n in v.children:
if n.h and n.h[0] == '@' and ('A' <= n.h[1] <= 'Z'):
words = n.h[1:].split(None, 1)
if not words:
continue
if len(words) == 1:
words.append('')
ans.append((self, words[0], words[1], words[0], str, False))
return ans
#@+node:tbrown.20091103080354.6237: *3* setAttrib
def setAttrib(self, v, path, value):
for n in v.children:
if n.h[0] == '@' and ('A' <= n.h[1] <= 'Z'):
words = n.h[1:].split(None, 1)
if len(words) == 1:
words.append('')
if words[0] == path:
n.h = "@%s %s" % (path, value)
break
else:
p = self.c.vnode2position(v)
n = p.insertAsLastChild()
n.h = "@%s %s" % (path, value)
#@+node:tbrown.20091103080354.6244: *3* delAttrib
def delAttrib(self, v, path):
for n in v.children:
if n.h[0] == '@' and ('A' <= n.h[1] <= 'Z'):
words = n.h[1:].split(None, 1)
if not words:
continue
if words[0] == path:
p = self.c.vnode2position(n)
p.doDelete()
break
#@+node:tbrown.20091103080354.1423: *3* name
def name(self):
return "@Child"
#@+node:tbrown.20091103080354.1443: *3* helpCreate
def helpCreate(self):
return "Add a child named '@AttributeName'"
#@+node:tbrown.20091103080354.1435: *3* longName
def longDescrip(self, path):
return path
#@-others
AttributeGetter.register(AttributeGetterAt)
#@+node:tbrown.20091103080354.1427: ** class AttributeGetterColon
class AttributeGetterColon(AttributeGetter):
#@+others
#@+node:tbrown.20091103080354.1428: *3* getAttribs
def getAttribs(self, v):
ans = []
parts = v.b.split('\n', 100)
for i in parts[:99]:
if not i or i[0].isspace():
continue
words = i.split(None, 1)
if words and words[0] and words[0][-1] == ':':
if len(words) == 1:
words.append('')
ans.append((self, words[0][:-1], words[1], words[0][:-1], str, False))
return ans
#@+node:tbrown.20091103080354.6246: *3* setAttrib
def setAttrib(self, v, path, value):
parts = v.b.split('\n', 100)
for n, i in enumerate(parts[:99]):
words = i.split(None, 1)
if words and words[0] and words[0][-1] == ':' and words[0][:-1] == path:
parts[n] = "%s: %s" % (path, value)
v.b = '\n'.join(parts)
break
else:
v.b = "%s: %s\n%s" % (path, value, v.b)
#@+node:tbrown.20091103080354.6248: *3* delAttrib
def delAttrib(self, v, path):
parts = v.b.split('\n', 100)
for n, i in enumerate(parts[:99]):
words = i.split(None, 1)
if words and words[0] and words[0][-1] == ':' and words[0][:-1] == path:
del parts[n]
v.b = '\n'.join(parts)
break
#@+node:tbrown.20091103080354.1429: *3* name
def name(self):
return "Field:"
#@+node:tbrown.20091103080354.1441: *3* helpCreate
def helpCreate(self):
return "Add 'AttributeName:' to the text"
#@+node:tbrown.20091103080354.1437: *3* longName
def longDescrip(self, path):
return path
#@-others
AttributeGetter.register(AttributeGetterColon)
#@+node:tbrown.20091028131637.1353: ** class ListDialog
class ListDialog(QtWidgets.QDialog):
#@+others
#@+node:tbrown.20091028131637.1354: *3* __init__ (attrib_edit.py)
def __init__(self, parent, title, text, entries):
self.entries = entries
super().__init__(parent)
vbox = QtWidgets.QVBoxLayout()
sa = QtWidgets.QScrollArea()
salo = QtWidgets.QVBoxLayout()
frame = QtWidgets.QFrame()
frame.setLayout(salo)
self.buttons = []
for entry in entries:
hbox = QtWidgets.QHBoxLayout()
cb = QtWidgets.QCheckBox(entry[0])
self.buttons.append(cb)
if entry[1]:
cb.setChecked(True if isQt6 else QtConst.Checked)
hbox.addWidget(cb)
salo.addLayout(hbox)
sa.setWidget(frame)
vbox.addWidget(sa)
hbox = QtWidgets.QHBoxLayout()
ok = QtWidgets.QPushButton("Ok")
cancel = QtWidgets.QPushButton("Cancel")
ok.clicked.connect(self.writeBack)
cancel.clicked.connect(self.reject)
# QtCore.QObject.connect(ok, QtCore.SIGNAL('clicked(bool)'), self.writeBack)
# QtCore.QObject.connect(cancel, QtCore.SIGNAL('clicked(bool)'), self.reject)
hbox.addWidget(ok)
hbox.addWidget(cancel)
vbox.addLayout(hbox)
self.setLayout(vbox)
#@+node:tbrown.20091028131637.1359: *3* writeBack
def writeBack(self, event=None):
for n, i in enumerate(self.buttons):
self.entries[n][1] = (i.isChecked())
self.accept()
#@-others
#@+node:tbrown.20091010211613.5257: ** class editWatcher
class editWatcher:
"""class to supply widget for editing attribute and handle
its textChanged signal"""
def __init__(self, c, v, class_, name, value, path, type_):
"""v - node whose attribute we edit
name - name of edited attribute
value - initial value of edited attribute
path - dictionary key path to attribute in v.u
type_ - attribute type
"""
self.c = c
self.v = v
self.class_ = class_
self.name = name
self.value = value
self.path = path
self.type_ = type_
self._widget = None
def widget(self):
"""return widget for editing this attribute"""
if not self._widget:
self._widget = w = QtWidgets.QLineEdit(str(self.value))
w.textChanged.connect(self.updateValue)
self._widget.focusOutEvent = self.lostFocus
# see lostFocus()
return self._widget
def updateValue(self, newValue):
"""copy value from widget to v.u"""
self.class_.setAttrib(self.v, self.path, self.type_(newValue))
self.v.setDirty()
def lostFocus(self, event):
"""Can activate this in in widget(), but it stops tabbing through
the attributes - unless we can check that none of our siblings
has focus..."""
sibs = self._widget.parent().findChildren(QtWidgets.QLineEdit)
for i in sibs:
if i.hasFocus():
break
else:
self.c.redraw()
#X def setValue(a, path, value):
#X """copy value into dict a on path,
#X e.g. a['one']['more']['level'] = value
#X """
#X for i in path[:-1]:
#X a = a.setdefault(i, {})
#X a[path[-1]] = value
#@+node:tbrown.20091009210724.10979: ** class attrib_edit_Controller
class attrib_edit_Controller:
"""A per-commander class that manages attribute editing."""
#@+others
#@+node:tbrown.20091009210724.10981: *3* __init__ & reloadSettings (attrib_edit_Controller)
def __init__(self, c):
self.c = c
c.attribEditor = self
self.pname = "_attrib_edit_frame" # used to tag out panel
self.reloadSettings()
self.attrPaths = set() # set of tuples (getter-class, path)
self.handlers = [
('select3', self.updateEditor),
]
for i in self.handlers:
g.registerHandler(i[0], i[1])
# 'body' or 'tab' mode
# self.guiMode = c.config.getString('attrib-edit-placement') or 'tab'
self.guiMode = 'tab'
        # body mode is not compatible with nested_splitter; it causes a hard crash
if self.guiMode == 'body':
self.holder = QtWidgets.QSplitter(Orientation.Vertical)
self.holder.setMinimumWidth(300)
parent = c.frame.top.leo_body_frame.parent()
self.holder.addWidget(c.frame.top.leo_body_frame)
parent.addWidget(self.holder)
self.parent = self.holder
elif self.guiMode == 'tab':
self.parent = QtWidgets.QFrame()
self.holder = QtWidgets.QHBoxLayout()
self.parent.setLayout(self.holder)
c.frame.log.createTab('Attribs', widget=self.parent)
def reloadSettings(self):
c = self.c
c.registerReloadSettings(self)
active = c.config.getData('attrib_edit_active_modes') or []
self.getsetters = []
for i in AttributeGetter.implementations:
s = i(c)
self.getsetters.append([s, (s.name() in active)])
if not active:
self.getsetters[0][1] = True # turn on the first one
#@+node:tbrown.20091009210724.10983: *3* __del__
def __del__(self):
for i in self.handlers:
g.unregisterHandler(i[0], i[1])
#@+node:tbrown.20091009210724.11210: *3* initForm
def initForm(self):
"""set up self.form, the blank form layout before adding edit widgets"""
self.editors = []
w = self.holder
for i in w.parent().findChildren(QtCore.QObject):
if i.objectName() == self.pname:
i.hide()
i.deleteLater()
pnl = QtWidgets.QFrame()
pnl.setObjectName(self.pname)
self.form = QtWidgets.QFormLayout()
self.form.setVerticalSpacing(0)
pnl.setLayout(self.form)
pnl.setAutoFillBackground(True)
w.addWidget(pnl)
#@+node:tbrown.20091009210724.11047: *3* updateEditor
def updateEditor(self, tag, k):
"""update edit panel when new node selected"""
if k['c'] != self.c:
return # not our problem
self.updateEditorInt()
#@+node:tbrown.20091028100922.1493: *3* updateEditorInt
def updateEditorInt(self):
c = self.c
self.initForm()
for attr in self.getAttribs():
class_, name, value, path, type_, readonly = attr
if readonly:
self.form.addRow(QtWidgets.QLabel(name), QtWidgets.QLabel(str(value)))
else:
editor = editWatcher(c, c.currentPosition().v, class_, name, value, path, type_)
self.editors.append(editor)
self.form.addRow(QtWidgets.QLabel(name), editor.widget())
#@+node:tbrown.20091103080354.1405: *3* recSearch (not used)
# def JUNKrecSearch(self, d, path, ans):
# """recursive search of tree of dicts for values whose
# key path is like [*][*][*]['_edit'][*] or
# [*][*][*]['_edit']['_int'][*]
# Modifies list ans
# """
# for k in d:
# if isinstance(d[k], dict):
# if k not in ('_edit', '_view'):
# self.recSearch(d[k], path+[k], ans)
# else:
# # k == '_edit' or '_view'
# for ek in d[k]:
# if ek in self.typeMap:
# # ek is '_int' or similar
# type_ = self.typeMap[ek]
# for ekt in d[k][ek]:
# ans.append((ekt, d[k][ek][ekt], tuple(path+['_edit',ek,ekt]),
# type_, k != '_edit'))
# else:
# ans.append((ek, d[k][ek], tuple(path+['_edit',ek]), str, k != '_edit'))
#@+node:tbrown.20091103080354.1406: *3* getAttribs
def getAttribs(self, v=None):
"""Return a list of tuples describing editable uAs.
(class, name, value, path, type, readonly)
e.g.
(class, 'created', '2009-09-23', ('stickynotes','_edit','created'), str, False),
(class, 'cars', 2, ('inventory','_edit','_int','cars'), int, False)
Changes should be written back to
v.uA['stickynotes']['_edit']['created'] and
v.uA['inventory']['_edit']['_int']['cars'] respectively
"""
ans = []
if not v:
v = self.c.currentPosition().v
for getter, isOn in self.getsetters:
if not isOn:
continue
ans.extend(getter.getAttribs(v))
for ns in ans:
self.attrPaths.add((ns[0], ns[1], ns[3])) # class, name, path
return ans
#@+node:tbrown.20091029101116.1413: *3* addAttrib
def addAttrib(self, attrib):
attrib[0].setAttrib(self.c.currentPosition().v, attrib[2], '')
#@+node:tbrown.20091029101116.1414: *3* delAttrib
def delAttrib(self, attrib):
attrib[0].delAttrib(self.c.currentPosition().v, attrib[2])
#@+node:tbrown.20091029101116.1424: *3* scanAttribs
def scanAttribs(self):
"""scan all of c for attrbutes"""
for v in self.c.all_unique_nodes():
self.getAttribs(v) # updates internal list of attribs
g.es("%d attributes found" % len(self.attrPaths))
#@+node:tbrown.20091011151836.14788: *3* createAttrib
def createAttrib(self, event=None, readonly=False):
ans = []
for getter, isOn in self.getsetters:
if not isOn:
continue
if getter.helpCreate() is True:
ans.append(getter)
else:
g.es("For '%s' attributes:\n %s" % (getter.name(), getter.helpCreate()))
if len(ans) > 1:
            g.error('Error: more than one attribute type (%s) active' %
', '.join([i.name() for i in ans]))
elif ans:
ans[0].createAttrib(self.c.currentPosition().v, gui_parent=self.parent)
self.updateEditorInt()
self.c.currentPosition().v.setDirty()
self.c.redraw()
#@+node:tbrown.20091028131637.1358: *3* manageAttrib
def manageAttrib(self):
attribs = [(i[0], i[1], i[3]) for i in self.getAttribs()]
dat = []
for attr in self.attrPaths:
txt = attr[0].longDescrip(attr[2])
active = attr in attribs
dat.append([txt, active, attr])
if not dat:
g.es('No attributes seen (yet)')
return
dat.sort(key=lambda x: x[0])
res = ListDialog(self.parent, "Enter attribute path",
"Enter path to attribute (space separated words)", dat)
res.exec_()
if res.result() == DialogCode.Rejected:
return
# check for deletions
for i in dat:
if i[2] in attribs and not i[1]:
res = QtWidgets.QMessageBox(QtWidgets.QMessageBox.Question,
"Really delete attributes?", "Really delete attributes?",
QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Cancel, self.parent)
if res.exec_() == QtWidgets.QMessageBox.Cancel:
return
break
# apply changes
for i in dat:
if i[2] in attribs and not i[1]:
self.delAttrib(i[2])
elif i[2] not in attribs and i[1]:
self.addAttrib(i[2])
self.updateEditorInt()
self.c.redraw()
#@+node:tbrown.20091103080354.1415: *3* manageModes
def manageModes(self):
modes = [[i[0].name(), i[1]] for i in self.getsetters]
res = ListDialog(self.parent, "Enter attribute path",
"Enter path to attribute (space separated words)",
modes)
res.exec_()
if res.result() == DialogCode.Rejected:
return
for n, i in enumerate(modes):
self.getsetters[n][1] = i[1]
self.updateEditorInt()
#@-others
#@+node:tbrown.20091029101116.1415: ** cmd_Modes (attrib_edit_Controller)
@g.command('attrib-edit-modes')
def cmd_Modes(event):
c = event.get('c')
c.attribEditor.manageModes()
#@+node:tbrown.20091103080354.1413: ** cmd_Manage (attrib_edit_Controller)
@g.command('attrib-edit-manage')
def cmd_Manage(event):
c = event.get('c')
c.attribEditor.manageAttrib()
#@+node:tbrown.20091029101116.1419: ** cmd_Create (attrib_edit_Controller)
@g.command('attrib-edit-create')
def cmd_Create(event):
c = event.get('c')
c.attribEditor.createAttrib()
#@+node:tbrown.20091029101116.1421: ** cmd_CreateReadonly (attrib_edit_Controller)
def Xcmd_CreateReadonly(c):
c.attribEditor.createAttrib(readonly=True)
#@+node:tbrown.20091029101116.1426: ** cmd_Scan (attrib_edit_Controller)
@g.command('attrib-edit-scan')
def cmd_Scan(event):
c = event.get('c')
c.attribEditor.scanAttribs()
#@-others
#@@language python
#@@tabwidth -4
#@-leo
|
the-stack_0_12057 | """
This file offers the methods to automatically retrieve the graph Listeria monocytogenes Scott.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def ListeriaMonocytogenesScott(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Listeria monocytogenes Scott graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.0
- homology.v11.5
- physical.links.v11.0
- physical.links.v11.5
- links.v11.0
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of Listeria monocytogenes Scott graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="ListeriaMonocytogenesScott",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
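
# A minimal usage sketch (the import path below follows the repository layout
# implied by the relative imports above, but is illustrative and not verified):
#
#     from ensmallen.datasets.string import ListeriaMonocytogenesScott
#     graph = ListeriaMonocytogenesScott(directed=False, version="links.v11.5")
#     print(graph)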
|
the-stack_0_12060 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Compute (upsampled) Nifti label image from bundle and centroid.
Each voxel will have the label of its nearest centroid point.
"""
import argparse
import logging
import nibabel as nib
import numpy as np
from scilpy.io.streamlines import load_tractogram_with_reference
from scilpy.io.utils import (add_overwrite_arg,
add_reference_arg,
assert_inputs_exist,
assert_outputs_exist,
add_verbose_arg)
from scilpy.tractanalysis.streamlines_metrics import compute_tract_counts_map
from scilpy.tractanalysis.distance_to_centroid import min_dist_to_centroid
def _build_arg_parser():
p = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
p.add_argument('in_bundle',
help='Fiber bundle file.')
p.add_argument('in_centroid',
help='Centroid streamline corresponding to bundle.')
p.add_argument('output_map',
help='Nifti image with corresponding labels.')
p.add_argument('--upsample',
type=float, default=2,
help='Upsample reference grid by this factor. '
'[%(default)s]')
add_reference_arg(p)
add_overwrite_arg(p)
add_verbose_arg(p)
return p
def main():
parser = _build_arg_parser()
args = parser.parse_args()
assert_inputs_exist(parser,
[args.in_bundle, args.in_centroid],
optional=args.reference)
assert_outputs_exist(parser, args, args.output_map)
sft_bundle = load_tractogram_with_reference(parser, args, args.in_bundle)
sft_centroid = load_tractogram_with_reference(parser, args,
args.in_centroid)
if not len(sft_bundle.streamlines):
logging.error('Empty bundle file {}. '
'Skipping'.format(args.in_bundle))
raise ValueError
if not len(sft_centroid.streamlines):
logging.error('Centroid file {} should contain one streamline. '
'Skipping'.format(args.in_centroid))
raise ValueError
sft_bundle.to_vox()
bundle_streamlines_vox = sft_bundle.streamlines
bundle_streamlines_vox._data *= args.upsample
sft_centroid.to_vox()
centroid_streamlines_vox = sft_centroid.streamlines
centroid_streamlines_vox._data *= args.upsample
upsampled_shape = [s * args.upsample for s in sft_bundle.dimensions]
tdi_mask = compute_tract_counts_map(bundle_streamlines_vox,
upsampled_shape) > 0
tdi_mask_nzr = np.nonzero(tdi_mask)
tdi_mask_nzr_ind = np.transpose(tdi_mask_nzr)
min_dist_ind, _ = min_dist_to_centroid(tdi_mask_nzr_ind,
centroid_streamlines_vox[0])
# Save the (upscaled) labels mask
labels_mask = np.zeros(tdi_mask.shape)
labels_mask[tdi_mask_nzr] = min_dist_ind + 1 # 0 is background value
rescaled_affine = sft_bundle.affine
rescaled_affine[:3, :3] /= args.upsample
labels_img = nib.Nifti1Image(labels_mask, rescaled_affine)
upsampled_spacing = sft_bundle.voxel_sizes / args.upsample
labels_img.header.set_zooms(upsampled_spacing)
nib.save(labels_img, args.output_map)
if __name__ == '__main__':
main()
|
the-stack_0_12061 | """
Import as:
import im.airflow.devops.dags.im_infra as imaddimin
"""
import os
import airflow
from airflow import DAG
from airflow.operators.bash import BashOperator
P1_AIRFLOW_WORKER_DB_LOADER_QUEUE = os.environ[
"P1_AIRFLOW_WORKER_DB_LOADER_QUEUE"
]
STAGE = os.environ["STAGE"]
SEND_EMAIL = STAGE not in ["LOCAL", "TEST"]
default_args = {
"owner": "airflow",
"start_date": airflow.utils.dates.days_ago(1),
"email": [],
"email_on_failure": SEND_EMAIL,
"email_on_retry": SEND_EMAIL,
}
dag = DAG(
"IM_INFRA",
default_args=default_args,
schedule_interval=None,
max_active_runs=1,
)
# Test task: run the IM worker entrypoint to convert Kibot S3 data to SQL.
test = BashOperator(
task_id="test",
bash_command='bash -c "/app/im/devops/docker_build/entrypoints/entrypoint_worker.sh '
"im/app/transform/convert_s3_to_sql.py "
"--provider kibot "
"--symbol AAPL "
"--frequency T "
"--contract_type continuous "
"--asset_class stocks "
'--exchange NYSE"',
dag=dag,
queue=P1_AIRFLOW_WORKER_DB_LOADER_QUEUE,
)
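
# Sketch of how a follow-up task could be wired into this DAG (the second task
# below is hypothetical and not part of the original pipeline):
#
#     cleanup = BashOperator(
#         task_id="cleanup",
#         bash_command="echo done",
#         dag=dag,
#         queue=P1_AIRFLOW_WORKER_DB_LOADER_QUEUE,
#     )
#     test >> cleanup  # run cleanup only after the conversion task succeeds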
|
the-stack_0_12067 | from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
from tensorflow.contrib.layers.python import layers as tf_layers
from rsa import *
from cka import *
import matplotlib.pyplot as plt
# heiner activation maximization filters early layers
# based on https://github.com/zonghua94/mnist/blob/master/mnist_cnn.py
def compute_accuracy(v_x, v_y):
global prediction
y_pre = sess.run(prediction, feed_dict={x: v_x})
correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(v_y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
result = sess.run(accuracy, feed_dict={x: v_x, y: v_y})
return result
def conv_block(inp, cweight, bweight, reuse, scope, activation=tf.nn.relu, max_pool_pad='VALID', residual=False):
""" Perform, conv, batch norm, nonlinearity, and max pool """
stride, no_stride = [1,2,2,1], [1,1,1,1]
conv_output = tf.nn.conv2d(inp, cweight, stride, 'SAME') + bweight
normed = tf_layers.batch_norm(conv_output, activation_fn=activation, reuse=reuse, scope=scope)
return normed
def reshape_elems_of_list(layers, shape = (10000, -1)):
reshaped_layers = []
for layer in layers:
layer = np.reshape(layer, shape)
reshaped_layers.append(layer)
return reshaped_layers
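
# Quick sanity check for the similarity measure used below (kernel_CKA comes
# from the local cka module imported above, so exact values depend on that
# implementation): identical representations should score close to 1.0, while
# an unrelated random matrix should score clearly lower.
#
#     a = np.random.rand(100, 64)
#     print(kernel_CKA(a, a))                        # expected ~1.0
#     print(kernel_CKA(a, np.random.rand(100, 64)))  # expected noticeably lower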
# load mnist data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])
keep_prob = tf.placeholder(tf.float32)
# reshape(data, [-1, height, width, channels]); channels=1 for grayscale (black and white) images, 3 for RGB
x_image = tf.reshape(x, [-1, 28, 28, 1])
weights = {}
convolution = True
if convolution:
dtype = tf.float32
conv_initializer = tf.contrib.layers.xavier_initializer_conv2d(dtype=dtype)
weights['conv1'] = tf.get_variable('conv1', [3, 3, 1, 64], initializer=conv_initializer, dtype=dtype)
weights['b1'] = tf.Variable(tf.zeros([64]))
weights['conv2'] = tf.get_variable('conv2', [3, 3, 64, 64], initializer=conv_initializer, dtype=dtype)
weights['b2'] = tf.Variable(tf.zeros([64]))
weights['conv3'] = tf.get_variable('conv3', [3, 3, 64, 64], initializer=conv_initializer, dtype=dtype)
weights['b3'] = tf.Variable(tf.zeros([64]))
weights['conv4'] = tf.get_variable('conv4', [3, 3, 64, 64], initializer=conv_initializer, dtype=dtype)
weights['b4'] = tf.Variable(tf.zeros([64]))
weights['w5'] = tf.Variable(tf.random_normal([64, 10]), name='w5')
weights['b5'] = tf.Variable(tf.zeros([10]), name='b5')
tvars = tf.trainable_variables()
scope = ""
hidden1 = conv_block(x_image, weights['conv1'], weights['b1'], False, scope + '0')
hidden2 = conv_block(hidden1, weights['conv2'], weights['b2'], False, scope + '1')
hidden3 = conv_block(hidden2, weights['conv3'], weights['b3'], False, scope + '2')
hidden4 = conv_block(hidden3, weights['conv4'], weights['b4'], False, scope + '3')
hidden4 = tf.reduce_mean(hidden4, [1, 2])
out = tf.matmul(hidden4, weights['w5']) + weights['b5']
prediction = tf.nn.softmax(out)
tvars = tf.trainable_variables()
layer_names = ["Pooling layer 1", "Pooling layer 2", "Pooling layer 3", "Pooling layer 4", "Logits/Head"]
else:
weights = {}
dims = [200, 100, 50, 20]
weights['w1'] = tf.Variable(tf.truncated_normal([784, dims[0]], stddev=0.01))
weights['b1'] = tf.Variable(tf.zeros(dims[0]))
for i, dim in enumerate(dims):
if i == len(dims) -1:
break
weights['w'+str(i+2)] = tf.Variable(tf.truncated_normal([dims[i], dims[i+1]], stddev=0.01))
weights['b'+str(i+2)] = tf.Variable(tf.zeros(dims[i+1]))
weights['w5'] = tf.Variable(tf.random_normal([dims[-1], 10]), name='w5')
weights['b5'] = tf.Variable(tf.zeros([10]), name='b5')
x_image = tf.reshape(x_image, [-1, 784])
hidden1 = tf.nn.relu(tf_layers.batch_norm(tf.matmul(x_image, weights['w1']) + weights['b1']))
hidden2 = tf.nn.relu(tf_layers.batch_norm(tf.matmul(hidden1, weights['w2']) + weights['b2']))
hidden3 = tf.nn.relu(tf_layers.batch_norm(tf.matmul(hidden2, weights['w3']) + weights['b3']))
hidden4 = tf.nn.relu(tf_layers.batch_norm(tf.matmul(hidden3, weights['w4']) + weights['b4']))
out = tf.matmul(hidden4, weights['w5']) + weights['b5']
prediction = tf.nn.softmax(out)
layer_names = [f"Hidden Layer {i+1} FC {dim}" for i, dim in enumerate(dims)]
layer_names.append("Logits/Head")
# calculate the loss
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y * tf.log(prediction), reduction_indices=[1]))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)#, var_list=g_vars)
N = 100
test_images = mnist.test.images[:N]
test_labels = mnist.test.labels[:N]
for sim_measure in ["cka", "euclidean"]:
# init session
sess = tf.Session()
sess.run(tf.global_variables_initializer())
start = sess.run([hidden1, hidden2, hidden3, hidden4, out],
feed_dict={x: test_images, y: test_labels})
prev = start
similarities = []
similarities_prev = []
steps = []
all_representations = []
labels = []
colors = []
for i in range(200):
batch_x, batch_y = mnist.train.next_batch(100)
sess.run(train_step, feed_dict={x: batch_x, y: batch_y, keep_prob: 0.5})
if i % 5 == 0:
steps.append(i)
representations = sess.run([hidden1, hidden2, hidden3, hidden4, out],
feed_dict={x: test_images, y: test_labels})
labels = labels + [f"{i} ({j+1})" for j in range(5)]
colors = colors + list(range(5))
all_representations = all_representations + [r.reshape((N,-1)) for r in representations]
if sim_measure == "cka":
similarities_of_step = [kernel_CKA(np.reshape(s, (N, -1)), np.reshape(r, (N, -1))) for s, r in zip(start, representations)]
similarities_of_step_prev = [kernel_CKA(np.reshape(s, (N, -1)), np.reshape(r, (N, -1))) for s, r in zip(prev, representations)]
else:
print(np.mean(start[0]), np.mean(representations[0]))
similarities_of_step = [rsa(np.array([np.reshape(s, (N, -1)), np.reshape(r, (N, -1))]), sim_measure) for
s, r in zip(start, representations)]
similarities_of_step_prev = [rsa(np.array([np.reshape(s, (N, -1)), np.reshape(r, (N, -1))]), sim_measure)
for s, r in zip(prev, representations)]
similarities.append(similarities_of_step)
similarities_prev.append(similarities_of_step_prev)
prev = representations.copy()
print(i, compute_accuracy(mnist.test.images, mnist.test.labels))
plot_rsa(all_representations, labels, colors)
similarities = np.array(similarities).transpose()
similarities_prev = np.array(similarities_prev).transpose()
fig = plt.figure(figsize=(8, 2.5))
if sim_measure == "cka":
plt.title(f"CKA similarity before and after training")
plt.ylabel("Similarity")
else:
plt.title(f"RSA ({sim_measure}) dissimilarity before and after training")
plt.ylabel("Dissimilarity")
plt.xlabel("Number of training steps")
#plt.yscale('symlog', linthreshy=0.015)
plt.ylim(-0.05, 1.05)
for i in range(len(similarities)):
plt.plot(steps, similarities[i], label=layer_names[i])
#plt.plot(range(len(similarities_prev[i])), similarities_prev[i], label=layer_names[i]+" to prev")
plt.legend(bbox_to_anchor=(1.04, 0.5), loc="center left", borderaxespad=0)
plt.show()
|
the-stack_0_12069 | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import wrappers_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1.schema.predict.prediction",
manifest={"VideoActionRecognitionPredictionResult",},
)
class VideoActionRecognitionPredictionResult(proto.Message):
r"""Prediction output format for Video Action Recognition.
Attributes:
id (str):
The resource ID of the AnnotationSpec that
had been identified.
display_name (str):
The display name of the AnnotationSpec that
had been identified.
time_segment_start (google.protobuf.duration_pb2.Duration):
The beginning, inclusive, of the video's time
segment in which the AnnotationSpec has been
identified. Expressed as a number of seconds as
measured from the start of the video, with
fractions up to a microsecond precision, and
with "s" appended at the end.
time_segment_end (google.protobuf.duration_pb2.Duration):
The end, exclusive, of the video's time
segment in which the AnnotationSpec has been
identified. Expressed as a number of seconds as
measured from the start of the video, with
fractions up to a microsecond precision, and
with "s" appended at the end.
confidence (google.protobuf.wrappers_pb2.FloatValue):
The Model's confidence in correction of this
prediction, higher value means higher
confidence.
"""
id = proto.Field(proto.STRING, number=1,)
display_name = proto.Field(proto.STRING, number=2,)
time_segment_start = proto.Field(
proto.MESSAGE, number=4, message=duration_pb2.Duration,
)
time_segment_end = proto.Field(
proto.MESSAGE, number=5, message=duration_pb2.Duration,
)
confidence = proto.Field(proto.MESSAGE, number=6, message=wrappers_pb2.FloatValue,)
__all__ = tuple(sorted(__protobuf__.manifest))
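
# Illustrative construction with proto-plus keyword arguments (all field values
# below are made up; the Duration/FloatValue wrappers come from the imports above):
#
#     result = VideoActionRecognitionPredictionResult(
#         id="1234",
#         display_name="jumping",
#         time_segment_start=duration_pb2.Duration(seconds=2),
#         time_segment_end=duration_pb2.Duration(seconds=4),
#         confidence=wrappers_pb2.FloatValue(value=0.87),
#     )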
|
the-stack_0_12072 | from typing import List
from matplotlib import pyplot as plt
import numpy as np
from scipy import stats
from scipy.optimize import curve_fit
def nice_string_output(
names: List[str], values: List[str], extra_spacing: int = 0,
):
max_values = len(max(values, key=len))
max_names = len(max(names, key=len))
string = ""
for name, value in zip(names, values):
string += "{0:s} {1:>{spacing}} \n".format(
name,
value,
spacing=extra_spacing + max_values + max_names - len(name),
)
return string[:-2]
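
# For example (arbitrary values), nice_string_output(["mu", "sigma"], ["3.141", "0.10"])
# returns the two rows with the values right-aligned into a common column, which is
# handy for the stats boxes drawn in plot_gaussian below.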
def plot_gaussian(
data, ax: plt.Axes, nBins=100, textpos="l", legend=False, short_text=False
):
# make sure our data is an ndarray
if type(data) == list:
data = np.array(data)
### FITTING WITH A GAUSSIAN
def func_gauss(x, N, mu, sigma):
return N * stats.norm.pdf(x, mu, sigma)
counts, bin_edges = np.histogram(data, bins=nBins)
bin_centers = (bin_edges[1:] + bin_edges[:-1]) / 2
s_counts = np.sqrt(counts)
x = bin_centers[counts > 0]
y = counts[counts > 0]
sy = s_counts[counts > 0]
popt_gauss, pcov_gauss = curve_fit(
func_gauss, x, y, p0=[1, data.mean(), data.std()]
)
y_func = func_gauss(x, *popt_gauss)
pKS = stats.ks_2samp(y, y_func)
pKS_g1, pKS_g2 = pKS[0], pKS[1]
# print('LOOK! \n \n \n pKS is {} \n \n \n '.format(pKS_g2))
chi2_gauss = sum((y - y_func) ** 2 / sy ** 2)
NDOF_gauss = nBins - 3
prob_gauss = stats.chi2.sf(chi2_gauss, NDOF_gauss)
if short_text == True:
namesl = [
"Gauss_N",
"Gauss_Mu",
"Gauss_Sigma",
]
valuesl = [
"{:.3f} +/- {:.3f}".format(val, unc)
for val, unc in zip(popt_gauss, np.diagonal(pcov_gauss))
]
del namesl[0] # remove gauss n
del valuesl[0]
else:
namesl = [
"Gauss_N",
"Gauss_Mu",
"Gauss_Sigma",
"KS stat",
"KS_pval",
"Chi2 / NDOF",
"Prob",
]
valuesl = (
[
"{:.3f} +/- {:.3f}".format(val, unc)
for val, unc in zip(popt_gauss, np.diagonal(pcov_gauss))
]
+ ["{:.3f}".format(pKS_g1)]
+ ["{:.3f}".format(pKS_g2)]
+ ["{:.3f} / {}".format(chi2_gauss, NDOF_gauss)]
+ ["{:.3f}".format(prob_gauss)]
)
ax.errorbar(x, y, yerr=sy, xerr=0, fmt=".", elinewidth=1)
ax.plot(x, y_func, "--", label="Gaussian")
if textpos == "l":
ax.text(
0.02,
0.98,
nice_string_output(namesl, valuesl),
family="monospace",
transform=ax.transAxes,
fontsize=10,
verticalalignment="top",
alpha=0.5,
)
elif textpos == "r":
ax.text(
0.6,
0.98,
nice_string_output(namesl, valuesl),
family="monospace",
transform=ax.transAxes,
fontsize=10,
verticalalignment="top",
alpha=0.5,
)
if legend:
ax.legend(loc="center left")
return ax
if __name__ == '__main__':
samples = stats.expon.rvs(5.7, size=10000)
# samples = stats.poisson.rvs(mu=2, size=10000)
# samples = stats.cauchy.rvs(size=10000)
sums = np.zeros(1000)
for si in range(len(sums)):
sums[si] = np.mean(np.random.choice(samples, size=10))
fig, ax = plt.subplots()
plot_gaussian(sums, ax)
plt.show() |
the-stack_0_12073 | # -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
]
# TODO: Please Read!
# Uncomment the below if you use native CircuitPython modules such as
# digitalio, micropython and busio. List the modules you use. Without it, the
# autodoc module docs will fail to generate with a warning.
# autodoc_mock_imports = ["digitalio", "busio"]
intersphinx_mapping = {'python': ('https://docs.python.org/3.4', None),'CircuitPython': ('https://circuitpython.readthedocs.io/en/latest/', None)}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'nonblocking_timer Library'
copyright = u'2017 Michael Schneider'
author = u'Michael Schneider'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '.env', 'CODE_OF_CONDUCT.md']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
#
add_function_parentheses = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# If this is True, todo emits a warning for each TODO entries. The default is False.
todo_emit_warnings = True
napoleon_numpy_docstring = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), '.']
except:
html_theme = 'default'
html_theme_path = ['.']
else:
html_theme_path = ['.']
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
html_favicon = '_static/favicon.ico'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Nonblocking_timerLibrarydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'nonblocking_timerLibrary.tex', u'nonblocking_timer Library Documentation',
author, 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'nonblocking_timerlibrary', u'nonblocking_timer Library Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'nonblocking_timerLibrary', u'nonblocking_timer Library Documentation',
author, 'nonblocking_timerLibrary', 'One line description of project.',
'Miscellaneous'),
]
|
the-stack_0_12074 | # coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2016-2019 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""Basic tests for PlotWidget"""
__authors__ = ["T. Vincent"]
__license__ = "MIT"
__date__ = "03/01/2019"
import unittest
import logging
import numpy
import sys
from silx.utils.testutils import ParametricTestCase, parameterize
from silx.gui.utils.testutils import SignalListener
from silx.gui.utils.testutils import TestCaseQt
from silx.test.utils import test_options
from silx.gui import qt
from silx.gui.plot import PlotWidget
from silx.gui.plot.items.curve import CurveStyle
from silx.gui.colors import Colormap
from .utils import PlotWidgetTestCase
SIZE = 1024
"""Size of the test image"""
DATA_2D = numpy.arange(SIZE ** 2).reshape(SIZE, SIZE)
"""Image data set"""
logger = logging.getLogger(__name__)
class TestSpecialBackend(PlotWidgetTestCase, ParametricTestCase):
def __init__(self, methodName='runTest', backend=None):
TestCaseQt.__init__(self, methodName=methodName)
self.__backend = backend
def _createPlot(self):
return PlotWidget(backend=self.__backend)
def testPlot(self):
self.assertIsNotNone(self.plot)
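
# TestSpecialBackend takes a ``backend`` constructor argument, so it is meant to be
# instantiated through ``parameterize`` (imported above). A sketch, assuming
# parameterize forwards keyword arguments to the TestCase constructor as in
# silx.utils.testutils:
#
#     suite = unittest.TestSuite()
#     suite.addTest(parameterize(TestSpecialBackend, backend="mpl"))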
class TestPlotWidget(PlotWidgetTestCase, ParametricTestCase):
"""Basic tests for PlotWidget"""
def testShow(self):
"""Most basic test"""
pass
def testSetTitleLabels(self):
"""Set title and axes labels"""
title, xlabel, ylabel = 'the title', 'x label', 'y label'
self.plot.setGraphTitle(title)
self.plot.getXAxis().setLabel(xlabel)
self.plot.getYAxis().setLabel(ylabel)
self.qapp.processEvents()
self.assertEqual(self.plot.getGraphTitle(), title)
self.assertEqual(self.plot.getXAxis().getLabel(), xlabel)
self.assertEqual(self.plot.getYAxis().getLabel(), ylabel)
def _checkLimits(self,
expectedXLim=None,
expectedYLim=None,
expectedRatio=None):
"""Assert that limits are as expected"""
xlim = self.plot.getXAxis().getLimits()
ylim = self.plot.getYAxis().getLimits()
ratio = abs(xlim[1] - xlim[0]) / abs(ylim[1] - ylim[0])
if expectedXLim is not None:
self.assertEqual(expectedXLim, xlim)
if expectedYLim is not None:
self.assertEqual(expectedYLim, ylim)
if expectedRatio is not None:
self.assertTrue(
numpy.allclose(expectedRatio, ratio, atol=0.01))
def testChangeLimitsWithAspectRatio(self):
self.plot.setKeepDataAspectRatio()
self.qapp.processEvents()
xlim = self.plot.getXAxis().getLimits()
ylim = self.plot.getYAxis().getLimits()
defaultRatio = abs(xlim[1] - xlim[0]) / abs(ylim[1] - ylim[0])
self.plot.getXAxis().setLimits(1., 10.)
self._checkLimits(expectedXLim=(1., 10.), expectedRatio=defaultRatio)
self.qapp.processEvents()
self._checkLimits(expectedXLim=(1., 10.), expectedRatio=defaultRatio)
self.plot.getYAxis().setLimits(1., 10.)
self._checkLimits(expectedYLim=(1., 10.), expectedRatio=defaultRatio)
self.qapp.processEvents()
self._checkLimits(expectedYLim=(1., 10.), expectedRatio=defaultRatio)
def testResizeWidget(self):
"""Test resizing the widget and receiving limitsChanged events"""
self.plot.resize(200, 200)
self.qapp.processEvents()
self.qWait(100)
xlim = self.plot.getXAxis().getLimits()
ylim = self.plot.getYAxis().getLimits()
listener = SignalListener()
self.plot.getXAxis().sigLimitsChanged.connect(listener.partial('x'))
self.plot.getYAxis().sigLimitsChanged.connect(listener.partial('y'))
# Resize without aspect ratio
self.plot.resize(200, 300)
self.qapp.processEvents()
self.qWait(100)
self._checkLimits(expectedXLim=xlim, expectedYLim=ylim)
self.assertEqual(listener.callCount(), 0)
# Resize with aspect ratio
self.plot.setKeepDataAspectRatio(True)
self.qapp.processEvents()
self.qWait(1000)
listener.clear() # Clean-up received signal
self.plot.resize(200, 200)
self.qapp.processEvents()
self.qWait(100)
self.assertNotEqual(listener.callCount(), 0)
def testAddRemoveItemSignals(self):
"""Test sigItemAdded and sigItemAboutToBeRemoved"""
listener = SignalListener()
self.plot.sigItemAdded.connect(listener.partial('add'))
self.plot.sigItemAboutToBeRemoved.connect(listener.partial('remove'))
self.plot.addCurve((1, 2, 3), (3, 2, 1), legend='curve')
self.assertEqual(listener.callCount(), 1)
curve = self.plot.getCurve('curve')
self.plot.remove('curve')
self.assertEqual(listener.callCount(), 2)
self.assertEqual(listener.arguments(callIndex=0), ('add', curve))
self.assertEqual(listener.arguments(callIndex=1), ('remove', curve))
def testGetItems(self):
"""Test getItems method"""
curve_x = 1, 2
self.plot.addCurve(curve_x, (3, 4))
image = (0, 1), (2, 3)
self.plot.addImage(image)
scatter_x = 10, 11
self.plot.addScatter(scatter_x, (12, 13), (0, 1))
marker_pos = 5, 5
self.plot.addMarker(*marker_pos)
marker_x = 6
self.plot.addXMarker(marker_x)
self.plot.addItem((0, 5), (2, 10), shape='rectangle')
items = self.plot.getItems()
self.assertEqual(len(items), 6)
self.assertTrue(numpy.all(numpy.equal(items[0].getXData(), curve_x)))
self.assertTrue(numpy.all(numpy.equal(items[1].getData(), image)))
self.assertTrue(numpy.all(numpy.equal(items[2].getXData(), scatter_x)))
self.assertTrue(numpy.all(numpy.equal(items[3].getPosition(), marker_pos)))
self.assertTrue(numpy.all(numpy.equal(items[4].getPosition()[0], marker_x)))
self.assertEqual(items[5].getType(), 'rectangle')
def testBackGroundColors(self):
self.plot.setVisible(True)
self.qWaitForWindowExposed(self.plot)
self.qapp.processEvents()
        # Customize the full background
color = self.plot.getBackgroundColor()
self.assertTrue(color.isValid())
self.assertEqual(color, qt.QColor(255, 255, 255))
self.plot.setBackgroundColor("red")
color = self.plot.getBackgroundColor()
self.assertTrue(color.isValid())
self.qapp.processEvents()
        # Customize the data background
color = self.plot.getDataBackgroundColor()
self.assertFalse(color.isValid())
self.plot.setDataBackgroundColor("red")
color = self.plot.getDataBackgroundColor()
self.assertTrue(color.isValid())
self.qapp.processEvents()
# Back to default
self.plot.setBackgroundColor('white')
self.plot.setDataBackgroundColor(None)
color = self.plot.getBackgroundColor()
self.assertTrue(color.isValid())
self.assertEqual(color, qt.QColor(255, 255, 255))
color = self.plot.getDataBackgroundColor()
self.assertFalse(color.isValid())
self.qapp.processEvents()
class TestPlotImage(PlotWidgetTestCase, ParametricTestCase):
"""Basic tests for addImage"""
def setUp(self):
super(TestPlotImage, self).setUp()
self.plot.getYAxis().setLabel('Rows')
self.plot.getXAxis().setLabel('Columns')
def testPlotColormapTemperature(self):
self.plot.setGraphTitle('Temp. Linear')
colormap = Colormap(name='temperature',
normalization='linear',
vmin=None,
vmax=None)
self.plot.addImage(DATA_2D, legend="image 1", colormap=colormap)
def testPlotColormapGray(self):
self.plot.setKeepDataAspectRatio(False)
self.plot.setGraphTitle('Gray Linear')
colormap = Colormap(name='gray',
normalization='linear',
vmin=None,
vmax=None)
self.plot.addImage(DATA_2D, legend="image 1", colormap=colormap)
def testPlotColormapTemperatureLog(self):
self.plot.setGraphTitle('Temp. Log')
colormap = Colormap(name='temperature',
normalization=Colormap.LOGARITHM,
vmin=None,
vmax=None)
self.plot.addImage(DATA_2D, legend="image 1", colormap=colormap)
def testPlotRgbRgba(self):
self.plot.setKeepDataAspectRatio(False)
self.plot.setGraphTitle('RGB + RGBA')
rgb = numpy.array(
(((0, 0, 0), (128, 0, 0), (255, 0, 0)),
((0, 128, 0), (0, 128, 128), (0, 128, 256))),
dtype=numpy.uint8)
self.plot.addImage(rgb, legend="rgb",
origin=(0, 0), scale=(10, 10),
resetzoom=False)
rgba = numpy.array(
(((0, 0, 0, .5), (.5, 0, 0, 1), (1, 0, 0, .5)),
((0, .5, 0, 1), (0, .5, .5, 1), (0, 1, 1, .5))),
dtype=numpy.float32)
self.plot.addImage(rgba, legend="rgba",
origin=(5, 5), scale=(10, 10),
resetzoom=False)
self.plot.resetZoom()
def testPlotColormapCustom(self):
self.plot.setKeepDataAspectRatio(False)
self.plot.setGraphTitle('Custom colormap')
colormap = Colormap(name=None,
normalization=Colormap.LINEAR,
vmin=None,
vmax=None,
colors=((0., 0., 0.), (1., 0., 0.),
(0., 1., 0.), (0., 0., 1.)))
self.plot.addImage(DATA_2D, legend="image 1", colormap=colormap,
resetzoom=False)
colormap = Colormap(name=None,
normalization=Colormap.LINEAR,
vmin=None,
vmax=None,
colors=numpy.array(
((0, 0, 0, 0), (0, 0, 0, 128),
(128, 128, 128, 128), (255, 255, 255, 255)),
dtype=numpy.uint8))
self.plot.addImage(DATA_2D, legend="image 2", colormap=colormap,
origin=(DATA_2D.shape[0], 0),
resetzoom=False)
self.plot.resetZoom()
def testImageOriginScale(self):
"""Test of image with different origin and scale"""
self.plot.setGraphTitle('origin and scale')
tests = [ # (origin, scale)
((10, 20), (1, 1)),
((10, 20), (-1, -1)),
((-10, 20), (2, 1)),
((10, -20), (-1, -2)),
(100, 2),
(-100, (1, 1)),
((10, 20), 2),
]
for origin, scale in tests:
with self.subTest(origin=origin, scale=scale):
self.plot.addImage(DATA_2D, origin=origin, scale=scale)
try:
ox, oy = origin
except TypeError:
ox, oy = origin, origin
try:
sx, sy = scale
except TypeError:
sx, sy = scale, scale
xbounds = ox, ox + DATA_2D.shape[1] * sx
ybounds = oy, oy + DATA_2D.shape[0] * sy
# Check limits without aspect ratio
xmin, xmax = self.plot.getXAxis().getLimits()
ymin, ymax = self.plot.getYAxis().getLimits()
self.assertEqual(xmin, min(xbounds))
self.assertEqual(xmax, max(xbounds))
self.assertEqual(ymin, min(ybounds))
self.assertEqual(ymax, max(ybounds))
# Check limits with aspect ratio
self.plot.setKeepDataAspectRatio(True)
xmin, xmax = self.plot.getXAxis().getLimits()
ymin, ymax = self.plot.getYAxis().getLimits()
self.assertTrue(round(xmin, 7) <= min(xbounds))
self.assertTrue(round(xmax, 7) >= max(xbounds))
self.assertTrue(round(ymin, 7) <= min(ybounds))
self.assertTrue(round(ymax, 7) >= max(ybounds))
self.plot.setKeepDataAspectRatio(False) # Reset aspect ratio
self.plot.clear()
self.plot.resetZoom()
def testPlotColormapDictAPI(self):
"""Test that the addImage API using a colormap dictionary is still
working"""
self.plot.setGraphTitle('Temp. Log')
colormap = {
'name': 'temperature',
'normalization': 'log',
'vmin': None,
'vmax': None
}
self.plot.addImage(DATA_2D, legend="image 1", colormap=colormap)
def testPlotComplexImage(self):
"""Test that a complex image is displayed as its absolute value."""
data = numpy.linspace(1, 1j, 100).reshape(10, 10)
self.plot.addImage(data, legend='complex')
image = self.plot.getActiveImage()
retrievedData = image.getData(copy=False)
self.assertTrue(
numpy.all(numpy.equal(retrievedData, numpy.absolute(data))))
def testPlotBooleanImage(self):
"""Test that a boolean image is displayed and converted to int8."""
        data = numpy.zeros((10, 10), dtype=bool)
data[::2, ::2] = True
self.plot.addImage(data, legend='boolean')
image = self.plot.getActiveImage()
retrievedData = image.getData(copy=False)
self.assertTrue(numpy.all(numpy.equal(retrievedData, data)))
self.assertIs(retrievedData.dtype.type, numpy.int8)
def testPlotAlphaImage(self):
"""Test with an alpha image layer"""
data = numpy.random.random((10, 10))
alpha = numpy.linspace(0, 1, 100).reshape(10, 10)
self.plot.addImage(data, legend='image')
image = self.plot.getActiveImage()
image.setData(data, alpha=alpha)
self.qapp.processEvents()
self.assertTrue(numpy.array_equal(alpha, image.getAlphaData()))
class TestPlotCurve(PlotWidgetTestCase):
"""Basic tests for addCurve."""
# Test data sets
xData = numpy.arange(1000)
yData = -500 + 100 * numpy.sin(xData)
xData2 = xData + 1000
yData2 = xData - 1000 + 200 * numpy.random.random(1000)
def setUp(self):
super(TestPlotCurve, self).setUp()
self.plot.setGraphTitle('Curve')
self.plot.getYAxis().setLabel('Rows')
self.plot.getXAxis().setLabel('Columns')
self.plot.setActiveCurveHandling(False)
def testPlotCurveColorFloat(self):
color = numpy.array(numpy.random.random(3 * 1000),
dtype=numpy.float32).reshape(1000, 3)
self.plot.addCurve(self.xData, self.yData,
legend="curve 1",
replace=False, resetzoom=False,
color=color,
linestyle="", symbol="s")
self.plot.addCurve(self.xData2, self.yData2,
legend="curve 2",
replace=False, resetzoom=False,
color='green', linestyle="-", symbol='o')
self.plot.resetZoom()
def testPlotCurveColorByte(self):
color = numpy.array(255 * numpy.random.random(3 * 1000),
dtype=numpy.uint8).reshape(1000, 3)
self.plot.addCurve(self.xData, self.yData,
legend="curve 1",
replace=False, resetzoom=False,
color=color,
linestyle="", symbol="s")
self.plot.addCurve(self.xData2, self.yData2,
legend="curve 2",
replace=False, resetzoom=False,
color='green', linestyle="-", symbol='o')
self.plot.resetZoom()
def testPlotCurveColors(self):
color = numpy.array(numpy.random.random(3 * 1000),
dtype=numpy.float32).reshape(1000, 3)
self.plot.addCurve(self.xData, self.yData,
legend="curve 2",
replace=False, resetzoom=False,
color=color, linestyle="-", symbol='o')
self.plot.resetZoom()
# Test updating color array
# From array to array
newColors = numpy.ones((len(self.xData), 3), dtype=numpy.float32)
self.plot.addCurve(self.xData, self.yData,
legend="curve 2",
replace=False, resetzoom=False,
color=newColors, symbol='o')
# Array to single color
self.plot.addCurve(self.xData, self.yData,
legend="curve 2",
replace=False, resetzoom=False,
color='green', symbol='o')
# single color to array
self.plot.addCurve(self.xData, self.yData,
legend="curve 2",
replace=False, resetzoom=False,
color=color, symbol='o')
class TestPlotScatter(PlotWidgetTestCase, ParametricTestCase):
"""Basic tests for addScatter"""
def testScatter(self):
x = numpy.arange(100)
y = numpy.arange(100)
value = numpy.arange(100)
self.plot.addScatter(x, y, value)
self.plot.resetZoom()
def testScatterVisualization(self):
self.plot.addScatter((0, 1, 2, 3), (2, 0, 2, 1), (0, 1, 2, 3))
self.plot.resetZoom()
self.qapp.processEvents()
scatter = self.plot.getItems()[0]
for visualization in ('solid',
'points',
scatter.Visualization.SOLID,
scatter.Visualization.POINTS):
with self.subTest(visualization=visualization):
scatter.setVisualization(visualization)
self.qapp.processEvents()
class TestPlotMarker(PlotWidgetTestCase):
"""Basic tests for add*Marker"""
def setUp(self):
super(TestPlotMarker, self).setUp()
self.plot.getYAxis().setLabel('Rows')
self.plot.getXAxis().setLabel('Columns')
self.plot.getXAxis().setAutoScale(False)
self.plot.getYAxis().setAutoScale(False)
self.plot.setKeepDataAspectRatio(False)
self.plot.setLimits(0., 100., -100., 100.)
def testPlotMarkerX(self):
self.plot.setGraphTitle('Markers X')
markers = [
(10., 'blue', False, False),
(20., 'red', False, False),
(40., 'green', True, False),
(60., 'gray', True, True),
(80., 'black', False, True),
]
for x, color, select, drag in markers:
name = str(x)
if select:
name += " sel."
if drag:
name += " drag"
self.plot.addXMarker(x, name, name, color, select, drag)
self.plot.resetZoom()
def testPlotMarkerY(self):
self.plot.setGraphTitle('Markers Y')
markers = [
(-50., 'blue', False, False),
(-30., 'red', False, False),
(0., 'green', True, False),
(10., 'gray', True, True),
(80., 'black', False, True),
]
for y, color, select, drag in markers:
name = str(y)
if select:
name += " sel."
if drag:
name += " drag"
self.plot.addYMarker(y, name, name, color, select, drag)
self.plot.resetZoom()
def testPlotMarkerPt(self):
self.plot.setGraphTitle('Markers Pt')
markers = [
(10., -50., 'blue', False, False),
(40., -30., 'red', False, False),
(50., 0., 'green', True, False),
(50., 20., 'gray', True, True),
(70., 50., 'black', False, True),
]
for x, y, color, select, drag in markers:
name = "{0},{1}".format(x, y)
if select:
name += " sel."
if drag:
name += " drag"
self.plot.addMarker(x, y, name, name, color, select, drag)
self.plot.resetZoom()
def testPlotMarkerWithoutLegend(self):
self.plot.setGraphTitle('Markers without legend')
self.plot.getYAxis().setInverted(True)
# Markers without legend
self.plot.addMarker(10, 10)
self.plot.addMarker(10, 20)
self.plot.addMarker(40, 50, text='test', symbol=None)
self.plot.addMarker(40, 50, text='test', symbol='+')
self.plot.addXMarker(25)
self.plot.addXMarker(35)
self.plot.addXMarker(45, text='test')
self.plot.addYMarker(55)
self.plot.addYMarker(65)
self.plot.addYMarker(75, text='test')
self.plot.resetZoom()
def testPlotMarkerYAxis(self):
# Check only the API
legend = self.plot.addMarker(10, 10)
item = self.plot._getMarker(legend)
self.assertEqual(item.getYAxis(), "left")
legend = self.plot.addMarker(10, 10, yaxis="right")
item = self.plot._getMarker(legend)
self.assertEqual(item.getYAxis(), "right")
legend = self.plot.addMarker(10, 10, yaxis="left")
item = self.plot._getMarker(legend)
self.assertEqual(item.getYAxis(), "left")
legend = self.plot.addXMarker(10, yaxis="right")
item = self.plot._getMarker(legend)
self.assertEqual(item.getYAxis(), "right")
legend = self.plot.addXMarker(10, yaxis="left")
item = self.plot._getMarker(legend)
self.assertEqual(item.getYAxis(), "left")
legend = self.plot.addYMarker(10, yaxis="right")
item = self.plot._getMarker(legend)
self.assertEqual(item.getYAxis(), "right")
legend = self.plot.addYMarker(10, yaxis="left")
item = self.plot._getMarker(legend)
self.assertEqual(item.getYAxis(), "left")
self.plot.resetZoom()
# TestPlotItem ################################################################
class TestPlotItem(PlotWidgetTestCase):
"""Basic tests for addItem."""
# Polygon coordinates and color
polygons = [ # legend, x coords, y coords, color
('triangle', numpy.array((10, 30, 50)),
numpy.array((55, 70, 55)), 'red'),
('square', numpy.array((10, 10, 50, 50)),
numpy.array((10, 50, 50, 10)), 'green'),
('star', numpy.array((60, 70, 80, 60, 80)),
numpy.array((25, 50, 25, 40, 40)), 'blue'),
]
# Rectangle coordinantes and color
rectangles = [ # legend, x coords, y coords, color
('square 1', numpy.array((1., 10.)),
numpy.array((1., 10.)), 'red'),
('square 2', numpy.array((10., 20.)),
numpy.array((10., 20.)), 'green'),
('square 3', numpy.array((20., 30.)),
numpy.array((20., 30.)), 'blue'),
('rect 1', numpy.array((1., 30.)),
numpy.array((35., 40.)), 'black'),
('line h', numpy.array((1., 30.)),
numpy.array((45., 45.)), 'darkRed'),
]
def setUp(self):
super(TestPlotItem, self).setUp()
self.plot.getYAxis().setLabel('Rows')
self.plot.getXAxis().setLabel('Columns')
self.plot.getXAxis().setAutoScale(False)
self.plot.getYAxis().setAutoScale(False)
self.plot.setKeepDataAspectRatio(False)
self.plot.setLimits(0., 100., -100., 100.)
def testPlotItemPolygonFill(self):
self.plot.setGraphTitle('Item Fill')
for legend, xList, yList, color in self.polygons:
self.plot.addItem(xList, yList, legend=legend,
replace=False,
shape="polygon", fill=True, color=color)
self.plot.resetZoom()
def testPlotItemPolygonNoFill(self):
self.plot.setGraphTitle('Item No Fill')
for legend, xList, yList, color in self.polygons:
self.plot.addItem(xList, yList, legend=legend,
replace=False,
shape="polygon", fill=False, color=color)
self.plot.resetZoom()
def testPlotItemRectangleFill(self):
self.plot.setGraphTitle('Rectangle Fill')
for legend, xList, yList, color in self.rectangles:
self.plot.addItem(xList, yList, legend=legend,
replace=False,
shape="rectangle", fill=True, color=color)
self.plot.resetZoom()
def testPlotItemRectangleNoFill(self):
self.plot.setGraphTitle('Rectangle No Fill')
for legend, xList, yList, color in self.rectangles:
self.plot.addItem(xList, yList, legend=legend,
replace=False,
shape="rectangle", fill=False, color=color)
self.plot.resetZoom()
class TestPlotActiveCurveImage(PlotWidgetTestCase):
"""Basic tests for active curve and image handling"""
xData = numpy.arange(1000)
yData = -500 + 100 * numpy.sin(xData)
xData2 = xData + 1000
yData2 = xData - 1000 + 200 * numpy.random.random(1000)
def tearDown(self):
self.plot.setActiveCurveHandling(False)
super(TestPlotActiveCurveImage, self).tearDown()
def testActiveCurveAndLabels(self):
# Active curve handling off, no label change
self.plot.setActiveCurveHandling(False)
self.plot.getXAxis().setLabel('XLabel')
self.plot.getYAxis().setLabel('YLabel')
self.plot.addCurve((1, 2), (1, 2))
self.assertEqual(self.plot.getXAxis().getLabel(), 'XLabel')
self.assertEqual(self.plot.getYAxis().getLabel(), 'YLabel')
self.plot.addCurve((1, 2), (2, 3), xlabel='x1', ylabel='y1')
self.assertEqual(self.plot.getXAxis().getLabel(), 'XLabel')
self.assertEqual(self.plot.getYAxis().getLabel(), 'YLabel')
self.plot.clear()
self.assertEqual(self.plot.getXAxis().getLabel(), 'XLabel')
self.assertEqual(self.plot.getYAxis().getLabel(), 'YLabel')
# Active curve handling on, label changes
self.plot.setActiveCurveHandling(True)
self.plot.getXAxis().setLabel('XLabel')
self.plot.getYAxis().setLabel('YLabel')
# labels changed as active curve
self.plot.addCurve((1, 2), (1, 2), legend='1',
xlabel='x1', ylabel='y1')
self.plot.setActiveCurve('1')
self.assertEqual(self.plot.getXAxis().getLabel(), 'x1')
self.assertEqual(self.plot.getYAxis().getLabel(), 'y1')
# labels not changed as not active curve
self.plot.addCurve((1, 2), (2, 3), legend='2')
self.assertEqual(self.plot.getXAxis().getLabel(), 'x1')
self.assertEqual(self.plot.getYAxis().getLabel(), 'y1')
# labels changed
self.plot.setActiveCurve('2')
self.assertEqual(self.plot.getXAxis().getLabel(), 'XLabel')
self.assertEqual(self.plot.getYAxis().getLabel(), 'YLabel')
self.plot.setActiveCurve('1')
self.assertEqual(self.plot.getXAxis().getLabel(), 'x1')
self.assertEqual(self.plot.getYAxis().getLabel(), 'y1')
self.plot.clear()
self.assertEqual(self.plot.getXAxis().getLabel(), 'XLabel')
self.assertEqual(self.plot.getYAxis().getLabel(), 'YLabel')
def testPlotActiveCurveSelectionMode(self):
self.plot.clear()
self.plot.setActiveCurveHandling(True)
legend = "curve 1"
self.plot.addCurve(self.xData, self.yData,
legend=legend,
color="green")
# active curve should be None
self.assertEqual(self.plot.getActiveCurve(just_legend=True), None)
# active curve should be None when None is set as active curve
self.plot.setActiveCurve(legend)
current = self.plot.getActiveCurve(just_legend=True)
self.assertEqual(current, legend)
self.plot.setActiveCurve(None)
current = self.plot.getActiveCurve(just_legend=True)
self.assertEqual(current, None)
        # in "legacy" mode the only curve present is automatically made active
self.plot.setActiveCurveSelectionMode("legacy")
current = self.plot.getActiveCurve(just_legend=True)
self.assertEqual(current, legend)
# active curve should not change when None set as active curve
self.assertEqual(self.plot.getActiveCurveSelectionMode(), "legacy")
self.plot.setActiveCurve(None)
current = self.plot.getActiveCurve(just_legend=True)
self.assertEqual(current, legend)
# situation where no curve is active
self.plot.clear()
self.plot.setActiveCurveHandling(True)
self.assertEqual(self.plot.getActiveCurveSelectionMode(), "atmostone")
self.plot.addCurve(self.xData, self.yData,
legend=legend,
color="green")
self.assertEqual(self.plot.getActiveCurve(just_legend=True), None)
self.plot.addCurve(self.xData2, self.yData2,
legend="curve 2",
color="red")
self.assertEqual(self.plot.getActiveCurve(just_legend=True), None)
self.plot.setActiveCurveSelectionMode("legacy")
self.assertEqual(self.plot.getActiveCurve(just_legend=True), None)
# the first curve added should be active
self.plot.clear()
self.plot.addCurve(self.xData, self.yData,
legend=legend,
color="green")
self.assertEqual(self.plot.getActiveCurve(just_legend=True), legend)
self.plot.addCurve(self.xData2, self.yData2,
legend="curve 2",
color="red")
self.assertEqual(self.plot.getActiveCurve(just_legend=True), legend)
def testActiveCurveStyle(self):
"""Test change of active curve style"""
self.plot.setActiveCurveHandling(True)
self.plot.setActiveCurveStyle(color='black')
style = self.plot.getActiveCurveStyle()
self.assertEqual(style.getColor(), (0., 0., 0., 1.))
self.assertIsNone(style.getLineStyle())
self.assertIsNone(style.getLineWidth())
self.assertIsNone(style.getSymbol())
self.assertIsNone(style.getSymbolSize())
self.plot.addCurve(x=self.xData, y=self.yData, legend="curve1")
curve = self.plot.getCurve("curve1")
curve.setColor('blue')
curve.setLineStyle('-')
curve.setLineWidth(1)
curve.setSymbol('o')
curve.setSymbolSize(5)
# Check default current style
defaultStyle = curve.getCurrentStyle()
self.assertEqual(defaultStyle, CurveStyle(color='blue',
linestyle='-',
linewidth=1,
symbol='o',
symbolsize=5))
# Activate curve with highlight color=black
self.plot.setActiveCurve("curve1")
style = curve.getCurrentStyle()
self.assertEqual(style.getColor(), (0., 0., 0., 1.))
self.assertEqual(style.getLineStyle(), '-')
self.assertEqual(style.getLineWidth(), 1)
self.assertEqual(style.getSymbol(), 'o')
self.assertEqual(style.getSymbolSize(), 5)
# Change highlight to linewidth=2
self.plot.setActiveCurveStyle(linewidth=2)
style = curve.getCurrentStyle()
self.assertEqual(style.getColor(), (0., 0., 1., 1.))
self.assertEqual(style.getLineStyle(), '-')
self.assertEqual(style.getLineWidth(), 2)
self.assertEqual(style.getSymbol(), 'o')
self.assertEqual(style.getSymbolSize(), 5)
self.plot.setActiveCurve(None)
self.assertEqual(curve.getCurrentStyle(), defaultStyle)
def testActiveImageAndLabels(self):
# Active image handling always on, no API for toggling it
self.plot.getXAxis().setLabel('XLabel')
self.plot.getYAxis().setLabel('YLabel')
        # labels changed as it becomes the active image
self.plot.addImage(numpy.arange(100).reshape(10, 10),
legend='1', xlabel='x1', ylabel='y1')
self.assertEqual(self.plot.getXAxis().getLabel(), 'x1')
self.assertEqual(self.plot.getYAxis().getLabel(), 'y1')
        # labels not changed as it is not the active image
self.plot.addImage(numpy.arange(100).reshape(10, 10),
legend='2')
self.assertEqual(self.plot.getXAxis().getLabel(), 'x1')
self.assertEqual(self.plot.getYAxis().getLabel(), 'y1')
# labels changed
self.plot.setActiveImage('2')
self.assertEqual(self.plot.getXAxis().getLabel(), 'XLabel')
self.assertEqual(self.plot.getYAxis().getLabel(), 'YLabel')
self.plot.setActiveImage('1')
self.assertEqual(self.plot.getXAxis().getLabel(), 'x1')
self.assertEqual(self.plot.getYAxis().getLabel(), 'y1')
self.plot.clear()
self.assertEqual(self.plot.getXAxis().getLabel(), 'XLabel')
self.assertEqual(self.plot.getYAxis().getLabel(), 'YLabel')
##############################################################################
# Log
##############################################################################
class TestPlotEmptyLog(PlotWidgetTestCase):
"""Basic tests for log plot"""
def testEmptyPlotTitleLabelsLog(self):
self.plot.setGraphTitle('Empty Log Log')
self.plot.getXAxis().setLabel('X')
self.plot.getYAxis().setLabel('Y')
self.plot.getXAxis()._setLogarithmic(True)
self.plot.getYAxis()._setLogarithmic(True)
self.plot.resetZoom()
class TestPlotAxes(TestCaseQt, ParametricTestCase):
# Test data
xData = numpy.arange(1, 10)
yData = xData ** 2
def __init__(self, methodName='runTest', backend=None):
unittest.TestCase.__init__(self, methodName)
self.__backend = backend
def setUp(self):
super(TestPlotAxes, self).setUp()
self.plot = PlotWidget(backend=self.__backend)
        # Displaying the plot is not needed for these tests and skipping it
        # saves a lot of time:
# self.plot.show()
# self.qWaitForWindowExposed(self.plot)
def tearDown(self):
self.qapp.processEvents()
self.plot.setAttribute(qt.Qt.WA_DeleteOnClose)
self.plot.close()
del self.plot
super(TestPlotAxes, self).tearDown()
def testDefaultAxes(self):
axis = self.plot.getXAxis()
self.assertEqual(axis.getScale(), axis.LINEAR)
axis = self.plot.getYAxis()
self.assertEqual(axis.getScale(), axis.LINEAR)
axis = self.plot.getYAxis(axis="right")
self.assertEqual(axis.getScale(), axis.LINEAR)
def testOldPlotAxis_getterSetter(self):
"""Test silx API prior to silx 0.6"""
x = self.plot.getXAxis()
y = self.plot.getYAxis()
p = self.plot
tests = [
# setters
(p.setGraphXLimits, (10, 20), x.getLimits, (10, 20)),
(p.setGraphYLimits, (10, 20), y.getLimits, (10, 20)),
(p.setGraphXLabel, "foox", x.getLabel, "foox"),
(p.setGraphYLabel, "fooy", y.getLabel, "fooy"),
(p.setYAxisInverted, True, y.isInverted, True),
(p.setXAxisLogarithmic, True, x.getScale, x.LOGARITHMIC),
(p.setYAxisLogarithmic, True, y.getScale, y.LOGARITHMIC),
(p.setXAxisAutoScale, False, x.isAutoScale, False),
(p.setYAxisAutoScale, False, y.isAutoScale, False),
# getters
(x.setLimits, (11, 20), p.getGraphXLimits, (11, 20)),
(y.setLimits, (11, 20), p.getGraphYLimits, (11, 20)),
(x.setLabel, "fooxx", p.getGraphXLabel, "fooxx"),
(y.setLabel, "fooyy", p.getGraphYLabel, "fooyy"),
(y.setInverted, False, p.isYAxisInverted, False),
(x.setScale, x.LINEAR, p.isXAxisLogarithmic, False),
(y.setScale, y.LINEAR, p.isYAxisLogarithmic, False),
(x.setAutoScale, True, p.isXAxisAutoScale, True),
(y.setAutoScale, True, p.isYAxisAutoScale, True),
]
for testCase in tests:
setter, value, getter, expected = testCase
with self.subTest():
if setter is not None:
if not isinstance(value, tuple):
value = (value, )
setter(*value)
if getter is not None:
self.assertEqual(getter(), expected)
def testOldPlotAxis_Logarithmic(self):
"""Test silx API prior to silx 0.6"""
x = self.plot.getXAxis()
y = self.plot.getYAxis()
yright = self.plot.getYAxis(axis="right")
self.assertEqual(x.getScale(), x.LINEAR)
self.assertEqual(y.getScale(), x.LINEAR)
self.assertEqual(yright.getScale(), x.LINEAR)
self.plot.setXAxisLogarithmic(True)
self.assertEqual(x.getScale(), x.LOGARITHMIC)
self.assertEqual(y.getScale(), x.LINEAR)
self.assertEqual(yright.getScale(), x.LINEAR)
self.assertEqual(self.plot.isXAxisLogarithmic(), True)
self.assertEqual(self.plot.isYAxisLogarithmic(), False)
self.plot.setYAxisLogarithmic(True)
self.assertEqual(x.getScale(), x.LOGARITHMIC)
self.assertEqual(y.getScale(), x.LOGARITHMIC)
self.assertEqual(yright.getScale(), x.LOGARITHMIC)
self.assertEqual(self.plot.isXAxisLogarithmic(), True)
self.assertEqual(self.plot.isYAxisLogarithmic(), True)
yright.setScale(yright.LINEAR)
self.assertEqual(x.getScale(), x.LOGARITHMIC)
self.assertEqual(y.getScale(), x.LINEAR)
self.assertEqual(yright.getScale(), x.LINEAR)
self.assertEqual(self.plot.isXAxisLogarithmic(), True)
self.assertEqual(self.plot.isYAxisLogarithmic(), False)
def testOldPlotAxis_AutoScale(self):
"""Test silx API prior to silx 0.6"""
x = self.plot.getXAxis()
y = self.plot.getYAxis()
yright = self.plot.getYAxis(axis="right")
self.assertEqual(x.isAutoScale(), True)
self.assertEqual(y.isAutoScale(), True)
self.assertEqual(yright.isAutoScale(), True)
self.plot.setXAxisAutoScale(False)
self.assertEqual(x.isAutoScale(), False)
self.assertEqual(y.isAutoScale(), True)
self.assertEqual(yright.isAutoScale(), True)
self.assertEqual(self.plot.isXAxisAutoScale(), False)
self.assertEqual(self.plot.isYAxisAutoScale(), True)
self.plot.setYAxisAutoScale(False)
self.assertEqual(x.isAutoScale(), False)
self.assertEqual(y.isAutoScale(), False)
self.assertEqual(yright.isAutoScale(), False)
self.assertEqual(self.plot.isXAxisAutoScale(), False)
self.assertEqual(self.plot.isYAxisAutoScale(), False)
yright.setAutoScale(True)
self.assertEqual(x.isAutoScale(), False)
self.assertEqual(y.isAutoScale(), True)
self.assertEqual(yright.isAutoScale(), True)
self.assertEqual(self.plot.isXAxisAutoScale(), False)
self.assertEqual(self.plot.isYAxisAutoScale(), True)
def testOldPlotAxis_Inverted(self):
"""Test silx API prior to silx 0.6"""
x = self.plot.getXAxis()
y = self.plot.getYAxis()
yright = self.plot.getYAxis(axis="right")
self.assertEqual(x.isInverted(), False)
self.assertEqual(y.isInverted(), False)
self.assertEqual(yright.isInverted(), False)
self.plot.setYAxisInverted(True)
self.assertEqual(x.isInverted(), False)
self.assertEqual(y.isInverted(), True)
self.assertEqual(yright.isInverted(), True)
self.assertEqual(self.plot.isYAxisInverted(), True)
yright.setInverted(False)
self.assertEqual(x.isInverted(), False)
self.assertEqual(y.isInverted(), False)
self.assertEqual(yright.isInverted(), False)
self.assertEqual(self.plot.isYAxisInverted(), False)
def testLogXWithData(self):
self.plot.setGraphTitle('Curve X: Log Y: Linear')
self.plot.addCurve(self.xData, self.yData,
legend="curve",
replace=False, resetzoom=True,
color='green', linestyle="-", symbol='o')
axis = self.plot.getXAxis()
axis.setScale(axis.LOGARITHMIC)
self.assertEqual(axis.getScale(), axis.LOGARITHMIC)
def testLogYWithData(self):
self.plot.setGraphTitle('Curve X: Linear Y: Log')
self.plot.addCurve(self.xData, self.yData,
legend="curve",
replace=False, resetzoom=True,
color='green', linestyle="-", symbol='o')
axis = self.plot.getYAxis()
axis.setScale(axis.LOGARITHMIC)
self.assertEqual(axis.getScale(), axis.LOGARITHMIC)
axis = self.plot.getYAxis(axis="right")
self.assertEqual(axis.getScale(), axis.LOGARITHMIC)
def testLogYRightWithData(self):
self.plot.setGraphTitle('Curve X: Linear Y: Log')
self.plot.addCurve(self.xData, self.yData,
legend="curve",
replace=False, resetzoom=True,
color='green', linestyle="-", symbol='o')
axis = self.plot.getYAxis(axis="right")
axis.setScale(axis.LOGARITHMIC)
self.assertEqual(axis.getScale(), axis.LOGARITHMIC)
axis = self.plot.getYAxis()
self.assertEqual(axis.getScale(), axis.LOGARITHMIC)
def testLimitsChanged_setLimits(self):
self.plot.addCurve(self.xData, self.yData,
legend="curve",
replace=False, resetzoom=False,
color='green', linestyle="-", symbol='o')
listener = SignalListener()
self.plot.getXAxis().sigLimitsChanged.connect(listener.partial(axis="x"))
self.plot.getYAxis().sigLimitsChanged.connect(listener.partial(axis="y"))
self.plot.getYAxis(axis="right").sigLimitsChanged.connect(listener.partial(axis="y2"))
self.plot.setLimits(0, 1, 0, 1, 0, 1)
# at least one event per axis
self.assertEqual(len(set(listener.karguments(argumentName="axis"))), 3)
def testLimitsChanged_resetZoom(self):
self.plot.addCurve(self.xData, self.yData,
legend="curve",
replace=False, resetzoom=False,
color='green', linestyle="-", symbol='o')
listener = SignalListener()
self.plot.getXAxis().sigLimitsChanged.connect(listener.partial(axis="x"))
self.plot.getYAxis().sigLimitsChanged.connect(listener.partial(axis="y"))
self.plot.getYAxis(axis="right").sigLimitsChanged.connect(listener.partial(axis="y2"))
self.plot.resetZoom()
# at least one event per axis
self.assertEqual(len(set(listener.karguments(argumentName="axis"))), 3)
def testLimitsChanged_setXLimit(self):
self.plot.addCurve(self.xData, self.yData,
legend="curve",
replace=False, resetzoom=False,
color='green', linestyle="-", symbol='o')
listener = SignalListener()
axis = self.plot.getXAxis()
axis.sigLimitsChanged.connect(listener)
axis.setLimits(20, 30)
        # the last emitted event and the axis limits should match the new range
self.assertEqual(listener.arguments(callIndex=-1), (20.0, 30.0))
self.assertEqual(axis.getLimits(), (20.0, 30.0))
def testLimitsChanged_setYLimit(self):
self.plot.addCurve(self.xData, self.yData,
legend="curve",
replace=False, resetzoom=False,
color='green', linestyle="-", symbol='o')
listener = SignalListener()
axis = self.plot.getYAxis()
axis.sigLimitsChanged.connect(listener)
axis.setLimits(20, 30)
        # the last emitted event and the axis limits should match the new range
self.assertEqual(listener.arguments(callIndex=-1), (20.0, 30.0))
self.assertEqual(axis.getLimits(), (20.0, 30.0))
def testLimitsChanged_setYRightLimit(self):
self.plot.addCurve(self.xData, self.yData,
legend="curve",
replace=False, resetzoom=False,
color='green', linestyle="-", symbol='o')
listener = SignalListener()
axis = self.plot.getYAxis(axis="right")
axis.sigLimitsChanged.connect(listener)
axis.setLimits(20, 30)
        # the last emitted event and the axis limits should match the new range
self.assertEqual(listener.arguments(callIndex=-1), (20.0, 30.0))
self.assertEqual(axis.getLimits(), (20.0, 30.0))
def testScaleProxy(self):
listener = SignalListener()
y = self.plot.getYAxis()
yright = self.plot.getYAxis(axis="right")
y.sigScaleChanged.connect(listener.partial("left"))
yright.sigScaleChanged.connect(listener.partial("right"))
yright.setScale(yright.LOGARITHMIC)
self.assertEqual(y.getScale(), y.LOGARITHMIC)
events = listener.arguments()
self.assertEqual(len(events), 2)
self.assertIn(("left", y.LOGARITHMIC), events)
self.assertIn(("right", y.LOGARITHMIC), events)
def testAutoScaleProxy(self):
listener = SignalListener()
y = self.plot.getYAxis()
yright = self.plot.getYAxis(axis="right")
y.sigAutoScaleChanged.connect(listener.partial("left"))
yright.sigAutoScaleChanged.connect(listener.partial("right"))
yright.setAutoScale(False)
self.assertEqual(y.isAutoScale(), False)
events = listener.arguments()
self.assertEqual(len(events), 2)
self.assertIn(("left", False), events)
self.assertIn(("right", False), events)
def testInvertedProxy(self):
listener = SignalListener()
y = self.plot.getYAxis()
yright = self.plot.getYAxis(axis="right")
y.sigInvertedChanged.connect(listener.partial("left"))
yright.sigInvertedChanged.connect(listener.partial("right"))
yright.setInverted(True)
self.assertEqual(y.isInverted(), True)
events = listener.arguments()
self.assertEqual(len(events), 2)
self.assertIn(("left", True), events)
self.assertIn(("right", True), events)
def testAxesDisplayedFalse(self):
"""Test coverage on setAxesDisplayed(False)"""
self.plot.setAxesDisplayed(False)
def testAxesDisplayedTrue(self):
"""Test coverage on setAxesDisplayed(True)"""
self.plot.setAxesDisplayed(True)
class TestPlotCurveLog(PlotWidgetTestCase, ParametricTestCase):
"""Basic tests for addCurve with log scale axes"""
# Test data
xData = numpy.arange(1000) + 1
yData = xData ** 2
def _setLabels(self):
self.plot.getXAxis().setLabel('X')
self.plot.getYAxis().setLabel('X * X')
def testPlotCurveLogX(self):
self._setLabels()
self.plot.getXAxis()._setLogarithmic(True)
self.plot.setGraphTitle('Curve X: Log Y: Linear')
self.plot.addCurve(self.xData, self.yData,
legend="curve",
replace=False, resetzoom=True,
color='green', linestyle="-", symbol='o')
def testPlotCurveLogY(self):
self._setLabels()
self.plot.getYAxis()._setLogarithmic(True)
self.plot.setGraphTitle('Curve X: Linear Y: Log')
self.plot.addCurve(self.xData, self.yData,
legend="curve",
replace=False, resetzoom=True,
color='green', linestyle="-", symbol='o')
def testPlotCurveLogXY(self):
self._setLabels()
self.plot.getXAxis()._setLogarithmic(True)
self.plot.getYAxis()._setLogarithmic(True)
self.plot.setGraphTitle('Curve X: Log Y: Log')
self.plot.addCurve(self.xData, self.yData,
legend="curve",
replace=False, resetzoom=True,
color='green', linestyle="-", symbol='o')
def testPlotCurveErrorLogXY(self):
self.plot.getXAxis()._setLogarithmic(True)
self.plot.getYAxis()._setLogarithmic(True)
        # Every second error is larger than the data, so data - error is negative
errors = numpy.ones_like(self.xData)
errors[::2] = self.xData[::2] + 1
tests = [ # name, xerror, yerror
('xerror=3', 3, None),
('xerror=N array', errors, None),
('xerror=Nx1 array', errors.reshape(len(errors), 1), None),
('xerror=2xN array', numpy.array((errors, errors)), None),
('yerror=6', None, 6),
('yerror=N array', None, errors ** 2),
('yerror=Nx1 array', None, (errors ** 2).reshape(len(errors), 1)),
('yerror=2xN array', None, numpy.array((errors, errors)) ** 2),
]
for name, xError, yError in tests:
with self.subTest(name):
self.plot.setGraphTitle(name)
self.plot.addCurve(self.xData, self.yData,
legend=name,
xerror=xError, yerror=yError,
replace=False, resetzoom=True,
color='green', linestyle="-", symbol='o')
self.qapp.processEvents()
self.plot.clear()
self.plot.resetZoom()
self.qapp.processEvents()
def testPlotCurveToggleLog(self):
"""Add a curve with negative data and toggle log axis"""
arange = numpy.arange(1000) + 1
tests = [ # name, xData, yData
('x>0, some negative y', arange, arange - 500),
('x>0, y<0', arange, -arange),
('some negative x, y>0', arange - 500, arange),
('x<0, y>0', -arange, arange),
('some negative x and y', arange - 500, arange - 500),
('x<0, y<0', -arange, -arange),
]
for name, xData, yData in tests:
with self.subTest(name):
self.plot.addCurve(xData, yData, resetzoom=True)
self.qapp.processEvents()
# no log axis
xLim = self.plot.getXAxis().getLimits()
self.assertEqual(xLim, (min(xData), max(xData)))
yLim = self.plot.getYAxis().getLimits()
self.assertEqual(yLim, (min(yData), max(yData)))
# x axis log
self.plot.getXAxis()._setLogarithmic(True)
self.qapp.processEvents()
xLim = self.plot.getXAxis().getLimits()
yLim = self.plot.getYAxis().getLimits()
positives = xData > 0
if numpy.any(positives):
self.assertTrue(numpy.allclose(
xLim, (min(xData[positives]), max(xData[positives]))))
self.assertEqual(
yLim, (min(yData[positives]), max(yData[positives])))
else: # No positive x in the curve
self.assertEqual(xLim, (1., 100.))
self.assertEqual(yLim, (1., 100.))
# x axis and y axis log
self.plot.getYAxis()._setLogarithmic(True)
self.qapp.processEvents()
xLim = self.plot.getXAxis().getLimits()
yLim = self.plot.getYAxis().getLimits()
positives = numpy.logical_and(xData > 0, yData > 0)
if numpy.any(positives):
self.assertTrue(numpy.allclose(
xLim, (min(xData[positives]), max(xData[positives]))))
self.assertTrue(numpy.allclose(
yLim, (min(yData[positives]), max(yData[positives]))))
else: # No positive x and y in the curve
self.assertEqual(xLim, (1., 100.))
self.assertEqual(yLim, (1., 100.))
# y axis log
self.plot.getXAxis()._setLogarithmic(False)
self.qapp.processEvents()
xLim = self.plot.getXAxis().getLimits()
yLim = self.plot.getYAxis().getLimits()
positives = yData > 0
if numpy.any(positives):
self.assertEqual(
xLim, (min(xData[positives]), max(xData[positives])))
self.assertTrue(numpy.allclose(
yLim, (min(yData[positives]), max(yData[positives]))))
else: # No positive y in the curve
self.assertEqual(xLim, (1., 100.))
self.assertEqual(yLim, (1., 100.))
# no log axis
self.plot.getYAxis()._setLogarithmic(False)
self.qapp.processEvents()
xLim = self.plot.getXAxis().getLimits()
self.assertEqual(xLim, (min(xData), max(xData)))
yLim = self.plot.getYAxis().getLimits()
self.assertEqual(yLim, (min(yData), max(yData)))
self.plot.clear()
self.plot.resetZoom()
self.qapp.processEvents()
class TestPlotImageLog(PlotWidgetTestCase):
"""Basic tests for addImage with log scale axes."""
def setUp(self):
super(TestPlotImageLog, self).setUp()
self.plot.getXAxis().setLabel('Columns')
self.plot.getYAxis().setLabel('Rows')
def testPlotColormapGrayLogX(self):
self.plot.getXAxis()._setLogarithmic(True)
self.plot.setGraphTitle('CMap X: Log Y: Linear')
colormap = Colormap(name='gray',
normalization='linear',
vmin=None,
vmax=None)
self.plot.addImage(DATA_2D, legend="image 1",
origin=(1., 1.), scale=(1., 1.),
resetzoom=False, colormap=colormap)
self.plot.resetZoom()
def testPlotColormapGrayLogY(self):
self.plot.getYAxis()._setLogarithmic(True)
self.plot.setGraphTitle('CMap X: Linear Y: Log')
colormap = Colormap(name='gray',
normalization='linear',
vmin=None,
vmax=None)
self.plot.addImage(DATA_2D, legend="image 1",
origin=(1., 1.), scale=(1., 1.),
resetzoom=False, colormap=colormap)
self.plot.resetZoom()
def testPlotColormapGrayLogXY(self):
self.plot.getXAxis()._setLogarithmic(True)
self.plot.getYAxis()._setLogarithmic(True)
self.plot.setGraphTitle('CMap X: Log Y: Log')
colormap = Colormap(name='gray',
normalization='linear',
vmin=None,
vmax=None)
self.plot.addImage(DATA_2D, legend="image 1",
origin=(1., 1.), scale=(1., 1.),
resetzoom=False, colormap=colormap)
self.plot.resetZoom()
def testPlotRgbRgbaLogXY(self):
self.plot.getXAxis()._setLogarithmic(True)
self.plot.getYAxis()._setLogarithmic(True)
self.plot.setGraphTitle('RGB + RGBA X: Log Y: Log')
rgb = numpy.array(
(((0, 0, 0), (128, 0, 0), (255, 0, 0)),
((0, 128, 0), (0, 128, 128), (0, 128, 256))),
dtype=numpy.uint8)
self.plot.addImage(rgb, legend="rgb",
origin=(1, 1), scale=(10, 10),
resetzoom=False)
rgba = numpy.array(
(((0, 0, 0, .5), (.5, 0, 0, 1), (1, 0, 0, .5)),
((0, .5, 0, 1), (0, .5, .5, 1), (0, 1, 1, .5))),
dtype=numpy.float32)
self.plot.addImage(rgba, legend="rgba",
origin=(5., 5.), scale=(10., 10.),
resetzoom=False)
self.plot.resetZoom()
class TestPlotMarkerLog(PlotWidgetTestCase):
"""Basic tests for markers on log scales"""
# Test marker parameters
markers = [ # x, y, color, selectable, draggable
(10., 10., 'blue', False, False),
(20., 20., 'red', False, False),
(40., 100., 'green', True, False),
(40., 500., 'gray', True, True),
(60., 800., 'black', False, True),
]
def setUp(self):
super(TestPlotMarkerLog, self).setUp()
self.plot.getYAxis().setLabel('Rows')
self.plot.getXAxis().setLabel('Columns')
self.plot.getXAxis().setAutoScale(False)
self.plot.getYAxis().setAutoScale(False)
self.plot.setKeepDataAspectRatio(False)
self.plot.setLimits(1., 100., 1., 1000.)
self.plot.getXAxis()._setLogarithmic(True)
self.plot.getYAxis()._setLogarithmic(True)
def testPlotMarkerXLog(self):
self.plot.setGraphTitle('Markers X, Log axes')
for x, _, color, select, drag in self.markers:
name = str(x)
if select:
name += " sel."
if drag:
name += " drag"
self.plot.addXMarker(x, name, name, color, select, drag)
self.plot.resetZoom()
def testPlotMarkerYLog(self):
self.plot.setGraphTitle('Markers Y, Log axes')
for _, y, color, select, drag in self.markers:
name = str(y)
if select:
name += " sel."
if drag:
name += " drag"
self.plot.addYMarker(y, name, name, color, select, drag)
self.plot.resetZoom()
def testPlotMarkerPtLog(self):
self.plot.setGraphTitle('Markers Pt, Log axes')
for x, y, color, select, drag in self.markers:
name = "{0},{1}".format(x, y)
if select:
name += " sel."
if drag:
name += " drag"
self.plot.addMarker(x, y, name, name, color, select, drag)
self.plot.resetZoom()
class TestPlotItemLog(PlotWidgetTestCase):
"""Basic tests for items with log scale axes"""
# Polygon coordinates and color
polygons = [ # legend, x coords, y coords, color
('triangle', numpy.array((10, 30, 50)),
numpy.array((55, 70, 55)), 'red'),
('square', numpy.array((10, 10, 50, 50)),
numpy.array((10, 50, 50, 10)), 'green'),
('star', numpy.array((60, 70, 80, 60, 80)),
numpy.array((25, 50, 25, 40, 40)), 'blue'),
]
    # Rectangle coordinates and color
rectangles = [ # legend, x coords, y coords, color
('square 1', numpy.array((1., 10.)),
numpy.array((1., 10.)), 'red'),
('square 2', numpy.array((10., 20.)),
numpy.array((10., 20.)), 'green'),
('square 3', numpy.array((20., 30.)),
numpy.array((20., 30.)), 'blue'),
('rect 1', numpy.array((1., 30.)),
numpy.array((35., 40.)), 'black'),
('line h', numpy.array((1., 30.)),
numpy.array((45., 45.)), 'darkRed'),
]
def setUp(self):
super(TestPlotItemLog, self).setUp()
self.plot.getYAxis().setLabel('Rows')
self.plot.getXAxis().setLabel('Columns')
self.plot.getXAxis().setAutoScale(False)
self.plot.getYAxis().setAutoScale(False)
self.plot.setKeepDataAspectRatio(False)
self.plot.setLimits(1., 100., 1., 100.)
self.plot.getXAxis()._setLogarithmic(True)
self.plot.getYAxis()._setLogarithmic(True)
def testPlotItemPolygonLogFill(self):
self.plot.setGraphTitle('Item Fill Log')
for legend, xList, yList, color in self.polygons:
self.plot.addItem(xList, yList, legend=legend,
replace=False,
shape="polygon", fill=True, color=color)
self.plot.resetZoom()
def testPlotItemPolygonLogNoFill(self):
self.plot.setGraphTitle('Item No Fill Log')
for legend, xList, yList, color in self.polygons:
self.plot.addItem(xList, yList, legend=legend,
replace=False,
shape="polygon", fill=False, color=color)
self.plot.resetZoom()
def testPlotItemRectangleLogFill(self):
self.plot.setGraphTitle('Rectangle Fill Log')
for legend, xList, yList, color in self.rectangles:
self.plot.addItem(xList, yList, legend=legend,
replace=False,
shape="rectangle", fill=True, color=color)
self.plot.resetZoom()
def testPlotItemRectangleLogNoFill(self):
self.plot.setGraphTitle('Rectangle No Fill Log')
for legend, xList, yList, color in self.rectangles:
self.plot.addItem(xList, yList, legend=legend,
replace=False,
shape="rectangle", fill=False, color=color)
self.plot.resetZoom()
def suite():
testClasses = (TestPlotWidget,
TestPlotImage,
TestPlotCurve,
TestPlotScatter,
TestPlotMarker,
TestPlotItem,
TestPlotAxes,
TestPlotActiveCurveImage,
TestPlotEmptyLog,
TestPlotCurveLog,
TestPlotImageLog,
TestPlotMarkerLog,
TestPlotItemLog)
test_suite = unittest.TestSuite()
# Tests with matplotlib
for testClass in testClasses:
test_suite.addTest(parameterize(testClass, backend=None))
test_suite.addTest(parameterize(TestSpecialBackend, backend=u"mpl"))
if sys.version_info[0] == 2:
test_suite.addTest(parameterize(TestSpecialBackend, backend=b"mpl"))
if test_options.WITH_GL_TEST:
# Tests with OpenGL backend
for testClass in testClasses:
test_suite.addTest(parameterize(testClass, backend='gl'))
return test_suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
the-stack_0_12076 | # Copyright 2014-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for specifying BSON codec options."""
import datetime
from abc import abstractmethod
from collections import namedtuple
from bson.py3compat import ABC, abc, abstractproperty, string_type
from bson.binary import (ALL_UUID_REPRESENTATIONS,
PYTHON_LEGACY,
UUID_REPRESENTATION_NAMES)
_RAW_BSON_DOCUMENT_MARKER = 101
def _raw_document_class(document_class):
"""Determine if a document_class is a RawBSONDocument class."""
marker = getattr(document_class, '_type_marker', None)
return marker == _RAW_BSON_DOCUMENT_MARKER
class TypeEncoder(ABC):
"""Base class for defining type codec classes which describe how a
custom type can be transformed to one of the types BSON understands.
Codec classes must implement the ``python_type`` attribute, and the
``transform_python`` method to support encoding.
"""
@abstractproperty
def python_type(self):
"""The Python type to be converted into something serializable."""
pass
@abstractmethod
def transform_python(self, value):
"""Convert the given Python object into something serializable."""
pass
class TypeDecoder(ABC):
"""Base class for defining type codec classes which describe how a
BSON type can be transformed to a custom type.
Codec classes must implement the ``bson_type`` attribute, and the
``transform_bson`` method to support decoding.
"""
@abstractproperty
def bson_type(self):
"""The BSON type to be converted into our own type."""
pass
@abstractmethod
def transform_bson(self, value):
"""Convert the given BSON value into our own type."""
pass
class TypeCodec(TypeEncoder, TypeDecoder):
"""Base class for defining type codec classes which describe how a
custom type can be transformed to/from one of the types BSON already
understands, and can encode/decode.
Codec classes must implement the ``python_type`` attribute, and the
``transform_python`` method to support encoding, as well as the
``bson_type`` attribute, and the ``transform_bson`` method to support
decoding.
"""
pass
class TypeRegistry(object):
"""Encapsulates type codecs used in encoding and / or decoding BSON, as
well as the fallback encoder. Type registries cannot be modified after
instantiation.
``TypeRegistry`` can be initialized with an iterable of type codecs, and
a callable for the fallback encoder::
>>> from bson.codec_options import TypeRegistry
>>> type_registry = TypeRegistry([Codec1, Codec2, Codec3, ...],
... fallback_encoder)
:Parameters:
- `type_codecs` (optional): iterable of type codec instances. If
``type_codecs`` contains multiple codecs that transform a single
python or BSON type, the transformation specified by the type codec
occurring last prevails.
- `fallback_encoder` (optional): callable that accepts a single,
unencodable python value and transforms it into a type that BSON can
encode.
"""
def __init__(self, type_codecs=None, fallback_encoder=None):
self.__type_codecs = list(type_codecs or [])
self._fallback_encoder = fallback_encoder
self._encoder_map = {}
self._decoder_map = {}
if self._fallback_encoder is not None:
if not callable(fallback_encoder):
raise TypeError("fallback_encoder %r is not a callable" % (
fallback_encoder))
for codec in self.__type_codecs:
is_valid_codec = False
if isinstance(codec, TypeEncoder):
self._validate_type_encoder(codec)
is_valid_codec = True
self._encoder_map[codec.python_type] = codec.transform_python
if isinstance(codec, TypeDecoder):
is_valid_codec = True
self._decoder_map[codec.bson_type] = codec.transform_bson
if not is_valid_codec:
raise TypeError(
"Expected an instance of %s, %s, or %s, got %r instead" % (
TypeEncoder.__name__, TypeDecoder.__name__,
TypeCodec.__name__, codec))
def _validate_type_encoder(self, codec):
from bson import _BUILT_IN_TYPES
for pytype in _BUILT_IN_TYPES:
if issubclass(codec.python_type, pytype):
err_msg = ("TypeEncoders cannot change how built-in types are "
"encoded (encoder %s transforms type %s)" %
(codec, pytype))
raise TypeError(err_msg)
def __repr__(self):
return ('%s(type_codecs=%r, fallback_encoder=%r)' % (
self.__class__.__name__, self.__type_codecs,
self._fallback_encoder))
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return ((self._decoder_map == other._decoder_map) and
(self._encoder_map == other._encoder_map) and
(self._fallback_encoder == other._fallback_encoder))
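# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a concrete codec and
# registry for decimal.Decimal, following the pattern described in the
# TypeCodec and TypeRegistry docstrings above. The helper name and the use of
# bson.decimal128.Decimal128 are assumptions; nothing in this module calls it.
def _example_decimal_type_registry():
    from decimal import Decimal
    from bson.decimal128 import Decimal128

    class DecimalCodec(TypeCodec):
        python_type = Decimal     # custom Python type handled by this codec
        bson_type = Decimal128    # BSON type it is encoded to / decoded from

        def transform_python(self, value):
            # Encoding path: Decimal -> Decimal128.
            return Decimal128(value)

        def transform_bson(self, value):
            # Decoding path: Decimal128 -> Decimal.
            return value.to_decimal()

    return TypeRegistry([DecimalCodec()])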
_options_base = namedtuple(
'CodecOptions',
('document_class', 'tz_aware', 'uuid_representation',
'unicode_decode_error_handler', 'tzinfo', 'type_registry'))
class CodecOptions(_options_base):
"""Encapsulates options used encoding and / or decoding BSON.
The `document_class` option is used to define a custom type for use
decoding BSON documents. Access to the underlying raw BSON bytes for
a document is available using the :class:`~bson.raw_bson.RawBSONDocument`
type::
>>> from bson.raw_bson import RawBSONDocument
>>> from bson.codec_options import CodecOptions
>>> codec_options = CodecOptions(document_class=RawBSONDocument)
>>> coll = db.get_collection('test', codec_options=codec_options)
>>> doc = coll.find_one()
>>> doc.raw
'\\x16\\x00\\x00\\x00\\x07_id\\x00[0\\x165\\x91\\x10\\xea\\x14\\xe8\\xc5\\x8b\\x93\\x00'
The document class can be any type that inherits from
:class:`~collections.MutableMapping`::
>>> class AttributeDict(dict):
... # A dict that supports attribute access.
... def __getattr__(self, key):
... return self[key]
... def __setattr__(self, key, value):
... self[key] = value
...
>>> codec_options = CodecOptions(document_class=AttributeDict)
>>> coll = db.get_collection('test', codec_options=codec_options)
>>> doc = coll.find_one()
>>> doc._id
ObjectId('5b3016359110ea14e8c58b93')
See :doc:`/examples/datetimes` for examples using the `tz_aware` and
`tzinfo` options.
See :class:`~bson.binary.UUIDLegacy` for examples using the
`uuid_representation` option.
:Parameters:
- `document_class`: BSON documents returned in queries will be decoded
to an instance of this class. Must be a subclass of
:class:`~collections.MutableMapping`. Defaults to :class:`dict`.
- `tz_aware`: If ``True``, BSON datetimes will be decoded to timezone
aware instances of :class:`~datetime.datetime`. Otherwise they will be
naive. Defaults to ``False``.
- `uuid_representation`: The BSON representation to use when encoding
and decoding instances of :class:`~uuid.UUID`. Defaults to
:data:`~bson.binary.PYTHON_LEGACY`.
- `unicode_decode_error_handler`: The error handler to apply when
a Unicode-related error occurs during BSON decoding that would
otherwise raise :exc:`UnicodeDecodeError`. Valid options include
'strict', 'replace', and 'ignore'. Defaults to 'strict'.
- `tzinfo`: A :class:`~datetime.tzinfo` subclass that specifies the
timezone to/from which :class:`~datetime.datetime` objects should be
encoded/decoded.
- `type_registry`: Instance of :class:`TypeRegistry` used to customize
encoding and decoding behavior.
.. warning:: Care must be taken when changing
`unicode_decode_error_handler` from its default value ('strict').
The 'replace' and 'ignore' modes should not be used when documents
retrieved from the server will be modified in the client application
and stored back to the server.
"""
def __new__(cls, document_class=dict,
tz_aware=False, uuid_representation=PYTHON_LEGACY,
unicode_decode_error_handler="strict",
tzinfo=None, type_registry=None):
if not (issubclass(document_class, abc.MutableMapping) or
_raw_document_class(document_class)):
raise TypeError("document_class must be dict, bson.son.SON, "
"bson.raw_bson.RawBSONDocument, or a "
"sublass of collections.MutableMapping")
if not isinstance(tz_aware, bool):
raise TypeError("tz_aware must be True or False")
if uuid_representation not in ALL_UUID_REPRESENTATIONS:
raise ValueError("uuid_representation must be a value "
"from bson.binary.ALL_UUID_REPRESENTATIONS")
        if not (unicode_decode_error_handler is None or
                isinstance(unicode_decode_error_handler, string_type)):
            raise ValueError("unicode_decode_error_handler must be a string "
                             "or None")
if tzinfo is not None:
if not isinstance(tzinfo, datetime.tzinfo):
raise TypeError(
"tzinfo must be an instance of datetime.tzinfo")
if not tz_aware:
raise ValueError(
"cannot specify tzinfo without also setting tz_aware=True")
type_registry = type_registry or TypeRegistry()
if not isinstance(type_registry, TypeRegistry):
raise TypeError("type_registry must be an instance of TypeRegistry")
return tuple.__new__(
cls, (document_class, tz_aware, uuid_representation,
unicode_decode_error_handler, tzinfo, type_registry))
def _arguments_repr(self):
"""Representation of the arguments used to create this object."""
document_class_repr = (
'dict' if self.document_class is dict
else repr(self.document_class))
uuid_rep_repr = UUID_REPRESENTATION_NAMES.get(self.uuid_representation,
self.uuid_representation)
return ('document_class=%s, tz_aware=%r, uuid_representation=%s, '
'unicode_decode_error_handler=%r, tzinfo=%r, '
'type_registry=%r' %
(document_class_repr, self.tz_aware, uuid_rep_repr,
self.unicode_decode_error_handler, self.tzinfo,
self.type_registry))
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self._arguments_repr())
def with_options(self, **kwargs):
"""Make a copy of this CodecOptions, overriding some options::
>>> from bson.codec_options import DEFAULT_CODEC_OPTIONS
>>> DEFAULT_CODEC_OPTIONS.tz_aware
False
>>> options = DEFAULT_CODEC_OPTIONS.with_options(tz_aware=True)
>>> options.tz_aware
True
.. versionadded:: 3.5
"""
return CodecOptions(
kwargs.get('document_class', self.document_class),
kwargs.get('tz_aware', self.tz_aware),
kwargs.get('uuid_representation', self.uuid_representation),
kwargs.get('unicode_decode_error_handler',
self.unicode_decode_error_handler),
kwargs.get('tzinfo', self.tzinfo),
kwargs.get('type_registry', self.type_registry)
)
DEFAULT_CODEC_OPTIONS = CodecOptions()
def _parse_codec_options(options):
"""Parse BSON codec options."""
return CodecOptions(
document_class=options.get(
'document_class', DEFAULT_CODEC_OPTIONS.document_class),
tz_aware=options.get(
'tz_aware', DEFAULT_CODEC_OPTIONS.tz_aware),
uuid_representation=options.get(
'uuidrepresentation', DEFAULT_CODEC_OPTIONS.uuid_representation),
unicode_decode_error_handler=options.get(
'unicode_decode_error_handler',
DEFAULT_CODEC_OPTIONS.unicode_decode_error_handler),
tzinfo=options.get('tzinfo', DEFAULT_CODEC_OPTIONS.tzinfo),
type_registry=options.get(
'type_registry', DEFAULT_CODEC_OPTIONS.type_registry))
|
the-stack_0_12078 | from flask import url_for
def test_ping(client):
resp = client.get(url_for('main.ping'))
assert resp.status_code == 200
resp = resp.json
assert resp == {
'addition': {'msg': "it's alive!"},
'description': {},
'result': True,
'status': 200,
}
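# A minimal view that would satisfy this test (an illustrative sketch, not
# the project's actual implementation; the blueprint name 'main' and the use
# of jsonify are assumptions inferred from url_for('main.ping')):
#
#     from flask import Blueprint, jsonify
#
#     main = Blueprint('main', __name__)
#
#     @main.route('/ping')
#     def ping():
#         return jsonify(addition={'msg': "it's alive!"}, description={},
#                        result=True, status=200)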
|
the-stack_0_12079 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ardour2fxp.py
#
"""Convert one or more Ardour VST presets XML file to VST2 FXP preset files."""
import argparse
import os
import sys
from base64 import b64decode
from collections import namedtuple
from os.path import exists, isdir, join
from struct import calcsize, pack
from xml.etree import ElementTree as ET
FXP_HEADER_FMT = '>4si4s4i28s'
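# Field layout of FXP_HEADER_FMT (big-endian), mirroring the pack() call in
# main(): chunk magic ('4s'), total byte size ('i'), fx magic ('4s'), four
# 32-bit ints ('4i': format version, plugin ID, plugin version, number of
# parameters) and a 28-byte preset label ('28s').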
FXP_PREAMBEL_SIZE = calcsize('>4si')
FXP_HEADER_SIZE = calcsize(FXP_HEADER_FMT)
FXP_FORMAT_VERSION = 1
CHUNK_MAGIC = b'CcnK'
FX_MAGIC_PARAMS = b'FxCk'
FX_MAGIC_CHUNK = b'FPCh'
FX_DEFAULT_VERSION = 1
PRESET_BASE_FIELDS = (
'plugin_id',
'plugin_version',
'hash',
'label',
'num_params',
)
ChunkPreset = namedtuple('ChunkPreset', PRESET_BASE_FIELDS + ('chunk',))
Preset = namedtuple('Preset', PRESET_BASE_FIELDS + ('params',))
def label2fn(label):
"""Replace characters in label unsuitable for filenames with underscore."""
return label.strip().replace(' ', '_')
def parse_ardourpresets(root):
"""Parse ardour VST presets XML document.
Returns list of Preset or ChunkPreset instances.
"""
if root.tag != 'VSTPresets':
raise ValueError("Root node must be 'VSTPresets'.")
presets = []
for preset in root:
if preset.tag not in ('Preset', 'ChunkPreset'):
print("Invalid preset type: {}".format(preset.tag))
continue
try:
type, plugin_id, hash = preset.attrib['uri'].split(':', 2)
plugin_id = int(plugin_id)
version = preset.attrib.get('version')
num_params = preset.attrib.get('numParams')
label = preset.attrib['label']
if version is not None:
version = int(version)
if num_params is not None:
num_params = int(num_params)
if type != "VST":
raise ValueError
except (KeyError, ValueError):
print("Invalid preset format: {}".format(preset.attrib))
continue
if preset.tag == 'Preset':
params = {int(param.attrib['index']): param.attrib['value']
for param in preset}
params = [float(value) for _, value in sorted(params.items())]
presets.append(Preset(plugin_id, version, hash, label, num_params,
params))
elif preset.tag == 'ChunkPreset':
presets.append(ChunkPreset(plugin_id, version, hash, label,
num_params, b64decode(preset.text)))
return presets
def main(args=None):
argparser = argparse.ArgumentParser()
argparser.add_argument('-v', '--fx-version', type=int,
help="VST plugin version number")
argparser.add_argument('-f', '--force', action="store_true",
help="Overwrite existing destination file(s)")
argparser.add_argument('-o', '--output-dir',
help="Ardour presets output directory")
argparser.add_argument('infiles', nargs='*', metavar='XML',
help="Ardour VST presets XML (input) file(s)")
args = argparser.parse_args(args)
output_dir = args.output_dir or os.getcwd()
if not args.infiles:
argparser.print_help()
return 2
for infile in args.infiles:
try:
root_node = ET.parse(infile).getroot()
presets = parse_ardourpresets(root_node)
except Exception as exc:
return "Error reading Ardour preset file '{}': {}".format(
infile, exc)
if not presets:
return "No valid presets found in input file(s)."
for preset in presets:
plugin_id = pack('>I', preset.plugin_id).decode('ascii')
dstdir = join(output_dir, plugin_id)
if not isdir(dstdir):
os.makedirs(dstdir)
fxp_fn = join(dstdir, label2fn(preset.label)) + '.fxp'
if exists(fxp_fn) and not args.force:
print("FXP output file '{}' already exists. Skipping".format(
fxp_fn))
continue
with open(fxp_fn, 'wb') as fp:
if args.fx_version is not None:
fx_version = args.fx_version
elif preset.plugin_version is not None:
fx_version = preset.plugin_version
else:
fx_version = FX_DEFAULT_VERSION
if isinstance(preset, Preset):
if preset.num_params is None:
num_params = len(preset.params)
else:
num_params = preset.num_params
params_fmt = '>{:d}f'.format(num_params)
size = (FXP_HEADER_SIZE - FXP_PREAMBEL_SIZE +
calcsize(params_fmt))
fx_magic = FX_MAGIC_PARAMS
elif isinstance(preset, ChunkPreset):
if preset.num_params is None:
num_params = int(len(preset.chunk) / 4)
else:
num_params = preset.num_params
chunk_len = len(preset.chunk)
chunk_size = pack('>i', chunk_len)
size = (FXP_HEADER_SIZE - FXP_PREAMBEL_SIZE +
len(chunk_size) + chunk_len)
fx_magic = FX_MAGIC_CHUNK
else:
raise TypeError("Wrong preset type: {!r}".format(preset))
header = pack(
FXP_HEADER_FMT,
CHUNK_MAGIC,
size,
fx_magic,
FXP_FORMAT_VERSION,
preset.plugin_id,
fx_version,
num_params,
preset.label.encode('latin1', errors='replace')
)
fp.write(header)
if isinstance(preset, Preset):
data = pack(params_fmt, *preset.params)
fp.write(data)
elif isinstance(preset, ChunkPreset):
fp.write(chunk_size)
fp.write(preset.chunk)
if __name__ == '__main__':
sys.exit(main() or 0)
|
the-stack_0_12081 | try:
import _thread
except ModuleNotFoundError:
_thread = None
class Producer:
"""
Uses a list instead of a set to ensure correct ordering of subscriptions.
Does not allow lambda functions to be used.
    :param name: name of the producer
    :param validation: a function that receives the arguments passed to emit
        and checks their values / types, raising a ValueError if they are
        invalid
    :param as_threads: option to run handlers as threads
    :raises NotImplementedError: if the MicroPython build does not provide
        _thread and the as_threads keyword is set to True.
"""
def __init__(self, *args, name=None, validation=None, as_threads=False):
if as_threads and not _thread:
raise NotImplementedError(
'threading is not available in this distribution')
self.__handlers = []
self.__name = name
self.__validation = validation
self.__as_threads = as_threads
# private methods
def _add_handler(self, handler_func):
if handler_func in self.__handlers:
raise ValueError('handler is already subscribed.')
self.__handlers.append(handler_func)
return self
def _remove_handler(self, handler_func):
        if handler_func not in self.__handlers:
raise ValueError('handler is not subscribed to producer')
self.__handlers.remove(handler_func)
return self
# public methods
def subscribe(self, handler_func):
"""
Subscribe a function as a callback to the producer.
        :param handler_func: a callback function that will be invoked
            when a value is sent to the emit method. The function cannot
            be a lambda.
        :raises ValueError: if the handler is a lambda or already subscribed.
"""
if handler_func.__name__ == '<lambda>':
raise ValueError('handler cannot be a lambda function')
return self._add_handler(handler_func)
def unsubscribe(self, handler_func):
"""
Unsubscribe a callback from the producer.
        :raises ValueError: if the handler is not subscribed to this producer.
"""
return self._remove_handler(handler_func)
def emit(self, *args, **kwargs):
"""
Send arguments and keyword arguments to subscribed functions.
Arguments are first passed through the validation function and then
passed sequentially to each subscribed callback.
If as_threads is set to True callbacks are started as separate threads.
"""
if self.__validation:
self.__validation(*args, **kwargs)
for handler in self.__handlers:
if self.__as_threads and _thread:
_thread.start_new_thread(handler, args, kwargs)
else:
handler(*args, **kwargs)
# datamodel methods
def __repr__(self):
return "Producer(%s)" % self.__name
def __len__(self):
return len(self.__handlers)
__call__ = emit
__iadd__ = subscribe
__isub__ = unsubscribe
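# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The producer
# name, validation rule and handler below are made up for demonstration;
# nothing in this module calls this function.
def _example_usage():
    def validate(value):
        if not isinstance(value, (int, float)):
            raise ValueError('value must be a number')

    def log_value(value):
        print('received', value)

    temperature = Producer(name='temperature', validation=validate)
    temperature.subscribe(log_value)    # equivalent to: temperature += log_value
    temperature.emit(21.5)              # equivalent to: temperature(21.5)
    temperature.unsubscribe(log_value)  # equivalent to: temperature -= log_value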
|
the-stack_0_12082 | # From https://github.com/taki0112/ResNet-Tensorflow.
import tensorflow as tf
import tensorflow.contrib as tf_contrib
weight_init = tf_contrib.layers.variance_scaling_initializer()
weight_regularizer = tf_contrib.layers.l2_regularizer(0.0001)
def conv(x, channels, kernel=4, stride=2, padding='SAME', use_bias=True, scope='conv_0'):
with tf.variable_scope(scope):
x = tf.layers.conv2d(inputs=x, filters=channels,
kernel_size=kernel, kernel_initializer=weight_init,
kernel_regularizer=weight_regularizer,
strides=stride, use_bias=use_bias, padding=padding)
return x
def fully_conneted(x, units, use_bias=True, scope='fully_0'):
with tf.variable_scope(scope):
x = flatten(x)
x = tf.layers.dense(x, units=units, kernel_initializer=weight_init,
kernel_regularizer=weight_regularizer, use_bias=use_bias)
return x
def resblock(x_init, channels, is_training=True, use_bias=True, downsample=False, scope='resblock'):
with tf.variable_scope(scope):
x = batch_norm(x_init, is_training, scope='batch_norm_0')
x = relu(x)
if downsample:
x = conv(x, channels, kernel=3, stride=2,
use_bias=use_bias, scope='conv_0')
x_init = conv(x_init, channels, kernel=1, stride=2,
use_bias=use_bias, scope='conv_init')
else:
x = conv(x, channels, kernel=3, stride=1,
use_bias=use_bias, scope='conv_0')
x = batch_norm(x, is_training, scope='batch_norm_1')
x = relu(x)
x = conv(x, channels, kernel=3, stride=1,
use_bias=use_bias, scope='conv_1')
return x + x_init
def bottle_resblock(x_init, channels, is_training=True, use_bias=True, downsample=False, scope='bottle_resblock'):
with tf.variable_scope(scope):
x = batch_norm(x_init, is_training, scope='batch_norm_1x1_front')
shortcut = relu(x)
x = conv(shortcut, channels, kernel=1, stride=1,
use_bias=use_bias, scope='conv_1x1_front')
x = batch_norm(x, is_training, scope='batch_norm_3x3')
x = relu(x)
if downsample:
x = conv(x, channels, kernel=3, stride=2,
use_bias=use_bias, scope='conv_0')
shortcut = conv(shortcut, channels*4, kernel=1,
stride=2, use_bias=use_bias, scope='conv_init')
else:
x = conv(x, channels, kernel=3, stride=1,
use_bias=use_bias, scope='conv_0')
shortcut = conv(shortcut, channels * 4, kernel=1,
stride=1, use_bias=use_bias, scope='conv_init')
x = batch_norm(x, is_training, scope='batch_norm_1x1_back')
x = relu(x)
x = conv(x, channels*4, kernel=1, stride=1,
use_bias=use_bias, scope='conv_1x1_back')
return x + shortcut
def get_residual_layer(res_n):
x = []
if res_n == 18:
x = [2, 2, 2, 2]
if res_n == 34:
x = [3, 4, 6, 3]
if res_n == 50:
x = [3, 4, 6, 3]
if res_n == 101:
x = [3, 4, 23, 3]
if res_n == 152:
x = [3, 8, 36, 3]
return x
def flatten(x):
return tf.layers.flatten(x)
def global_avg_pooling(x):
gap = tf.reduce_mean(x, axis=[1, 2], keepdims=True)
return gap
def avg_pooling(x):
return tf.layers.average_pooling2d(x, pool_size=2, strides=2, padding='SAME')
def relu(x):
return tf.nn.relu(x)
def batch_norm(x, is_training=True, scope='batch_norm'):
return tf_contrib.layers.batch_norm(x,
decay=0.9, epsilon=1e-05,
center=True, scale=True, updates_collections=None,
is_training=is_training, scope=scope)
def classification_loss(logit, label):
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(labels=label, logits=logit))
prediction = tf.equal(tf.argmax(logit, -1), tf.argmax(label, -1))
accuracy = tf.reduce_mean(tf.cast(prediction, tf.float32))
return loss, accuracy
def classification_loss_weighted(logit, label):
loss = tf.reduce_mean(tf.nn.weighted_cross_entropy_with_logits(
targets=label, logits=logit, pos_weight=2))
# cost1 = tf.reduce_mean(tf.nn.weighted_cross_entropy_with_logits(targets=y, logits=pred,pos_weight=1))
prediction = tf.equal(tf.argmax(logit, -1), tf.argmax(label, -1))
accuracy = tf.reduce_mean(tf.cast(prediction, tf.float32))
return loss, accuracy
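# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): composes the helper
# ops above into a small ResNet-18-style graph. The input tensor, base
# channel count and class count are arbitrary assumptions.
def _toy_resnet_logits(x, is_training=True, res_n=18, label_dim=10, ch=32):
    residual_list = get_residual_layer(res_n)
    x = conv(x, channels=ch, kernel=3, stride=1, scope='conv_stem')
    for i in range(residual_list[0]):
        x = resblock(x, channels=ch, is_training=is_training,
                     downsample=False, scope='resblock0_%d' % i)
    # Further stages would double the channels and downsample, e.g.:
    # x = resblock(x, channels=ch * 2, is_training=is_training,
    #              downsample=True, scope='resblock1_0')
    x = batch_norm(x, is_training, scope='batch_norm_last')
    x = relu(x)
    x = global_avg_pooling(x)                 # [N, H, W, C] -> [N, 1, 1, C]
    return fully_conneted(x, units=label_dim, scope='logit')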
|
the-stack_0_12083 | import copy
import numpy
import logging
from six.moves import xrange
import theano
from theano import tensor, scalar, gof
from theano.compile import optdb
from theano.compile.ops import shape_i
from theano.gof import (local_optimizer, EquilibriumDB,
SequenceDB, Optimizer, toolbox)
from theano.gof.optdb import LocalGroupDB
from theano.scalar.basic import Scalar, Pow, Cast
from theano.scan_module import scan_utils, scan_op, scan_opt
from theano.tensor.nnet.conv import ConvOp
from theano.tests.breakpoint import PdbBreakpoint
from .type import GpuArrayType, GpuArrayConstant, get_context
from .basic_ops import (as_gpuarray_variable, infer_context_name,
host_from_gpu, GpuToGpu,
HostFromGpu, GpuFromHost,
GpuSplit, GpuContiguous,
GpuAlloc, GpuAllocEmpty, GpuReshape,
GpuEye, gpu_join, GpuJoin)
from .blas import (gpu_dot22, GpuGemv, GpuGemm, GpuGer,
gpugemm_no_inplace)
from .conv import GpuConv
from .nnet import (GpuCrossentropySoftmaxArgmax1HotWithBias,
GpuCrossentropySoftmax1HotWithBiasDx,
GpuSoftmaxWithBias, GpuSoftmax)
from .elemwise import (GpuElemwise, GpuDimShuffle, GpuCAReduceCuda,
GpuCAReduceCPY)
from .subtensor import (GpuIncSubtensor, GpuSubtensor,
GpuAdvancedSubtensor1,
GpuAdvancedIncSubtensor1,
GpuAdvancedIncSubtensor1_dev20)
from .opt_util import alpha_merge, output_merge
_logger = logging.getLogger("theano.sandbox.gpuarray.opt")
gpu_optimizer = EquilibriumDB()
gpu_cut_copies = EquilibriumDB()
gpu_seqopt = SequenceDB()
# Don't register this right now
conv_groupopt = LocalGroupDB()
conv_groupopt.__name__ = "gpua_conv_opts"
gpu_seqopt.register('gpuarray_local_optimiziations', gpu_optimizer, 1,
'fast_compile', 'fast_run', 'inplace', 'gpuarray')
gpu_seqopt.register('gpuarray_cut_transfers', gpu_cut_copies, 2,
'fast_compile', 'fast_run', 'gpuarray')
# do not add 'fast_run' to these two as this would always enable gpuarray mode
optdb.register('gpuarray_opt', gpu_seqopt,
optdb.__position__.get('add_destroy_handler', 49.5) - 1,
'gpuarray')
def register_opt(*tags, **kwargs):
def f(local_opt):
name = (kwargs and kwargs.pop('name')) or local_opt.__name__
gpu_optimizer.register(name, local_opt, 'fast_run', 'gpuarray', *tags)
return local_opt
return f
register_opt('fast_compile')(theano.tensor.opt.local_track_shape_i)
gpu_optimizer.register('local_remove_all_assert',
theano.tensor.opt.local_remove_all_assert,
'unsafe')
def safe_to_gpu(x, ctx_name):
if isinstance(x.type, tensor.TensorType):
return GpuFromHost(ctx_name)(x)
else:
return x
def safe_to_cpu(x):
if isinstance(x.type, GpuArrayType):
return host_from_gpu(x)
else:
return x
def op_lifter(OP, cuda_only=False):
"""
OP(..., host_from_gpu(), ...) -> host_from_gpu(GpuOP(...))
gpu_from_host(OP(inp0, ...)) -> GpuOP(inp0, ...)
"""
def f(maker):
def local_opt(node):
if type(node.op) in OP:
# Either one of our inputs is on the gpu or
# all of our clients are on the gpu
replace = False
# TODO: Maybe set context_name with infer_context_name()?
context_name = None
# We replace if any input is a host_from_gpu
for i in node.inputs:
if i.owner and i.owner.op == host_from_gpu:
context_name = i.owner.inputs[0].type.context_name
replace = True
break
if not replace:
# We replace if *all* clients are on the GPU
clients = [c for o in node.outputs for c in o.clients]
replace = len(clients) != 0
for c, idx in clients:
if (c == 'output' or
not isinstance(c.op, GpuFromHost)):
replace = False
# TODO: check that the clients want the same context?
if replace:
# All clients are GpuFromHost and we have at least one
context_name = clients[0][0].op.context_name
# Check if we should replace
if (not replace or
(cuda_only and
get_context(context_name).kind != 'cuda')):
return False
new_op = maker(node, context_name)
# This is needed as sometimes new_op inherits from OP.
if new_op and new_op != node.op:
if isinstance(new_op, theano.Op):
# tag the inputs with the context in case
# the context was derived from the outputs
def tag(i, ctx):
i.tag.context_name = ctx
return i
inputs = [tag(i, context_name) for i in node.inputs]
return [safe_to_cpu(o) for o in
new_op(*inputs, return_list=True)]
elif isinstance(new_op, (tuple, list)):
return [safe_to_cpu(o) for o in new_op]
else: # suppose it is a variable on the GPU
return [host_from_gpu(new_op)]
return False
local_opt.__name__ = maker.__name__
return local_optimizer(OP)(local_opt)
return f
class InputToGpuOptimizer(Optimizer):
"""
Transfer the input to the gpu to start the rolling wave.
"""
def add_requirements(self, fgraph):
fgraph.attach_feature(toolbox.ReplaceValidate())
def apply(self, fgraph):
for input in fgraph.inputs:
if isinstance(input.type, GpuArrayType):
continue
if (len(input.clients) == 1 and
(input.clients[0][0] == 'output' or
isinstance(input.clients[0][0].op, GpuFromHost))):
continue
ctx_name = getattr(input.tag, 'context_name', None)
try:
new_input = host_from_gpu(GpuFromHost(ctx_name)(input))
fgraph.replace_validate(input, new_input,
"InputToGpuOptimizer")
except TypeError:
# This could fail if the inputs are not TensorTypes
pass
except ValueError:
# If there is no context tag and no default context
# then it stays on the CPU
if not hasattr(input.tag, 'context_name'):
raise
pass
gpu_seqopt.register('InputToGpuArrayOptimizer', InputToGpuOptimizer(),
0, 'fast_run', 'fast_compile', 'merge')
@local_optimizer([GpuFromHost, GpuToGpu, host_from_gpu])
def local_cut_gpu_transfers(node):
# gpu[ab] -> host -> gpub
if (isinstance(node.op, GpuFromHost) and
node.inputs[0].owner and
node.inputs[0].owner.op == host_from_gpu):
other = node.inputs[0].owner.inputs[0]
if node.op.context_name == other.type.context_name:
return [other]
else:
return [GpuToGpu(node.op.context_name)(other)]
# ? -> gpua -> host
elif (node.op == host_from_gpu and
node.inputs[0].owner):
n2 = node.inputs[0].owner
# host ->
if isinstance(n2.op, GpuFromHost):
return [n2.inputs[0]]
# gpub ->
if isinstance(n2.op, GpuToGpu):
return [host_from_gpu(n2.inputs[0])]
# ? -> gpua -> gpub
elif isinstance(node.op, GpuToGpu):
# Transfer within same context
if node.inputs[0].type.context_name == node.op.context_name:
return [node.inputs[0]]
if node.inputs[0].owner:
n2 = node.inputs[0].owner
# host ->
if isinstance(n2.op, GpuFromHost):
return [GpuFromHost(node.op.context_name)(n2.inputs[0])]
# gpuc ->
if isinstance(n2.op, GpuToGpu):
if node.op.context_name == n2.inputs[0].type.context_name:
return [n2.inputs[0]]
else:
return [node.op(n2.inputs[0])]
gpu_cut_copies.register('cut_gpua_host_transfers', local_cut_gpu_transfers,
'fast_compile', 'fast_run', 'inplace', 'gpuarray')
gpu_cut_copies.register('cut_gpua_constant_transfers',
tensor.opt.constant_folding,
'fast_compile', 'fast_run', 'gpuarray')
optdb['canonicalize'].register('local_cut_gpua_host_gpua',
local_cut_gpu_transfers,
'fast_compile', 'fast_run', 'gpuarray')
@register_opt('fast_compile')
@local_optimizer([tensor.Alloc])
def local_gpuaalloc2(node):
"""
Join(axis, {Alloc or HostFromGPU}, ...) -> Join(axis, GpuAlloc, Alloc, ...)
Moves an alloc that is an input to join to the gpu.
"""
try:
get_context(None)
except ValueError:
# If there is no default context then we do not perform the move here.
return
if (isinstance(node.op, tensor.Alloc) and
all(c != 'output' and
c.op == tensor.join and
all(i.owner and
i.owner.op in [host_from_gpu, tensor.alloc]
for i in c.inputs[1:])
for c, idx in node.outputs[0].clients)):
return [host_from_gpu(GpuAlloc(None)(*node.inputs))]
@register_opt('fast_compile')
@op_lifter([tensor.Alloc])
def local_gpuaalloc(node, context_name):
return GpuAlloc(context_name)(*node.inputs)
@register_opt()
@local_optimizer([GpuAlloc])
def local_gpualloc_memset_0(node):
if isinstance(node.op, GpuAlloc) and not node.op.memset_0:
inp = node.inputs[0]
if (isinstance(inp, GpuArrayConstant) and
inp.data.size == 1 and
(numpy.asarray(inp.data) == 0).all()):
new_op = GpuAlloc(node.op.context_name, memset_0=True)
return [new_op(*node.inputs)]
@register_opt()
@local_optimizer([GpuContiguous])
def local_gpu_contiguous_gpu_contiguous(node):
"""
gpu_contiguous(gpu_contiguous(x)) -> gpu_contiguous(x)
"""
if isinstance(node.op, GpuContiguous):
inp = node.inputs[0]
if inp.owner and isinstance(inp.owner.op, GpuContiguous):
return [inp]
@register_opt('fast_compile')
@op_lifter([tensor.Reshape])
def local_gpureshape(node, context_name):
op = node.op
name = op.name
if name:
name = 'Gpu' + name
    res = GpuReshape(op.ndim, name)  # use the 'Gpu'-prefixed name
return res
@register_opt('fast_compile')
@op_lifter([tensor.Rebroadcast])
def local_gpu_rebroadcast(node, context_name):
if isinstance(node.inputs[0].owner.op, HostFromGpu):
return node.op(node.inputs[0].owner.inputs[0])
@register_opt('fast_compile')
@op_lifter([tensor.Flatten])
def local_gpuflatten(node, context_name):
op = node.op
shp = []
if op.outdim != 1:
shp = [node.inputs[0].shape[i] for i in range(op.outdim - 1)]
shp += [-1]
res = GpuReshape(op.outdim, None)
o = res(node.inputs[0], theano.tensor.as_tensor_variable(shp))
return o
@register_opt('fast_compile')
@op_lifter([tensor.Elemwise])
def local_gpu_elemwise(node, context_name):
op = node.op
scal_op = op.scalar_op
name = op.name
if name:
name = 'Gpu' + name
if len(node.outputs) > 1:
return
res = GpuElemwise(scal_op, name=name,
inplace_pattern=copy.copy(op.inplace_pattern),
nfunc_spec=op.nfunc_spec)
# If the elemwise operation is a pow, casts might be required on the
# inputs and or outputs because only the (float, float)->float and
# (double, double)->double cases are implemented at the moment.
if isinstance(op.scalar_op, Pow):
# Only transfer the computation on the gpu if the output dtype is
# floating point. Else, give up on the transfer to the gpu.
out_dtype = node.outputs[0].dtype
if out_dtype not in ['float16', 'float32', 'float64']:
return
# Transfer the inputs on the GPU and cast them to the right dtype.
new_inputs = []
for inp in node.inputs:
if inp.dtype != out_dtype:
gpu_cast_op = GpuElemwise(Cast(Scalar(out_dtype)))
new_inputs.append(gpu_cast_op(as_gpuarray_variable(inp)))
else:
new_inputs.append(as_gpuarray_variable(inp))
# Perform the exponent on the gpu and transfer the output back to the
# cpu.
gpu_output = res(*new_inputs)
cpu_output = host_from_gpu(gpu_output)
return [cpu_output]
else:
return res
def max_inputs_to_GpuElemwise(node):
ptr_size = 8
int_size = 4
# we take the limit from CUDA for now
argument_limit = 232
ndim = node.inputs[0].type.ndim
# number of elements and shape
size_param_mandatory = (int_size * (ndim + 1)) + \
(ptr_size + int_size * ndim) * len(node.outputs)
nb_bytes_avail = argument_limit - size_param_mandatory
nb_bytes_per_input = ptr_size + ndim * int_size
max_nb_inputs = nb_bytes_avail // nb_bytes_per_input
return max_nb_inputs
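# Illustrative worked example of the limit above (added for clarity, not in
# the original source): for an elemwise node whose input has ndim=2 and which
# has a single output, size_param_mandatory = 4 * (2 + 1) + (8 + 4 * 2) * 1
# = 28 bytes, leaving 232 - 28 = 204 bytes for inputs; each input costs
# 8 + 2 * 4 = 16 bytes, so at most 204 // 16 = 12 inputs fit in the kernel
# argument list.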
gpu_local_elemwise_fusion = tensor.opt.local_elemwise_fusion_op(
GpuElemwise,
max_inputs_to_GpuElemwise)
optdb.register('gpua_elemwise_fusion',
tensor.opt.FusionOptimizer(gpu_local_elemwise_fusion), 71.00,
'fast_run', 'fusion', 'local_elemwise_fusion', 'gpuarray')
inplace_gpu_elemwise_opt = tensor.opt.inplace_elemwise_optimizer_op(
GpuElemwise)
optdb.register('gpua_inplace_opt', inplace_gpu_elemwise_opt, 75,
'inplace_elemwise_optimizer', 'fast_run', 'inplace', 'gpuarray')
@register_opt('fast_compile')
@op_lifter([tensor.DimShuffle])
def local_gpua_dimshuffle(node, context_name):
return GpuDimShuffle(node.op.input_broadcastable,
node.op.new_order)
@register_opt('fast_compile')
@op_lifter([tensor.SpecifyShape])
def local_gpua_specifyShape(node, context_name):
if isinstance(node.inputs[0].type, GpuArrayType):
return
inp = [GpuFromHost(context_name)(node.inputs[0])] + node.inputs[1:]
return tensor.specify_shape(*inp)
@register_opt('fast_compile')
@op_lifter([theano.compile.ops.Shape])
def local_gpua_shape(node, context_name):
# op_lifter will call this opt too frequently as the output is
# always on the CPU.
if isinstance(node.inputs[0].type, GpuArrayType):
return
return [GpuFromHost(context_name)(node.inputs[0]).shape]
def gpu_print_wrapper(op, cnda):
op.old_op.global_fn(op.old_op, numpy.asarray(cnda))
@register_opt('fast_compile')
@op_lifter([tensor.printing.Print])
def local_gpu_print_op(node, context_name):
x, = node.inputs
gpu_x, = x.owner.inputs
new_op = node.op.__class__(global_fn=gpu_print_wrapper)
new_op.old_op = node.op
return new_op(gpu_x)
@register_opt('fast_compile')
@local_optimizer([PdbBreakpoint])
def local_gpu_pdbbreakpoint_op(node):
if isinstance(node.op, PdbBreakpoint):
old_inputs = node.inputs
old_outputs = node.outputs
new_inputs = node.inputs[:1]
input_transfered = []
        # Go through the monitored variables, transferring to the GPU only
        # those for which the input comes from the GPU or whose output will
        # be transferred to the GPU.
nb_monitored_vars = len(node.outputs)
for i in range(nb_monitored_vars):
inp = old_inputs[i + 1]
out = old_outputs[i]
input_is_from_gpu = (inp.owner and
isinstance(inp.owner.op, HostFromGpu))
output_goes_to_gpu = False
for c in out.clients:
if c == 'output':
continue
if isinstance(c[0].op, GpuFromHost):
output_goes_to_gpu = True
context_name = c[0].op.context_name
break
if input_is_from_gpu:
# The op should be applied on the GPU version of the input
new_inputs.append(inp.owner.inputs[0])
input_transfered.append(True)
elif output_goes_to_gpu:
                # The input should be transferred to the GPU
new_inputs.append(GpuFromHost(context_name)(inp))
input_transfered.append(True)
else:
# No transfer is required.
new_inputs.append(inp)
input_transfered.append(False)
# Only continue the optimization if at least one input has been
        # transferred to the GPU
if not any(input_transfered):
return False
# Apply the op on the new inputs
new_op_outputs = node.op(*new_inputs, return_list=True)
# Propagate the transfer to the gpu through the outputs that require
# it
new_outputs = []
for i in range(len(new_op_outputs)):
if input_transfered[i]:
new_outputs.append(host_from_gpu(new_op_outputs[i]))
else:
new_outputs.append(new_op_outputs[i])
return new_outputs
return False
@register_opt('fast_compile')
@op_lifter([tensor.Join])
def local_gpua_join(node, context_name):
return gpu_join
@register_opt('fast_compile')
@local_optimizer([GpuJoin])
def local_gpuajoin_1(node):
# join of a single element
if (isinstance(node.op, GpuJoin) and
len(node.inputs) == 2):
return [node.inputs[1]]
@register_opt('fast_compile')
@op_lifter([tensor.Split])
def local_gpua_split(node, context_name):
return GpuSplit(node.op.len_splits)
@register_opt('fast_compile')
@op_lifter([tensor.Subtensor])
def local_gpua_subtensor(node, context_name):
x = node.inputs[0]
if (x.owner and isinstance(x.owner.op, HostFromGpu)):
gpu_x = x.owner.inputs[0]
if (gpu_x.owner and
isinstance(gpu_x.owner.op, GpuFromHost) and
# And it is a shared var or an input of the graph.
not gpu_x.owner.inputs[0].owner):
if len(x.clients) == 1:
if any([n == 'output' or any([isinstance(v.type, GpuArrayType)
for v in n.inputs + n.outputs])
for n, _ in node.outputs[0].clients]):
return
else:
return [host_from_gpu(gpu_x.owner.op(node.outputs[0]))]
return GpuSubtensor(node.op.idx_list)
@register_opt('fast_compile')
@op_lifter([tensor.IncSubtensor])
def local_gpua_incsubtensor(node, context_name):
return GpuIncSubtensor(node.op.idx_list, node.op.inplace,
node.op.set_instead_of_inc,
node.op.destroyhandler_tolerate_aliased)
@register_opt('fast_compile')
@op_lifter([tensor.AdvancedSubtensor1])
def local_gpua_advanced_subtensor(node, context_name):
return GpuAdvancedSubtensor1()
@register_opt('fast_compile')
@op_lifter([tensor.AdvancedIncSubtensor1])
def local_gpua_advanced_incsubtensor(node, context_name):
# This is disabled on non-cuda contexts
if get_context(context_name).kind != 'cuda':
return None
x, y, ilist = node.inputs
# Gpu Ops needs both inputs to have the same dtype
if (x.type.dtype != y.type.dtype):
dtype = scalar.upcast(x.type.dtype, y.type.dtype)
if x.type.dtype != dtype:
x = tensor.cast(x, dtype)
if y.type.dtype != dtype:
y = tensor.cast(y, dtype)
set_instead_of_inc = node.op.set_instead_of_inc
active_device_no = theano.sandbox.cuda.active_device_number()
device_properties = theano.sandbox.cuda.device_properties
compute_capability = device_properties(active_device_no)['major']
if (compute_capability < 2 or x.ndim != 2 or y.ndim != 2):
return [GpuAdvancedIncSubtensor1(
set_instead_of_inc=set_instead_of_inc)(x, y, ilist)]
else:
return [GpuAdvancedIncSubtensor1_dev20(
set_instead_of_inc=set_instead_of_inc)(x, y, ilist)]
@register_opt('fast_compile')
@op_lifter([tensor.CAReduce, tensor.Sum, tensor.elemwise.Prod])
def local_gpua_careduce(node, context_name):
if isinstance(node.op.scalar_op, (scalar.Add, scalar.Mul,
scalar.Maximum, scalar.Minimum)):
ctx = get_context(context_name)
if ctx.kind == 'opencl':
op = GpuCAReduceCPY
if node.op.scalar_op not in [scalar.add, scalar.mul]:
                # We don't yet support all reductions with the cpy code.
return
elif ctx.kind == 'cuda':
op = GpuCAReduceCuda
else:
return False
x, = node.inputs
greduce = op(
node.op.scalar_op, axis=node.op.axis,
dtype=getattr(node.op, 'dtype', None),
acc_dtype=getattr(node.op, 'acc_dtype', None))
gvar = greduce(x)
# We need to have the make node called, otherwise the mask can
# be None
if (op is GpuCAReduceCPY or
gvar.owner.op.supports_c_code([GpuFromHost(context_name)(x)])):
return greduce
else:
# Try to make a simpler pattern based on reshaping
# The principle is that if two adjacent dimensions have
# the same value in the reduce_mask, then we can reshape
# to make them a single dimension, do the reduction, and
# then reshape to get them back.
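            # Worked example (added for clarity, shapes are invented): for an
            # input of shape (d0, d1, d2, d3) reduced over axis=(1, 2), the
            # reduce_mask is [0, 1, 1, 0]; the loop below merges the adjacent
            # equal entries, so new_in_shp becomes [d0, d1 * d2, d3],
            # new_mask becomes [0, 1, 0] and new_axis becomes [1].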
if node.op.axis is None:
reduce_mask = [1] * x.type.ndim
else:
reduce_mask = [0] * x.type.ndim
for a in node.op.axis:
assert reduce_mask[a] == 0
reduce_mask[a] = 1
shape_of = node.fgraph.shape_feature.shape_of
x_shape = shape_of[x]
new_in_shp = [x_shape[0]]
new_mask = [reduce_mask[0]]
for i in xrange(1, x.type.ndim):
if reduce_mask[i] == reduce_mask[i - 1]:
new_in_shp[-1] *= x_shape[i]
else:
new_mask.append(reduce_mask[i])
new_in_shp.append(x_shape[i])
new_axis = []
for idx, m in enumerate(new_mask):
if m == 1:
new_axis.append(idx)
greduce = op(
node.op.scalar_op,
axis=new_axis, reduce_mask=new_mask,
dtype=getattr(node.op, 'dtype', None),
acc_dtype=getattr(node.op, 'acc_dtype', None))
reshaped_x = x.reshape(tensor.stack(new_in_shp))
gpu_reshaped_x = GpuFromHost(context_name)(reshaped_x)
gvar = greduce(gpu_reshaped_x)
# We need to have the make node called, otherwise the mask can
# be None
reshaped_gpu_inputs = [gpu_reshaped_x]
if greduce.supports_c_code(reshaped_gpu_inputs):
reduce_reshaped_x = host_from_gpu(
greduce(gpu_reshaped_x))
if reduce_reshaped_x.ndim != node.outputs[0].ndim:
unreshaped_reduce = reduce_reshaped_x.reshape(
tensor.stack(shape_of[node.outputs[0]]))
else:
unreshaped_reduce = reduce_reshaped_x
return [unreshaped_reduce]
@register_opt('fast_compile')
@op_lifter([tensor.blas.Gemv, tensor.blas_c.CGemv])
def local_gpua_gemv(node, context_name):
return GpuGemv(inplace=node.op.inplace)
@register_opt('fast_compile')
@op_lifter([tensor.blas.Gemm])
def local_gpua_gemm(node, context_name):
return GpuGemm(inplace=node.op.inplace)
@register_opt('fast_compile')
@op_lifter([tensor.basic.Dot])
def local_gpua_hgemm(node, context_name):
from theano.sandbox.cuda import nvcc_compiler
if nvcc_compiler.nvcc_version < '7.5':
_logger.warning("Not performing dot of float16 on the GPU since "
"cuda 7.5 is not available. Updating could speed up "
"your code.")
return
A = node.inputs[0]
B = node.inputs[1]
if (A.ndim == 2 and B.ndim == 2 and
A.dtype == 'float16' and B.dtype == 'float16'):
fgraph = node.inputs[0].fgraph
C = GpuAllocEmpty(dtype='float16', context_name=context_name)(
shape_i(A, 0, fgraph),
shape_i(B, 1, fgraph))
return gpugemm_no_inplace(C, 1.0, A, B, 0.0)
@register_opt()
@alpha_merge(GpuGemm, alpha_in=1, beta_in=4)
def local_gpuagemm_alpha_merge(node, *inputs):
return [gpugemm_no_inplace(*inputs)]
@register_opt()
@output_merge(GpuGemm, alpha_in=1, beta_in=4, out_in=0)
def local_gpuagemm_output_merge(node, *inputs):
return [gpugemm_no_inplace(*inputs)]
@register_opt('fast_compile')
@op_lifter([tensor.blas.Ger, tensor.blas_c.CGer, tensor.blas_scipy.ScipyGer])
def local_gpua_ger(node, context_name):
return GpuGer(inplace=node.op.destructive)
@register_opt('fast_compile')
@op_lifter([tensor.blas.Dot22])
def local_gpua_dot22(node, context_name):
return gpu_dot22
@register_opt('fast_compile')
@op_lifter([tensor.basic.Eye])
def local_gpua_eye(node, context_name):
return GpuEye(dtype=node.op.dtype, context_name=context_name)
@register_opt('fast_compile')
@op_lifter([tensor.nnet.CrossentropySoftmaxArgmax1HotWithBias], cuda_only=True)
def local_gpua_crossentropysoftmaxargmax1hotwithbias(node, context_name):
return GpuCrossentropySoftmaxArgmax1HotWithBias()
@register_opt('fast_compile')
@op_lifter([tensor.nnet.CrossentropySoftmax1HotWithBiasDx], cuda_only=True)
def local_gpua_crossentropysoftmax1hotwithbiasdx(node, context_name):
return GpuCrossentropySoftmax1HotWithBiasDx()
@register_opt('fast_compile')
@op_lifter([tensor.nnet.Softmax], cuda_only=True)
def local_gpua_softmax(node, context_name):
return GpuSoftmax()
@register_opt('fast_compile')
@op_lifter([tensor.nnet.SoftmaxWithBias], cuda_only=True)
def local_gpua_softmaxwithbias(node, context_name):
return GpuSoftmaxWithBias()
@register_opt('fast_compile')
@op_lifter([theano.tensor.opt.Assert])
def local_assert(node, context_name):
if (node.inputs[0].owner and
isinstance(node.inputs[0].owner.op, HostFromGpu)):
return [host_from_gpu(node.op(node.inputs[0].owner.inputs[0],
*node.inputs[1:]))]
@register_opt('fast_compile')
@op_lifter([ConvOp])
def local_gpu_conv(node, context_name):
def GpuConvOp_from_ConvOp(op):
logical_img_hw = None
if op.kshp_logical is not None and op.kshp_logical != op.kshp:
return None
ret = GpuConv(border_mode=op.out_mode,
subsample=(op.dx, op.dy),
logical_img_hw=logical_img_hw,
logical_kern_hw=op.kshp_logical,
logical_kern_align_top=op.kshp_logical_top_aligned,
kshp=op.kshp,
version=op.version,
direction_hint=op.direction_hint,
verbose=op.verbose,
imshp=op.imshp,
nkern=op.nkern,
bsize=op.bsize,
fft_opt=op.fft_opt)
if op.imshp_logical is not None:
logical_img_hw = op.imshp_logical[1:3]
if logical_img_hw != op.imshp[1:3]:
rstride = int(numpy.ceil(op.imshp_logical[1] /
float(op.imshp[1])))
cstride = int(numpy.ceil(op.imshp_logical[2] /
float(op.imshp[2])))
def make_graph(img, kern):
buf = tensor.alloc(numpy.asarray(0, dtype=img.dtype),
img.shape[0], *op.imshp_logical)
img = tensor.set_subtensor(buf[:, :, ::rstride, ::cstride],
img)
img = GpuFromHost(context_name)(img)
return ret(img, kern)
return make_graph
return ret
def values_eq_approx(a, b):
"""
        This function is needed so that DebugMode does not raise
        spurious errors due to rounding error.
        Such errors can appear because we reduce over the two last
        dimensions, so the absolute error can grow when the number of
        elements reduced over is significant.
"""
assert a.ndim == 4
atol = None
if a.shape[-1] * a.shape[-2] > 100:
# For float32 the default atol is 1e-5
atol = 3e-5
return GpuArrayType.values_eq_approx(a, b, atol=atol)
img, kern = node.inputs
gpu_conv = GpuConvOp_from_ConvOp(node.op)
if gpu_conv is None:
return
out = gpu_conv(GpuFromHost(context_name)(img),
GpuFromHost(context_name)(kern))
assert isinstance(out.type, GpuArrayType)
# Make sure to keep the broadcastable pattern of the original
# convolution even if we might gain or lose some due to different
# information at the node level.
out = tensor.patternbroadcast(out, node.outputs[0].broadcastable)
out.values_eq_approx = values_eq_approx
return [out]
# Register this here so that it goes after 'local_gpu_conv'
register_opt()(conv_groupopt)
@register_opt("low_memory")
@local_optimizer([GpuCAReduceCuda])
def local_gpu_elemwise_careduce(node):
"""
Merge some GpuCAReduceCuda and GPUElemwise.
"""
if (isinstance(node.op, GpuCAReduceCuda) and
node.op.pre_scalar_op is None and
node.inputs[0].owner and
isinstance(node.inputs[0].owner.op, GpuElemwise) and
            # The Op supports any scalar op with a single input as
            # pre_scalar_op. We don't automatically add more cases, as
            # some (e.g. trigonometric operations) combined with certain
            # reduction patterns would probably result in a slowdown.
isinstance(node.inputs[0].owner.op.scalar_op, scalar.basic.Sqr)):
op = node.op
inp = node.inputs[0].owner.inputs[0]
return [GpuCAReduceCuda(scalar_op=op.scalar_op,
reduce_mask=op.reduce_mask,
pre_scalar_op=scalar.basic.sqr)(inp)]
def tensor_to_gpu(x, context_name):
if isinstance(x.type, tensor.TensorType):
y = GpuArrayType(broadcastable=x.type.broadcastable,
context_name=context_name,
dtype=x.type.dtype)()
if x.name:
y.name = x.name + '[Gpua]'
return y
else:
return x
def gpu_safe_new(x, tag=''):
"""
Internal function that constructs a new variable from x with the same
type, but with a different name (old name + tag). This function is used
by gradient, or the R-op to construct new variables for the inputs of
the inner graph such that there is no interference between the original
graph and the newly constructed graph.
"""
if hasattr(x, 'name') and x.name is not None:
nw_name = x.name + tag
else:
nw_name = None
if isinstance(x, theano.Constant):
return x.clone()
nw_x = x.type()
nw_x.name = nw_name
return nw_x
def gpu_reconstruct_graph(inputs, outputs, tag=None):
"""
Different interface to clone, that allows you to pass inputs.
Compared to clone, this method always replaces the inputs with
new variables of the same type, and returns those (in the same
order as the original inputs).
"""
if tag is None:
tag = ''
nw_inputs = [gpu_safe_new(x, tag) for x in inputs]
givens = {}
for nw_x, x in zip(nw_inputs, inputs):
givens[x] = nw_x
nw_outputs = scan_utils.clone(outputs, replace=givens)
return (nw_inputs, nw_outputs)
@register_opt('scan', 'fast_compile')
@op_lifter([scan_op.Scan])
def local_scan_to_gpua(node, context_name):
info = copy.deepcopy(node.op.info)
if info.get('gpua', False):
return
info['gpua'] = True
nw_ins = [node.inputs[0]]
e = (1 +
node.op.n_seqs +
node.op.n_mit_mot +
node.op.n_mit_sot +
node.op.n_sit_sot +
node.op.n_shared_outs)
nw_ins += [safe_to_gpu(x, context_name) for x in node.inputs[1:e]]
b = e
e = e + node.op.n_nit_sot
nw_ins += node.inputs[b:e]
nw_ins += [safe_to_gpu(x, context_name) for x in node.inputs[e:]]
scan_ins = [tensor_to_gpu(x, context_name) for x in node.op.inputs]
# The inner output corresponding to the looping condition should not be
# moved to the gpu
if node.op.info['as_while']:
scan_outs = [safe_to_gpu(x, context_name) for x in node.op.outputs[:-1]]
scan_outs += [node.op.outputs[-1]]
else:
scan_outs = [safe_to_gpu(x, context_name) for x in node.op.outputs]
scan_outs = scan_utils.clone(
scan_outs,
replace=list(zip(node.op.inputs,
(safe_to_cpu(x) for x in scan_ins))))
# We need to construct the hash here, because scan
# __init__ does not know about the gpu and can not
# handle graphs with inputs being on the gpu
tmp_in, tmp_out = gpu_reconstruct_graph(scan_ins, scan_outs)
local_fgraph = gof.FunctionGraph(tmp_in, tmp_out, clone=True)
_cmodule_key = gof.CLinker().cmodule_key_(local_fgraph, [])
info['gpu_hash'] = hash(_cmodule_key)
def typebuild(dtype, broadcastable, context_name=context_name):
return GpuArrayType(dtype=dtype, broadcastable=broadcastable,
context_name=context_name)
nw_op = scan_op.Scan(scan_ins, scan_outs, info,
typeConstructor=typebuild).make_node(*nw_ins)
return nw_op.outputs
def _scan_type_infer(node):
context_name = infer_context_name(*node.inputs)
def typebuild(dtype, broadcastable, context_name=context_name):
return GpuArrayType(dtype=dtype, broadcastable=broadcastable,
context_name=context_name)
return typebuild
optdb.register('gpua_scanOp_make_inplace',
scan_opt.ScanInplaceOptimizer(typeInfer=_scan_type_infer,
gpua_flag=True),
75,
'gpuarray',
'fast_run',
'inplace',
'scan')
|
the-stack_0_12086 | import requests
from lxml import html
url = 'https://info.urfu.ru/ru/departures/kafedry/'
# Fetch the source code of the page
response = requests.get(url)
# Parse the document body into an element tree
parsed_body = html.fromstring(response.text)
# Get all elements with the 'course-box' class
course_boxes = parsed_body.find_class('course-box')
# Create an empty list to collect the department names
departments = []
for box in course_boxes:
    # Get the <a> element inside the box
link = box.find('a')
    # Get the <p> element inside the link
text = link.find('p')
    # Append the department name to the list
departments.append(text.text_content())
|
the-stack_0_12088 | #!/usr/bin/env python
import os
import requests
import json
import datetime
import shutil
from bs4 import BeautifulSoup
here = os.path.dirname(os.path.abspath(__file__))
hospital_id = os.path.basename(here)
url = 'https://www.huntsvillehospital.org/price-transparency'
today = datetime.datetime.today().strftime('%Y-%m-%d')
outdir = os.path.join(here, today)
if not os.path.exists(outdir):
os.mkdir(outdir)
prefix = "https://www.huntsvillehospital.org"
response = requests.get(url)
soup = BeautifulSoup(response.text, 'lxml')
# Each folder will have a list of records
records = []
for entry in soup.find_all('a', href=True):
download_url = prefix + entry['href']
if '.csv' in download_url:
filename = os.path.basename(download_url.split('?')[0])
output_file = os.path.join(outdir, filename)
os.system('wget -O "%s" "%s"' % (output_file, download_url))
record = { 'hospital_id': hospital_id,
'filename': filename,
'date': today,
'uri': filename,
'name': filename,
'url': download_url }
records.append(record)
# Keep json record of all files included
records_file = os.path.join(outdir, 'records.json')
with open(records_file, 'w') as filey:
filey.write(json.dumps(records, indent=4))
# This folder is also latest.
latest = os.path.join(here, 'latest')
if os.path.exists(latest):
shutil.rmtree(latest)
shutil.copytree(outdir, latest)
|
the-stack_0_12089 | """Integration tests for :mod:`esmvalcore._recipe_checks`."""
from typing import Any, List
from unittest import mock
import pytest
import esmvalcore._recipe_checks as check
ERR_ALL = 'Looked for files matching%s'
ERR_D = ('Looked for files in %s, but did not find any file pattern to match '
'against')
ERR_F = ('Looked for files matching %s, but did not find any existing input '
'directory')
ERR_RANGE = 'No input data available for years {} in files {}'
VAR = {
'filename': 'a/c.nc',
'frequency': 'mon',
'short_name': 'tas',
'start_year': 2020,
'end_year': 2025,
'alias': 'alias',
}
FX_VAR = {
'filename': 'a/b.nc',
'frequency': 'fx',
'short_name': 'areacella',
}
FILES = [
'a/b/c_20200101-20201231',
'a/b/c_20210101-20211231',
'a/b/c_20220101-20221231',
'a/b/c_20230101-20231231',
'a/b/c_20240101-20241231',
'a/b/c_20250101-20251231',
]
DATA_AVAILABILITY_DATA = [
(FILES, dict(VAR), None),
(FILES, dict(FX_VAR), None),
(FILES[:-1], dict(VAR), ERR_RANGE.format('2025', FILES[:-1])),
(FILES[:-2], dict(VAR), ERR_RANGE.format('2024, 2025', FILES[:-2])),
([FILES[1]] + [FILES[3]], dict(VAR),
ERR_RANGE.format('2020, 2022, 2024, 2025', [FILES[1]] + [FILES[3]])),
]
@pytest.mark.parametrize('input_files,var,error', DATA_AVAILABILITY_DATA)
@mock.patch('esmvalcore._recipe_checks.logger', autospec=True)
def test_data_availability_data(mock_logger, input_files, var, error):
"""Test check for data when data is present."""
saved_var = dict(var)
if error is None:
check.data_availability(input_files, var, None, None)
mock_logger.error.assert_not_called()
else:
with pytest.raises(check.RecipeError) as rec_err:
check.data_availability(input_files, var, None, None)
assert str(rec_err.value) == error
assert var == saved_var
DATA_AVAILABILITY_NO_DATA: List[Any] = [
([], [], None),
([], None, None),
(None, [], None),
(None, None, None),
(['dir1'], [], (ERR_D, ['dir1'])),
(['dir1', 'dir2'], [], (ERR_D, ['dir1', 'dir2'])),
(['dir1'], None, (ERR_D, ['dir1'])),
(['dir1', 'dir2'], None, (ERR_D, ['dir1', 'dir2'])),
([], ['a*.nc'], (ERR_F, ['a*.nc'])),
([], ['a*.nc', 'b*.nc'], (ERR_F, ['a*.nc', 'b*.nc'])),
(None, ['a*.nc'], (ERR_F, ['a*.nc'])),
(None, ['a*.nc', 'b*.nc'], (ERR_F, ['a*.nc', 'b*.nc'])),
(['1'], ['a'], (ERR_ALL, ': 1/a')),
(['1'], ['a', 'b'], (ERR_ALL, '\n1/a\n1/b')),
(['1', '2'], ['a'], (ERR_ALL, '\n1/a\n2/a')),
(['1', '2'], ['a', 'b'], (ERR_ALL, '\n1/a\n1/b\n2/a\n2/b')),
]
@pytest.mark.parametrize('dirnames,filenames,error', DATA_AVAILABILITY_NO_DATA)
@mock.patch('esmvalcore._recipe_checks.logger', autospec=True)
def test_data_availability_no_data(mock_logger, dirnames, filenames, error):
"""Test check for data when no data is present."""
var = dict(VAR)
var_no_filename = {
'frequency': 'mon',
'short_name': 'tas',
'start_year': 2020,
'end_year': 2025,
'alias': 'alias',
}
error_first = ('No input files found for variable %s', var_no_filename)
error_last = ("Set 'log_level' to 'debug' to get more information", )
with pytest.raises(check.RecipeError) as rec_err:
check.data_availability([], var, dirnames, filenames)
assert str(rec_err.value) == 'Missing data for alias: tas'
if error is None:
assert mock_logger.error.call_count == 2
errors = [error_first, error_last]
else:
assert mock_logger.error.call_count == 3
errors = [error_first, error, error_last]
calls = [mock.call(*e) for e in errors]
assert mock_logger.error.call_args_list == calls
assert var == VAR
|
the-stack_0_12090 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow ops for directed graphs."""
import tensorflow as tf
from syntaxnet.util import check
def ArcPotentialsFromTokens(source_tokens, target_tokens, weights):
r"""Returns arc potentials computed from token activations and weights.
For each batch of source and target token activations, computes a scalar
potential for each arc as the 3-way product between the activation vectors of
the source and target of the arc and the |weights|. Specifically,
arc[b,s,t] =
\sum_{i,j} source_tokens[b,s,i] * weights[i,j] * target_tokens[b,t,j]
Note that the token activations can be extended with bias terms to implement a
"biaffine" model (Dozat and Manning, 2017).
Args:
source_tokens: [B,N,S] tensor of batched activations for the source token in
each arc.
target_tokens: [B,N,T] tensor of batched activations for the target token in
each arc.
weights: [S,T] matrix of weights.
B,N may be statically-unknown, but S,T must be statically-known. The dtype
of all arguments must be compatible.
Returns:
[B,N,N] tensor A of arc potentials where A_{b,s,t} is the potential of the
arc from s to t in batch element b. The dtype of A is the same as that of
the arguments. Note that the diagonal entries (i.e., where s==t) represent
self-loops and may not be meaningful.
"""
# All arguments must have statically-known rank.
check.Eq(source_tokens.get_shape().ndims, 3, 'source_tokens must be rank 3')
check.Eq(target_tokens.get_shape().ndims, 3, 'target_tokens must be rank 3')
check.Eq(weights.get_shape().ndims, 2, 'weights must be a matrix')
# All activation dimensions must be statically-known.
num_source_activations = weights.get_shape().as_list()[0]
num_target_activations = weights.get_shape().as_list()[1]
check.NotNone(num_source_activations, 'unknown source activation dimension')
check.NotNone(num_target_activations, 'unknown target activation dimension')
check.Eq(source_tokens.get_shape().as_list()[2], num_source_activations,
'dimension mismatch between weights and source_tokens')
check.Eq(target_tokens.get_shape().as_list()[2], num_target_activations,
'dimension mismatch between weights and target_tokens')
# All arguments must share the same type.
check.Same([weights.dtype.base_dtype,
source_tokens.dtype.base_dtype,
target_tokens.dtype.base_dtype],
'dtype mismatch')
source_tokens_shape = tf.shape(source_tokens)
target_tokens_shape = tf.shape(target_tokens)
batch_size = source_tokens_shape[0]
num_tokens = source_tokens_shape[1]
with tf.control_dependencies([
tf.assert_equal(batch_size, target_tokens_shape[0]),
tf.assert_equal(num_tokens, target_tokens_shape[1])]):
# Flatten out the batch dimension so we can use one big multiplication.
targets_bnxt = tf.reshape(target_tokens, [-1, num_target_activations])
# Matrices are row-major, so we arrange for the RHS argument of each matmul
# to have its transpose flag set. That way no copying is required to align
# the rows of the LHS with the columns of the RHS.
weights_targets_bnxs = tf.matmul(targets_bnxt, weights, transpose_b=True)
# The next computation is over pairs of tokens within each batch element, so
# restore the batch dimension.
weights_targets_bxnxs = tf.reshape(
weights_targets_bnxs, [batch_size, num_tokens, num_source_activations])
# Note that this multiplication is repeated across the batch dimension,
# instead of being one big multiplication as in the first matmul. There
# doesn't seem to be a way to arrange this as a single multiplication given
# the pairwise nature of this computation.
arcs_bxnxn = tf.matmul(source_tokens, weights_targets_bxnxs,
transpose_b=True)
return arcs_bxnxn
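# Hedged usage sketch (added for illustration; not part of the original
# module). The shapes are invented placeholders; the einsum in the comment is
# just another way to read the 3-way product documented above.
def _example_arc_potentials():  # pragma: no cover
  source = tf.zeros([2, 3, 4])   # [B=2, N=3, S=4]
  target = tf.zeros([2, 3, 5])   # [B=2, N=3, T=5]
  weights = tf.zeros([4, 5])     # [S, T]
  arcs = ArcPotentialsFromTokens(source, target, weights)  # shape [2, 3, 3]
  # Conceptually the same contraction as
  #   tf.einsum('bsi,ij,btj->bst', source, weights, target)
  return arcs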
def ArcSourcePotentialsFromTokens(tokens, weights):
r"""Returns arc source potentials computed from tokens and weights.
For each batch of token activations, computes a scalar potential for each arc
as the product between the activations of the source token and the |weights|.
Specifically,
arc[b,s,:] = \sum_{i} weights[i] * tokens[b,s,i]
Args:
tokens: [B,N,S] tensor of batched activations for source tokens.
weights: [S] vector of weights.
B,N may be statically-unknown, but S must be statically-known. The dtype of
all arguments must be compatible.
Returns:
[B,N,N] tensor A of arc potentials as defined above. The dtype of A is the
same as that of the arguments. Note that the diagonal entries (i.e., where
s==t) represent self-loops and may not be meaningful.
"""
# All arguments must have statically-known rank.
check.Eq(tokens.get_shape().ndims, 3, 'tokens must be rank 3')
check.Eq(weights.get_shape().ndims, 1, 'weights must be a vector')
# All activation dimensions must be statically-known.
num_source_activations = weights.get_shape().as_list()[0]
check.NotNone(num_source_activations, 'unknown source activation dimension')
check.Eq(tokens.get_shape().as_list()[2], num_source_activations,
'dimension mismatch between weights and tokens')
# All arguments must share the same type.
check.Same([weights.dtype.base_dtype,
tokens.dtype.base_dtype],
'dtype mismatch')
tokens_shape = tf.shape(tokens)
batch_size = tokens_shape[0]
num_tokens = tokens_shape[1]
# Flatten out the batch dimension so we can use a couple big matmuls.
tokens_bnxs = tf.reshape(tokens, [-1, num_source_activations])
weights_sx1 = tf.expand_dims(weights, 1)
sources_bnx1 = tf.matmul(tokens_bnxs, weights_sx1)
sources_bnxn = tf.tile(sources_bnx1, [1, num_tokens])
# Restore the batch dimension in the output.
sources_bxnxn = tf.reshape(sources_bnxn, [batch_size, num_tokens, num_tokens])
return sources_bxnxn
def RootPotentialsFromTokens(root, tokens, weights):
r"""Returns root selection potentials computed from tokens and weights.
For each batch of token activations, computes a scalar potential for each root
selection as the 3-way product between the activations of the artificial root
token, the token activations, and the |weights|. Specifically,
roots[b,r] = \sum_{i,j} root[i] * weights[i,j] * tokens[b,r,j]
Args:
root: [S] vector of activations for the artificial root token.
tokens: [B,N,T] tensor of batched activations for root tokens.
weights: [S,T] matrix of weights.
B,N may be statically-unknown, but S,T must be statically-known. The dtype
of all arguments must be compatible.
Returns:
[B,N] matrix R of root-selection potentials as defined above. The dtype of
R is the same as that of the arguments.
"""
# All arguments must have statically-known rank.
check.Eq(root.get_shape().ndims, 1, 'root must be a vector')
check.Eq(tokens.get_shape().ndims, 3, 'tokens must be rank 3')
check.Eq(weights.get_shape().ndims, 2, 'weights must be a matrix')
# All activation dimensions must be statically-known.
num_source_activations = weights.get_shape().as_list()[0]
num_target_activations = weights.get_shape().as_list()[1]
check.NotNone(num_source_activations, 'unknown source activation dimension')
check.NotNone(num_target_activations, 'unknown target activation dimension')
check.Eq(root.get_shape().as_list()[0], num_source_activations,
'dimension mismatch between weights and root')
check.Eq(tokens.get_shape().as_list()[2], num_target_activations,
'dimension mismatch between weights and tokens')
# All arguments must share the same type.
check.Same([weights.dtype.base_dtype,
root.dtype.base_dtype,
tokens.dtype.base_dtype],
'dtype mismatch')
root_1xs = tf.expand_dims(root, 0)
tokens_shape = tf.shape(tokens)
batch_size = tokens_shape[0]
num_tokens = tokens_shape[1]
# Flatten out the batch dimension so we can use a couple big matmuls.
tokens_bnxt = tf.reshape(tokens, [-1, num_target_activations])
weights_targets_bnxs = tf.matmul(tokens_bnxt, weights, transpose_b=True)
roots_1xbn = tf.matmul(root_1xs, weights_targets_bnxs, transpose_b=True)
# Restore the batch dimension in the output.
roots_bxn = tf.reshape(roots_1xbn, [batch_size, num_tokens])
return roots_bxn
def CombineArcAndRootPotentials(arcs, roots):
"""Combines arc and root potentials into a single set of potentials.
Args:
arcs: [B,N,N] tensor of batched arc potentials.
roots: [B,N] matrix of batched root potentials.
Returns:
[B,N,N] tensor P of combined potentials where
P_{b,s,t} = s == t ? roots[b,t] : arcs[b,s,t]
"""
# All arguments must have statically-known rank.
check.Eq(arcs.get_shape().ndims, 3, 'arcs must be rank 3')
check.Eq(roots.get_shape().ndims, 2, 'roots must be a matrix')
# All arguments must share the same type.
dtype = arcs.dtype.base_dtype
check.Same([dtype, roots.dtype.base_dtype], 'dtype mismatch')
roots_shape = tf.shape(roots)
arcs_shape = tf.shape(arcs)
batch_size = roots_shape[0]
num_tokens = roots_shape[1]
with tf.control_dependencies([
tf.assert_equal(batch_size, arcs_shape[0]),
tf.assert_equal(num_tokens, arcs_shape[1]),
tf.assert_equal(num_tokens, arcs_shape[2])]):
return tf.matrix_set_diag(arcs, roots)
def LabelPotentialsFromTokens(tokens, weights):
r"""Computes label potentials from tokens and weights.
For each batch of token activations, computes a scalar potential for each
label as the product between the activations of the source token and the
|weights|. Specifically,
labels[b,t,l] = \sum_{i} weights[l,i] * tokens[b,t,i]
Args:
tokens: [B,N,T] tensor of batched token activations.
weights: [L,T] matrix of weights.
B,N may be dynamic, but L,T must be static. The dtype of all arguments must
be compatible.
Returns:
[B,N,L] tensor of label potentials as defined above, with the same dtype as
the arguments.
"""
check.Eq(tokens.get_shape().ndims, 3, 'tokens must be rank 3')
check.Eq(weights.get_shape().ndims, 2, 'weights must be a matrix')
num_labels = weights.get_shape().as_list()[0]
num_activations = weights.get_shape().as_list()[1]
check.NotNone(num_labels, 'unknown number of labels')
check.NotNone(num_activations, 'unknown activation dimension')
check.Eq(tokens.get_shape().as_list()[2], num_activations,
'activation mismatch between weights and tokens')
tokens_shape = tf.shape(tokens)
batch_size = tokens_shape[0]
num_tokens = tokens_shape[1]
check.Same([tokens.dtype.base_dtype,
weights.dtype.base_dtype],
'dtype mismatch')
# Flatten out the batch dimension so we can use one big matmul().
tokens_bnxt = tf.reshape(tokens, [-1, num_activations])
labels_bnxl = tf.matmul(tokens_bnxt, weights, transpose_b=True)
# Restore the batch dimension in the output.
labels_bxnxl = tf.reshape(labels_bnxl, [batch_size, num_tokens, num_labels])
return labels_bxnxl
def LabelPotentialsFromTokenPairs(sources, targets, weights):
r"""Computes label potentials from source and target tokens and weights.
For each aligned pair of source and target token activations, computes a
scalar potential for each label on the arc from the source to the target.
Specifically,
labels[b,t,l] = \sum_{i,j} sources[b,t,i] * weights[l,i,j] * targets[b,t,j]
Args:
sources: [B,N,S] tensor of batched source token activations.
targets: [B,N,T] tensor of batched target token activations.
weights: [L,S,T] tensor of weights.
B,N may be dynamic, but L,S,T must be static. The dtype of all arguments
must be compatible.
Returns:
[B,N,L] tensor of label potentials as defined above, with the same dtype as
the arguments.
"""
check.Eq(sources.get_shape().ndims, 3, 'sources must be rank 3')
check.Eq(targets.get_shape().ndims, 3, 'targets must be rank 3')
check.Eq(weights.get_shape().ndims, 3, 'weights must be rank 3')
num_labels = weights.get_shape().as_list()[0]
num_source_activations = weights.get_shape().as_list()[1]
num_target_activations = weights.get_shape().as_list()[2]
check.NotNone(num_labels, 'unknown number of labels')
check.NotNone(num_source_activations, 'unknown source activation dimension')
check.NotNone(num_target_activations, 'unknown target activation dimension')
check.Eq(sources.get_shape().as_list()[2], num_source_activations,
'activation mismatch between weights and source tokens')
check.Eq(targets.get_shape().as_list()[2], num_target_activations,
'activation mismatch between weights and target tokens')
check.Same([sources.dtype.base_dtype,
targets.dtype.base_dtype,
weights.dtype.base_dtype],
'dtype mismatch')
sources_shape = tf.shape(sources)
targets_shape = tf.shape(targets)
batch_size = sources_shape[0]
num_tokens = sources_shape[1]
with tf.control_dependencies([tf.assert_equal(batch_size, targets_shape[0]),
tf.assert_equal(num_tokens, targets_shape[1])]):
# For each token, we must compute a vector-3tensor-vector product. There is
# no op for this, but we can use reshape() and matmul() to compute it.
# Reshape |weights| and |targets| so we can use a single matmul().
weights_lsxt = tf.reshape(weights, [num_labels * num_source_activations,
num_target_activations])
targets_bnxt = tf.reshape(targets, [-1, num_target_activations])
weights_targets_bnxls = tf.matmul(targets_bnxt, weights_lsxt,
transpose_b=True)
# Restore all dimensions.
weights_targets_bxnxlxs = tf.reshape(
weights_targets_bnxls,
[batch_size, num_tokens, num_labels, num_source_activations])
# Incorporate the source activations. In this case, we perform a batched
# matmul() between the trailing [L,S] matrices of the current result and the
# trailing [S] vectors of the tokens.
sources_bxnx1xs = tf.expand_dims(sources, 2)
labels_bxnxlx1 = tf.matmul(weights_targets_bxnxlxs, sources_bxnx1xs,
transpose_b=True)
labels_bxnxl = tf.squeeze(labels_bxnxlx1, [3])
return labels_bxnxl
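# Hedged usage sketch (added for illustration; not part of the original
# module). Shapes below are invented placeholders; the einsum in the comment
# is just another way to read the contraction documented above.
def _example_label_potentials_from_token_pairs():  # pragma: no cover
  sources = tf.zeros([2, 3, 4])   # [B=2, N=3, S=4]
  targets = tf.zeros([2, 3, 5])   # [B=2, N=3, T=5]
  weights = tf.zeros([6, 4, 5])   # [L=6, S=4, T=5]
  labels = LabelPotentialsFromTokenPairs(sources, targets, weights)
  # labels has shape [2, 3, 6]; conceptually the same as
  #   tf.einsum('bti,lij,btj->btl', sources, weights, targets)
  return labels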
|
the-stack_0_12091 | """main module
"""
import argparse
import importlib.util
import os
import shutil
import tempfile
import threading
import uuid
import docker
import yaml
from . import preprocess
def __import_configurator(path):
conf_path = os.path.join(path, "configurator.py")
spec = importlib.util.spec_from_file_location("configurator", conf_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
def __start_container(client, base_path, properties, build, nocache, network,
alias=None, **container_opts):
image_path = os.path.join(base_path, properties.pop("image"))
cache = client.images.list(filters={'label':f"seed={image_path}"})
if (not build) and cache:
print(" Using Cached Image")
image = cache[0]
else:
print(" Building Image... ", end='', flush=True)
image = client.images.build(
path=image_path,
nocache=nocache,
rm=True,
pull=True,
labels={'seed':image_path}
)
print("Done")
configurator = __import_configurator(image_path)
ret = configurator.configure(properties)
if isinstance(ret, tuple):
configurator_opts, teardown = ret
else:
configurator_opts, teardown = (ret, lambda: None)
del configurator
print(" Starting Container... ", end='', flush=True)
container = client.containers.create(
image=image.id,
detach=True,
init=True,
**container_opts,
**configurator_opts
)
print("Done")
network.connect(
container,
aliases=[alias] if alias is not None else None
)
container.start()
return (container, teardown)
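# Hedged sketch of the configurator protocol assumed by __start_container
# (added for illustration; names and values are invented). Each image
# directory is expected to ship a configurator.py exposing
# configure(properties), which returns either a dict of extra keyword
# arguments for client.containers.create, or a (dict, teardown) tuple:
#
#     # configurator.py
#     def configure(properties):
#         opts = {"environment": {"MODE": properties.get("mode", "default")}}
#         def teardown():
#             pass  # release any host-side resources allocated here
#         return opts, teardown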
def __log_container(name, container):
logs = container.logs(
stdout=True,
stderr=True,
stream=True,
follow=True
)
for log in logs:
print(f"{name}:", log.decode(), end='', flush=True)
def __parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--wdir', default="",
help="path to working directory")
parser.add_argument('--config', default='config.yml',
help="path to config file relative to working directory")
parser.add_argument('--adapterdir', default='adapters',
help="adapter search path relative to working directory")
parser.add_argument('--applicationdir', default='applications',
help="application search path relative to working directory")
parser.add_argument('--bundledir', default='bundles',
help="bundle search path relative to working directory")
parser.add_argument('--controllerdir', default='controllers',
help="controller search path relative to working directory")
parser.add_argument('--build', action='store_true',
help="rebuild adapter or controller images")
parser.add_argument('--nocache', action='store_true',
help="don't use build cache on adapter or controller rebuild")
parser.add_argument('--verbose', '-v', action='count', default=0,
help="print adapter (1st level) and controller (2nd level) log")
args = parser.parse_args()
args.wdir = os.path.join(os.getcwd(), args.wdir)
return args
def main():
"""main function
"""
args = __parse_args()
adapter, controllers, network, bundledir = None, None, None, None
try:
with open(os.path.join(args.wdir, args.config)) as config_f:
config = yaml.load(config_f)["config"]
run_id = str(uuid.uuid4())
print(f"Starting With Run Id \"{run_id}\"")
#Set up directories
bundledir = tempfile.TemporaryDirectory()
resultdir = os.path.join("results", run_id)
os.makedirs(resultdir)
bundlepath = os.path.join(
args.wdir,
args.bundledir,
config["bundle"]["name"],
"bundle"
)
preprocess.preprocess_bundle(
bundlepath + ".xml",
os.path.join(bundledir.name, "bundle.xml"),
config["bundle"]['parameters']
)
shutil.copy(
bundlepath + ".controller-bindings.yml",
os.path.join(bundledir.name, "bundle.controller-bindings.yml")
)
#Set up network and containers
client = docker.from_env()
network = client.networks.create(run_id)
controllers = {}
for name, properties in config["controllers"].items():
print(f"Starting Controller \"{name}\"")
applications = {
application: os.path.join(
args.wdir,
args.applicationdir,
application)
for application in properties['applications']}
controllers[name] = __start_container(
client,
os.path.join(args.wdir, args.controllerdir),
dict(properties, **{
"applications": applications,
"bundledir": bundledir.name,
"resultdir": os.path.join(os.getcwd(), resultdir)
}),
args.build,
args.nocache,
network,
alias=name
)
print("Done")
print(f"Starting Adapter \"{config['adapter']['image']}\"")
adapter = __start_container(
client,
os.path.join(args.wdir, args.adapterdir),
dict(config["adapter"], **{
"bundledir": bundledir.name,
"resultdir": os.path.join(os.getcwd(), resultdir),
"controllers": config["controllers"]
}),
args.build,
args.nocache,
network,
privileged=True
)
print("Done")
try:
if args.verbose >= 2:
for name, (container, _) in controllers.items():
arg = (name, container)
threading.Thread(target=__log_container, args=arg).start()
if args.verbose >= 1:
__log_container("adapter", adapter[0])
import time
time.sleep(10)
adapter[0].wait()
except KeyboardInterrupt:
pass
finally:
print("Tearing Down")
if adapter is not None:
adapter[0].stop()
adapter[0].remove()
adapter[1]()
if controllers is not None:
for controller, teardown in controllers.values():
controller.stop()
controller.remove()
teardown()
if network is not None:
network.remove()
if bundledir is not None:
bundledir.cleanup()
if __name__ == "__main__":
main()
|
the-stack_0_12093 | # Authors: Alexandre Gramfort <[email protected]>
# Matti Hamalainen <[email protected]>
# Denis Engemann <[email protected]>
# Teon Brooks <[email protected]>
#
# License: BSD (3-clause)
from copy import deepcopy
from itertools import count
from math import sqrt
import numpy as np
from scipy import linalg
from .tree import dir_tree_find
from .tag import find_tag
from .constants import FIFF
from .pick import pick_types
from .write import (write_int, write_float, write_string, write_name_list,
write_float_matrix, end_block, start_block)
from ..utils import logger, verbose, warn
from ..externals.six import string_types
class Projection(dict):
"""Projection vector.
    A basic class providing a meaningful print for projection vectors.
"""
def __repr__(self): # noqa: D105
s = "%s" % self['desc']
s += ", active : %s" % self['active']
s += ", n_channels : %s" % self['data']['ncol']
return "<Projection | %s>" % s
class ProjMixin(object):
"""Mixin class for Raw, Evoked, Epochs.
Notes
-----
This mixin adds a proj attribute as a property to data containers.
It is True if at least one proj is present and all of them are active.
The projs might not be applied yet if data are not preloaded. In
this case it's the _projector attribute that does the job.
If a private _data attribute is present then the projs applied
to it are the ones marked as active.
    A proj parameter passed to the constructor of Raw or Epochs calls
    apply_proj, so afterwards the .proj attribute is True.
    Once you have applied the projs they stay active for the
    remaining pipeline.
    The suggested pipeline is proj=True in Epochs (it is cheaper than for Raw).
    When you use delayed SSP in Epochs, projs are applied when you call the
    get_data() method. They are not applied to evoked._data unless you call
    apply_proj(). The reason is that you may want to reject epochs using the
    projs even though the data are not stored in proj mode.
"""
@property
def proj(self):
"""Whether or not projections are active."""
return (len(self.info['projs']) > 0 and
all(p['active'] for p in self.info['projs']))
@verbose
def add_proj(self, projs, remove_existing=False, verbose=None):
"""Add SSP projection vectors.
Parameters
----------
projs : list
List with projection vectors.
remove_existing : bool
Remove the projection vectors currently in the file.
verbose : bool, str, int, or None
If not None, override default verbose level (see
:func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`
for more).
Returns
-------
self : instance of Raw | Epochs | Evoked
The data container.
"""
if isinstance(projs, Projection):
projs = [projs]
if (not isinstance(projs, list) and
not all(isinstance(p, Projection) for p in projs)):
raise ValueError('Only projs can be added. You supplied '
'something else.')
# mark proj as inactive, as they have not been applied
projs = deactivate_proj(projs, copy=True, verbose=self.verbose)
if remove_existing:
# we cannot remove the proj if they are active
if any(p['active'] for p in self.info['projs']):
raise ValueError('Cannot remove projectors that have '
'already been applied')
self.info['projs'] = projs
else:
self.info['projs'].extend(projs)
# We don't want to add projectors that are activated again.
self.info['projs'] = _uniquify_projs(self.info['projs'],
check_active=False, sort=False)
return self
def apply_proj(self):
"""Apply the signal space projection (SSP) operators to the data.
Notes
-----
Once the projectors have been applied, they can no longer be
removed. It is usually not recommended to apply the projectors at
too early stages, as they are applied automatically later on
(e.g. when computing inverse solutions).
Hint: using the copy method individual projection vectors
can be tested without affecting the original data.
With evoked data, consider the following example::
projs_a = mne.read_proj('proj_a.fif')
projs_b = mne.read_proj('proj_b.fif')
# add the first, copy, apply and see ...
evoked.add_proj(a).copy().apply_proj().plot()
# add the second, copy, apply and see ...
evoked.add_proj(b).copy().apply_proj().plot()
# drop the first and see again
evoked.copy().del_proj(0).apply_proj().plot()
evoked.apply_proj() # finally keep both
Returns
-------
self : instance of Raw | Epochs | Evoked
The instance.
"""
from ..epochs import BaseEpochs
from ..evoked import Evoked
from .base import BaseRaw
if self.info['projs'] is None or len(self.info['projs']) == 0:
logger.info('No projector specified for this dataset. '
'Please consider the method self.add_proj.')
return self
# Exit delayed mode if you apply proj
if isinstance(self, BaseEpochs) and self._do_delayed_proj:
logger.info('Leaving delayed SSP mode.')
self._do_delayed_proj = False
if all(p['active'] for p in self.info['projs']):
logger.info('Projections have already been applied. '
'Setting proj attribute to True.')
return self
_projector, info = setup_proj(deepcopy(self.info), activate=True,
verbose=self.verbose)
# let's not raise a RuntimeError here, otherwise interactive plotting
if _projector is None: # won't be fun.
logger.info('The projections don\'t apply to these data.'
' Doing nothing.')
return self
self._projector, self.info = _projector, info
if isinstance(self, (BaseRaw, Evoked)):
if self.preload:
self._data = np.dot(self._projector, self._data)
else: # BaseEpochs
if self.preload:
for ii, e in enumerate(self._data):
self._data[ii] = self._project_epoch(e)
else:
self.load_data() # will automatically apply
logger.info('SSP projectors applied...')
return self
def del_proj(self, idx='all'):
"""Remove SSP projection vector.
Note: The projection vector can only be removed if it is inactive
(has not been applied to the data).
Parameters
----------
idx : int | list of int | str
Index of the projector to remove. Can also be "all" (default)
to remove all projectors.
Returns
-------
self : instance of Raw | Epochs | Evoked
"""
if isinstance(idx, string_types) and idx == 'all':
idx = list(range(len(self.info['projs'])))
idx = np.atleast_1d(np.array(idx, int)).ravel()
if any(self.info['projs'][ii]['active'] for ii in idx):
raise ValueError('Cannot remove projectors that have already '
'been applied')
self.info['projs'] = [p for pi, p in enumerate(self.info['projs'])
if pi not in idx]
return self
def plot_projs_topomap(self, ch_type=None, layout=None, axes=None):
"""Plot SSP vector.
Parameters
----------
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None | List
            The channel type to plot. For 'grad', the gradiometers are
            collected in pairs and the RMS for each pair is plotted. If None
(default), it will return all channel types present. If a list of
ch_types is provided, it will return multiple figures.
layout : None | Layout | List of Layouts
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct
layout file is inferred from the data; if no appropriate layout
file was found, the layout is automatically generated from the
sensor locations. Or a list of Layout if projections
are from different sensor types.
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of projectors. If instance of Axes,
there must be only one projector. Defaults to None.
Returns
-------
fig : instance of matplotlib figure
Figure distributing one image per channel across sensor topography.
"""
if self.info['projs'] is not None or len(self.info['projs']) != 0:
from ..viz.topomap import plot_projs_topomap
from ..channels.layout import find_layout
if layout is None:
layout = []
if ch_type is None:
ch_type = [ch for ch in ['meg', 'eeg'] if ch in self]
elif isinstance(ch_type, string_types):
ch_type = [ch_type]
for ch in ch_type:
if ch in self:
layout.append(find_layout(self.info, ch, exclude=[]))
else:
warn('Channel type %s is not found in info.' % ch)
fig = plot_projs_topomap(self.info['projs'], layout, axes=axes)
else:
raise ValueError("Info is missing projs. Nothing to plot.")
return fig
def _proj_equal(a, b, check_active=True):
"""Test if two projectors are equal."""
equal = ((a['active'] == b['active'] or not check_active) and
a['kind'] == b['kind'] and
a['desc'] == b['desc'] and
a['data']['col_names'] == b['data']['col_names'] and
a['data']['row_names'] == b['data']['row_names'] and
a['data']['ncol'] == b['data']['ncol'] and
a['data']['nrow'] == b['data']['nrow'] and
np.all(a['data']['data'] == b['data']['data']))
return equal
@verbose
def _read_proj(fid, node, verbose=None):
"""Read spatial projections from a FIF file.
Parameters
----------
fid : file
The file descriptor of the open file.
node : tree node
The node of the tree where to look.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
projs: dict
The list of projections.
"""
projs = list()
# Locate the projection data
nodes = dir_tree_find(node, FIFF.FIFFB_PROJ)
if len(nodes) == 0:
return projs
tag = find_tag(fid, nodes[0], FIFF.FIFF_NCHAN)
if tag is not None:
global_nchan = int(tag.data)
items = dir_tree_find(nodes[0], FIFF.FIFFB_PROJ_ITEM)
for item in items:
# Find all desired tags in one item
tag = find_tag(fid, item, FIFF.FIFF_NCHAN)
if tag is not None:
nchan = int(tag.data)
else:
nchan = global_nchan
tag = find_tag(fid, item, FIFF.FIFF_DESCRIPTION)
if tag is not None:
desc = tag.data
else:
tag = find_tag(fid, item, FIFF.FIFF_NAME)
if tag is not None:
desc = tag.data
else:
raise ValueError('Projection item description missing')
# XXX : is this useful ?
# tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST)
# if tag is not None:
# namelist = tag.data
# else:
# raise ValueError('Projection item channel list missing')
tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_KIND)
if tag is not None:
kind = int(tag.data)
else:
raise ValueError('Projection item kind missing')
tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_NVEC)
if tag is not None:
nvec = int(tag.data)
else:
raise ValueError('Number of projection vectors not specified')
tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST)
if tag is not None:
names = tag.data.split(':')
else:
raise ValueError('Projection item channel list missing')
tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_VECTORS)
if tag is not None:
data = tag.data
else:
raise ValueError('Projection item data missing')
tag = find_tag(fid, item, FIFF.FIFF_MNE_PROJ_ITEM_ACTIVE)
if tag is not None:
active = bool(tag.data)
else:
active = False
tag = find_tag(fid, item, FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR)
if tag is not None:
explained_var = tag.data
else:
explained_var = None
# handle the case when data is transposed for some reason
if data.shape[0] == len(names) and data.shape[1] == nvec:
data = data.T
if data.shape[1] != len(names):
raise ValueError('Number of channel names does not match the '
'size of data matrix')
# Use exactly the same fields in data as in a named matrix
one = Projection(kind=kind, active=active, desc=desc,
data=dict(nrow=nvec, ncol=nchan, row_names=None,
col_names=names, data=data),
explained_var=explained_var)
projs.append(one)
if len(projs) > 0:
logger.info(' Read a total of %d projection items:' % len(projs))
for k in range(len(projs)):
if projs[k]['active']:
misc = 'active'
else:
misc = ' idle'
logger.info(' %s (%d x %d) %s'
% (projs[k]['desc'], projs[k]['data']['nrow'],
projs[k]['data']['ncol'], misc))
return projs
###############################################################################
# Write
def _write_proj(fid, projs):
"""Write a projection operator to a file.
Parameters
----------
fid : file
The file descriptor of the open file.
projs : dict
The projection operator.
"""
if len(projs) == 0:
return
start_block(fid, FIFF.FIFFB_PROJ)
for proj in projs:
start_block(fid, FIFF.FIFFB_PROJ_ITEM)
write_int(fid, FIFF.FIFF_NCHAN, proj['data']['ncol'])
write_name_list(fid, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST,
proj['data']['col_names'])
write_string(fid, FIFF.FIFF_NAME, proj['desc'])
write_int(fid, FIFF.FIFF_PROJ_ITEM_KIND, proj['kind'])
if proj['kind'] == FIFF.FIFFV_PROJ_ITEM_FIELD:
write_float(fid, FIFF.FIFF_PROJ_ITEM_TIME, 0.0)
write_int(fid, FIFF.FIFF_PROJ_ITEM_NVEC, proj['data']['nrow'])
write_int(fid, FIFF.FIFF_MNE_PROJ_ITEM_ACTIVE, proj['active'])
write_float_matrix(fid, FIFF.FIFF_PROJ_ITEM_VECTORS,
proj['data']['data'])
if proj['explained_var'] is not None:
write_float(fid, FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR,
proj['explained_var'])
end_block(fid, FIFF.FIFFB_PROJ_ITEM)
end_block(fid, FIFF.FIFFB_PROJ)
###############################################################################
# Utils
def _check_projs(projs, copy=True):
"""Check that projs is a list of Projection."""
if not isinstance(projs, (list, tuple)):
raise TypeError('projs must be a list or tuple, got %s'
% (type(projs),))
for pi, p in enumerate(projs):
if not isinstance(p, Projection):
raise TypeError('All entries in projs list must be Projection '
'instances, but projs[%d] is type %s'
% (pi, type(p)))
return deepcopy(projs) if copy else projs
def make_projector(projs, ch_names, bads=(), include_active=True):
"""Create an SSP operator from SSP projection vectors.
Parameters
----------
projs : list
List of projection vectors.
ch_names : list of str
List of channels to include in the projection matrix.
bads : list of str
Some bad channels to exclude. If bad channels were marked
in the raw file when projs were calculated using mne-python,
they should not need to be included here as they will
have been automatically omitted from the projectors.
include_active : bool
Also include projectors that are already active.
Returns
-------
proj : array of shape [n_channels, n_channels]
The projection operator to apply to the data.
nproj : int
How many items in the projector.
U : array
The orthogonal basis of the projection vectors (optional).
"""
return _make_projector(projs, ch_names, bads, include_active)
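# --- Illustrative usage sketch (added; not part of the original source) ---
# A minimal example of how the operator returned by make_projector is
# typically applied. The names `info` and `data` are assumptions for
# illustration only: `info` is a measurement-info dict with 'projs',
# 'ch_names' and 'bads' entries, `data` is an (n_channels, n_samples) array.
def _example_apply_projector(info, data):
    """Sketch only: project SSP components out of a data array."""
    proj, nproj, _ = make_projector(info['projs'], info['ch_names'],
                                    bads=info['bads'])
    if nproj == 0:
        return data  # nothing to project out
    # (n_channels, n_channels) operator applied to (n_channels, n_samples)
    return np.dot(proj, data)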
def _make_projector(projs, ch_names, bads=(), include_active=True,
inplace=False):
"""Subselect projs based on ch_names and bads.
Use inplace=True mode to modify ``projs`` inplace so that no
warning will be raised next time projectors are constructed with
the given inputs. If inplace=True, no meaningful data are returned.
"""
nchan = len(ch_names)
if nchan == 0:
raise ValueError('No channel names specified')
default_return = (np.eye(nchan, nchan), 0, [])
# Check trivial cases first
if projs is None:
return default_return
nvec = 0
nproj = 0
for p in projs:
if not p['active'] or include_active:
nproj += 1
nvec += p['data']['nrow']
if nproj == 0:
return default_return
# Pick the appropriate entries
vecs = np.zeros((nchan, nvec))
nvec = 0
nonzero = 0
for k, p in enumerate(projs):
if not p['active'] or include_active:
if (len(p['data']['col_names']) !=
len(np.unique(p['data']['col_names']))):
raise ValueError('Channel name list in projection item %d'
' contains duplicate items' % k)
# Get the two selection vectors to pick correct elements from
# the projection vectors omitting bad channels
sel = []
vecsel = []
for c, name in enumerate(ch_names):
if name in p['data']['col_names'] and name not in bads:
sel.append(c)
vecsel.append(p['data']['col_names'].index(name))
            # If there is something to pick, pick it
nrow = p['data']['nrow']
this_vecs = vecs[:, nvec:nvec + nrow]
if len(sel) > 0:
this_vecs[sel] = p['data']['data'][:, vecsel].T
# Rescale for better detection of small singular values
for v in range(p['data']['nrow']):
psize = sqrt(np.sum(this_vecs[:, v] * this_vecs[:, v]))
if psize > 0:
orig_n = p['data']['data'].any(axis=0).sum()
# Average ref still works if channels are removed
if len(vecsel) < 0.9 * orig_n and not inplace and \
(p['kind'] != FIFF.FIFFV_MNE_PROJ_ITEM_EEG_AVREF or
len(vecsel) == 1):
warn('Projection vector "%s" has magnitude %0.2f '
'(should be unity), applying projector with '
'%s/%s of the original channels available may '
'be dangerous, consider recomputing and adding '
'projection vectors for channels that are '
'eventually used. If this is intentional, '
'consider using info.normalize_proj()'
% (p['desc'], psize, len(vecsel), orig_n))
this_vecs[:, v] /= psize
nonzero += 1
# If doing "inplace" mode, "fix" the projectors to only operate
# on this subset of channels.
if inplace:
p['data']['data'] = this_vecs[sel].T
p['data']['col_names'] = [p['data']['col_names'][ii]
for ii in vecsel]
nvec += p['data']['nrow']
# Check whether all of the vectors are exactly zero
if nonzero == 0 or inplace:
return default_return
# Reorthogonalize the vectors
U, S, V = linalg.svd(vecs[:, :nvec], full_matrices=False)
# Throw away the linearly dependent guys
nproj = np.sum((S / S[0]) > 1e-2)
U = U[:, :nproj]
# Here is the celebrated result
proj = np.eye(nchan, nchan) - np.dot(U, U.T)
return proj, nproj, U
def _normalize_proj(info):
"""Normalize proj after subselection to avoid warnings.
This is really only useful for tests, and might not be needed
eventually if we change or improve our handling of projectors
with picks.
"""
# Here we do info.get b/c info can actually be a noise cov
_make_projector(info['projs'], info.get('ch_names', info.get('names')),
info['bads'], include_active=True, inplace=True)
def make_projector_info(info, include_active=True):
"""Make an SSP operator using the measurement info.
Calls make_projector on good channels.
Parameters
----------
info : dict
Measurement info.
include_active : bool
Also include projectors that are already active.
Returns
-------
proj : array of shape [n_channels, n_channels]
The projection operator to apply to the data.
nproj : int
How many items in the projector.
"""
proj, nproj, _ = make_projector(info['projs'], info['ch_names'],
info['bads'], include_active)
return proj, nproj
@verbose
def activate_proj(projs, copy=True, verbose=None):
"""Set all projections to active.
Useful before passing them to make_projector.
Parameters
----------
projs : list
The projectors.
copy : bool
Modify projs in place or operate on a copy.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
projs : list
The projectors.
"""
if copy:
projs = deepcopy(projs)
# Activate the projection items
for proj in projs:
proj['active'] = True
logger.info('%d projection items activated' % len(projs))
return projs
@verbose
def deactivate_proj(projs, copy=True, verbose=None):
"""Set all projections to inactive.
Useful before saving raw data without projectors applied.
Parameters
----------
projs : list
The projectors.
copy : bool
Modify projs in place or operate on a copy.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
projs : list
The projectors.
"""
if copy:
projs = deepcopy(projs)
# Deactivate the projection items
for proj in projs:
proj['active'] = False
logger.info('%d projection items deactivated' % len(projs))
return projs
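# --- Illustrative usage sketch (added; not part of the original source) ---
# Projectors are commonly deactivated before saving raw data and re-activated
# before building an SSP operator. Both helpers copy their input by default,
# so the caller's list is left untouched.
def _example_toggle_projs(projs):
    """Sketch only: round-trip a projector list through deactivate/activate."""
    idle = deactivate_proj(projs)    # e.g. prior to writing raw data
    active = activate_proj(idle)     # e.g. prior to calling make_projector
    return active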
@verbose
def make_eeg_average_ref_proj(info, activate=True, verbose=None):
"""Create an EEG average reference SSP projection vector.
Parameters
----------
info : dict
Measurement info.
activate : bool
If True projections are activated.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
    eeg_proj : instance of Projection
The SSP/PCA projector.
"""
if info.get('custom_ref_applied', False):
raise RuntimeError('A custom reference has been applied to the '
'data earlier. Please use the '
'mne.io.set_eeg_reference function to move from '
'one EEG reference to another.')
logger.info("Adding average EEG reference projection.")
eeg_sel = pick_types(info, meg=False, eeg=True, ref_meg=False,
exclude='bads')
ch_names = info['ch_names']
eeg_names = [ch_names[k] for k in eeg_sel]
n_eeg = len(eeg_sel)
if n_eeg == 0:
raise ValueError('Cannot create EEG average reference projector '
'(no EEG data found)')
vec = np.ones((1, n_eeg))
vec /= n_eeg
explained_var = None
eeg_proj_data = dict(col_names=eeg_names, row_names=None,
data=vec, nrow=1, ncol=n_eeg)
eeg_proj = Projection(active=activate, data=eeg_proj_data,
desc='Average EEG reference',
kind=FIFF.FIFFV_MNE_PROJ_ITEM_EEG_AVREF,
explained_var=explained_var)
return eeg_proj
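# --- Illustrative usage sketch (added; not part of the original source) ---
# Appending an average EEG reference projector to a measurement-info dict
# when one is not already present, mirroring what setup_proj does below.
# `info` is an assumed measurement-info dict.
def _example_add_average_ref(info):
    """Sketch only: add a (deactivated) average EEG reference projector."""
    if _needs_eeg_average_ref_proj(info):
        info['projs'].append(make_eeg_average_ref_proj(info, activate=False))
    return info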
def _has_eeg_average_ref_proj(projs, check_active=False):
"""Determine if a list of projectors has an average EEG ref.
Optionally, set check_active=True to additionally check if the CAR
has already been applied.
"""
for proj in projs:
if (proj['desc'] == 'Average EEG reference' or
proj['kind'] == FIFF.FIFFV_MNE_PROJ_ITEM_EEG_AVREF):
if not check_active or proj['active']:
return True
return False
def _needs_eeg_average_ref_proj(info):
"""Determine if the EEG needs an averge EEG reference.
This returns True if no custom reference has been applied and no average
reference projection is present in the list of projections.
"""
eeg_sel = pick_types(info, meg=False, eeg=True, ref_meg=False,
exclude='bads')
return (len(eeg_sel) > 0 and
not info['custom_ref_applied'] and
not _has_eeg_average_ref_proj(info['projs']))
@verbose
def setup_proj(info, add_eeg_ref=True, activate=True, verbose=None):
"""Set up projection for Raw and Epochs.
Parameters
----------
info : dict
The measurement info.
add_eeg_ref : bool
If True, an EEG average reference will be added (unless one
already exists).
activate : bool
If True projections are activated.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
projector : array of shape [n_channels, n_channels]
The projection operator to apply to the data.
info : dict
The modified measurement info (Warning: info is modified inplace).
"""
    # Add an average EEG reference proj if necessary
if add_eeg_ref and _needs_eeg_average_ref_proj(info):
eeg_proj = make_eeg_average_ref_proj(info, activate=activate)
info['projs'].append(eeg_proj)
# Create the projector
projector, nproj = make_projector_info(info)
if nproj == 0:
if verbose:
logger.info('The projection vectors do not apply to these '
'channels')
projector = None
else:
logger.info('Created an SSP operator (subspace dimension = %d)'
% nproj)
# The projection items have been activated
if activate:
info['projs'] = activate_proj(info['projs'], copy=False)
return projector, info
def _uniquify_projs(projs, check_active=True, sort=True):
"""Make unique projs."""
final_projs = []
for proj in projs: # flatten
if not any(_proj_equal(p, proj, check_active) for p in final_projs):
final_projs.append(proj)
my_count = count(len(final_projs))
def sorter(x):
"""Sort in a nice way."""
digits = [s for s in x['desc'] if s.isdigit()]
if digits:
sort_idx = int(digits[-1])
else:
sort_idx = next(my_count)
return (sort_idx, x['desc'])
return sorted(final_projs, key=sorter) if sort else final_projs
|
the-stack_0_12094 | from django.shortcuts import render, get_object_or_404, render_to_response
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from .models import Report, UploadedFile, Folder
from .forms import ReportForm, FolderForm
from django.template import RequestContext
from web.models import UserGroup
from django.contrib.auth.models import User
from Crypto import Random
from datetime import datetime
# Create your views here.
random_generator = Random.new().read
def index(request):
return render(request, 'createReport.html')
def thanks(request):
return render(request, 'form.html')
def folders(request):
reports = Report.objects.all()
folders = Folder.objects.all()
return render(request, 'reports/folders.html', {'folders': folders})
def viewReportsInFolders(request):
folders = Folder.objects.all()
reports = Report.objects.all()
return render(request, 'reports/savedReports.html',
{'folders': folders, 'reports': reports})
def create_folder(request):
reports = Report.objects.all()
username_id = request.user
if request.method == 'POST':
form = FolderForm(request.POST, request.FILES)
selected = request.POST.getlist('selected_report[]')
if form.is_valid():
folder_object = Folder.objects.create(
name=form.cleaned_data['title'], owner=username_id
)
for report_selected in selected:
re = Report.objects.get(title=report_selected)
folder_object.members.add(re)
return HttpResponse("Folder has been updated")
else:
form = FolderForm()
variables = RequestContext(request, {
'form': form, 'reports': reports
})
return render_to_response(
'reports/folderz.html',
variables,
)
def edit_folder(request, id=None):
try:
        folder = Folder.objects.get(id=id)
        form_class = FolderForm(user=request.user, instance=folder)
if request.method == 'POST':
form = FolderForm(request.POST, request.FILES, instance=folder)
selected = request.POST.getlist('selected_report[]')
if form.is_valid():
folder_object = Folder.objects.create(
                    name=form.cleaned_data['title'], owner=request.user
)
for report_selected in selected:
re = Report.objects.get(title=report_selected)
folder_object.members.add(re)
            return render(request, 'reports/doneEditingFolder.html', {'form_class': form_class})
except:
return HttpResponse("You can't update this folder")
def edit_with_delete(request, id=None):
try:
folder = Folder.objects.get(id=id)
print(folder.owner)
if folder.owner != request.user:
text = "You do not have permission to change this folder"
return HttpResponse(text)
else:
Folder.objects.filter(id=id).delete()
print("deleted")
return render(request, 'reports/redirect_to_change.html')
except:
text = "You are unable to change this folder"
return HttpResponse(text)
def folder(request):
folder_name = request.POST.get('selected')
print(folder_name)
reports = Report.objects.all()
print(reports)
return render(request, 'reports/folder.html', {'folder_name': folder_name,
'reports': reports})
@login_required
def delete_folder(request, id=None):
try:
folder = Folder.objects.get(id=id)
if folder.owner != request.user:
text = "You do not have permission to delete this folder"
return HttpResponse(text)
else:
Folder.objects.filter(id=id).delete()
return render(request, 'reports/deleteFolder.html')
except:
text = "You are unable to delete this folder"
return HttpResponse(text)
@login_required
def add_report(request):
form_class = ReportForm(user=request.user)
# if this is a POST request process the form data
if request.method == 'POST':
# create a form instance and populate it with data from the request
form = ReportForm(request.POST, request.FILES, user=request.user)
# check whether it's valid:
if form.is_valid():
report = form.save(commit=False)
report.owner = User.objects.get(username=request.user.username)
if form.cleaned_data['Share with:'] != 'all':
report.group = UserGroup.objects.get(
name=form.cleaned_data['Share with:'])
files = request.FILES.getlist('file_field')
report.save()
for f in files:
file = UploadedFile(report=report, owner=request.user)
file.file_obj = f
file.save()
# redirect to a new URL:
return render(request, 'reports/createReport.html', {'form': form_class})
else:
text = form.errors
return HttpResponse(text)
return render(request, 'reports/createReport.html', {'form': form_class})
@login_required
def edit_report(request, id=None):
try:
if id:
report = Report.objects.get(pk=id)
if report.owner != request.user:
text = "You do not have permission to edit this report"
return HttpResponse(text)
else:
report = Report()
form_class = ReportForm(user=request.user, instance=report)
if request.method == 'POST':
form = ReportForm(request.POST, request.FILES, instance=report, user=request.user)
if form.is_valid():
report = form.save(commit=False)
report.owner = User.objects.get(username=request.user.username)
if form.cleaned_data['Share with:'] != 'all':
report.group = UserGroup.objects.get(
name=form.cleaned_data['Share with:'])
files = request.FILES.getlist('file_field')
report.save()
for curr in report.file_set.all():
curr.delete()
for f in files:
file = UploadedFile(report=report, owner=request.user)
file.file_obj = f
file.save()
return render(request, 'reports/doneEditing.html', {'form': form_class})
else:
text = form.errors
return HttpResponse(text)
except:
text = "You are not able to edit this report"
return HttpResponse(text)
return render(request, 'reports/editReport.html', {'form': form_class, 'id': id})
@login_required
def see_reports(request):
initial_search = {}
reports_list = Report.objects.all().filter(group=None)
for group in UserGroup.objects.filter(members=request.user):
reports_list = reports_list | group.report_set.all()
# Filter based by min date
if request.GET.get('sincesearch', False):
date_in = request.GET['sincesearch']
initial_search['since'] = date_in
date_since = datetime(
*[int(v) for v in date_in.replace('T', '-').replace(':',
'-').split('-')])
reports_list = reports_list.filter(timestamp__gte=date_since)
# Filter based by max date
if request.GET.get('beforesearch', False):
date_in = request.GET['beforesearch']
initial_search['before'] = date_in
date_since = datetime(
*[int(v) for v in date_in.replace('T', '-').replace(':',
'-').split('-')])
reports_list = reports_list.filter(timestamp__lte=date_since)
# Filter based on creator
if request.GET.get('ownersearch', False):
owner = request.GET['ownersearch']
initial_search['owner'] = owner
reports_list = reports_list.filter(owner__username__icontains=owner)
# Filter based on title
if request.GET.get('titlesearch', False):
title = request.GET['titlesearch']
initial_search['title'] = title
reports_list = reports_list.filter(title__icontains=title)
# Filter based on descriptions
if request.GET.get('descsearch', False):
desc = request.GET['descsearch']
initial_search['desc'] = desc
short_search = reports_list.filter(short_desc__icontains=desc)
long_search = reports_list.filter(long_desc__icontains=desc)
reports_list = short_search | long_search
for report in reports_list:
report.files = report.file_set.all()
for file in report.files:
file.file_obj.name = file.file_obj.name.split('/')[-1]
return render(request, 'reports/see_reports.html', {'reports_list':
reports_list,
'search_values':
initial_search})
def add_reports(request, folder_name):
print("hi")
print(folder_name)
reports = Report.objects.all()
username_id = request.user
print(request.method)
if request.method == 'POST':
print("hi2")
form = FolderForm(request.POST)
selected = request.POST.getlist('selectedReport[]')
print(selected)
if form.is_valid():
print("hi3")
folder_object = Folder.objects.create(name=folder_name,
owner=username_id)
folder_object.save()
for report_selected in selected:
re = Report.objects.get(title=report_selected)
folder_object.members.add(re)
print(folder_object.members)
else:
form = FolderForm()
folder_object = []
if folder_name is not None:
folder_object = Folder.objects.get(name=folder_name)
print(folder_object)
print(folder_object.members)
variables = RequestContext(request, {'form': form, 'reports': reports})
return render_to_response(
'reports/folder.html',
variables,
)
def viewFolders(request):
context = {}
context['folders_list'] = Folder.objects.all()
    return render(request, 'reports/folders.html', context)
@login_required
def see_my_reports(request):
initial_search = {}
my_reports_list = Report.objects.all().filter(owner=request.user).order_by('keyword')
for group in UserGroup.objects.filter(members=request.user):
my_reports_list = my_reports_list | group.report_set.all()
# Filter based by min date
if request.GET.get('sincesearch', False):
date_in = request.GET['sincesearch']
initial_search['since'] = date_in
date_since = datetime(
*[int(v) for v in date_in.replace('T', '-').replace(':',
'-').split('-')])
my_reports_list = my_reports_list.filter(timestamp__gte=date_since)
# Filter based by max date
if request.GET.get('beforesearch', False):
date_in = request.GET['beforesearch']
initial_search['before'] = date_in
date_since = datetime(
*[int(v) for v in date_in.replace('T', '-').replace(':',
'-').split('-')])
my_reports_list = my_reports_list.filter(timestamp__lte=date_since)
# Filter based on creator
if request.GET.get('ownersearch', False):
owner = request.GET['ownersearch']
initial_search['owner'] = owner
my_reports_list = my_reports_list.filter(owner__username__icontains=owner)
# Filter based on title
if request.GET.get('titlesearch', False):
title = request.GET['titlesearch']
initial_search['title'] = title
my_reports_list = my_reports_list.filter(title__icontains=title)
# Filter based on descriptions
if request.GET.get('descsearch', False):
desc = request.GET['descsearch']
initial_search['desc'] = desc
short_search = my_reports_list.filter(short_desc__icontains=desc)
long_search = my_reports_list.filter(long_desc__icontains=desc)
my_reports_list = short_search | long_search
for report in my_reports_list:
report.files = report.file_set.all()
for file in report.files:
file.file_obj.name = file.file_obj.name.split('/')[-1]
return render(request, 'reports/see_my_reports.html',
{'my_reports_list': my_reports_list, 'search_values': initial_search})
@login_required
def delete_report(request, id=None):
try:
report = Report.objects.get(id=id)
print(report.owner)
if report.owner != request.user:
text = "You do not have permission to delete this report"
return HttpResponse(text)
else:
Report.objects.filter(id=id).delete()
return render(request, 'reports/deleteReport.html')
except:
# text = "You are not able to delete this report"
# return HttpResponse(text)
return render(request, 'reports/deleteReport.html')
@login_required
def download_file(request, pk):
file = get_object_or_404(UploadedFile, pk=pk)
if file.report.group is not None:
if file.report.group not in UserGroup.objects.filter(members=request.user):
return HttpResponse(status=404)
filename = file.file_obj.name.split('/')[-1]
response = HttpResponse(file.file_obj, content_type='text/plain')
response['Content-Disposition'] = 'attachment; filename=%s' % filename
return response
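# --- Illustrative wiring sketch (added; not part of the original source) ---
# A hypothetical urls.py for these views might look roughly like the lines
# below; the route strings, names and the `path` import are assumptions,
# not taken from this project.
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('reports/new/', views.add_report, name='add_report'),
#       path('reports/', views.see_reports, name='see_reports'),
#       path('reports/mine/', views.see_my_reports, name='see_my_reports'),
#       path('reports/<int:id>/edit/', views.edit_report, name='edit_report'),
#       path('reports/<int:id>/delete/', views.delete_report, name='delete_report'),
#       path('files/<int:pk>/download/', views.download_file, name='download_file'),
#   ]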
|
the-stack_0_12097 | import urllib3
import json
from api.src.postgre import Postgres
class DataCrawler:
def getData(self):
jsonOndeFoiRoubado = self.getJsonFromOndeFoiRoubado()
        jsonOndeTemTiro = self.getJsonFromOndeTemTiro()
postgre = Postgres()
postgre.open()
postgre.insertOndeFoiRoubado(jsonOndeFoiRoubado)
postgre.insertOndeTemTiro(jsonOndeTemTiro)
postgre.close()
def getJsonFromOndeFoiRoubado(self):
http = urllib3.PoolManager()
        # Fetch the OndeFuiRoubado page for Rio de Janeiro
        r = http.request('GET', 'http://www.ondefuiroubado.com.br/rio-de-janeiro/RJ')
htmlData = str(r.data.decode('utf-8'))
        # Slice out the inline JS initializer that embeds the crime data
        idxStart = htmlData.find('OndeFuiRoubado.Views.CrimesIndexView.initialize')
        idxEnd = htmlData.find('OndeFuiRoubado.PoliceStations')
        htmlData = htmlData[idxStart:idxEnd]
htmlData = htmlData.replace('OndeFuiRoubado.Views.CrimesIndexView.initialize(','')
htmlData = htmlData.strip()
htmlData = htmlData.replace(');\\n });\\n\\n document.addEventListener(\\\'onMainMapLoad\\\', function(data) {\\n','')
htmlData = htmlData.strip()
htmlData = htmlData.replace("document.addEventListener('onMainMapLoad', function(data) {",'')
htmlData = htmlData.replace("\n","")
htmlData = htmlData.replace("); });","")
return json.loads(htmlData)
    def getJsonFromOndeTemTiro(self):
http = urllib3.PoolManager()
        # Fetch the Fusion Tables query result used as the 'onde tem tiro' dataset
        r = http.request('GET', 'https://www.googleapis.com/fusiontables/v1/query?sql=SELECT%20*%20FROM%201HaQhL95pS0XhFQcifZ6fzKifuCXVdFxl-caH0zDf&key=AIzaSyC1CNeSPJOm5mPzk3kTrXuHJgG5vJP9Tgo')
htmlData = str(r.data.decode('utf-8'))
htmlData = htmlData.replace("\\n","#")
return json.loads(htmlData)
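# --- Illustrative usage sketch (added; not part of the original source) ---
# Running the crawler as a script; assumes the Postgres connection used by
# api.src.postgre is already configured.
if __name__ == '__main__':
    DataCrawler().getData()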
|