76477
|
import locale
import os
from enum import Enum, IntEnum
from functools import lru_cache
import gi
gi.require_version("Gtk", "3.0")
gi.require_version("Gdk", "3.0")
gi.require_version("Notify", "0.7")
from gi.repository import Gtk, Gdk, Notify
from app.settings import Settings, SettingsException, IS_DARWIN
# Init notify
Notify.init("DemonEditor")
# Setting mod mask for the keyboard depending on the platform.
MOD_MASK = Gdk.ModifierType.MOD2_MASK if IS_DARWIN else Gdk.ModifierType.CONTROL_MASK
# Path to *.glade files.
UI_RESOURCES_PATH = "app/ui/" if os.path.exists("app/ui/") else "/usr/share/demoneditor/app/ui/"
IS_GNOME_SESSION = int(bool(os.environ.get("GNOME_DESKTOP_SESSION_ID")))
# Translation.
TEXT_DOMAIN = "demon-editor"
APP_FONT = None
try:
settings = Settings.get_instance()
except SettingsException:
pass
else:
os.environ["LANGUAGE"] = settings.language
if UI_RESOURCES_PATH == "app/ui/":
locale.bindtextdomain(TEXT_DOMAIN, UI_RESOURCES_PATH + "lang")
st = Gtk.Settings().get_default()
APP_FONT = st.get_property("gtk-font-name")
if not settings.list_font:
settings.list_font = APP_FONT
if settings.is_themes_support:
st.set_property("gtk-theme-name", settings.theme)
st.set_property("gtk-icon-theme-name", settings.icon_theme)
theme = Gtk.IconTheme.get_default()
theme.append_search_path(UI_RESOURCES_PATH + "icons")
_IMAGE_MISSING = theme.load_icon("image-missing", 16, 0) if theme.lookup_icon("image-missing", 16, 0) else None
CODED_ICON = theme.load_icon("emblem-readonly", 16, 0) if theme.lookup_icon(
"emblem-readonly", 16, 0) else _IMAGE_MISSING
LOCKED_ICON = theme.load_icon("changes-prevent-symbolic", 16, 0) if theme.lookup_icon(
"changes-prevent-symbolic", 16, 0) else _IMAGE_MISSING
HIDE_ICON = theme.load_icon("go-jump", 16, 0) if theme.lookup_icon("go-jump", 16, 0) else _IMAGE_MISSING
TV_ICON = theme.load_icon("tv-symbolic", 16, 0) if theme.lookup_icon("tv-symbolic", 16, 0) else _IMAGE_MISSING
IPTV_ICON = theme.load_icon("emblem-shared", 16, 0) if theme.lookup_icon("emblem-shared", 16, 0) else None
EPG_ICON = theme.load_icon("gtk-index", 16, 0) if theme.lookup_icon("gtk-index", 16, 0) else None
DEFAULT_ICON = theme.load_icon("emblem-default", 16, 0) if theme.lookup_icon("emblem-default", 16, 0) else None
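# Hedged refactoring sketch: the lookup-then-load pattern above repeats for every
# icon, so a small helper (illustrative only, not used by the constants above)
# could express it once. The name _load_theme_icon is hypothetical.
def _load_theme_icon(name, size=16, fallback=None):
    """ Returns the themed icon if it exists, otherwise the given fallback. """
    return theme.load_icon(name, size, 0) if theme.lookup_icon(name, size, 0) else fallback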
@lru_cache(maxsize=1)
def get_yt_icon(icon_name, size=24):
""" Getting YouTube icon.
If the icon is not found in the icon themes, the "Info" icon is returned by default!
"""
default_theme = Gtk.IconTheme.get_default()
if default_theme.has_icon(icon_name):
return default_theme.load_icon(icon_name, size, 0)
n_theme = Gtk.IconTheme.new()
import glob
for theme_name in map(os.path.basename, filter(os.path.isdir, glob.glob("/usr/share/icons/*"))):
n_theme.set_custom_theme(theme_name)
if n_theme.has_icon(icon_name):
return n_theme.load_icon(icon_name, size, 0)
return default_theme.load_icon("info", size, 0)
def show_notification(message, timeout=10000, urgency=1):
""" Shows notification.
@param message: text to display
@param timeout: milliseconds
@param urgency: 0 - low, 1 - normal, 2 - critical
"""
notify = Notify.Notification.new("DemonEditor", message, "demon-editor")
notify.set_urgency(urgency)
notify.set_timeout(timeout)
notify.show()
class KeyboardKey(Enum):
""" The raw(hardware) codes of the keyboard keys. """
E = 26
R = 27
T = 28
P = 33
S = 39
F = 41
X = 53
C = 54
V = 55
W = 25
Z = 52
INSERT = 118
HOME = 110
END = 115
UP = 111
DOWN = 116
PAGE_UP = 112
PAGE_DOWN = 117
LEFT = 113
RIGHT = 114
F2 = 68
F7 = 73
SPACE = 65
DELETE = 119
BACK_SPACE = 22
CTRL_L = 37
CTRL_R = 105
# Laptop codes
HOME_KP = 79
END_KP = 87
PAGE_UP_KP = 81
PAGE_DOWN_KP = 89
@classmethod
def value_exist(cls, value):
return value in (val.value for val in cls.__members__.values())
# Keys for move in lists. KEY_KP_(NAME) for laptop!!!
MOVE_KEYS = (KeyboardKey.UP, KeyboardKey.PAGE_UP, KeyboardKey.DOWN, KeyboardKey.PAGE_DOWN, KeyboardKey.HOME,
KeyboardKey.END, KeyboardKey.HOME_KP, KeyboardKey.END_KP, KeyboardKey.PAGE_UP_KP, KeyboardKey.PAGE_DOWN_KP)
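# Usage sketch (hypothetical Gdk key-press handler, not part of this module):
#
#   def on_view_key_press(view, event):
#       if not KeyboardKey.value_exist(event.hardware_keycode):
#           return False
#       key = KeyboardKey(event.hardware_keycode)
#       if key in MOVE_KEYS:
#           ...  # let the list view handle navigation itself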
class FavClickMode(IntEnum):
""" Double click mode on the service in the bouquet(FAV) list. """
DISABLED = 0
STREAM = 1
PLAY = 2
ZAP = 3
ZAP_PLAY = 4
class ViewTarget(Enum):
""" Used for set target view. """
BOUQUET = 0
FAV = 1
SERVICES = 2
class BqGenType(Enum):
""" Bouquet generation type. """
SAT = 0
EACH_SAT = 1
PACKAGE = 2
EACH_PACKAGE = 3
TYPE = 4
EACH_TYPE = 5
class Column(IntEnum):
""" Column nums in the views """
# Main view
SRV_CAS_FLAGS = 0
SRV_STANDARD = 1
SRV_CODED = 2
SRV_SERVICE = 3
SRV_LOCKED = 4
SRV_HIDE = 5
SRV_PACKAGE = 6
SRV_TYPE = 7
SRV_PICON = 8
SRV_PICON_ID = 9
SRV_SSID = 10
SRV_FREQ = 11
SRV_RATE = 12
SRV_POL = 13
SRV_FEC = 14
SRV_SYSTEM = 15
SRV_POS = 16
SRV_DATA_ID = 17
SRV_FAV_ID = 18
SRV_TRANSPONDER = 19
SRV_TOOLTIP = 20
SRV_BACKGROUND = 21
# FAV view
FAV_NUM = 0
FAV_CODED = 1
FAV_SERVICE = 2
FAV_LOCKED = 3
FAV_HIDE = 4
FAV_TYPE = 5
FAV_POS = 6
FAV_ID = 7
FAV_PICON = 8
FAV_TOOLTIP = 9
FAV_BACKGROUND = 10
# Bouquets view
BQ_NAME = 0
BQ_LOCKED = 1
BQ_HIDDEN = 2
BQ_TYPE = 3
# Alternatives view
ALT_NUM = 0
ALT_PICON = 1
ALT_SERVICE = 2
ALT_TYPE = 3
ALT_POS = 4
ALT_FAV_ID = 5
ALT_ID = 6
ALT_ITER = 7
def __index__(self):
""" Overridden to get the index in slices directly """
return self.value
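# Usage sketch (hypothetical list-store row, not from this module): since
# __index__ returns the enum value, Column members work directly as indices
# and slice bounds, e.g.
#
#   name = row[Column.SRV_SERVICE]
#   freq, rate = row[Column.SRV_FREQ:Column.SRV_RATE + 1]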
if __name__ == "__main__":
pass
|
76493
|
from abc import ABC, abstractmethod
from typing import List
import numpy as np
from scipy.stats import t, spearmanr
from scipy.special import erfinv
from chemprop.uncertainty.uncertainty_calibrator import UncertaintyCalibrator
from chemprop.train import evaluate_predictions
class UncertaintyEvaluator(ABC):
"""
A class for evaluating the effectiveness of uncertainty estimates with metrics.
"""
def __init__(
self,
evaluation_method: str,
calibration_method: str,
uncertainty_method: str,
dataset_type: str,
loss_function: str,
calibrator: UncertaintyCalibrator,
):
self.evaluation_method = evaluation_method
self.calibration_method = calibration_method
self.uncertainty_method = uncertainty_method
self.dataset_type = dataset_type
self.loss_function = loss_function
self.calibrator = calibrator
self.raise_argument_errors()
def raise_argument_errors(self):
"""
Raise errors for incompatibilities between dataset type and uncertainty method, or similar.
"""
if self.dataset_type == "spectra":
raise NotImplementedError(
"No uncertainty evaluators implemented for spectra dataset type."
)
if self.uncertainty_method in ['ensemble', 'dropout'] and self.dataset_type in ['classification', 'multiclass']:
raise NotImplementedError(
'Though ensemble and dropout uncertainty methods are available for classification '
'and multiclass dataset types, their outputs are not confidences and are not '
'compatible with any implemented evaluation methods for classification.'
)
@abstractmethod
def evaluate(
self,
targets: List[List[float]],
preds: List[List[float]],
uncertainties: List[List[float]],
) -> List[float]:
"""
Evaluate the performance of uncertainty predictions against the model target values.
:param targets: The target values for prediction.
:param preds: The prediction values of a model on the test set.
:param uncertainties: The estimated uncertainty values, either calibrated or uncalibrated, of a model on the test set.
:return: A list of metric values for each model task.
"""
class MetricEvaluator(UncertaintyEvaluator):
"""
A class for evaluating confidence estimates of classification and multiclass datasets using builtin evaluation metrics.
"""
def evaluate(
self,
targets: List[List[float]],
preds: List[List[float]],
uncertainties: List[List[float]],
):
return evaluate_predictions(
preds=uncertainties,
targets=targets,
num_tasks=np.array(targets).shape[1],
metrics=[self.evaluation_method],
dataset_type=self.dataset_type,
)[self.evaluation_method]
class NLLRegressionEvaluator(UncertaintyEvaluator):
"""
A class for evaluating regression uncertainty values using the mean negative-log-likelihood
of the actual targets given the probability distributions estimated by the model.
"""
def raise_argument_errors(self):
super().raise_argument_errors()
if self.dataset_type != "regression":
raise ValueError(
"NLL Regression Evaluator is only for regression dataset types."
)
def evaluate(
self,
targets: List[List[float]],
preds: List[List[float]],
uncertainties: List[List[float]],
):
if self.calibrator is None: # uncalibrated regression uncertainties are variances
uncertainties = np.array(uncertainties)
preds = np.array(preds)
targets = np.array(targets)
nll = np.log(2 * np.pi * uncertainties) / 2 \
+ (preds - targets) ** 2 / (2 * uncertainties)
return np.mean(nll, axis=0).tolist()
else:
nll = self.calibrator.nll(
preds=preds, unc=uncertainties, targets=targets
) # shape(data, task)
return np.mean(nll, axis=0).tolist()
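# Worked example (synthetic numbers, not from chemprop): for an uncalibrated
# variance u, the per-point Gaussian NLL of target y given prediction p is
#   0.5 * log(2 * pi * u) + (p - y) ** 2 / (2 * u)
# so p = 1.0, y = 0.5, u = 0.25 gives about 0.726; the evaluator returns the
# mean of this quantity over the data for each task.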
class NLLClassEvaluator(UncertaintyEvaluator):
"""
A class for evaluating classification uncertainty values using the mean negative-log-likelihood
of the actual targets given the probabilities assigned to them by the model.
"""
def raise_argument_errors(self):
super().raise_argument_errors()
if self.dataset_type != "classification":
raise ValueError(
"NLL Classification Evaluator is only for classification dataset types."
)
def evaluate(
self,
targets: List[List[float]],
preds: List[List[float]],
uncertainties: List[List[float]],
):
targets = np.array(targets)
uncertainties = np.array(uncertainties)
likelihood = uncertainties * targets + (1 - uncertainties) * (1 - targets)
nll = -1 * np.log(likelihood)
return np.mean(nll, axis=0).tolist()
class NLLMultiEvaluator(UncertaintyEvaluator):
"""
A class for evaluating multiclass uncertainty values using the mean negative-log-likelihood
of the actual targets given the probabilities assigned to them by the model.
"""
def raise_argument_errors(self):
super().raise_argument_errors()
if self.dataset_type != "multiclass":
raise ValueError(
"NLL Multiclass Evaluator is only for multiclass dataset types."
)
def evaluate(
self,
targets: List[List[float]],
preds: List[List[float]],
uncertainties: List[List[float]],
):
targets = np.array(targets, dtype=int) # shape(data, tasks)
uncertainties = np.array(uncertainties)
preds = np.array(preds)
nll = np.zeros_like(targets, dtype=float)  # float buffer; targets are ints
for i in range(targets.shape[1]):
task_preds = uncertainties[:, i]
task_targets = targets[:, i] # shape(data)
bin_targets = np.zeros_like(preds[:, 0, :]) # shape(data, classes)
bin_targets[np.arange(targets.shape[0]), task_targets] = 1
task_likelihood = np.sum(bin_targets * task_preds, axis=1)
task_nll = -1 * np.log(task_likelihood)
nll[:, i] = task_nll
return np.mean(nll, axis=0).tolist()
class CalibrationAreaEvaluator(UncertaintyEvaluator):
"""
A class for evaluating regression uncertainty values based on how they deviate from perfect
calibration on an observed-probability versus expected-probability plot.
"""
def raise_argument_errors(self):
super().raise_argument_errors()
if self.dataset_type != "regression":
raise NotImplementedError(
f"Miscalibration area is only implemented for regression dataset types."
)
def evaluate(
self,
targets: List[List[float]],
preds: List[List[float]],
uncertainties: List[List[float]],
):
targets = np.array(targets) # shape(data, tasks)
uncertainties = np.array(uncertainties)
preds = np.array(preds)
abs_error = np.abs(preds - targets) # shape(data, tasks)
fractions = np.zeros([preds.shape[1], 101]) # shape(tasks, 101)
fractions[:, 100] = 1
if self.calibrator is not None:
# using 101 bin edges, hardcoded
original_metric = self.calibrator.regression_calibrator_metric
original_scaling = self.calibrator.scaling
original_interval = self.calibrator.interval_percentile
for i in range(1, 100):
self.calibrator.regression_calibrator_metric = "interval"
self.calibrator.interval_percentile = i
self.calibrator.calibrate()
bin_scaling = self.calibrator.scaling
bin_unc = (
uncertainties
/ np.expand_dims(original_scaling, axis=0)
* np.expand_dims(bin_scaling, axis=0)
) # shape(data, tasks)
bin_fraction = np.mean(bin_unc >= abs_error, axis=0)
fractions[:, i] = bin_fraction
self.calibrator.regression_calibrator_metric = original_metric
self.calibrator.scaling = original_scaling
self.calibrator.interval_percentile = original_interval
else: # uncertainties are uncalibrated variances
std = np.sqrt(uncertainties)
for i in range(1, 100):
bin_scaling = erfinv(i / 100) * np.sqrt(2)
bin_unc = std * bin_scaling
bin_fraction = np.mean(bin_unc >= abs_error, axis=0)
fractions[:, i] = bin_fraction
# trapezoid rule
auce = np.sum(
0.01 * np.abs(fractions - np.expand_dims(np.arange(101) / 100, axis=0)),
axis=1,
)
return auce.tolist()
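# Minimal standalone sketch (synthetic single-task data; names are illustrative):
# it mirrors the uncalibrated else-branch above, where the expected fraction of
# absolute errors inside the i-th Gaussian percentile interval is i / 100.
def _auce_sketch(preds, targets, variances):
    """ Miscalibration area for one task from uncalibrated variances. """
    abs_error = np.abs(np.asarray(preds) - np.asarray(targets))
    std = np.sqrt(np.asarray(variances))
    fractions = np.zeros(101)
    fractions[100] = 1
    for i in range(1, 100):
        bin_unc = std * erfinv(i / 100) * np.sqrt(2)
        fractions[i] = np.mean(bin_unc >= abs_error)
    return np.sum(0.01 * np.abs(fractions - np.arange(101) / 100))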
class ExpectedNormalizedErrorEvaluator(UncertaintyEvaluator):
"""
A class that evaluates uncertainty performance by binning together clusters of predictions
and comparing the average predicted variance of the clusters against the RMSE of the cluster.
Method discussed in https://doi.org/10.1021/acs.jcim.9b00975.
"""
def raise_argument_errors(self):
super().raise_argument_errors()
if self.dataset_type != "regression":
raise ValueError(
f"Expected normalized error is only appropriate for regression dataset types."
)
def evaluate(
self,
targets: List[List[float]],
preds: List[List[float]],
uncertainties: List[List[float]],
):
targets = np.array(targets) # shape(data, tasks)
uncertainties = np.array(uncertainties)
preds = np.array(preds)
abs_error = np.abs(preds - targets) # shape(data, tasks)
sort_record = np.rec.fromarrays([uncertainties, abs_error], names="i, j")
sort_record.sort(axis=0)
uncertainties = sort_record["i"]
abs_error = sort_record["j"]
# get stdev scaling
if self.calibrator is not None:
original_metric = self.calibrator.regression_calibrator_metric
original_scaling = self.calibrator.scaling
# 100 bins
split_unc = np.array_split(
uncertainties, 100, axis=0
) # shape(list100, data, tasks)
split_error = np.array_split(abs_error, 100, axis=0)
mean_vars = np.zeros([preds.shape[1], 100]) # shape(tasks, 100)
rmses = np.zeros_like(mean_vars)
for i in range(100):
if self.calibrator is None: # starts as a variance
mean_vars[:, i] = np.mean(split_unc[i], axis=0)
rmses[:, i] = np.sqrt(np.mean(np.square(split_error[i]), axis=0))
elif self.calibration_method == "tscaling": # convert back to sample stdev
bin_unc = split_unc[i] / np.expand_dims(original_scaling, axis=0)
bin_var = t.var(df=self.calibrator.num_models - 1, scale=bin_unc)
mean_vars[:, i] = np.mean(bin_var, axis=0)
rmses[:, i] = np.sqrt(np.mean(np.square(split_error[i]), axis=0))
else:
self.calibrator.regression_calibrator_metric = "stdev"
self.calibrator.calibrate()
stdev_scaling = self.calibrator.scaling
self.calibrator.regression_calibrator_metric = original_metric
self.calibrator.scaling = original_scaling
bin_unc = split_unc[i]
bin_unc = (
bin_unc
/ np.expand_dims(original_scaling, axis=0)
* np.expand_dims(stdev_scaling, axis=0)
) # convert from interval to stdev as needed
mean_vars[:, i] = np.mean(np.square(bin_unc), axis=0)
rmses[:, i] = np.sqrt(np.mean(np.square(split_error[i]), axis=0))
ence = np.mean(np.abs(mean_vars - rmses) / mean_vars, axis=1)
return ence.tolist()
class SpearmanEvaluator(UncertaintyEvaluator):
"""
Class evaluating uncertainty performance using the Spearman rank correlation. The method produces
better scores (closer to 1 in the [-1, 1] range) when the uncertainty values are predictive
of the ranking of prediction errors.
"""
def raise_argument_errors(self):
super().raise_argument_errors()
if self.dataset_type != "regression":
raise ValueError(
f"Spearman rank correlation is only appropriate for regression dataset types."
)
def evaluate(
self,
targets: List[List[float]],
preds: List[List[float]],
uncertainties: List[List[float]],
):
targets = np.array(targets) # shape(data, tasks)
uncertainties = np.array(uncertainties)
preds = np.array(preds)
abs_error = np.abs(preds - targets) # shape(data, tasks)
num_tasks = targets.shape[1]
spearman_coeffs = []
for i in range(num_tasks):
spmn = spearmanr(uncertainties[:, i], abs_error[:, i]).correlation
spearman_coeffs.append(spmn)
return spearman_coeffs
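# Worked example (toy values): Spearman correlation depends only on ranks, so
#   spearmanr([0.1, 0.3, 0.9], [1.0, 2.0, 30.0]).correlation == 1.0
# i.e. the score is perfect whenever larger uncertainties always come with
# larger absolute errors, regardless of the actual magnitudes.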
def build_uncertainty_evaluator(
evaluation_method: str,
calibration_method: str,
uncertainty_method: str,
dataset_type: str,
loss_function: str,
calibrator: UncertaintyCalibrator,
) -> UncertaintyEvaluator:
"""
Function that chooses and returns the appropriate :class: `UncertaintyEvaluator` subclass
for the provided arguments.
"""
supported_evaluators = {
"nll": {
"regression": NLLRegressionEvaluator,
"classification": NLLClassEvaluator,
"multiclass": NLLMultiEvaluator,
"spectra": None,
}[dataset_type],
"miscalibration_area": CalibrationAreaEvaluator,
"ence": ExpectedNormalizedErrorEvaluator,
"spearman": SpearmanEvaluator,
}
classification_metrics = [
"auc",
"prc-auc",
"accuracy",
"binary_cross_entropy",
"f1",
"mcc",
]
multiclass_metrics = [
"cross_entropy",
"accuracy",
"f1",
"mcc"
]
if dataset_type == "classification" and evaluation_method in classification_metrics:
evaluator_class = MetricEvaluator
elif dataset_type == "multiclass" and evaluation_method in multiclass_metrics:
evaluator_class = MetricEvaluator
else:
evaluator_class = supported_evaluators.get(evaluation_method, None)
if evaluator_class is None:
raise NotImplementedError(
f"Evaluator type {evaluation_method} is not supported. Avalable options are all calibration/multiclass metrics and {list(supported_evaluators.keys())}"
)
else:
evaluator = evaluator_class(
evaluation_method=evaluation_method,
calibration_method=calibration_method,
uncertainty_method=uncertainty_method,
dataset_type=dataset_type,
loss_function=loss_function,
calibrator=calibrator,
)
return evaluator
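# Example call (illustrative arguments; building a calibrator is outside this file):
#
#   evaluator = build_uncertainty_evaluator(
#       evaluation_method="spearman",
#       calibration_method=None,
#       uncertainty_method="ensemble",
#       dataset_type="regression",
#       loss_function="mse",
#       calibrator=None,
#   )
#   scores = evaluator.evaluate(targets=targets, preds=preds, uncertainties=uncertainties)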
|
76534
|
import hydra
import hydra.utils as utils
from pathlib import Path
import torch
import numpy as np
from tqdm import tqdm
import soundfile as sf
from model_encoder import Encoder, Encoder_lf0
from model_decoder import Decoder_ac
from model_encoder import SpeakerEncoder as Encoder_spk
import os
import random
from glob import glob
import subprocess
from spectrogram import logmelspectrogram
import kaldiio
import resampy
import pyworld as pw
def select_wavs(paths, min_dur=2, max_dur=8):
pp = []
for p in paths:
x, fs = sf.read(p)
if min_dur <= len(x) / fs <= max_dur:
pp.append(p)
return pp
def extract_logmel(wav_path, mean, std, sr=16000):
# wav, fs = librosa.load(wav_path, sr=sr)
wav, fs = sf.read(wav_path)
if fs != sr:
wav = resampy.resample(wav, fs, sr, axis=0)
fs = sr
#wav, _ = librosa.effects.trim(wav, top_db=15)
# duration = len(wav)/fs
assert fs == 16000
peak = np.abs(wav).max()
if peak > 1.0:
wav /= peak
mel = logmelspectrogram(
x=wav,
fs=fs,
n_mels=80,
n_fft=400,
n_shift=160,
win_length=400,
window='hann',
fmin=80,
fmax=7600,
)
mel = (mel - mean) / (std + 1e-8)
tlen = mel.shape[0]
frame_period = 160/fs*1000
f0, timeaxis = pw.dio(wav.astype('float64'), fs, frame_period=frame_period)
f0 = pw.stonemask(wav.astype('float64'), f0, timeaxis, fs)
f0 = f0[:tlen].reshape(-1).astype('float32')
nonzeros_indices = np.nonzero(f0)
lf0 = f0.copy()
lf0[nonzeros_indices] = np.log(f0[nonzeros_indices]) # for f0(Hz), lf0 > 0 when f0 != 0
mean, std = np.mean(lf0[nonzeros_indices]), np.std(lf0[nonzeros_indices])
lf0[nonzeros_indices] = (lf0[nonzeros_indices] - mean) / (std + 1e-8)
return mel, lf0
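# Shape note (illustrative, assuming a 2 s clip at 16 kHz): with a 160-sample hop
# (10 ms) the function yields roughly 200 frames, so
#
#   mel, lf0 = extract_logmel("sample.wav", mean, std)   # mel: (T, 80), lf0: (T,)
#
# where "sample.wav" is a hypothetical path and T is about 200 here.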
@hydra.main(config_path="config/convert.yaml")
def convert(cfg):
src_wav_paths = glob('/Dataset/VCTK-Corpus/wav48_silence_trimmed/p225/*mic1.flac') # modified to absolute wavs path, can select any unseen speakers
src_wav_paths = select_wavs(src_wav_paths)
tar1_wav_paths = glob('/Dataset/VCTK-Corpus/wav48_silence_trimmed/p231/*mic1.flac') # can select any unseen speakers
tar2_wav_paths = glob('/Dataset/VCTK-Corpus/wav48_silence_trimmed/p243/*mic1.flac') # can select any unseen speakers
# tar1_wav_paths = select_wavs(tar1_wav_paths)
# tar2_wav_paths = select_wavs(tar2_wav_paths)
tar1_wav_paths = [sorted(tar1_wav_paths)[0]]
tar2_wav_paths = [sorted(tar2_wav_paths)[0]]
print('len(src):', len(src_wav_paths), 'len(tar1):', len(tar1_wav_paths), 'len(tar2):', len(tar2_wav_paths))
tmp = cfg.checkpoint.split('/')
steps = tmp[-1].split('-')[-1].split('.')[0]
out_dir = f'test/{tmp[-3]}-{tmp[-2]}-{steps}'
out_dir = Path(utils.to_absolute_path(out_dir))
out_dir.mkdir(exist_ok=True, parents=True)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
encoder = Encoder(**cfg.model.encoder)
encoder_lf0 = Encoder_lf0()
encoder_spk = Encoder_spk()
decoder = Decoder_ac(dim_neck=64)
encoder.to(device)
encoder_lf0.to(device)
encoder_spk.to(device)
decoder.to(device)
print("Load checkpoint from: {}:".format(cfg.checkpoint))
checkpoint_path = utils.to_absolute_path(cfg.checkpoint)
checkpoint = torch.load(checkpoint_path, map_location=lambda storage, loc: storage)
encoder.load_state_dict(checkpoint["encoder"])
encoder_spk.load_state_dict(checkpoint["encoder_spk"])
decoder.load_state_dict(checkpoint["decoder"])
encoder.eval()
encoder_spk.eval()
decoder.eval()
mel_stats = np.load('./data/mel_stats.npy')
mean = mel_stats[0]
std = mel_stats[1]
feat_writer = kaldiio.WriteHelper("ark,scp:{o}.ark,{o}.scp".format(o=str(out_dir)+'/feats.1'))
for i, src_wav_path in tqdm(enumerate(src_wav_paths, 1)):
if i>10:
break
mel, lf0 = extract_logmel(src_wav_path, mean, std)
if i % 2 == 1:
ref_wav_path = random.choice(tar2_wav_paths)
tar = 'tarMale_'
else:
ref_wav_path = random.choice(tar1_wav_paths)
tar = 'tarFemale_'
ref_mel, _ = extract_logmel(ref_wav_path, mean, std)
mel = torch.FloatTensor(mel.T).unsqueeze(0).to(device)
lf0 = torch.FloatTensor(lf0).unsqueeze(0).to(device)
ref_mel = torch.FloatTensor(ref_mel.T).unsqueeze(0).to(device)
out_filename = os.path.basename(src_wav_path).split('.')[0]
with torch.no_grad():
z, _, _, _ = encoder.encode(mel)
lf0_embs = encoder_lf0(lf0)
spk_embs = encoder_spk(ref_mel)
output = decoder(z, lf0_embs, spk_embs)
logmel = output.squeeze(0).cpu().numpy()
feat_writer[out_filename] = logmel
feat_writer[out_filename+'_src'] = mel.squeeze(0).cpu().numpy().T
feat_writer[out_filename+'_ref'] = ref_mel.squeeze(0).cpu().numpy().T
subprocess.call(['cp', src_wav_path, out_dir])
feat_writer.close()
print('synthesize waveform...')
cmd = ['parallel-wavegan-decode', '--checkpoint', \
'/vocoder/checkpoint-3000000steps.pkl', \
'--feats-scp', f'{str(out_dir)}/feats.1.scp', '--outdir', str(out_dir)]
subprocess.call(cmd)
if __name__ == "__main__":
convert()
|
76536
|
import numpy as np
from nms import nms
import cfg
from shapely.geometry import Polygon
class Averager(object):
"""Compute average for torch.Tensor, used for loss average."""
def __init__(self):
self.reset()
def add(self, v):
count = v.data.numel()
v = v.data.sum()
self.n_count += count
self.sum += v
def reset(self):
self.n_count = 0
self.sum = 0
def val(self):
res = 0
if self.n_count != 0:
res = self.sum / float(self.n_count)
return res
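# Usage sketch (hypothetical loss tensor): accumulate per-batch losses and read
# back the running mean, e.g.
#
#   averager = Averager()
#   averager.add(loss)      # loss: torch.Tensor of any shape
#   mean_loss = averager.val()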
class eval_pre_rec_f1(object):
'''Takes each batch's predictions and the images' ground-truth boxes, then computes precision / recall / F1 score.'''
def __init__(self):
self.pixel_threshold = float(cfg.pixel_threshold)
self.reset()
def reset(self):
self.img_num = 0
self.pre = 0
self.rec = 0
self.f1_score = 0
def val(self):
mpre = self.pre / self.img_num * 100
mrec = self.rec / self.img_num * 100
mf1_score = self.f1_score / self.img_num * 100
return mpre, mrec, mf1_score
def sigmoid(self, x):
"""`y = 1 / (1 + exp(-x))`"""
return 1 / (1 + np.exp(-x))
def get_iou(self, g, p):
g = Polygon(g)
p = Polygon(p)
if not g.is_valid or not p.is_valid:
return 0
inter = Polygon(g).intersection(Polygon(p)).area
union = g.area + p.area - inter
if union == 0:
return 0
else:
return inter/union
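# Worked example (toy quadrilaterals, not from the dataset): two unit squares
# overlapping by half give IoU = 0.5 / (1 + 1 - 0.5) = 1/3, e.g.
#
#   g = [(0, 0), (1, 0), (1, 1), (0, 1)]
#   p = [(0.5, 0), (1.5, 0), (1.5, 1), (0.5, 1)]
#   self.get_iou(g, p)  # about 0.333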
def eval_one(self, quad_scores, quad_after_nms, gt_xy, quiet=cfg.quiet):
num_gts = len(gt_xy)
quad_scores_no_zero = [] # drop incomplete quads; store each remaining quad's score
quad_after_nms_no_zero = [] # drop incomplete quads
for score, geo in zip(quad_scores, quad_after_nms):
if np.amin(score) > 0:
quad_scores_no_zero.append(sum(score))
quad_after_nms_no_zero.append(geo)
elif not quiet:
print('quad invalid with vertex num less than 4.')
continue
num_quads = len(quad_after_nms_no_zero)
if num_quads == 0:
return 0, 0, 0
quad_flag = np.zeros(num_quads) # marks whether each quad has been matched
gt_flag = np.zeros(num_gts) # marks whether each gt box has been matched
# print(num_quads, '-------', num_gts)
quad_scores_no_zero = np.array(quad_scores_no_zero)
scores_idx = np.argsort(quad_scores_no_zero)[::-1] # indices of quad_scores from high to low
for i in range(num_quads):
idx = scores_idx[i]
# score = quad_scores_no_zero[idx]
geo = quad_after_nms_no_zero[idx] # take the boxes one by one in descending order of score
for j in range(num_gts):
if gt_flag[j] == 0:
gt_geo = gt_xy[j]
iou = self.get_iou(geo, gt_geo)
if iou >= cfg.iou_threshold:
gt_flag[j] = 1 # mark the gt box as matched
quad_flag[i] = 1 # mark the quad box as matched
tp = np.sum(quad_flag)
fp = num_quads - tp
fn = num_gts - tp
pre = tp / (tp + fp) # precision
rec = tp / (tp + fn) # recall
if pre + rec == 0:
f1_score = 0
else:
f1_score = 2 * pre * rec / (pre + rec)
# print(pre, '---', rec, '---', f1_score)
return pre, rec, f1_score
def add(self, out, gt_xy_list):
self.img_num += len(gt_xy_list)
ys = out.cpu().detach().numpy() # (N, 7, 64, 64)
if ys.shape[1] == 7:
ys = ys.transpose((0, 2, 3, 1)) # NCHW->NHWC
for y, gt_xy in zip(ys, gt_xy_list): # take each image's predictions and ground-truth boxes
y[:, :, :3] = self.sigmoid(y[:, :, :3])
cond = np.greater_equal(y[:, :, 0], self.pixel_threshold)
activation_pixels = np.where(cond)
quad_scores, quad_after_nms = nms(y, activation_pixels)
# quad_scores returned by nms look like: [[a, a, b, b], [c, c, d, d]...]
# each box has four scores; the first two are equal and the last two are equal,
# representing the head and tail scores respectively
if (len(quad_after_nms) == 0) or (sum(sum(quad_scores)) == 0):
if not cfg.quiet:
print('No boxes remain after NMS!!')
continue
else:
pre, rec, f1_score = self.eval_one(quad_scores, quad_after_nms, gt_xy)
self.pre += pre
self.rec += rec
self.f1_score += f1_score
|
76538
|
class ListView(Control,IComponent,IDisposable,IOleControl,IOleObject,IOleInPlaceObject,IOleInPlaceActiveObject,IOleWindow,IViewObject,IViewObject2,IPersist,IPersistStreamInit,IPersistPropertyBag,IPersistStorage,IQuickActivate,ISupportOleDropSource,IDropTarget,ISynchronizeInvoke,IWin32Window,IArrangedElement,IBindableComponent):
"""
Represents a Windows list view control,which displays a collection of items that can be displayed using one of four different views.
ListView()
"""
def AccessibilityNotifyClients(self,*args):
"""
AccessibilityNotifyClients(self: Control,accEvent: AccessibleEvents,objectID: int,childID: int)
Notifies the accessibility client applications of the specified
System.Windows.Forms.AccessibleEvents for the specified child control .
accEvent: The System.Windows.Forms.AccessibleEvents to notify the accessibility client applications of.
objectID: The identifier of the System.Windows.Forms.AccessibleObject.
childID: The child System.Windows.Forms.Control to notify of the accessible event.
AccessibilityNotifyClients(self: Control,accEvent: AccessibleEvents,childID: int)
Notifies the accessibility client applications of the specified
System.Windows.Forms.AccessibleEvents for the specified child control.
accEvent: The System.Windows.Forms.AccessibleEvents to notify the accessibility client applications of.
childID: The child System.Windows.Forms.Control to notify of the accessible event.
"""
pass
def ArrangeIcons(self,value=None):
"""
ArrangeIcons(self: ListView)
Arranges items in the control when they are displayed as icons based on the value of the
System.Windows.Forms.ListView.Alignment property.
ArrangeIcons(self: ListView,value: ListViewAlignment)
Arranges items in the control when they are displayed as icons with a specified alignment
setting.
value: One of the System.Windows.Forms.ListViewAlignment values.
"""
pass
def AutoResizeColumn(self,columnIndex,headerAutoResize):
"""
AutoResizeColumn(self: ListView,columnIndex: int,headerAutoResize: ColumnHeaderAutoResizeStyle)
Resizes the width of the given column as indicated by the resize style.
columnIndex: The zero-based index of the column to resize.
headerAutoResize: One of the System.Windows.Forms.ColumnHeaderAutoResizeStyle values.
"""
pass
def AutoResizeColumns(self,headerAutoResize):
"""
AutoResizeColumns(self: ListView,headerAutoResize: ColumnHeaderAutoResizeStyle)
Resizes the width of the columns as indicated by the resize style.
headerAutoResize: One of the System.Windows.Forms.ColumnHeaderAutoResizeStyle values.
"""
pass
def BeginUpdate(self):
"""
BeginUpdate(self: ListView)
Prevents the control from drawing until the System.Windows.Forms.ListView.EndUpdate method is
called.
"""
pass
def Clear(self):
"""
Clear(self: ListView)
Removes all items and columns from the control.
"""
pass
def CreateAccessibilityInstance(self,*args):
"""
CreateAccessibilityInstance(self: Control) -> AccessibleObject
Creates a new accessibility object for the control.
Returns: A new System.Windows.Forms.AccessibleObject for the control.
"""
pass
def CreateControlsInstance(self,*args):
"""
CreateControlsInstance(self: Control) -> ControlCollection
Creates a new instance of the control collection for the control.
Returns: A new instance of System.Windows.Forms.Control.ControlCollection assigned to the control.
"""
pass
def CreateHandle(self,*args):
""" CreateHandle(self: ListView) """
pass
def DefWndProc(self,*args):
"""
DefWndProc(self: Control,m: Message) -> Message
Sends the specified message to the default window procedure.
m: The Windows System.Windows.Forms.Message to process.
"""
pass
def DestroyHandle(self,*args):
"""
DestroyHandle(self: Control)
Destroys the handle associated with the control.
"""
pass
def Dispose(self):
"""
Dispose(self: ListView,disposing: bool)
Releases the unmanaged resources used by the System.Windows.Forms.ListView and optionally
releases the managed resources.
disposing: true to release both managed and unmanaged resources; false to release only unmanaged resources.
"""
pass
def EndUpdate(self):
"""
EndUpdate(self: ListView)
Resumes drawing of the list view control after drawing is suspended by the
System.Windows.Forms.ListView.BeginUpdate method.
"""
pass
def EnsureVisible(self,index):
"""
EnsureVisible(self: ListView,index: int)
Ensures that the specified item is visible within the control,scrolling the contents of the
control if necessary.
index: The zero-based index of the item to scroll into view.
"""
pass
def FindItemWithText(self,text,includeSubItemsInSearch=None,startIndex=None,isPrefixSearch=None):
"""
FindItemWithText(self: ListView,text: str,includeSubItemsInSearch: bool,startIndex: int,isPrefixSearch: bool) -> ListViewItem
Finds the first System.Windows.Forms.ListViewItem or
System.Windows.Forms.ListViewItem.ListViewSubItem,if indicated,that begins with the specified
text value. The search starts at the specified index.
text: The text to search for.
includeSubItemsInSearch: true to include subitems in the search; otherwise,false.
startIndex: The index of the item at which to start the search.
isPrefixSearch: true to allow partial matches; otherwise,false.
Returns: The first System.Windows.Forms.ListViewItem that begins with the specified text value.
FindItemWithText(self: ListView,text: str,includeSubItemsInSearch: bool,startIndex: int) -> ListViewItem
Finds the first System.Windows.Forms.ListViewItem or
System.Windows.Forms.ListViewItem.ListViewSubItem,if indicated,that begins with the specified
text value. The search starts at the specified index.
text: The text to search for.
includeSubItemsInSearch: true to include subitems in the search; otherwise,false.
startIndex: The index of the item at which to start the search.
Returns: The first System.Windows.Forms.ListViewItem that begins with the specified text value.
FindItemWithText(self: ListView,text: str) -> ListViewItem
Finds the first System.Windows.Forms.ListViewItem that begins with the specified text value.
text: The text to search for.
Returns: The first System.Windows.Forms.ListViewItem that begins with the specified text value.
"""
pass
def FindNearestItem(self,*__args):
"""
FindNearestItem(self: ListView,searchDirection: SearchDirectionHint,x: int,y: int) -> ListViewItem
Finds the next item from the given x- and y-coordinates,searching in the specified direction.
searchDirection: One of the System.Windows.Forms.SearchDirectionHint values.
x: The x-coordinate for the point at which to begin searching.
y: The y-coordinate for the point at which to begin searching.
Returns: The System.Windows.Forms.ListViewItem that is closest to the given coordinates,searching in the
specified direction.
FindNearestItem(self: ListView,dir: SearchDirectionHint,point: Point) -> ListViewItem
Finds the next item from the given point,searching in the specified direction
dir: One of the System.Windows.Forms.SearchDirectionHint values.
point: The point at which to begin searching.
Returns: The System.Windows.Forms.ListViewItem that is closest to the given point,searching in the
specified direction.
"""
pass
def GetAccessibilityObjectById(self,*args):
"""
GetAccessibilityObjectById(self: Control,objectId: int) -> AccessibleObject
Retrieves the specified System.Windows.Forms.AccessibleObject.
objectId: An Int32 that identifies the System.Windows.Forms.AccessibleObject to retrieve.
Returns: An System.Windows.Forms.AccessibleObject.
"""
pass
def GetAutoSizeMode(self,*args):
"""
GetAutoSizeMode(self: Control) -> AutoSizeMode
Retrieves a value indicating how a control will behave when its
System.Windows.Forms.Control.AutoSize property is enabled.
Returns: One of the System.Windows.Forms.AutoSizeMode values.
"""
pass
def GetItemAt(self,x,y):
"""
GetItemAt(self: ListView,x: int,y: int) -> ListViewItem
Retrieves the item at the specified location.
x: The x-coordinate of the location to search for an item (expressed in client coordinates).
y: The y-coordinate of the location to search for an item (expressed in client coordinates).
Returns: A System.Windows.Forms.ListViewItem that represents the item at the specified position. If there
is no item at the specified location,the method returns null.
"""
pass
def GetItemRect(self,index,portion=None):
"""
GetItemRect(self: ListView,index: int,portion: ItemBoundsPortion) -> Rectangle
Retrieves the specified portion of the bounding rectangle for a specific item within the list
view control.
index: The zero-based index of the item within the System.Windows.Forms.ListView.ListViewItemCollection
whose bounding rectangle you want to return.
portion: One of the System.Windows.Forms.ItemBoundsPortion values that represents a portion of the
System.Windows.Forms.ListViewItem for which to retrieve the bounding rectangle.
Returns: A System.Drawing.Rectangle that represents the bounding rectangle for the specified portion of
the specified System.Windows.Forms.ListViewItem.
GetItemRect(self: ListView,index: int) -> Rectangle
Retrieves the bounding rectangle for a specific item within the list view control.
index: The zero-based index of the item within the System.Windows.Forms.ListView.ListViewItemCollection
whose bounding rectangle you want to return.
Returns: A System.Drawing.Rectangle that represents the bounding rectangle of the specified
System.Windows.Forms.ListViewItem.
"""
pass
def GetScaledBounds(self,*args):
"""
GetScaledBounds(self: Control,bounds: Rectangle,factor: SizeF,specified: BoundsSpecified) -> Rectangle
Retrieves the bounds within which the control is scaled.
bounds: A System.Drawing.Rectangle that specifies the area for which to retrieve the display bounds.
factor: The height and width of the control's bounds.
specified: One of the values of System.Windows.Forms.BoundsSpecified that specifies the bounds of the
control to use when defining its size and position.
Returns: A System.Drawing.Rectangle representing the bounds within which the control is scaled.
"""
pass
def GetService(self,*args):
"""
GetService(self: Component,service: Type) -> object
Returns an object that represents a service provided by the System.ComponentModel.Component or
by its System.ComponentModel.Container.
service: A service provided by the System.ComponentModel.Component.
Returns: An System.Object that represents a service provided by the System.ComponentModel.Component,or
null if the System.ComponentModel.Component does not provide the specified service.
"""
pass
def GetStyle(self,*args):
"""
GetStyle(self: Control,flag: ControlStyles) -> bool
Retrieves the value of the specified control style bit for the control.
flag: The System.Windows.Forms.ControlStyles bit to return the value from.
Returns: true if the specified control style bit is set to true; otherwise,false.
"""
pass
def GetTopLevel(self,*args):
"""
GetTopLevel(self: Control) -> bool
Determines if the control is a top-level control.
Returns: true if the control is a top-level control; otherwise,false.
"""
pass
def HitTest(self,*__args):
"""
HitTest(self: ListView,x: int,y: int) -> ListViewHitTestInfo
Provides item information,given x- and y-coordinates.
x: The x-coordinate at which to retrieve the item information. The coordinate is relative to the
upper-left corner of the control.
y: The y-coordinate at which to retrieve the item information. The coordinate is relative to the
upper-left corner of the control.
Returns: A System.Windows.Forms.ListViewHitTestInfo.
HitTest(self: ListView,point: Point) -> ListViewHitTestInfo
Provides item information,given a point.
point: The System.Drawing.Point at which to retrieve the item information. The coordinates are relative
to the upper-left corner of the control.
Returns: A System.Windows.Forms.ListViewHitTestInfo.
"""
pass
def InitLayout(self,*args):
"""
InitLayout(self: Control)
Called after the control has been added to another container.
"""
pass
def InvokeGotFocus(self,*args):
"""
InvokeGotFocus(self: Control,toInvoke: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.GotFocus event for the specified control.
toInvoke: The System.Windows.Forms.Control to assign the event to.
e: An System.EventArgs that contains the event data.
"""
pass
def InvokeLostFocus(self,*args):
"""
InvokeLostFocus(self: Control,toInvoke: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.LostFocus event for the specified control.
toInvoke: The System.Windows.Forms.Control to assign the event to.
e: An System.EventArgs that contains the event data.
"""
pass
def InvokeOnClick(self,*args):
"""
InvokeOnClick(self: Control,toInvoke: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.Click event for the specified control.
toInvoke: The System.Windows.Forms.Control to assign the System.Windows.Forms.Control.Click event to.
e: An System.EventArgs that contains the event data.
"""
pass
def InvokePaint(self,*args):
"""
InvokePaint(self: Control,c: Control,e: PaintEventArgs)
Raises the System.Windows.Forms.Control.Paint event for the specified control.
c: The System.Windows.Forms.Control to assign the System.Windows.Forms.Control.Paint event to.
e: An System.Windows.Forms.PaintEventArgs that contains the event data.
"""
pass
def InvokePaintBackground(self,*args):
"""
InvokePaintBackground(self: Control,c: Control,e: PaintEventArgs)
Raises the PaintBackground event for the specified control.
c: The System.Windows.Forms.Control to assign the System.Windows.Forms.Control.Paint event to.
e: An System.Windows.Forms.PaintEventArgs that contains the event data.
"""
pass
def IsInputChar(self,*args):
"""
IsInputChar(self: Control,charCode: Char) -> bool
Determines if a character is an input character that the control recognizes.
charCode: The character to test.
Returns: true if the character should be sent directly to the control and not preprocessed; otherwise,
false.
"""
pass
def IsInputKey(self,*args):
"""
IsInputKey(self: ListView,keyData: Keys) -> bool
keyData: One of the System.Windows.Forms.Keys values.
Returns: true if the specified key is a regular input key; otherwise,false.
"""
pass
def MemberwiseClone(self,*args):
"""
MemberwiseClone(self: MarshalByRefObject,cloneIdentity: bool) -> MarshalByRefObject
Creates a shallow copy of the current System.MarshalByRefObject object.
cloneIdentity: false to delete the current System.MarshalByRefObject object's identity,which will cause the
object to be assigned a new identity when it is marshaled across a remoting boundary. A value of
false is usually appropriate. true to copy the current System.MarshalByRefObject object's
identity to its clone,which will cause remoting client calls to be routed to the remote server
object.
Returns: A shallow copy of the current System.MarshalByRefObject object.
MemberwiseClone(self: object) -> object
Creates a shallow copy of the current System.Object.
Returns: A shallow copy of the current System.Object.
"""
pass
def NotifyInvalidate(self,*args):
"""
NotifyInvalidate(self: Control,invalidatedArea: Rectangle)
Raises the System.Windows.Forms.Control.Invalidated event with a specified region of the control
to invalidate.
invalidatedArea: A System.Drawing.Rectangle representing the area to invalidate.
"""
pass
def OnAfterLabelEdit(self,*args):
"""
OnAfterLabelEdit(self: ListView,e: LabelEditEventArgs)
Raises the System.Windows.Forms.ListView.AfterLabelEdit event.
e: A System.Windows.Forms.LabelEditEventArgs that contains the event data.
"""
pass
def OnAutoSizeChanged(self,*args):
"""
OnAutoSizeChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.AutoSizeChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnBackColorChanged(self,*args):
"""
OnBackColorChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.BackColorChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnBackgroundImageChanged(self,*args):
"""
OnBackgroundImageChanged(self: ListView,e: EventArgs)
Raises the System.Windows.Forms.Control.BackgroundImageChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnBackgroundImageLayoutChanged(self,*args):
"""
OnBackgroundImageLayoutChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.BackgroundImageLayoutChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnBeforeLabelEdit(self,*args):
"""
OnBeforeLabelEdit(self: ListView,e: LabelEditEventArgs)
Raises the System.Windows.Forms.ListView.BeforeLabelEdit event.
e: A System.Windows.Forms.LabelEditEventArgs that contains the event data.
"""
pass
def OnBindingContextChanged(self,*args):
"""
OnBindingContextChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.BindingContextChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnCacheVirtualItems(self,*args):
"""
OnCacheVirtualItems(self: ListView,e: CacheVirtualItemsEventArgs)
Raises the System.Windows.Forms.ListView.CacheVirtualItems event.
e: A System.Windows.Forms.CacheVirtualItemsEventArgs that contains the event data.
"""
pass
def OnCausesValidationChanged(self,*args):
"""
OnCausesValidationChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.CausesValidationChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnChangeUICues(self,*args):
"""
OnChangeUICues(self: Control,e: UICuesEventArgs)
Raises the System.Windows.Forms.Control.ChangeUICues event.
e: A System.Windows.Forms.UICuesEventArgs that contains the event data.
"""
pass
def OnClick(self,*args):
"""
OnClick(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.Click event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnClientSizeChanged(self,*args):
"""
OnClientSizeChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.ClientSizeChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnColumnClick(self,*args):
"""
OnColumnClick(self: ListView,e: ColumnClickEventArgs)
Raises the System.Windows.Forms.ListView.ColumnClick event.
e: A System.Windows.Forms.ColumnClickEventArgs that contains the event data.
"""
pass
def OnColumnReordered(self,*args):
"""
OnColumnReordered(self: ListView,e: ColumnReorderedEventArgs)
Raises the System.Windows.Forms.ListView.ColumnReordered event.
e: The System.Windows.Forms.ColumnReorderedEventArgs that contains the event data.
"""
pass
def OnColumnWidthChanged(self,*args):
"""
OnColumnWidthChanged(self: ListView,e: ColumnWidthChangedEventArgs)
Raises the System.Windows.Forms.ListView.ColumnWidthChanged event.
e: A System.Windows.Forms.ColumnWidthChangedEventArgs that contains the event data.
"""
pass
def OnColumnWidthChanging(self,*args):
"""
OnColumnWidthChanging(self: ListView,e: ColumnWidthChangingEventArgs)
Raises the System.Windows.Forms.ListView.ColumnWidthChanging event.
e: A System.Windows.Forms.ColumnWidthChangingEventArgs that contains the event data.
"""
pass
def OnContextMenuChanged(self,*args):
"""
OnContextMenuChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.ContextMenuChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnContextMenuStripChanged(self,*args):
"""
OnContextMenuStripChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.ContextMenuStripChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnControlAdded(self,*args):
"""
OnControlAdded(self: Control,e: ControlEventArgs)
Raises the System.Windows.Forms.Control.ControlAdded event.
e: A System.Windows.Forms.ControlEventArgs that contains the event data.
"""
pass
def OnControlRemoved(self,*args):
"""
OnControlRemoved(self: Control,e: ControlEventArgs)
Raises the System.Windows.Forms.Control.ControlRemoved event.
e: A System.Windows.Forms.ControlEventArgs that contains the event data.
"""
pass
def OnCreateControl(self,*args):
"""
OnCreateControl(self: Control)
Raises the System.Windows.Forms.Control.CreateControl method.
"""
pass
def OnCursorChanged(self,*args):
"""
OnCursorChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.CursorChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnDockChanged(self,*args):
"""
OnDockChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.DockChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnDoubleClick(self,*args):
"""
OnDoubleClick(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.DoubleClick event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnDpiChangedAfterParent(self,*args):
""" OnDpiChangedAfterParent(self: Control,e: EventArgs) """
pass
def OnDpiChangedBeforeParent(self,*args):
""" OnDpiChangedBeforeParent(self: Control,e: EventArgs) """
pass
def OnDragDrop(self,*args):
"""
OnDragDrop(self: Control,drgevent: DragEventArgs)
Raises the System.Windows.Forms.Control.DragDrop event.
drgevent: A System.Windows.Forms.DragEventArgs that contains the event data.
"""
pass
def OnDragEnter(self,*args):
"""
OnDragEnter(self: Control,drgevent: DragEventArgs)
Raises the System.Windows.Forms.Control.DragEnter event.
drgevent: A System.Windows.Forms.DragEventArgs that contains the event data.
"""
pass
def OnDragLeave(self,*args):
"""
OnDragLeave(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.DragLeave event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnDragOver(self,*args):
"""
OnDragOver(self: Control,drgevent: DragEventArgs)
Raises the System.Windows.Forms.Control.DragOver event.
drgevent: A System.Windows.Forms.DragEventArgs that contains the event data.
"""
pass
def OnDrawColumnHeader(self,*args):
"""
OnDrawColumnHeader(self: ListView,e: DrawListViewColumnHeaderEventArgs)
Raises the System.Windows.Forms.ListView.DrawColumnHeader event.
e: A System.Windows.Forms.DrawListViewColumnHeaderEventArgs that contains the event data.
"""
pass
def OnDrawItem(self,*args):
"""
OnDrawItem(self: ListView,e: DrawListViewItemEventArgs)
Raises the System.Windows.Forms.ListView.DrawItem event.
e: A System.Windows.Forms.DrawListViewItemEventArgs that contains the event data.
"""
pass
def OnDrawSubItem(self,*args):
"""
OnDrawSubItem(self: ListView,e: DrawListViewSubItemEventArgs)
Raises the System.Windows.Forms.ListView.DrawSubItem event.
e: A System.Windows.Forms.DrawListViewSubItemEventArgs that contains the event data.
"""
pass
def OnEnabledChanged(self,*args):
"""
OnEnabledChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.EnabledChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnEnter(self,*args):
"""
OnEnter(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.Enter event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnFontChanged(self,*args):
"""
OnFontChanged(self: ListView,e: EventArgs)
Raises the FontChanged event.
e: The System.EventArgs that contains the event data.
"""
pass
def OnForeColorChanged(self,*args):
"""
OnForeColorChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.ForeColorChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnGiveFeedback(self,*args):
"""
OnGiveFeedback(self: Control,gfbevent: GiveFeedbackEventArgs)
Raises the System.Windows.Forms.Control.GiveFeedback event.
gfbevent: A System.Windows.Forms.GiveFeedbackEventArgs that contains the event data.
"""
pass
def OnGotFocus(self,*args):
"""
OnGotFocus(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.GotFocus event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnHandleCreated(self,*args):
"""
OnHandleCreated(self: ListView,e: EventArgs)
e: An System.EventArgs that contains the event data.
"""
pass
def OnHandleDestroyed(self,*args):
"""
OnHandleDestroyed(self: ListView,e: EventArgs)
e: An System.EventArgs that contains the event data.
"""
pass
def OnHelpRequested(self,*args):
"""
OnHelpRequested(self: Control,hevent: HelpEventArgs)
Raises the System.Windows.Forms.Control.HelpRequested event.
hevent: A System.Windows.Forms.HelpEventArgs that contains the event data.
"""
pass
def OnImeModeChanged(self,*args):
"""
OnImeModeChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.ImeModeChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnInvalidated(self,*args):
"""
OnInvalidated(self: Control,e: InvalidateEventArgs)
Raises the System.Windows.Forms.Control.Invalidated event.
e: An System.Windows.Forms.InvalidateEventArgs that contains the event data.
"""
pass
def OnItemActivate(self,*args):
"""
OnItemActivate(self: ListView,e: EventArgs)
Raises the System.Windows.Forms.ListView.ItemActivate event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnItemCheck(self,*args):
"""
OnItemCheck(self: ListView,ice: ItemCheckEventArgs)
Raises the System.Windows.Forms.ListView.ItemCheck event.
ice: An System.Windows.Forms.ItemCheckEventArgs that contains the event data.
"""
pass
def OnItemChecked(self,*args):
"""
OnItemChecked(self: ListView,e: ItemCheckedEventArgs)
Raises the System.Windows.Forms.ListView.ItemChecked event.
e: An System.Windows.Forms.ItemCheckedEventArgs that contains the event data.
"""
pass
def OnItemDrag(self,*args):
"""
OnItemDrag(self: ListView,e: ItemDragEventArgs)
Raises the System.Windows.Forms.ListView.ItemDrag event.
e: An System.Windows.Forms.ItemDragEventArgs that contains the event data.
"""
pass
def OnItemMouseHover(self,*args):
"""
OnItemMouseHover(self: ListView,e: ListViewItemMouseHoverEventArgs)
Raises the System.Windows.Forms.ListView.ItemMouseHover event.
e: A System.Windows.Forms.ListViewItemMouseHoverEventArgs that contains the event data.
"""
pass
def OnItemSelectionChanged(self,*args):
"""
OnItemSelectionChanged(self: ListView,e: ListViewItemSelectionChangedEventArgs)
Raises the System.Windows.Forms.ListView.ItemSelectionChanged event.
e: A System.Windows.Forms.ListViewItemSelectionChangedEventArgs that contains the event data.
"""
pass
def OnKeyDown(self,*args):
"""
OnKeyDown(self: Control,e: KeyEventArgs)
Raises the System.Windows.Forms.Control.KeyDown event.
e: A System.Windows.Forms.KeyEventArgs that contains the event data.
"""
pass
def OnKeyPress(self,*args):
"""
OnKeyPress(self: Control,e: KeyPressEventArgs)
Raises the System.Windows.Forms.Control.KeyPress event.
e: A System.Windows.Forms.KeyPressEventArgs that contains the event data.
"""
pass
def OnKeyUp(self,*args):
"""
OnKeyUp(self: Control,e: KeyEventArgs)
Raises the System.Windows.Forms.Control.KeyUp event.
e: A System.Windows.Forms.KeyEventArgs that contains the event data.
"""
pass
def OnLayout(self,*args):
"""
OnLayout(self: Control,levent: LayoutEventArgs)
Raises the System.Windows.Forms.Control.Layout event.
levent: A System.Windows.Forms.LayoutEventArgs that contains the event data.
"""
pass
def OnLeave(self,*args):
"""
OnLeave(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.Leave event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnLocationChanged(self,*args):
"""
OnLocationChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.LocationChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnLostFocus(self,*args):
"""
OnLostFocus(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.LostFocus event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnMarginChanged(self,*args):
"""
OnMarginChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.MarginChanged event.
e: A System.EventArgs that contains the event data.
"""
pass
def OnMouseCaptureChanged(self,*args):
"""
OnMouseCaptureChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.MouseCaptureChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnMouseClick(self,*args):
"""
OnMouseClick(self: Control,e: MouseEventArgs)
Raises the System.Windows.Forms.Control.MouseClick event.
e: An System.Windows.Forms.MouseEventArgs that contains the event data.
"""
pass
def OnMouseDoubleClick(self,*args):
"""
OnMouseDoubleClick(self: Control,e: MouseEventArgs)
Raises the System.Windows.Forms.Control.MouseDoubleClick event.
e: An System.Windows.Forms.MouseEventArgs that contains the event data.
"""
pass
def OnMouseDown(self,*args):
"""
OnMouseDown(self: Control,e: MouseEventArgs)
Raises the System.Windows.Forms.Control.MouseDown event.
e: A System.Windows.Forms.MouseEventArgs that contains the event data.
"""
pass
def OnMouseEnter(self,*args):
"""
OnMouseEnter(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.MouseEnter event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnMouseHover(self,*args):
"""
OnMouseHover(self: ListView,e: EventArgs)
Raises the System.Windows.Forms.Control.MouseHover event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnMouseLeave(self,*args):
"""
OnMouseLeave(self: ListView,e: EventArgs)
e: An System.EventArgs that contains the event data.
"""
pass
def OnMouseMove(self,*args):
"""
OnMouseMove(self: Control,e: MouseEventArgs)
Raises the System.Windows.Forms.Control.MouseMove event.
e: A System.Windows.Forms.MouseEventArgs that contains the event data.
"""
pass
def OnMouseUp(self,*args):
"""
OnMouseUp(self: Control,e: MouseEventArgs)
Raises the System.Windows.Forms.Control.MouseUp event.
e: A System.Windows.Forms.MouseEventArgs that contains the event data.
"""
pass
def OnMouseWheel(self,*args):
"""
OnMouseWheel(self: Control,e: MouseEventArgs)
Raises the System.Windows.Forms.Control.MouseWheel event.
e: A System.Windows.Forms.MouseEventArgs that contains the event data.
"""
pass
def OnMove(self,*args):
"""
OnMove(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.Move event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnNotifyMessage(self,*args):
"""
OnNotifyMessage(self: Control,m: Message)
Notifies the control of Windows messages.
m: A System.Windows.Forms.Message that represents the Windows message.
"""
pass
def OnPaddingChanged(self,*args):
"""
OnPaddingChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.PaddingChanged event.
e: A System.EventArgs that contains the event data.
"""
pass
def OnPaint(self,*args):
"""
OnPaint(self: Control,e: PaintEventArgs)
Raises the System.Windows.Forms.Control.Paint event.
e: A System.Windows.Forms.PaintEventArgs that contains the event data.
"""
pass
def OnPaintBackground(self,*args):
"""
OnPaintBackground(self: Control,pevent: PaintEventArgs)
Paints the background of the control.
pevent: A System.Windows.Forms.PaintEventArgs that contains information about the control to paint.
"""
pass
def OnParentBackColorChanged(self,*args):
"""
OnParentBackColorChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.BackColorChanged event when the
System.Windows.Forms.Control.BackColor property value of the control's container changes.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentBackgroundImageChanged(self,*args):
"""
OnParentBackgroundImageChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.BackgroundImageChanged event when the
System.Windows.Forms.Control.BackgroundImage property value of the control's container changes.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentBindingContextChanged(self,*args):
"""
OnParentBindingContextChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.BindingContextChanged event when the
System.Windows.Forms.Control.BindingContext property value of the control's container changes.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentChanged(self,*args):
"""
OnParentChanged(self: ListView,e: EventArgs)
Raises the System.Windows.Forms.Control.ParentChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentCursorChanged(self,*args):
"""
OnParentCursorChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.CursorChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentEnabledChanged(self,*args):
"""
OnParentEnabledChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.EnabledChanged event when the
System.Windows.Forms.Control.Enabled property value of the control's container changes.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentFontChanged(self,*args):
"""
OnParentFontChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.FontChanged event when the
System.Windows.Forms.Control.Font property value of the control's container changes.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentForeColorChanged(self,*args):
"""
OnParentForeColorChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.ForeColorChanged event when the
System.Windows.Forms.Control.ForeColor property value of the control's container changes.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentRightToLeftChanged(self,*args):
"""
OnParentRightToLeftChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.RightToLeftChanged event when the
System.Windows.Forms.Control.RightToLeft property value of the control's container changes.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentVisibleChanged(self,*args):
"""
OnParentVisibleChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.VisibleChanged event when the
System.Windows.Forms.Control.Visible property value of the control's container changes.
e: An System.EventArgs that contains the event data.
"""
pass
def OnPreviewKeyDown(self,*args):
"""
OnPreviewKeyDown(self: Control,e: PreviewKeyDownEventArgs)
Raises the System.Windows.Forms.Control.PreviewKeyDown event.
e: A System.Windows.Forms.PreviewKeyDownEventArgs that contains the event data.
"""
pass
def OnPrint(self,*args):
"""
OnPrint(self: Control,e: PaintEventArgs)
Raises the System.Windows.Forms.Control.Paint event.
e: A System.Windows.Forms.PaintEventArgs that contains the event data.
"""
pass
def OnQueryContinueDrag(self,*args):
"""
OnQueryContinueDrag(self: Control,qcdevent: QueryContinueDragEventArgs)
Raises the System.Windows.Forms.Control.QueryContinueDrag event.
qcdevent: A System.Windows.Forms.QueryContinueDragEventArgs that contains the event data.
"""
pass
def OnRegionChanged(self,*args):
"""
OnRegionChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.RegionChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnResize(self,*args):
"""
OnResize(self: ListView,e: EventArgs)
Raises the System.Windows.Forms.Control.Resize event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnRetrieveVirtualItem(self,*args):
"""
OnRetrieveVirtualItem(self: ListView,e: RetrieveVirtualItemEventArgs)
Raises the System.Windows.Forms.ListView.RetrieveVirtualItem event.
e: A System.Windows.Forms.RetrieveVirtualItemEventArgs that contains the event data.
"""
pass
def OnRightToLeftChanged(self,*args):
"""
OnRightToLeftChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.RightToLeftChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnRightToLeftLayoutChanged(self,*args):
"""
OnRightToLeftLayoutChanged(self: ListView,e: EventArgs)
Raises the System.Windows.Forms.ListView.RightToLeftLayoutChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnSearchForVirtualItem(self,*args):
"""
OnSearchForVirtualItem(self: ListView,e: SearchForVirtualItemEventArgs)
Raises the System.Windows.Forms.ListView.SearchForVirtualItem event.
e: A System.Windows.Forms.SearchForVirtualItemEventArgs that contains the event data.
"""
pass
def OnSelectedIndexChanged(self,*args):
"""
OnSelectedIndexChanged(self: ListView,e: EventArgs)
Raises the System.Windows.Forms.ListView.SelectedIndexChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnSizeChanged(self,*args):
"""
OnSizeChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.SizeChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnStyleChanged(self,*args):
"""
OnStyleChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.StyleChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnSystemColorsChanged(self,*args):
"""
OnSystemColorsChanged(self: ListView,e: EventArgs)
Raises the System.Windows.Forms.Control.SystemColorsChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnTabIndexChanged(self,*args):
"""
OnTabIndexChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.TabIndexChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnTabStopChanged(self,*args):
"""
OnTabStopChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.TabStopChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnTextChanged(self,*args):
"""
OnTextChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.TextChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnValidated(self,*args):
"""
OnValidated(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.Validated event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnValidating(self,*args):
"""
OnValidating(self: Control,e: CancelEventArgs)
Raises the System.Windows.Forms.Control.Validating event.
e: A System.ComponentModel.CancelEventArgs that contains the event data.
"""
pass
def OnVirtualItemsSelectionRangeChanged(self,*args):
"""
OnVirtualItemsSelectionRangeChanged(self: ListView,e: ListViewVirtualItemsSelectionRangeChangedEventArgs)
Raises the System.Windows.Forms.ListView.VirtualItemsSelectionRangeChanged event.
e: A System.Windows.Forms.ListViewVirtualItemsSelectionRangeChangedEventArgs that contains the
event data.
"""
pass
def OnVisibleChanged(self,*args):
"""
OnVisibleChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.VisibleChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def ProcessCmdKey(self,*args):
"""
ProcessCmdKey(self: Control,msg: Message,keyData: Keys) -> (bool,Message)
Processes a command key.
msg: A System.Windows.Forms.Message,passed by reference,that represents the window message to
process.
keyData: One of the System.Windows.Forms.Keys values that represents the key to process.
Returns: true if the character was processed by the control; otherwise,false.
"""
pass
def ProcessDialogChar(self,*args):
"""
ProcessDialogChar(self: Control,charCode: Char) -> bool
Processes a dialog character.
charCode: The character to process.
Returns: true if the character was processed by the control; otherwise,false.
"""
pass
def ProcessDialogKey(self,*args):
"""
ProcessDialogKey(self: Control,keyData: Keys) -> bool
Processes a dialog key.
keyData: One of the System.Windows.Forms.Keys values that represents the key to process.
Returns: true if the key was processed by the control; otherwise,false.
"""
pass
def ProcessKeyEventArgs(self,*args):
"""
ProcessKeyEventArgs(self: Control,m: Message) -> (bool,Message)
Processes a key message and generates the appropriate control events.
m: A System.Windows.Forms.Message,passed by reference,that represents the window message to
process.
Returns: true if the message was processed by the control; otherwise,false.
"""
pass
def ProcessKeyMessage(self,*args):
"""
ProcessKeyMessage(self: Control,m: Message) -> (bool,Message)
Processes a keyboard message.
m: A System.Windows.Forms.Message,passed by reference,that represents the window message to
process.
Returns: true if the message was processed by the control; otherwise,false.
"""
pass
def ProcessKeyPreview(self,*args):
"""
ProcessKeyPreview(self: Control,m: Message) -> (bool,Message)
Previews a keyboard message.
m: A System.Windows.Forms.Message,passed by reference,that represents the window message to
process.
Returns: true if the message was processed by the control; otherwise,false.
"""
pass
def ProcessMnemonic(self,*args):
"""
ProcessMnemonic(self: Control,charCode: Char) -> bool
Processes a mnemonic character.
charCode: The character to process.
Returns: true if the character was processed as a mnemonic by the control; otherwise,false.
"""
pass
def RaiseDragEvent(self,*args):
"""
RaiseDragEvent(self: Control,key: object,e: DragEventArgs)
Raises the appropriate drag event.
key: The event to raise.
e: A System.Windows.Forms.DragEventArgs that contains the event data.
"""
pass
def RaiseKeyEvent(self,*args):
"""
RaiseKeyEvent(self: Control,key: object,e: KeyEventArgs)
Raises the appropriate key event.
key: The event to raise.
e: A System.Windows.Forms.KeyEventArgs that contains the event data.
"""
pass
def RaiseMouseEvent(self,*args):
"""
RaiseMouseEvent(self: Control,key: object,e: MouseEventArgs)
Raises the appropriate mouse event.
key: The event to raise.
e: A System.Windows.Forms.MouseEventArgs that contains the event data.
"""
pass
def RaisePaintEvent(self,*args):
"""
RaisePaintEvent(self: Control,key: object,e: PaintEventArgs)
Raises the appropriate paint event.
key: The event to raise.
e: A System.Windows.Forms.PaintEventArgs that contains the event data.
"""
pass
def RealizeProperties(self,*args):
"""
RealizeProperties(self: ListView)
Initializes the properties of the System.Windows.Forms.ListView control that manage the
appearance of the control.
"""
pass
def RecreateHandle(self,*args):
"""
RecreateHandle(self: Control)
Forces the re-creation of the handle for the control.
"""
pass
def RedrawItems(self,startIndex,endIndex,invalidateOnly):
"""
RedrawItems(self: ListView,startIndex: int,endIndex: int,invalidateOnly: bool)
Forces a range of System.Windows.Forms.ListViewItem objects to be redrawn.
startIndex: The index for the first item in the range to be redrawn.
endIndex: The index for the last item of the range to be redrawn.
invalidateOnly: true to invalidate the range of items; false to invalidate and repaint the items.
"""
pass
def RescaleConstantsForDpi(self,*args):
""" RescaleConstantsForDpi(self: Control,deviceDpiOld: int,deviceDpiNew: int) """
pass
def ResetMouseEventArgs(self,*args):
"""
ResetMouseEventArgs(self: Control)
Resets the control to handle the System.Windows.Forms.Control.MouseLeave event.
"""
pass
def RtlTranslateAlignment(self,*args):
"""
RtlTranslateAlignment(self: Control,align: ContentAlignment) -> ContentAlignment
Converts the specified System.Drawing.ContentAlignment to the appropriate
System.Drawing.ContentAlignment to support right-to-left text.
align: One of the System.Drawing.ContentAlignment values.
Returns: One of the System.Drawing.ContentAlignment values.
RtlTranslateAlignment(self: Control,align: LeftRightAlignment) -> LeftRightAlignment
Converts the specified System.Windows.Forms.LeftRightAlignment to the appropriate
System.Windows.Forms.LeftRightAlignment to support right-to-left text.
align: One of the System.Windows.Forms.LeftRightAlignment values.
Returns: One of the System.Windows.Forms.LeftRightAlignment values.
RtlTranslateAlignment(self: Control,align: HorizontalAlignment) -> HorizontalAlignment
Converts the specified System.Windows.Forms.HorizontalAlignment to the appropriate
System.Windows.Forms.HorizontalAlignment to support right-to-left text.
align: One of the System.Windows.Forms.HorizontalAlignment values.
Returns: One of the System.Windows.Forms.HorizontalAlignment values.
"""
pass
def RtlTranslateContent(self,*args):
"""
RtlTranslateContent(self: Control,align: ContentAlignment) -> ContentAlignment
Converts the specified System.Drawing.ContentAlignment to the appropriate
System.Drawing.ContentAlignment to support right-to-left text.
align: One of the System.Drawing.ContentAlignment values.
Returns: One of the System.Drawing.ContentAlignment values.
"""
pass
def RtlTranslateHorizontal(self,*args):
"""
RtlTranslateHorizontal(self: Control,align: HorizontalAlignment) -> HorizontalAlignment
Converts the specified System.Windows.Forms.HorizontalAlignment to the appropriate
System.Windows.Forms.HorizontalAlignment to support right-to-left text.
align: One of the System.Windows.Forms.HorizontalAlignment values.
Returns: One of the System.Windows.Forms.HorizontalAlignment values.
"""
pass
def RtlTranslateLeftRight(self,*args):
"""
RtlTranslateLeftRight(self: Control,align: LeftRightAlignment) -> LeftRightAlignment
Converts the specified System.Windows.Forms.LeftRightAlignment to the appropriate
System.Windows.Forms.LeftRightAlignment to support right-to-left text.
align: One of the System.Windows.Forms.LeftRightAlignment values.
Returns: One of the System.Windows.Forms.LeftRightAlignment values.
"""
pass
def ScaleControl(self,*args):
"""
ScaleControl(self: Control,factor: SizeF,specified: BoundsSpecified)
Scales a control's location,size,padding and margin.
factor: The factor by which the height and width of the control will be scaled.
specified: A System.Windows.Forms.BoundsSpecified value that specifies the bounds of the control to use
when defining its size and position.
"""
pass
def ScaleCore(self,*args):
"""
ScaleCore(self: Control,dx: Single,dy: Single)
This method is not relevant for this class.
dx: The horizontal scaling factor.
dy: The vertical scaling factor.
"""
pass
def Select(self):
"""
Select(self: Control,directed: bool,forward: bool)
Activates a child control. Optionally specifies the direction in the tab order to select the
control from.
directed: true to specify the direction of the control to select; otherwise,false.
forward: true to move forward in the tab order; false to move backward in the tab order.
"""
pass
def SetAutoSizeMode(self,*args):
"""
SetAutoSizeMode(self: Control,mode: AutoSizeMode)
Sets a value indicating how a control will behave when its System.Windows.Forms.Control.AutoSize
property is enabled.
mode: One of the System.Windows.Forms.AutoSizeMode values.
"""
pass
def SetBoundsCore(self,*args):
"""
SetBoundsCore(self: Control,x: int,y: int,width: int,height: int,specified: BoundsSpecified)
Performs the work of setting the specified bounds of this control.
x: The new System.Windows.Forms.Control.Left property value of the control.
y: The new System.Windows.Forms.Control.Top property value of the control.
width: The new System.Windows.Forms.Control.Width property value of the control.
height: The new System.Windows.Forms.Control.Height property value of the control.
specified: A bitwise combination of the System.Windows.Forms.BoundsSpecified values.
"""
pass
def SetClientSizeCore(self,*args):
"""
SetClientSizeCore(self: Control,x: int,y: int)
Sets the size of the client area of the control.
x: The client area width,in pixels.
y: The client area height,in pixels.
"""
pass
def SetStyle(self,*args):
"""
SetStyle(self: Control,flag: ControlStyles,value: bool)
Sets a specified System.Windows.Forms.ControlStyles flag to either true or false.
flag: The System.Windows.Forms.ControlStyles bit to set.
value: true to apply the specified style to the control; otherwise,false.
"""
pass
def SetTopLevel(self,*args):
"""
SetTopLevel(self: Control,value: bool)
Sets the control as the top-level control.
value: true to set the control as the top-level control; otherwise,false.
"""
pass
def SetVisibleCore(self,*args):
"""
SetVisibleCore(self: Control,value: bool)
Sets the control to the specified visible state.
value: true to make the control visible; otherwise,false.
"""
pass
def SizeFromClientSize(self,*args):
"""
SizeFromClientSize(self: Control,clientSize: Size) -> Size
Determines the size of the entire control from the height and width of its client area.
clientSize: A System.Drawing.Size value representing the height and width of the control's client area.
Returns: A System.Drawing.Size value representing the height and width of the entire control.
"""
pass
def Sort(self):
"""
Sort(self: ListView)
Sorts the items of the list view.
"""
pass
def ToString(self):
"""
ToString(self: ListView) -> str
Returns a string representation of the System.Windows.Forms.ListView control.
Returns: A string that states the control type,the count of items in the System.Windows.Forms.ListView
control,and the type of the first item in the System.Windows.Forms.ListView,if the count is
not 0.
"""
pass
def UpdateBounds(self,*args):
"""
UpdateBounds(self: Control,x: int,y: int,width: int,height: int,clientWidth: int,clientHeight: int)
Updates the bounds of the control with the specified size,location,and client size.
x: The System.Drawing.Point.X coordinate of the control.
y: The System.Drawing.Point.Y coordinate of the control.
width: The System.Drawing.Size.Width of the control.
height: The System.Drawing.Size.Height of the control.
clientWidth: The client System.Drawing.Size.Width of the control.
clientHeight: The client System.Drawing.Size.Height of the control.
UpdateBounds(self: Control,x: int,y: int,width: int,height: int)
Updates the bounds of the control with the specified size and location.
x: The System.Drawing.Point.X coordinate of the control.
y: The System.Drawing.Point.Y coordinate of the control.
width: The System.Drawing.Size.Width of the control.
height: The System.Drawing.Size.Height of the control.
UpdateBounds(self: Control)
Updates the bounds of the control with the current size and location.
"""
pass
def UpdateExtendedStyles(self,*args):
"""
UpdateExtendedStyles(self: ListView)
Updates the extended styles applied to the list view control.
"""
pass
def UpdateStyles(self,*args):
"""
UpdateStyles(self: Control)
Forces the assigned styles to be reapplied to the control.
"""
pass
def UpdateZOrder(self,*args):
"""
UpdateZOrder(self: Control)
Updates the control in its parent's z-order.
"""
pass
def WndProc(self,*args):
"""
WndProc(self: ListView,m: Message) -> Message
Overrides System.Windows.Forms.Control.WndProc(System.Windows.Forms.Message@).
m: The Windows System.Windows.Forms.Message to process.
"""
pass
def __enter__(self,*args):
"""
__enter__(self: IDisposable) -> object
Provides the implementation of __enter__ for objects which implement IDisposable.
"""
pass
def __exit__(self,*args):
"""
__exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object)
Provides the implementation of __exit__ for objects which implement IDisposable.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __str__(self,*args):
pass
Activation=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the type of action the user must take to activate an item.
Get: Activation(self: ListView) -> ItemActivation
Set: Activation(self: ListView)=value
"""
Alignment=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the alignment of items in the control.
Get: Alignment(self: ListView) -> ListViewAlignment
Set: Alignment(self: ListView)=value
"""
AllowColumnReorder=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether the user can drag column headers to reorder columns in the control.
Get: AllowColumnReorder(self: ListView) -> bool
Set: AllowColumnReorder(self: ListView)=value
"""
AutoArrange=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets whether icons are automatically kept arranged.
Get: AutoArrange(self: ListView) -> bool
Set: AutoArrange(self: ListView)=value
"""
BackColor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the background color.
Get: BackColor(self: ListView) -> Color
Set: BackColor(self: ListView)=value
"""
BackgroundImageLayout=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets an System.Windows.Forms.ImageLayout value.
Get: BackgroundImageLayout(self: ListView) -> ImageLayout
Set: BackgroundImageLayout(self: ListView)=value
"""
BackgroundImageTiled=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether the background image of the System.Windows.Forms.ListView should be tiled.
Get: BackgroundImageTiled(self: ListView) -> bool
Set: BackgroundImageTiled(self: ListView)=value
"""
BorderStyle=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the border style of the control.
Get: BorderStyle(self: ListView) -> BorderStyle
Set: BorderStyle(self: ListView)=value
"""
CanEnableIme=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether the System.Windows.Forms.Control.ImeMode property can be set to an active value,to enable IME support.
"""
CanRaiseEvents=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Determines if events can be raised on the control.
"""
CheckBoxes=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether a check box appears next to each item in the control.
Get: CheckBoxes(self: ListView) -> bool
Set: CheckBoxes(self: ListView)=value
"""
CheckedIndices=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the indexes of the currently checked items in the control.
Get: CheckedIndices(self: ListView) -> CheckedIndexCollection
"""
CheckedItems=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the currently checked items in the control.
Get: CheckedItems(self: ListView) -> CheckedListViewItemCollection
"""
Columns=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the collection of all column headers that appear in the control.
Get: Columns(self: ListView) -> ColumnHeaderCollection
"""
CreateParams=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""This property is not relevant for this class.
"""
DefaultCursor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the default cursor for the control.
"""
DefaultImeMode=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the default Input Method Editor (IME) mode supported by the control.
"""
DefaultMargin=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the space,in pixels,that is specified by default between controls.
"""
DefaultMaximumSize=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the length and height,in pixels,that is specified as the default maximum size of a control.
"""
DefaultMinimumSize=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the length and height,in pixels,that is specified as the default minimum size of a control.
"""
DefaultPadding=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the internal spacing,in pixels,of the contents of a control.
"""
DefaultSize=property(lambda self: object(),lambda self,v: None,lambda self: None)
DesignMode=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value that indicates whether the System.ComponentModel.Component is currently in design mode.
"""
DoubleBuffered=property(lambda self: object(),lambda self,v: None,lambda self: None)
Events=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the list of event handlers that are attached to this System.ComponentModel.Component.
"""
FocusedItem=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the item in the control that currently has focus.
Get: FocusedItem(self: ListView) -> ListViewItem
Set: FocusedItem(self: ListView)=value
"""
FontHeight=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the height of the font of the control.
"""
ForeColor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the foreground color.
Get: ForeColor(self: ListView) -> Color
Set: ForeColor(self: ListView)=value
"""
FullRowSelect=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether clicking an item selects all its subitems.
Get: FullRowSelect(self: ListView) -> bool
Set: FullRowSelect(self: ListView)=value
"""
GridLines=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether grid lines appear between the rows and columns containing the items and subitems in the control.
Get: GridLines(self: ListView) -> bool
Set: GridLines(self: ListView)=value
"""
Groups=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the collection of System.Windows.Forms.ListViewGroup objects assigned to the control.
Get: Groups(self: ListView) -> ListViewGroupCollection
"""
HeaderStyle=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the column header style.
Get: HeaderStyle(self: ListView) -> ColumnHeaderStyle
Set: HeaderStyle(self: ListView)=value
"""
HideSelection=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether the selected item in the control remains highlighted when the control loses focus.
Get: HideSelection(self: ListView) -> bool
Set: HideSelection(self: ListView)=value
"""
HotTracking=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether the text of an item or subitem has the appearance of a hyperlink when the mouse pointer passes over it.
Get: HotTracking(self: ListView) -> bool
Set: HotTracking(self: ListView)=value
"""
HoverSelection=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether an item is automatically selected when the mouse pointer remains over the item for a few seconds.
Get: HoverSelection(self: ListView) -> bool
Set: HoverSelection(self: ListView)=value
"""
ImeModeBase=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the IME mode of a control.
"""
InsertionMark=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets an object used to indicate the expected drop location when an item is dragged within a System.Windows.Forms.ListView control.
Get: InsertionMark(self: ListView) -> ListViewInsertionMark
"""
Items=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a collection containing all items in the control.
Get: Items(self: ListView) -> ListViewItemCollection
"""
LabelEdit=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether the user can edit the labels of items in the control.
Get: LabelEdit(self: ListView) -> bool
Set: LabelEdit(self: ListView)=value
"""
LabelWrap=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether item labels wrap when items are displayed in the control as icons.
Get: LabelWrap(self: ListView) -> bool
Set: LabelWrap(self: ListView)=value
"""
LargeImageList=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the System.Windows.Forms.ImageList to use when displaying items as large icons in the control.
Get: LargeImageList(self: ListView) -> ImageList
Set: LargeImageList(self: ListView)=value
"""
ListViewItemSorter=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the sorting comparer for the control.
Get: ListViewItemSorter(self: ListView) -> IComparer
Set: ListViewItemSorter(self: ListView)=value
"""
MultiSelect=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether multiple items can be selected.
Get: MultiSelect(self: ListView) -> bool
Set: MultiSelect(self: ListView)=value
"""
OwnerDraw=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether the System.Windows.Forms.ListView control is drawn by the operating system or by code that you provide.
Get: OwnerDraw(self: ListView) -> bool
Set: OwnerDraw(self: ListView)=value
"""
Padding=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the space between the System.Windows.Forms.ListView control and its contents.
Get: Padding(self: ListView) -> Padding
Set: Padding(self: ListView)=value
"""
RenderRightToLeft=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""This property is now obsolete.
"""
ResizeRedraw=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether the control redraws itself when resized.
"""
RightToLeftLayout=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether the control is laid out from right to left.
Get: RightToLeftLayout(self: ListView) -> bool
Set: RightToLeftLayout(self: ListView)=value
"""
ScaleChildren=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value that determines the scaling of child controls.
"""
Scrollable=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether a scroll bar is added to the control when there is not enough room to display all items.
Get: Scrollable(self: ListView) -> bool
Set: Scrollable(self: ListView)=value
"""
SelectedIndices=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the indexes of the selected items in the control.
Get: SelectedIndices(self: ListView) -> SelectedIndexCollection
"""
SelectedItems=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the items that are selected in the control.
Get: SelectedItems(self: ListView) -> SelectedListViewItemCollection
"""
ShowFocusCues=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether the control should display focus rectangles.
"""
ShowGroups=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether items are displayed in groups.
Get: ShowGroups(self: ListView) -> bool
Set: ShowGroups(self: ListView)=value
"""
ShowItemToolTips=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether ToolTips are shown for the System.Windows.Forms.ListViewItem objects contained in the System.Windows.Forms.ListView.
Get: ShowItemToolTips(self: ListView) -> bool
Set: ShowItemToolTips(self: ListView)=value
"""
ShowKeyboardCues=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether the user interface is in the appropriate state to show or hide keyboard accelerators.
"""
SmallImageList=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the System.Windows.Forms.ImageList to use when displaying items as small icons in the control.
Get: SmallImageList(self: ListView) -> ImageList
Set: SmallImageList(self: ListView)=value
"""
Sorting=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the sort order for items in the control.
Get: Sorting(self: ListView) -> SortOrder
Set: Sorting(self: ListView)=value
"""
StateImageList=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the System.Windows.Forms.ImageList associated with application-defined states in the control.
Get: StateImageList(self: ListView) -> ImageList
Set: StateImageList(self: ListView)=value
"""
Text=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""This property is not relevant for this class.
Get: Text(self: ListView) -> str
Set: Text(self: ListView)=value
"""
TileSize=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the size of the tiles shown in tile view.
Get: TileSize(self: ListView) -> Size
Set: TileSize(self: ListView)=value
"""
TopItem=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the first visible item in the control.
Get: TopItem(self: ListView) -> ListViewItem
Set: TopItem(self: ListView)=value
"""
UseCompatibleStateImageBehavior=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether the System.Windows.Forms.ListView uses state image behavior that is compatible with the .NET Framework 1.1 or the .NET Framework 2.0.
Get: UseCompatibleStateImageBehavior(self: ListView) -> bool
Set: UseCompatibleStateImageBehavior(self: ListView)=value
"""
View=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets how items are displayed in the control.
Get: View(self: ListView) -> View
Set: View(self: ListView)=value
"""
VirtualListSize=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the number of System.Windows.Forms.ListViewItem objects contained in the list when in virtual mode.
Get: VirtualListSize(self: ListView) -> int
Set: VirtualListSize(self: ListView)=value
"""
VirtualMode=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether you have provided your own data-management operations for the System.Windows.Forms.ListView control.
Get: VirtualMode(self: ListView) -> bool
Set: VirtualMode(self: ListView)=value
"""
AfterLabelEdit=None
BackgroundImageLayoutChanged=None
BeforeLabelEdit=None
CacheVirtualItems=None
CheckedIndexCollection=None
CheckedListViewItemCollection=None
ColumnClick=None
ColumnHeaderCollection=None
ColumnReordered=None
ColumnWidthChanged=None
ColumnWidthChanging=None
DrawColumnHeader=None
DrawItem=None
DrawSubItem=None
ItemActivate=None
ItemCheck=None
ItemChecked=None
ItemDrag=None
ItemMouseHover=None
ItemSelectionChanged=None
ListViewItemCollection=None
PaddingChanged=None
Paint=None
RetrieveVirtualItem=None
RightToLeftLayoutChanged=None
SearchForVirtualItem=None
SelectedIndexChanged=None
SelectedIndexCollection=None
SelectedListViewItemCollection=None
TextChanged=None
VirtualItemsSelectionRangeChanged=None
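# Usage sketch (appended for illustration; not part of the generated stub). Assumes
# pythonnet (the "clr" module) or IronPython with System.Windows.Forms available, and
# exercises the documented members above: View, FullRowSelect, Columns.Add, Items.Add
# and SubItems.Add.
if __name__ == "__main__":
    import clr
    clr.AddReference("System.Windows.Forms")
    from System.Windows.Forms import Application, DockStyle, Form, ListView, View
    form = Form()
    form.Text = "ListView demo"
    lv = ListView()
    lv.View = View.Details        # show items as rows with columns
    lv.FullRowSelect = True       # clicking any subitem selects the whole row
    lv.Dock = DockStyle.Fill
    lv.Columns.Add("Name", 120)   # ColumnHeaderCollection.Add(text, width)
    lv.Columns.Add("Value", 80)
    item = lv.Items.Add("alpha")  # ListViewItemCollection.Add(text) -> ListViewItem
    item.SubItems.Add("1")
    form.Controls.Add(lv)
    Application.Run(form)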
|
76555
|
import torch
import torch.nn as nn
from fpconv.pointnet2.pointnet2_modules import PointnetFPModule, PointnetSAModule
import fpconv.pointnet2.pytorch_utils as pt_utils
from fpconv.base import AssemRes_BaseBlock
from fpconv.fpconv import FPConv4x4_BaseBlock, FPConv6x6_BaseBlock
NPOINTS = [8192, 2048, 512, 128]
RADIUS = [0.1, 0.2, 0.4, 0.8, 1.6]
NSAMPLE = [32, 32, 32, 32, 16]
MLPS = [[64,64], [128,128], [256,256], [512,512], [1024,1024]]
FP_MLPS = [[128,128], [256,128], [512,256], [1024,512]]
CLS_FC = [128]
DP_RATIO = 0.5
def get_model(num_class, input_channels=3):
return Pointnet2SSG(num_class, input_channels)
class Pointnet2SSG(nn.Module):
def __init__(self, num_class, input_channels=3, use_xyz=False):
        # input_channels: number of input feature channels (not including xyz)
super().__init__()
print(NPOINTS)
self.SA_modules = nn.ModuleList()
self.conv0 = AssemRes_BaseBlock(
CONV_BASE=FPConv6x6_BaseBlock,
npoint=None,
radius=RADIUS[0],
nsample=NSAMPLE[0],
channel_list=[input_channels] + MLPS[0],
use_xyz=use_xyz)
channel_in = MLPS[0][-1]
skip_channel_list = [channel_in]
        for k in range(len(NPOINTS)):
mlps = [MLPS[k+1].copy()]
channel_out = 0
            for idx in range(len(mlps)):
mlps[idx] = [channel_in] + mlps[idx]
channel_out += mlps[idx][-1]
print(mlps[0], RADIUS[k], RADIUS[k+1])
if k < 2:
self.SA_modules.append(
AssemRes_BaseBlock(
CONV_BASE=FPConv6x6_BaseBlock,
npoint=NPOINTS[k],
nsample=NSAMPLE[k],
radius=RADIUS[k],
channel_list=mlps[0],
nsample_ds=NSAMPLE[k+1],
radius_ds=RADIUS[k+1],
use_xyz=use_xyz))
else:
self.SA_modules.append(
AssemRes_BaseBlock(
CONV_BASE=FPConv4x4_BaseBlock,
npoint=NPOINTS[k],
nsample=NSAMPLE[k],
radius=RADIUS[k],
channel_list=mlps[0],
nsample_ds=NSAMPLE[k+1],
radius_ds=RADIUS[k+1],
use_xyz=use_xyz))
skip_channel_list.append(channel_out)
channel_in = channel_out
self.FP_modules = nn.ModuleList()
        for k in range(len(FP_MLPS)):
pre_channel = FP_MLPS[k + 1][-1] if k + 1 < len(FP_MLPS) else channel_out
mlp = [pre_channel + skip_channel_list[k]] + FP_MLPS[k]
print(mlp)
self.FP_modules.append(PointnetFPModule(mlp=mlp))
cls_layers = []
pre_channel = FP_MLPS[0][-1]
        for k in range(len(CLS_FC)):
cls_layers.append(pt_utils.Conv2d(pre_channel, CLS_FC[k], bn=True))
pre_channel = CLS_FC[k]
cls_layers.append(pt_utils.Conv2d(pre_channel, num_class, activation=None, bn=False))
cls_layers.insert(1, nn.Dropout(0.5))
self.cls_layer = nn.Sequential(*cls_layers)
def _break_up_pc(self, pc):
xyz = pc[..., 0:3].contiguous()
features = (
pc[..., 3:].transpose(1, 2).contiguous()
if pc.size(-1) > 3 else None
)
return xyz, features
def forward(self, pointcloud: torch.cuda.FloatTensor):
xyz, features = self._break_up_pc(pointcloud)
_, features = self.conv0(xyz, features)
l_xyz, l_features = [xyz], [features]
for i in range(len(self.SA_modules)):
li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
l_xyz.append(li_xyz)
l_features.append(li_features)
for i in range(-1, -(len(self.FP_modules) + 1), -1):
l_features[i - 1] = self.FP_modules[i](
l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i]
)
fn_feats = l_features[0].unsqueeze(-1) # B, C, N, 1
pred_cls = self.cls_layer(fn_feats).squeeze(-1).transpose(1, 2).contiguous() # B, N, C
return pred_cls
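# Hedged usage sketch (not in the original file): instantiate the network and run one
# forward pass on random data. The input layout follows forward()/_break_up_pc above:
# (B, N, 3 + input_channels) with xyz in the first three columns; the output is
# (B, N, num_class) per-point scores. A CUDA device is assumed because the pointnet2
# ops are CUDA kernels.
if __name__ == '__main__':
    model = get_model(num_class=13, input_channels=3).cuda()
    points = torch.rand(2, NPOINTS[0], 6).cuda()  # 2 clouds, xyz + 3 extra features per point
    with torch.no_grad():
        logits = model(points)
    print(logits.shape)  # expected: torch.Size([2, 8192, 13])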
|
76574
|
import argparse, os
import malmoenv
from pathlib import Path
from gameai.utils.wrappers import DownsampleObs
def parse_args():
parser = argparse.ArgumentParser(description='malmoenv arguments')
parser.add_argument('--mission', type=str, default='../MalmoEnv/missions/mobchase_single_agent.xml',
help='the mission xml')
parser.add_argument('--port', type=int, default=10000, help='the mission server port')
parser.add_argument('--server', type=str, default='127.0.0.1', help='the mission server DNS or IP address')
# todo next 2 arguments can be removed
# https://github.com/elpollouk/malmo/blob/elpollouk/MultiAgentEnv/MalmoEnv/rllib_train.py
parser.add_argument('--port2', type=int, default=None,
help="(Multi-agent) role N's mission port. Defaults to server port.")
parser.add_argument('--server2', type=str, default=None, help="(Multi-agent) role N's server DNS or IP")
parser.add_argument('--episodes', type=int, default=1, help='the number of resets to perform - default is 1')
parser.add_argument('--episode', type=int, default=0, help='the start episode - default is 0')
parser.add_argument('--role', type=int, default=0, help='the agent role - defaults to 0')
parser.add_argument('--episodemaxsteps', type=int, default=0, help='max number of steps per episode')
# parser.add_argument('--saveimagesteps', type=int, default=0, help='save an image every N steps')
parser.add_argument('--resync', type=int, default=0, help='exit and re-sync every N resets'
' - default is 0 meaning never.')
parser.add_argument('--experimentUniqueId', type=str, default='test1', help="the experiment's unique id.")
args = parser.parse_args()
if args.server2 is None:
args.server2 = args.server
    # Use an absolute path for the mission XML in case the working directory changes later.
args.mission = os.path.realpath(args.mission)
return args
def create_env(args):
xml = Path(args.mission).read_text()
env = malmoenv.make()
print(f"create env listening on port {args.port}")
env.init(xml, args.port,
server=args.server,
server2=args.server2, port2=args.port2,
role=args.role,
exp_uid=args.experimentUniqueId,
episode=args.episode, resync=args.resync,
reshape=True)
env.reward_range = (-float('inf'), float('inf'))
# env = DownsampleObs(env, shape=tuple((84, 84)))
# env = MultiEntrySymbolicObs(env)
return env
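# Hedged usage sketch (not in the original file): a minimal episode loop over the helpers
# above, assuming a Minecraft/Malmo instance is already listening on --port and that the
# env follows the gym-style reset/step/close API exposed by malmoenv.
if __name__ == '__main__':
    args = parse_args()
    env = create_env(args)
    for _ in range(args.episodes):
        obs = env.reset()
        done = False
        steps = 0
        while not done and (args.episodemaxsteps <= 0 or steps < args.episodemaxsteps):
            action = env.action_space.sample()  # random policy, just to exercise the env
            obs, reward, done, info = env.step(action)
            steps += 1
    env.close()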
|
76594
|
import pytest
from tests.utils.device_mock import DeviceMock
@pytest.fixture()
def device():
dev = DeviceMock({
0x10: bytes.fromhex('59')
})
return dev
class TestEncryptionTemperature:
def test_read_battery(self, device):
assert device.battery == 89
def test_write_battery(self, device):
with pytest.raises(AttributeError):
device.battery = 50
|
76608
|
from dataclasses import dataclass
import discord
@dataclass(init=False)
class TwitchProfile:
def __init__(self, **kwargs):
self.id = kwargs.get("id")
self.login = kwargs.get("login")
self.display_name = kwargs.get("display_name")
self.acc_type = kwargs.get("acc_type")
self.broadcaster_type = kwargs.get("broadcaster_type")
self.description = kwargs.get("description")
self.profile_image_url = kwargs.get("profile_image_url")
self.offline_image_url = kwargs.get("offline_image_url")
self.view_count = kwargs.get("view_count")
@classmethod
def from_json(cls, data: dict):
data = data["data"][0]
return cls(**data)
def make_user_embed(self) -> discord.Embed:
# makes the embed for a twitch profile
em = discord.Embed(colour=int("6441A4", 16))
em.description = self.description
url = "https://twitch.tv/{}".format(self.login)
em.set_author(name=self.display_name, url=url, icon_url=self.profile_image_url)
em.set_image(url=self.offline_image_url)
em.set_thumbnail(url=self.profile_image_url)
footer_text = "{} Viewer count".format(self.view_count)
em.set_footer(text=footer_text, icon_url=self.profile_image_url)
return em
@dataclass(init=False)
class TwitchFollower:
def __init__(self, **kwargs):
self.from_id = kwargs.get("from_id")
self.to_id = kwargs.get("to_id")
self.followed_at = kwargs.get("followed_at")
@classmethod
def from_json(cls, data: dict):
return cls(**data)
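# Hedged usage sketch (not part of the original module): build a profile embed from a
# Helix-style "Get Users" payload. The keys mirror what the constructor reads via kwargs;
# the values are made up for illustration.
if __name__ == "__main__":
    sample = {
        "data": [
            {
                "id": "141981764",
                "login": "twitchdev",
                "display_name": "TwitchDev",
                "broadcaster_type": "partner",
                "description": "Supporting third-party developers.",
                "profile_image_url": "https://example.com/profile.png",
                "offline_image_url": "https://example.com/offline.png",
                "view_count": 5980557,
            }
        ]
    }
    profile = TwitchProfile.from_json(sample)
    embed = profile.make_user_embed()
    print(embed.to_dict())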
|
76627
|
import pandas as pd
from pandas_datareader import data
start_date = '2014-01-01'
end_date = '2018-01-01'
SRC_DATA_FILENAME = 'goog_data.pkl'
try:
goog_data2 = pd.read_pickle(SRC_DATA_FILENAME)
except FileNotFoundError:
goog_data2 = data.DataReader('GOOG', 'yahoo', start_date, end_date)
goog_data2.to_pickle(SRC_DATA_FILENAME)
goog_data = goog_data2.tail(620)
close = goog_data['Close']
'''
Standard deviation is a statistical measure of variability. In trading this
value is known as volatility. A low standard deviation indicates that the data
points tend to be close to the mean, whereas a high standard deviation indicates
that they are spread out over a wide range of values.
n = number of periods
Calculate the moving average (MA) over the last n prices, then
d = ((P1-MA)^2 + (P2-MA)^2 + ... + (Pn-MA)^2) / n
where Pk is the price at the k-th interval and n is the number of periods selected.
Take the square root of d; this gives the standard deviation:
stddev = sqrt(d)
'''
import statistics as stats
import math as math
time_period = 20 # look back period
history = [] # history of prices
sma_values = [] # to track moving average values for visualization purposes
stddev_values = [] # history of computed stdev values
for close_price in close:
history.append(close_price)
if len(history) > time_period: # we track at most 'time_period' number of prices
del (history[0])
sma = stats.mean(history)
sma_values.append(sma)
variance = 0 # variance is square of standard deviation
for hist_price in history:
variance = variance + ((hist_price - sma) ** 2)
stdev = math.sqrt(variance / len(history))
stddev_values.append(stdev)
goog_data = goog_data.assign(ClosePrice=pd.Series(close, index=goog_data.index))
goog_data = goog_data.assign(StandardDeviationOver20Days=pd.Series(stddev_values, index=goog_data.index))
close_price = goog_data['ClosePrice']
stddev = goog_data['StandardDeviationOver20Days']
import matplotlib.pyplot as plt
fig = plt.figure()
ax1 = fig.add_subplot(211, ylabel='Google price in $')
close_price.plot(ax=ax1, color='g', lw=2., legend=True)
ax2 = fig.add_subplot(212, ylabel='Stddev in $')
stddev.plot(ax=ax2, color='b', lw=2., legend=True)
ax2.axhline(y=stats.mean(stddev_values), color='k')
plt.show()
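'''
Side note (not in the original script): once the 20-day window is full, the loop above
computes the population standard deviation (it divides by len(history), i.e. ddof=0), so
it matches pandas' built-in rolling computation. A quick cross-check:
'''
rolling_stddev = close.rolling(time_period).std(ddof=0)
print(rolling_stddev.tail())
print(stddev.tail())  # should closely agree with the line above once the window is full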
|
76641
|
import random
import pecan
from pecan import expose, response, request
_body = pecan.x_test_body
_headers = pecan.x_test_headers
class TestController:
def __init__(self, account_id):
self.account_id = account_id
@expose(content_type='text/plain')
def test(self):
user_agent = request.headers['User-Agent'] # NOQA
limit = request.params.get('limit', '10') # NOQA
response.headers.update(_headers)
return _body
class HelloController:
@expose()
def _lookup(self, account_id, *remainder):
return TestController(account_id), remainder
class RootController:
@expose(content_type='text/plain')
def index(self):
response.headers.update(_headers)
return _body
hello = HelloController()
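# Hedged note (not in the original file): these controllers come from a benchmark/test
# harness that injects the x_test_body / x_test_headers attributes onto the pecan module
# before this module is imported, so the file cannot be run standalone. With those
# attributes patched in, the tree can be served as a plain WSGI app, e.g.:
#
#   import pecan
#   pecan.x_test_body = b'hello'
#   pecan.x_test_headers = {'X-Test': '1'}
#   from <this module> import RootController   # placeholder for the real module path
#   app = pecan.Pecan(RootController())        # pecan's WSGI application wrapper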
|
76657
|
import csv
import os
def remove_if_exist(path):
if os.path.exists(path):
os.remove(path)
def load_metadata(path):
res = {}
headers = None
with open(path, newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
for row in reader:
if headers is None:
headers = row
continue
item = {}
uid = row[0]
for index, token in enumerate(row):
if index != 0:
item[headers[index]] = token
res[uid] = item
return res
def load_specter_embeddings(path):
res = {}
dim = None
with open(path, newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
uid = row[0]
vector = row[1:]
res[uid] = vector
if dim is None:
dim = len(vector)
else:
                assert dim == len(vector), "Embedding dimension mismatch"
return res, dim
def save_index_to_uid_file(index_to_uid, index, path):
remove_if_exist(path)
with open(path, 'w') as f:
for index, uid in enumerate(index_to_uid):
f.write(f"{index} {uid}\n")
|
76674
|
import sys
import csv
from matplotlib import image as mpimg
import numpy as np
import scipy.misc
import cv2
vert_filename = sys.argv[1]
edge_filename = sys.argv[2]
img_filename = sys.argv[3]
output_img_filename = sys.argv[4]
thresh = int(sys.argv[5])
print('reading in verts...')
verts = []
with open(vert_filename, 'r') as vert_file:
reader = csv.reader(vert_file, delimiter=' ')
for row in reader:
v = [int(row[0]), int(row[1]), int(row[2])]
verts.append(v)
vert_file.close()
print('verts:', len(verts))
print('reading in edges...')
edges = []
with open(edge_filename, 'r') as edge_file:
reader = csv.reader(edge_file, delimiter=' ')
for row in reader:
e = [int(row[0]), int(row[1])]
edges.append(e)
edge_file.close()
print('edges:', len(edges))
img = mpimg.imread(img_filename)
nx, ny = img.shape
output = np.zeros((nx, ny), dtype=int)
print('building adjacency')
adj = []
for i in range(len(verts)):
adj.append([])
for e in edges:
#print(e)
v0 = e[0]
v1 = e[1]
adj[v0].append(v1)
adj[v1].append(v0)
maxs = 0
print('building max')
for i in range(len(verts)):
#print(i, adj[i])
v = verts[i]
f_v = img[v[0], v[1]]
local_max = True
for j in adj[i]:
u = verts[j]
f_u = img[u[0], u[1]]
if f_u > f_v:
local_max = False
break
if local_max and v[2] > thresh:
maxs += 1
# print('max at:', i, 'verts: (',v[1], v[0],') val:', f_v)
output[v[0], v[1]] = 255
print('maxs:', maxs)
cv2.imwrite(output_img_filename, output)
#scipy.misc.imsave(output_img_filename, output)
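# Invocation sketch (the script name is hypothetical; the five positional arguments
# follow sys.argv[1:6] as read above):
#   python find_local_maxima.py verts.txt edges.txt field.png maxima.png 10
# verts.txt holds space-delimited "row col weight" vertices, edges.txt holds "v0 v1"
# index pairs into that vertex list, field.png is the scalar image sampled at the
# vertices, maxima.png receives the 255-valued local-maximum pixels, and the final
# number is the vertex-weight threshold.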
|
76687
|
import torch
from hypothesis import given
from hypothesis import strategies as st
from subset_samplers import ConstructiveRandomSampler
from subset_samplers import ExhaustiveSubsetSampler
from subset_samplers import ProportionalConstructiveRandomSampler
from subset_samplers import RandomProportionSubsetSampler
from subset_samplers import RandomSubsetSampler
from subset_samplers import RandomSubsetSamplerWithoutRepeats
from tensor_ops import compute_subset_relations
def assert_no_repeats(collection_of_collections):
assert len(set(map(frozenset, collection_of_collections))) == len(
collection_of_collections
)
class BaseSubsetSamplerTests:
@given(st.data())
def test_sampler_generates_ordered_idx(self, data):
sampler = self.make_sampler(data)
n_video_frames, n_frames = self.draw_sampling_parameters(data)
sample_idxs = sampler.sample(n_video_frames, n_frames)
for sample_idx in sample_idxs:
assert sorted(sample_idx) == list(sample_idx)
@given(st.data())
def test_sampling_0_elements(self, data):
sampler = self.make_sampler(data)
sample = sampler.sample(12, 0)
assert sample.shape == (1, 0)
assert sample.dtype == torch.long
def draw_sampling_parameters(self, data):
n_video_frames = data.draw(st.integers(min_value=1, max_value=12))
n_frames = data.draw(st.integers(min_value=1, max_value=n_video_frames))
return n_video_frames, n_frames
def make_sampler(self, data):
raise NotImplementedError()
class TestExhaustiveSubsetSampler(BaseSubsetSamplerTests):
def make_sampler(self, data):
return ExhaustiveSubsetSampler()
class TestRandomSubsetSampler(BaseSubsetSamplerTests):
def make_sampler(self, data):
return RandomSubsetSampler(
max_samples=20, exclude_repeats=data.draw(st.booleans())
)
@given(st.data())
def test_no_repeats_when_exclude_repeats_is_set(self, data):
sampler = RandomSubsetSampler(max_samples=20, exclude_repeats=True)
n_video_frames, n_frames = self.draw_sampling_parameters(data)
sample_idxs = sampler.sample(n_video_frames, n_frames)
assert_no_repeats(sample_idxs)
class TestRandomProportionSubsetSampler(BaseSubsetSamplerTests):
def make_sampler(self, data):
return RandomProportionSubsetSampler(
p=data.draw(st.floats(min_value=1e-5, max_value=1)),
min_samples=1,
exclude_repeats=data.draw(st.booleans()),
)
class TestRandomSubsetSamplerWithoutRepeats(BaseSubsetSamplerTests):
def make_sampler(self, data):
return RandomSubsetSamplerWithoutRepeats(
max_samples=data.draw(st.integers(min_value=1, max_value=20))
)
class BaseConstructiveSamplerTests:
def _make_sampler(self):
return ConstructiveRandomSampler(max_samples=20)
def test_sampling_0_elements(self):
sampler = self._make_sampler()
sample = sampler.sample(12, 0)
print(sample)
assert sample.shape == (1, 0)
def test_sampler_generates_ordered_idx(self):
sampler = self._make_sampler()
samples = []
n_frames = 12
for scale in range(1, n_frames + 1):
samples.extend(sampler.sample(n_video_frames=12, n_frames=scale))
for sample in samples:
assert sorted(sample) == list(sample)
def test_sampler_builds_on_previous_subsets(self):
sampler = self._make_sampler()
samples = self.sample_all_scales(sampler)
for previous_samples, current_samples in zip(samples, samples[1:]):
subset_relations = compute_subset_relations(
current_samples, previous_samples
)
assert subset_relations.any(-1).all()
def test_sampler_does_not_have_repeats(self):
sampler = self._make_sampler()
samples = self.sample_all_scales(sampler)
for scale_samples in samples:
assert_no_repeats(scale_samples)
def sample_all_scales(self, sampler):
samples = []
max_set_size = 12
for sample_size in range(1, max_set_size + 1):
samples.append(sampler.sample(max_set_size, sample_size))
return samples
class TestConstructiveRandomSampler(BaseConstructiveSamplerTests):
def _make_sampler(self):
return ConstructiveRandomSampler(max_samples=20)
class TestProportionalConstructiveRandomSampler(BaseConstructiveSamplerTests):
def _make_sampler(self):
return ProportionalConstructiveRandomSampler(p=0.1)
|
76702
|
import tensorflow as tf
import numpy as np
def batches(l, n):
"""Yield successive n-sized batches from l, the last batch is the left indexes."""
for i in range(0, l, n):
yield range(i,min(l,i+n))
class Deep_Autoencoder(object):
    def __init__(self, sess, input_dim_list=[7, 64, 64, 7], transfer_function=tf.nn.relu, learning_rate=0.001):
"""input_dim_list must include the original data dimension"""
        # the list must contain at least the input dimension and one further layer
        assert len(input_dim_list) >= 2, "input_dim_list needs at least two entries"
self.W_list = []
self.encoding_b_list = []
self.decoding_b_list = []
self.dim_list = input_dim_list
self.transfer = transfer_function
        self.learning_rate = learning_rate
## Encoders parameters
for i in range(len(input_dim_list)-1):
init_max_value = 4*np.sqrt(6. / (self.dim_list[i] + self.dim_list[i+1]))
self.W_list.append(tf.Variable(tf.random_uniform([self.dim_list[i],self.dim_list[i+1]],
np.negative(init_max_value),init_max_value)))
self.encoding_b_list.append(tf.Variable(tf.random_uniform([self.dim_list[i+1]],-0.1,0.1)))
## Decoders parameters
for i in range(len(input_dim_list)-2,-1,-1):
self.decoding_b_list.append(tf.Variable(tf.random_uniform([self.dim_list[i]],-0.1,0.1)))
## Placeholder for input
self.input_x = tf.placeholder(tf.float32,[None,self.dim_list[0]])
## coding graph :
last_layer = self.input_x
for weight,bias in zip(self.W_list,self.encoding_b_list):
hidden = self.transfer(tf.matmul(last_layer,weight) + bias)
last_layer = hidden
self.hidden = hidden
## decode graph:
for weight,bias in zip(reversed(self.W_list),self.decoding_b_list):
hidden = self.transfer(tf.matmul(last_layer,tf.transpose(weight)) + bias)
last_layer = hidden
self.recon = last_layer
#self.cost = tf.reduce_mean(tf.square(self.input_x - self.recon))
self.cost =0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.recon, self.input_x), 2.0))
self.train_step = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.cost)
sess.run(tf.global_variables_initializer())
    def fit(self, X, sess, iteration=100, batch_size=12, init=False, verbose=False):
assert X.shape[1] == self.dim_list[0]
if init:
sess.run(tf.global_variables_initializer())
sample_size = X.shape[0]
for i in range(iteration):
for one_batch in batches(sample_size, batch_size):
e,op=sess.run((self.cost,self.train_step),feed_dict = {self.input_x:X[one_batch]})
if verbose and i%20==0:
#e = self.cost.eval(session = sess,feed_dict = {self.input_x: X[one_batch]})
print(" iteration :", i ,", cost:", e)
def transform(self, X, sess):
return self.hidden.eval(session = sess, feed_dict={self.input_x: X})
def getRecon(self, X, sess):
return self.recon.eval(session = sess,feed_dict={self.input_x: X})
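# Hedged usage sketch (not part of the original class): TF1-style graph mode, since the
# model uses placeholders and tf.train (under TF2, import tensorflow.compat.v1 and
# disable eager execution). The data is random and purely illustrative.
if __name__ == '__main__':
    X = np.random.rand(200, 7).astype(np.float32)
    with tf.Session() as sess:
        ae = Deep_Autoencoder(sess, input_dim_list=[7, 64, 64, 7])
        ae.fit(X, sess, iteration=100, batch_size=32, verbose=True)
        codes = ae.transform(X, sess)   # last encoder layer output, shape (200, 7)
        recon = ae.getRecon(X, sess)    # reconstructions, shape (200, 7)
        print(codes.shape, recon.shape)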
|
76710
|
from .component import Component
class Location(Component):
NAME = "location"
def __init__(self):
super().__init__()
# TODO THIS HOLDS ALL THE INFORMATION NEEDED TO LOCATE SOMETHING
self.local_x = 0
self.local_y = 0
self.global_x = 0
self.global_y = 0
self.area = None
self.level = None
def get_local_coords(self):
return self.local_x, self.local_y
|
76740
|
from time import time
import os
from random import gauss
import sys
import numpy as np
from keras.models import Sequential, load_model
from keras.layers import Dense, Activation
from Bio.SeqIO import SeqRecord
from Bio import SeqIO, Seq
from advntr.sam_utils import get_id_of_reads_mapped_to_vntr_in_bamfile, make_bam_and_index
from advntr.models import load_unique_vntrs_data
from advntr import settings
hg38 = True
PREFIX_DIR = '/mnt/hg38_dnn/' if hg38 else '/mnt/'
OUTPUT_DIR_PREFIX = '../advntr2_recruitment_comparison_hg38/' if hg38 else '../advntr2_recruitment_comparison/'
read_length = 150
kmer_length = 6
if __name__ == '__main__':
if len(sys.argv) > 1:
kmer_length = int(sys.argv[1])
# input_dim = 4 ** kmer_length * position_partition
input_dim = 4 ** kmer_length
# input_dim = read_length * 4
reduced_dimensions = 150 * (2 * kmer_length - 6)
losses = ['binary_crossentropy', 'mean_squared_error', 'mean_absolute_error', 'mean_squared_logarithmic_error', 'hinge', 'squared_hinge']
loss_to_activatio = {'binary_crossentropy': 'sigmoid',
'mean_squared_error': 'linear',
'mean_absolute_error': 'linear',
'mean_squared_logarithmic_error': 'linear',
'hinge': 'tanh',
'squared_hinge': 'tanh'}
loss_index = 0
if __name__ == '__main__':
if len(sys.argv) > 4:
loss_index = int(sys.argv[4])
loss_function = losses[loss_index]
loss_suffix = '_%s' % loss_index if loss_index > 0 else ''
result_dir = OUTPUT_DIR_PREFIX + 'hmm_dnn_comparison_%s/' % (str(kmer_length) + loss_suffix)
bowtie_result_dir = OUTPUT_DIR_PREFIX + 'hmm_dnn_comparison_bowtie/'
bowtie_working_dir = PREFIX_DIR + '/bowtie_recruitment/'
dnn_models_dir = PREFIX_DIR + '/dnn_models_%s/' % (str(kmer_length)) + loss_suffix
def align_with_bowtie(fq_file):
bowtie_alignment = fq_file[:-3] + '_bowtie_aln.sam'
if not os.path.exists(bowtie_alignment[:-4] + '.bam'):
os.system('bowtie2 -x /mnt/hg19_chromosomes/hg19_bt2_idx -f %s -S %s --threads 7' % (fq_file, bowtie_alignment))
make_bam_and_index(bowtie_alignment)
return bowtie_alignment[:-4] + '.bam'
def get_embedding_of_string(sequence, kmer_length=6):
input_dim = 4 ** kmer_length
sequence = sequence.upper()
num = 0
mapping = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
for c in 'ASDFGHJKLPOIUYTREWQZXCVBNM':
if c not in mapping.keys():
mapping[c] = 0
for i in range(len(sequence[:kmer_length])):
num += mapping[sequence[i]] * (4 ** (kmer_length - i - 1))
result = [0] * input_dim
result[num] = 1
# result = set()
# result.add(num)
highest_position = 4 ** (kmer_length-1)
for i in range(kmer_length, len(sequence)):
num -= highest_position * mapping[sequence[i-kmer_length]]
num *= 4
num += mapping[sequence[i]]
result[num] = 1
# result.add(num)
return result
def get_google_embedding_of_string(sequence):
sequence = sequence.upper()
result = [0] * input_dim
mapping = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
for i, c in enumerate(sequence):
if c not in mapping.keys():
mapping[c] = 0
result[i * 4 + mapping[c]] = 1
return result
def make_random_unit_vector(dims):
vec = [gauss(0, 1) for i in range(dims)]
mag = sum(x ** 2 for x in vec) ** .5
return [x / mag for x in vec]
def get_random_vector_set(seed=10):
import random
random.seed(seed)
random_vectors = []
for _ in range(reduced_dimensions):
random_vectors.append(make_random_unit_vector(input_dim))
return random_vectors
def get_hashed_embedding_of_string(sequence, random_vectors):
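    # Reduce the 4**k-dimensional bag-of-k-mers vector by projecting it onto a fixed
    # set of random unit vectors (a random-projection reduction); this is equivalent
    # to the NumPy dot product kept in the commented-out line below.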
original_embedding = get_embedding_of_string(sequence)
hashed_embedding = []
for i, random_vector in enumerate(random_vectors):
hashed_embedding.append(0)
        for position, is_set in enumerate(original_embedding):
            if is_set:
                hashed_embedding[i] += random_vector[position]
# hashed_embedding = np.array(random_vectors).dot(np.array(original_embedding))
return hashed_embedding
def generate_d_neighborhood(pattern, d):
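    # Enumerate every string reachable from `pattern` with up to d single-character
    # edits: the empty substitution deletes a character, 'A'/'C'/'G'/'T' substitute it,
    # and the `insertion` candidates keep it and insert a new base right after it.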
neigh = set([pattern])
for i in range(d):
addition = set([])
for p in neigh:
for j in range(len(p)):
insertion = [p[j] + inserted for inserted in 'ACGT']
for sub in [''] + ['A', 'C', 'G', 'T'] + insertion:
new_str = p[:j] + sub + p[j+1:]
addition.add(new_str)
neigh |= addition
return neigh
def get_blast_keywords(reference_vntr, keyword_size=11):
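    # Tile the VNTR locus (with 15 bp of flanking sequence on each side) into
    # overlapping keywords of length `keyword_size`, stepping by 5 or 6 bp,
    # to be used as BLAST queries.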
vntr = ''.join(reference_vntr.get_repeat_segments())
if len(vntr) < keyword_size:
min_copies = int(keyword_size / len(vntr)) + 1
vntr = str(vntr) * min_copies
locus = reference_vntr.left_flanking_region[-15:] + vntr + reference_vntr.right_flanking_region[:15]
queries = []
step_size = 5 if len(reference_vntr.pattern) != 5 else 6
for i in range(0, len(locus) - keyword_size + 1, step_size):
queries.append(locus[i:i+keyword_size])
return queries
def get_hmm_accuracy(vntr_finder, simulated_true_reads, simulated_false_filtered_reads):
output_dir = result_dir + '/%s/' % vntr_finder.reference_vntr.id
print('running BLAST')
from blast_wrapper import get_blast_matched_ids, make_blast_database
blast_dir = output_dir + 'blast_dir/'
if not os.path.exists(blast_dir):
os.makedirs(blast_dir)
vntr_id = vntr_finder.reference_vntr.id
fasta_file = blast_dir + 'reads.fasta'
records = []
for i, read in enumerate(simulated_false_filtered_reads):
        records.append(SeqRecord(seq=Seq.Seq(read), id='false_%s' % i))
for i, read in enumerate(simulated_true_reads):
records.append(SeqRecord(seq=Seq.Seq(read), id='true_%s' % i))
with open(fasta_file, 'w') as output_handle:
SeqIO.write(records, output_handle, 'fasta')
make_blast_database(fasta_file, blast_dir + 'blast_db_%s' % vntr_id)
query = '@'.join(get_blast_keywords(vntr_finder.reference_vntr))
search_id = 'search_id'
search_results = get_blast_matched_ids(query, blast_dir + 'blast_db_%s' % vntr_id, max_seq='100000', word_size='7',
evalue=sys.maxsize, search_id=search_id, identity_cutoff='100', blast_tmp_dir=blast_dir)
from collections import Counter
res = Counter(search_results)
filtered = [item for item, occur in res.items() if occur >= 2]
print('BLAST results computed')
print(len(filtered))
print(len(simulated_true_reads))
print(len(simulated_false_filtered_reads))
tp = float(len([e for e in filtered if e.startswith('true')]))
fp = float(len([e for e in filtered if e.startswith('false')]))
fn = float(len(simulated_true_reads) - tp)
tn = float(len(simulated_false_filtered_reads) - fp)
train_time = 0
passed_time = 0
precision = tp / (tp + fp) if tp > 0 else 0
recall = tp / (tp + fn)
accuracy = (100 * (tp + tn) / (fp + fn + tp + tn))
print('BLAST:')
print(tp, fp, fn, tn)
print('Precision:', precision)
print('Recall:', recall)
print('acc: %s' % accuracy)
with open(output_dir + '/blast.txt', 'w') as outfile:
outfile.write('%s\n' % train_time)
outfile.write('%s\n' % passed_time)
outfile.write('%s\n' % precision)
outfile.write('%s\n' % recall)
outfile.write('%s\n' % accuracy)
outfile.write('%s,%s,%s,%s\n' % (tp, fn, fp, tn))
return passed_time
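    # NOTE: the early return above skips the HMM evaluation below; remove it to also
    # benchmark the HMM-based read recruitment for this VNTR.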
output_dir = result_dir + '/%s/' % vntr_finder.reference_vntr.id
if os.path.exists(output_dir + '/hmm.txt') and os.path.getsize(output_dir + '/hmm.txt') > 0:
if sum(1 for _ in open(output_dir + 'hmm.txt')) > 5:
print('HMM info is already calculated')
with open(output_dir + 'hmm.txt') as infile:
lines = infile.readlines()
return float(lines[1])
train_true_reads = [read for i, read in enumerate(simulated_true_reads) if i % 2 == 0]
train_false_reads = [read for i, read in enumerate(simulated_false_filtered_reads) if i % 2 == 0]
test_true_reads = [read for i, read in enumerate(simulated_true_reads) if i % 2 == 1]
test_false_reads = [read for i, read in enumerate(simulated_false_filtered_reads) if i % 2 == 1]
start_time = time()
hmm = vntr_finder.get_vntr_matcher_hmm(read_length=read_length)
processed_true_reads = vntr_finder.find_hmm_score_of_simulated_reads(hmm, train_true_reads)
processed_false_reads = vntr_finder.find_hmm_score_of_simulated_reads(hmm, train_false_reads)
recruitment_score = vntr_finder.find_recruitment_score_threshold(processed_true_reads, processed_false_reads)
train_time = time() - start_time
print('HMM train time: %s' % train_time)
tp = 0.0
fn = 0.0
tn = 0.0
fp = 0.0
start_time = time()
true_reads = vntr_finder.find_hmm_score_of_simulated_reads(hmm, test_true_reads)
false_reads = vntr_finder.find_hmm_score_of_simulated_reads(hmm, test_false_reads)
passed_time = time() - start_time
for read in true_reads:
if read.logp > recruitment_score:
tp += 1
else:
fn += 1
for read in false_reads:
if read.logp > recruitment_score:
fp += 1
else:
tn += 1
precision = tp / (tp + fp) if tp > 0 else 0
recall = tp / (tp + fn)
accuracy = (100 * (tp + tn) / (fp + fn + tp + tn))
print('HMM: %s' % passed_time)
print(tp, fp, fn, tn)
print('Precision:', precision)
print('Recall:', recall)
print('acc: %s' % accuracy)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
with open(output_dir + '/hmm.txt', 'w') as outfile:
outfile.write('%s\n' % train_time)
outfile.write('%s\n' % passed_time)
outfile.write('%s\n' % precision)
outfile.write('%s\n' % recall)
outfile.write('%s\n' % accuracy)
outfile.write('%s,%s,%s,%s\n' % (tp, fn, fp, tn))
return passed_time
def simulate_and_store_false_reads(vntr_finder, false_reads_file, min_matches=4):
simulated_false_filtered_reads = []
reference_files = []
for chromosome in settings.CHROMOSOMES:
reference_file = settings.HG19_DIR + chromosome + '.fa'
reference_files.append(reference_file)
if hg38:
reference_files = ['/mnt/hg38_chromosomes/hg38.fa']
for reference_file in reference_files:
simulated_false_filtered_reads += vntr_finder.simulate_false_filtered_reads(reference_file, min_matches)
print(len(simulated_false_filtered_reads))
if len(simulated_false_filtered_reads) > 41000:
break
with open(false_reads_file, 'w') as outfile:
for read in simulated_false_filtered_reads:
outfile.write('%s\n' % read)
def get_true_reads_and_false_reads(vntr_finder, vntr_id):
simulated_true_reads = vntr_finder.simulate_true_reads(read_length)
print('true reads: %s' % len(simulated_true_reads))
false_reads_file = PREFIX_DIR + '/false_reads/false_reads_%s.txt' % vntr_id
if not os.path.exists(false_reads_file) or os.path.getsize(false_reads_file) == 0:
if os.path.exists(false_reads_file) and os.path.getsize(false_reads_file) == 0:
print('There is no false read in the file')
no_false_read = True
else:
no_false_read = False
min_matches = 1 if no_false_read else 4
simulate_and_store_false_reads(vntr_finder, false_reads_file, min_matches)
min_matches = 6
while True:
with open(false_reads_file) as infile:
lines = infile.readlines()
if len(lines) > 40000:
print('There are more than %s reads in the file. Trying min_matches = %s' % (len(lines), min_matches))
simulate_and_store_false_reads(vntr_finder, false_reads_file, min_matches)
min_matches += 2
else:
break
simulated_false_filtered_reads = [read.strip() for read in lines if 'N' not in read.upper()]
print('false reads: %s' % len(simulated_false_filtered_reads))
print('true reads: %s' % len(simulated_true_reads))
return simulated_true_reads, simulated_false_filtered_reads
def get_nn_model(train, three_hidden_layers=False, model_function='relu', first_layer=100, second_layer=0):
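    # Small feed-forward classifier: one or two hidden layers with the chosen activation,
    # followed by a 2-way softmax over (true read, false read), trained for 3 epochs
    # with the globally selected loss function.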
model = Sequential()
model.add(Dense(first_layer, input_dim=input_dim, kernel_initializer="uniform", activation=model_function))
# if three_hidden_layers:
if second_layer > 0:
model.add(Dense(second_layer, activation=model_function, kernel_initializer="uniform"))
model.add(Dense(2))
model.add(Activation("softmax"))
model.compile(loss=loss_function, optimizer='adam', metrics=['accuracy'])
model.fit(train[0], train[1], epochs=3, batch_size=10)
return model
def is_true(result_class):
return result_class[0] > result_class[1]
def select_positive_and_negative_reads_with_bowtie(reads, vntr_finder, label):
working_dir = bowtie_working_dir + '/%s/' % vntr_finder.reference_vntr.id
if not os.path.exists(working_dir):
os.makedirs(working_dir)
fq_file = working_dir + label + '.fa'
records = []
for i, read in enumerate(reads):
record = SeqRecord('')
record.seq = Seq.Seq(read)
record.id = 'read_%s/1' % str(i)
records.append(record)
with open(fq_file, 'w') as output_handle:
SeqIO.write(records, output_handle, 'fasta')
passed_time = time()
bowtie_bamfile = align_with_bowtie(fq_file)
bowtie_selected = len(get_id_of_reads_mapped_to_vntr_in_bamfile(bowtie_bamfile, vntr_finder.reference_vntr))
return float(bowtie_selected), float(len(reads) - bowtie_selected), time() - passed_time
def run_bowtie2(true_reads, false_reads, vntr_finder):
output_dir = bowtie_result_dir + '%s/' % vntr_finder.reference_vntr.id
if os.path.exists(output_dir + '/bowtie.txt') and os.path.getsize(output_dir + '/bowtie.txt') > 0:
if sum(1 for _ in open(output_dir + 'bowtie.txt')) > 5:
print('bowtie results is already computed')
return
train_time = 0
tp, fn, t1 = select_positive_and_negative_reads_with_bowtie(true_reads, vntr_finder, 'true')
fp, tn, t2 = select_positive_and_negative_reads_with_bowtie(false_reads, vntr_finder, 'false')
passed_time = t1 + t2
precision = tp / (tp + fp) if tp > 0 else 0
recall = tp / (tp + fn)
accuracy = float(tp + tn) / (tp + tn + fp + fn)
print('Bowtie2: %s' % passed_time)
print('Precision:', precision)
print('Recall:', recall)
print('acc: %s' % accuracy)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
with open(output_dir + '/bowtie.txt', 'w') as outfile:
outfile.write('%s\n' % train_time)
outfile.write('%s\n' % passed_time)
outfile.write('%s\n' % precision)
outfile.write('%s\n' % recall)
outfile.write('%s\n' % accuracy)
outfile.write('%s,%s,%s,%s\n' % (tp, fn, fp, tn))
def run_simulation(vntr_map, vntr_id):
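    # Simulate true/false reads for one VNTR, embed them as k-mer vectors, train (or load)
    # a small feed-forward classifier, and write precision/recall/accuracy and timing to dnn.txt.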
print('vntr:', vntr_id)
ref_vntr = vntr_map[vntr_id]
from advntr.vntr_finder import VNTRFinder
vntr_finder = VNTRFinder(ref_vntr)
simulated_true_reads, simulated_false_filtered_reads = get_true_reads_and_false_reads(vntr_finder, vntr_id)
if len(simulated_false_filtered_reads) > 30000 or len(simulated_false_filtered_reads) == 0:
print('skipping VNTR', vntr_id)
return
# map with bowtie2
# run_bowtie2(simulated_true_reads, simulated_false_filtered_reads, vntr_finder)
    # hmm_time = get_hmm_accuracy(vntr_finder, simulated_true_reads, simulated_false_filtered_reads)
    hmm_time = 0  # HMM comparison is disabled above; keep a default so the timing adjustment below still works
if not os.path.exists(dnn_models_dir):
os.makedirs(dnn_models_dir)
output_dir = result_dir + '/%s/' % vntr_finder.reference_vntr.id
model_dir = dnn_models_dir + '%s.hd5' % vntr_finder.reference_vntr.id
if os.path.exists(output_dir + 'dnn.txt') and os.path.getsize(output_dir + 'dnn.txt') > 0:
if sum(1 for _ in open(output_dir + 'dnn.txt')) > 5:
print('dnn information is already calculated')
return
true_embeddings = [get_embedding_of_string(seq) for seq in simulated_true_reads]
false_embeddings = [get_embedding_of_string(seq) for seq in simulated_false_filtered_reads]
train_x = [embedding for i, embedding in enumerate(true_embeddings) if i % 2 == 0]
start_time = time()
test_x = [embedding for i, embedding in enumerate(true_embeddings) if i % 2 == 1]
embedding_time = time() - start_time
train_y = [[1, 0]] * len(train_x)
test_y = [[1, 0]] * len(test_x)
train_x += [embedding for i, embedding in enumerate(false_embeddings) if i % 2 == 0]
start_time = time()
test_x += [embedding for i, embedding in enumerate(false_embeddings) if i % 2 == 1]
embedding_time += time() - start_time
train_y += [[0, 1]] * (len(train_x) - len(train_y))
test_y += [[0, 1]] * (len(test_x) - len(test_y))
train = [np.array(train_x), np.array(train_y)]
test = [np.array(test_x), np.array(test_y)]
first_layer = 100
second_layer = 50
start_time = time()
if os.path.exists(model_dir):
print('DNN model is already trained')
model = load_model(model_dir)
else:
model = get_nn_model(train, first_layer=first_layer, second_layer=second_layer)
model.save(model_dir)
train_time = time() - start_time
print('NN train time: %s' % train_time)
scores = model.evaluate(test[0], test[1])
start_time = time()
classes = model.predict(test[0], batch_size=128)
passed_time = embedding_time + time() - start_time
passed_time += hmm_time / len(test[0]) * len(true_embeddings) / 2
fn = 0.0
fp = 0.0
tp = 0.0
tn = 0.0
for i in range(len(test[1])):
majority = int(is_true(classes[i]))# + int(is_true(classes2[i])) + int(is_true(classes3[i]))
# print(majority)
if test[1][i][0] == 1:
if majority >= 1:
tp += 1
else:
fn += 1
else:
if majority >= 1:#is_true(classes[i]):
fp += 1
else:
tn += 1
precision = tp / (tp + fp) if tp > 0 else 0
recall = tp / (tp + fn)
accuracy = scores[1]*100
print('NN: %s' % passed_time)
print(tp, fp, fn, tn)
print('Precision:', precision)
print('Recall:', recall)
print("\n%s: %.2f%%" % (model.metrics_names[1], accuracy))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
with open(output_dir + '/dnn.txt', 'w') as outfile:
outfile.write('%s\n' % train_time)
outfile.write('%s\n' % passed_time)
outfile.write('%s\n' % precision)
outfile.write('%s\n' % recall)
outfile.write('%s\n' % accuracy)
outfile.write('%s,%s,%s,%s\n' % (tp, fn, fp, tn))
def main():
vntr_map = {}
if hg38:
reference_vntrs = load_unique_vntrs_data('vntr_data/hg38_selected_VNTRs_Illumina.db')
vntr_ids = []
for ref_vntr in reference_vntrs:
vntr_map[ref_vntr.id] = ref_vntr
if 100 >= len(ref_vntr.pattern) >= 6:
vntr_ids.append(ref_vntr.id)
else:
reference_vntrs = load_unique_vntrs_data()
for ref_vntr in reference_vntrs:
vntr_map[ref_vntr.id] = ref_vntr
from advntr.advntr_commands import get_tested_vntrs
vntr_ids = get_tested_vntrs()
print('len of reference_vntrs:', len(reference_vntrs))
print('# of vntrs: %s' % len(vntr_ids))
start, end = int(sys.argv[2]), int(sys.argv[3])
# run_simulation(vntr_map, 503431)
# exit(0)
count = 0
for vid in vntr_ids:
count += 1
if count < start or count > end:
continue
run_simulation(vntr_map, vid)
# best_f, best_s, best_acc = None, None, 0
# with open('param_training2.txt', 'w') as output:
# for f in accuracy_map.keys():
# for s in accuracy_map[f].keys():
# avg_accuracy = sum(accuracy_map[f][s]) / len(accuracy_map[f][s])
# output.write('%s %s %s\n' % (f, s, avg_accuracy))
# if avg_accuracy > best_acc:
# best_acc = avg_accuracy
# best_f = f
# best_s = s
# print(best_f, best_s, best_acc)
if __name__ == '__main__':
main()
|
76752
|
from django.conf import settings
def get_user_model_name():
"""
Returns the app_label.object_name string for the user model.
"""
return getattr(settings, "AUTH_USER_MODEL", "auth.User")
|
76764
|
import os
PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
MODEL_PATH = os.path.join(PROJECT_ROOT, 'models')
DATA_PATH = os.path.join(PROJECT_ROOT, 'data')
|
76767
|
import FWCore.ParameterSet.Config as cms
from CondCore.DBCommon.CondDBCommon_cfi import *
PoolDBESSourcebtagMuJetsWpNoTtbar = cms.ESSource("PoolDBESSource",
CondDBCommon,
toGet = cms.VPSet(
#
# working points
#
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARCSVL_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVL_T')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARCSVL_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVL_WP')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARCSVM_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVM_T')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARCSVM_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVM_WP')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARCSVT_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVT_T')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARCSVT_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVT_WP')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARCSVV1L_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVV1L_T')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARCSVV1L_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVV1L_WP')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARCSVV1M_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVV1M_T')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARCSVV1M_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVV1M_WP')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARCSVV1T_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVV1T_T')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARCSVV1T_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVV1T_WP')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARCSVSLV1L_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVSLV1L_T')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARCSVSLV1L_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVSLV1L_WP')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARCSVSLV1M_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVSLV1M_T')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARCSVSLV1M_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVSLV1M_WP')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARCSVSLV1T_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVSLV1T_T')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARCSVSLV1T_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARCSVSLV1T_WP')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARJPL_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARJPL_T')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARJPL_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARJPL_WP')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARJPM_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARJPM_T')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARJPM_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARJPM_WP')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARJPT_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARJPT_T')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARJPT_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARJPT_WP')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARTCHPT_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARTCHPT_T')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARTCHPT_v10_offline'),
label = cms.untracked.string('MUJETSWPBTAGNOTTBARTCHPT_WP')
),
))
PoolDBESSourcebtagMuJetsWpNoTtbar.connect = 'frontier://FrontierProd/CMS_COND_PAT_000'
|
76778
|
from simple_NER.utils.log import LOG
from simple_NER.rules import RuleNER
from simple_NER import Entity
from os.path import expanduser, isdir, join
from os import makedirs
try:
from padatious import IntentContainer
except ImportError:
LOG.error("padatious not found, run")
LOG.error("pip install fann2==1.0.7")
LOG.error("pip install padatious>=0.4.5")
raise
class NeuralNER(RuleNER):
def __init__(self):
# TODO XDG
cache = expanduser("~/.simple_NER")
if not isdir(cache):
makedirs(cache)
self._container = IntentContainer(join(cache, "rule_cache"))
self._rules = {}
self._examples = {}
def extract_entities(self, text, as_json=False):
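        # Run padatious intent matching against the registered rules and yield one
        # Entity per matched slot, optionally in its as_json() form.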
for rule in self._container.calc_intents(text):
for e in rule.matches:
if as_json:
yield Entity(rule.matches[e], entity_type=e,
source_text=text, confidence=rule.conf,
rules=self._rules[rule.name]).as_json()
else:
yield Entity(rule.matches[e], entity_type=e,
source_text=text, confidence=rule.conf,
rules=self._rules[rule.name])
if __name__ == "__main__":
from pprint import pprint
n = NeuralNER()
n.add_rule("name", "my name is {Person}")
for ent in n.extract_entities("the name is jarbas"):
print("TEXT:", ent.source_text)
print("ENTITY TYPE: ", ent.entity_type, "ENTITY_VALUE: ", ent.value)
print("RULES:", ent.rules)
for ent in n.extract_entities("my name is chatterbox", as_json=True):
pprint(ent)
|
76852
|
from dataclasses import dataclass
from dbt.adapters.sqlserver import (SQLServerConnectionManager,
SQLServerCredentials)
@dataclass
class SynapseCredentials(SQLServerCredentials):
@property
def type(self):
return "synapse"
class SynapseConnectionManager(SQLServerConnectionManager):
TYPE = "synapse"
TOKEN = None
|
76863
|
import pytest
from webbpsf import wfirst
from numpy import allclose
def test_WFI_psf():
"""
Just test that instantiating WFI works and can compute a PSF without raising
any exceptions
"""
wi = wfirst.WFI()
wi.calc_psf(fov_pixels=4)
def test_WFI_filters():
wi = wfirst.WFI()
filter_list = wi.filter_list
for filter in filter_list:
wi = wfirst.WFI()
wi.filter = filter
wi.calc_psf(fov_pixels=4, oversample=1, nlambda=3)
def test_aberration_detector_position_setter():
detector = wfirst.FieldDependentAberration(4096, 4096)
with pytest.raises(ValueError) as excinfo:
detector.field_position = (-1, 1)
assert 'pixel_x' in str(excinfo.value), 'Failed to raise exception for small out-of-bounds ' \
'x pixel position'
with pytest.raises(ValueError) as excinfo:
detector.field_position = (4096+1, 1)
assert 'pixel_x' in str(excinfo.value), 'Failed to raise exception for large out-of-bounds ' \
'x pixel position'
with pytest.raises(ValueError) as excinfo:
detector.field_position = (1, -1)
assert 'pixel_y' in str(excinfo.value), 'Failed to raise exception for small out-of-bounds ' \
'y pixel position'
with pytest.raises(ValueError) as excinfo:
detector.field_position = (1, 4096+1)
assert 'pixel_y' in str(excinfo.value), 'Failed to raise exception for large out-of-bounds ' \
'y pixel position'
valid_pos = (1.0, 1.0)
detector.field_position = valid_pos
assert detector._field_position == valid_pos, 'Setting field position through setter did not ' \
'update private `_field_position` value'
def test_WFI_detector_position_setter():
wfi = wfirst.WFI()
wfi.detector = 'SCA01'
valid_pos = (4000, 1000)
wfi.detector_position = valid_pos
assert wfi._detectors[wfi._detector].field_position == valid_pos, (
"Setting field position through Instrument.detector_position did not update field_position "
"for the detector's aberration optic"
)
assert wfi.detector_position == valid_pos, "`detector_position` getter doesn't reflect " \
"assignment to setter"
def test_WFI_includes_aberrations():
wfi = wfirst.WFI()
wfi.detector = 'SCA01'
osys = wfi._get_optical_system()
assert isinstance(osys[2], wfirst.FieldDependentAberration), (
"Third plane of WFIRST WFI optical system should be the "
"field dependent aberration virtual optic"
)
def test_WFI_chooses_pupil_masks():
wfi = wfirst.WFI()
def autopupil():
"""Helper to trigger pupil selection in testing"""
wavelengths, _ = wfi._get_weights()
wfi._validate_config(wavelengths=wavelengths)
wfi.filter = 'Z087'
autopupil()
assert wfi.pupil == wfi._unmasked_pupil_path, "WFI did not select unmasked pupil for Z087"
wfi.filter = 'H158'
autopupil()
assert wfi.pupil == wfi._masked_pupil_path, "WFI did not select masked pupil for H158"
wfi.filter = 'Z087'
autopupil()
assert wfi.pupil == wfi._unmasked_pupil_path, "WFI did not re-select unmasked pupil for Z087"
def _test_filter_pupil(filter_name, expected_pupil):
wfi.filter = 'Z087'
autopupil()
wfi.filter = filter_name
autopupil()
        assert wfi.pupil == expected_pupil, "Expected pupil {} " \
                                            "for filter {}".format(expected_pupil, filter_name)
_test_filter_pupil('Y106', wfi._unmasked_pupil_path)
_test_filter_pupil('J129', wfi._unmasked_pupil_path)
_test_filter_pupil('R062', wfi._unmasked_pupil_path)
_test_filter_pupil('H158', wfi._masked_pupil_path)
_test_filter_pupil('F184', wfi._masked_pupil_path)
_test_filter_pupil('W149', wfi._masked_pupil_path)
def test_WFI_limits_interpolation_range():
wfi = wfirst.WFI()
det = wfi._detectors['SCA01']
det.get_aberration_terms(1.29e-6)
det.field_position = (0, 0)
with pytest.raises(RuntimeError) as excinfo:
det.get_aberration_terms(1.29e-6)
assert 'out-of-bounds field point' in str(excinfo.value), (
"FieldDependentAberration did not error on out-of-bounds field point"
)
with pytest.raises(RuntimeError) as excinfo:
det.get_aberration_terms(1.29e-6)
assert 'out-of-bounds field point' in str(excinfo.value), (
"FieldDependentAberration did not error on out-of-bounds field point"
)
det.field_position = (2048, 2048)
# Test the get_aberration_terms function uses approximated wavelength when
# called with an out-of-bound wavelength.
assert allclose(det.get_aberration_terms(5e-6), det.get_aberration_terms(2e-6)), (
"Aberration outside wavelength range did not return closest value."
)
assert allclose(det.get_aberration_terms(1e-7), det.get_aberration_terms(0.76e-6)), (
"Aberration outside wavelength range did not return closest value."
)
def test_CGI_detector_position():
""" Test existence of the CGI detector position etc, and that you can't set it."""
cgi = wfirst.CGI()
valid_pos = (512,512)
assert cgi.detector_position == valid_pos, "CGI detector position isn't as expected"
with pytest.raises(RuntimeError) as excinfo:
cgi.detector_position = valid_pos
assert 'not adjustable' in str(excinfo.value), ("Failed to raise exception for"\
"trying to change CGI detector position.")
def test_CGI_psf(display=False):
"""
Just test that instantiating CGI works and can compute a PSF without raising
any exceptions
"""
char_spc = wfirst.CGI()
char_spc.mode = 'CHARSPC_F660'
#print('Reading instrument data from {:s}'.format(charspc._WebbPSF_basepath)
#print('Filter list: {:}'.format(charspc.filter_list))
monopsf = char_spc.calc_psf(nlambda=1, display=False)
if display:
wfirst.poppy.display_psf(monopsf)
|
76893
|
from typing import Optional, List
from platypush.message.response import Response
class PrinterResponse(Response):
def __init__(self,
*args,
name: str,
printer_type: int,
info: str,
uri: str,
state: int,
is_shared: bool,
state_message: Optional[str] = None,
state_reasons: Optional[List[str]] = None,
location: Optional[str] = None,
uri_supported: Optional[str] = None,
make_and_model: Optional[str] = None,
**kwargs):
super().__init__(*args, output={
'name': name,
'printer_type': printer_type,
'info': info,
'uri': uri,
'state': state,
'is_shared': is_shared,
'state_message': state_message,
'state_reasons': state_reasons,
'location': location,
'uri_supported': uri_supported,
'make_and_model': make_and_model,
}, **kwargs)
class PrintersResponse(Response):
def __init__(self,
*args,
printers: List[PrinterResponse],
**kwargs):
super().__init__(*args, output={p.output['name']: p.output for p in printers}, **kwargs)
class PrinterJobAddedResponse(Response):
def __init__(self,
*args,
printer: str,
job_id: int,
**kwargs):
super().__init__(*args, output={
'printer': printer,
'job_id': job_id,
}, **kwargs)
# vim:sw=4:ts=4:et:
|
76933
|
from ubinascii import hexlify
from bitcoin import bip32, bip39, script
# NETWORKS contains all constants for HD keys and addresses
from bitcoin.networks import NETWORKS
# we will use testnet:
network = NETWORKS["test"]
entropy = b'\x64\xd3\xe4\xa0\xa3\x87\xe2\x80\x21\xdf\x55\xa5\x1d\x45\x4d\xcf'
recovery_phrase = bip39.mnemonic_from_bytes(entropy)
print("Your recovery phrase:\n%s\n" % recovery_phrase)
# uncomment this line to make invalid mnemonic:
# recovery_phrase += " satoshi"
# you can check if recovery phrase is valid or not:
if not bip39.mnemonic_is_valid(recovery_phrase):
raise ValueError("Meh... Typo in the recovery?")
# convert mnemonic and password to bip-32 seed
seed = bip39.mnemonic_to_seed(recovery_phrase, password="<PASSWORD>")
print("Seed:", hexlify(seed).decode("ascii"))
# create HDKey from 64-byte seed
root_key = bip32.HDKey.from_seed(seed)
# generate an account child key:
# purpose: 84h - BIP-84
# coin type: 1h - Testnet
# account: 0h - first account
account = root_key.derive("m/84h/1h/0h")
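# "h" marks hardened derivation (child index offset by 2**31): hardened children
# cannot be derived from the parent public key alone.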
# convert HD private key to HD public key
account_pub = account.to_public()
# for Bitcoin Core: pure BIP-32 serialization
print("\nYour xpub:", account_pub.to_base58(version=NETWORKS["test"]["xpub"]))
# for Electrum and others who cares about SLIP-0132
# used for bip-84 by many wallets
print("\nYour zpub:", account_pub.to_base58(version=NETWORKS["test"]["zpub"]))
print("\nLegacy addresses:")
xpub_bip44 = root_key.derive("m/44h/1h/0h").to_public()
print("Legacy xpub:", xpub_bip44.to_base58(version=network["xpub"]))
for i in range(5):
# m/0/i is used for receiving addresses and m/1/i for change addresses
pub = xpub_bip44.derive("m/0/%d" % i)
# get p2pkh script
sc = script.p2pkh(pub)
print("Address %i: %s" % (i, sc.address(network)))
print("\nSegwit addresses:")
xpub_bip84 = root_key.derive("m/84h/1h/0h").to_public()
print("Segwit zpub:", xpub_bip84.to_base58(version=network["zpub"]))
for i in range(5):
pub = xpub_bip84.derive("m/0/%d" % i)
    # get p2wpkh (native segwit) script
sc = script.p2wpkh(pub)
print("Address %i: %s" % (i, sc.address(network)))
print("\nNested segwit addresses:")
xpub_bip49 = root_key.derive("m/49h/1h/0h").to_public()
print("Nested Segwit ypub:", xpub_bip49.to_base58(version=network["ypub"]))
for i in range(5):
pub = xpub_bip49.derive("m/0/%d" % i)
# get p2sh(p2wpkh) script
sc = script.p2sh(script.p2wpkh(pub))
print("Address %i: %s" % (i, sc.address(network)))
|
76940
|
from manim import *
from manim_ml.neural_network.layers import TripletLayer, triplet
from manim_ml.neural_network.layers.feed_forward import FeedForwardLayer
from manim_ml.neural_network.neural_network import NeuralNetwork
config.pixel_height = 720
config.pixel_width = 1280
config.frame_height = 6.0
config.frame_width = 6.0
class TripletScene(Scene):
def construct(self):
anchor_path = "../assets/triplet/anchor.jpg"
positive_path = "../assets/triplet/positive.jpg"
negative_path = "../assets/triplet/negative.jpg"
triplet_layer = TripletLayer.from_paths(anchor_path, positive_path, negative_path, grayscale=False)
triplet_layer.scale(0.08)
neural_network = NeuralNetwork([
triplet_layer,
FeedForwardLayer(5),
FeedForwardLayer(3)
])
neural_network.scale(1)
self.play(Create(neural_network), run_time=3)
self.play(neural_network.make_forward_pass_animation(), run_time=10)
|
76944
|
import typing
from pycspr.crypto import KeyAlgorithm
from pycspr.factory import create_public_key
from pycspr.serialisation.cl_type_from_bytes import decode as cl_type_from_bytes
from pycspr.serialisation.cl_value_from_bytes import decode as cl_value_from_bytes
from pycspr.types import Timestamp
from pycspr.types import cl_types
from pycspr.types import Deploy
from pycspr.types import DeployApproval
from pycspr.types import DeployArgument
from pycspr.types import DeployBody
from pycspr.types import DeployExecutableItem
from pycspr.types import DeployHeader
from pycspr.types import DeployTimeToLive
from pycspr.types import ModuleBytes
from pycspr.types import StoredContractByHash
from pycspr.types import StoredContractByHashVersioned
from pycspr.types import StoredContractByName
from pycspr.types import StoredContractByNameVersioned
from pycspr.types import Transfer
def decode(bstream: bytes, typedef: object) -> typing.Tuple[bytes, object]:
"""Decodes a deploy from a byte array.
:param bstream: An array of bytes being decoded.
:param typedef: Deploy related type definition.
:returns: A deploy related type.
"""
try:
decoder = _DECODERS[typedef]
except KeyError:
raise ValueError(f"Cannot decode {typedef} from bytes")
else:
return decoder(bstream)
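# Illustrative usage only (assumes `deploy_bytes` holds a serialised deploy):
#   remaining_bytes, deploy = decode(deploy_bytes, Deploy)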
def _decode_deploy(bstream: bytes) -> typing.Tuple[bytes, Deploy]:
bstream, header = decode(bstream, DeployHeader)
bstream, deploy_hash = cl_value_from_bytes(bstream, cl_types.CL_Type_ByteArray(32))
bstream, payment = decode(bstream, DeployExecutableItem)
bstream, session = decode(bstream, DeployExecutableItem)
bstream, approvals = _decode_deploy_approval_set(bstream)
return bstream, Deploy(
approvals=approvals,
hash=deploy_hash.value,
header=header,
payment=payment,
session=session
)
def _decode_deploy_approval(bstream: bytes) -> typing.Tuple[bytes, DeployApproval]:
algo = KeyAlgorithm(bstream[0])
if algo == KeyAlgorithm.ED25519:
key_length = 32
elif algo == KeyAlgorithm.SECP256K1:
key_length = 33
else:
raise ValueError("Invalid Key Algorithm")
pbk = bstream[1:key_length + 1]
sig = bstream[key_length + 1:key_length + 66]
bstream = bstream[1 + key_length + 66:]
return bstream, DeployApproval(
signer=create_public_key(algo, pbk),
signature=sig
)
def _decode_deploy_approval_set(
bstream: bytes
) -> typing.Tuple[bytes, typing.List[DeployApproval]]:
approvals = []
bstream, args_length = cl_value_from_bytes(bstream, cl_types.CL_Type_U32())
for _ in range(args_length.value):
bstream, approval = decode(bstream, DeployApproval)
approvals.append(approval)
return bstream, approvals
def _decode_deploy_argument(bstream: bytes) -> typing.Tuple[bytes, DeployArgument]:
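    # A deploy argument is serialised as: name, a u32 byte length, the CL value bytes,
    # then the CL type tag; the type is parsed first so the value bytes can be decoded.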
bstream, name = cl_value_from_bytes(bstream, cl_types.CL_Type_String())
bstream, val_bytes_length = cl_value_from_bytes(bstream, cl_types.CL_Type_U32())
bstream_rem, arg_cl_type = cl_type_from_bytes(bstream[val_bytes_length.value:])
_, arg_cl_value = cl_value_from_bytes(bstream, arg_cl_type)
return bstream_rem, DeployArgument(name.value, arg_cl_value)
def _decode_deploy_argument_set(
bstream: bytes
) -> typing.Tuple[bytes, typing.List[DeployArgument]]:
args = []
bstream, args_length = cl_value_from_bytes(bstream, cl_types.CL_Type_U32())
for _ in range(args_length.value):
bstream, arg = decode(bstream, DeployArgument)
args.append(arg)
return bstream, args
def _decode_deploy_body(bstream: bytes) -> typing.Tuple[bytes, DeployBody]:
bstream, payment = _decode_deploy_executable_item(bstream)
bstream, session = _decode_deploy_executable_item(bstream)
bstream, body_hash = cl_value_from_bytes(bstream, cl_types.CL_Type_ByteArray(32))
return bstream, DeployBody(payment, session, body_hash.value)
def _decode_deploy_executable_item(bstream: bytes) -> typing.Tuple[bytes, DeployExecutableItem]:
if bstream[0] == 0:
return decode(bstream, ModuleBytes)
elif bstream[0] == 1:
return decode(bstream, StoredContractByHash)
elif bstream[0] == 2:
return decode(bstream, StoredContractByHashVersioned)
elif bstream[0] == 3:
return decode(bstream, StoredContractByName)
elif bstream[0] == 4:
return decode(bstream, StoredContractByNameVersioned)
elif bstream[0] == 5:
return decode(bstream, Transfer)
raise ValueError("Invalid deploy executable item type tag")
def _decode_deploy_header(bstream: bytes) -> typing.Tuple[bytes, DeployHeader]:
bstream, account_public_key = cl_value_from_bytes(
bstream, cl_types.CL_Type_PublicKey()
)
bstream, timestamp = cl_value_from_bytes(
bstream, cl_types.CL_Type_U64()
)
bstream, ttl = cl_value_from_bytes(
bstream, cl_types.CL_Type_U64()
)
bstream, gas_price = cl_value_from_bytes(
bstream, cl_types.CL_Type_U64()
)
bstream, body_hash = cl_value_from_bytes(
bstream, cl_types.CL_Type_ByteArray(32)
)
bstream, dependencies = cl_value_from_bytes(
bstream, cl_types.CL_Type_List(cl_types.CL_Type_ByteArray(32))
)
bstream, chain_name = cl_value_from_bytes(
bstream, cl_types.CL_Type_String()
)
return bstream, DeployHeader(
account_public_key=account_public_key,
body_hash=body_hash.value,
chain_name=chain_name.value,
dependencies=dependencies.vector,
gas_price=gas_price.value,
timestamp=Timestamp(timestamp.value / 1000),
ttl=DeployTimeToLive.from_milliseconds(ttl.value)
)
def _decode_module_bytes(bstream: bytes) -> typing.Tuple[bytes, ModuleBytes]:
bstream = bstream[1:]
bstream, length = cl_value_from_bytes(bstream, cl_types.CL_Type_U32())
if length.value > 0:
module_bytes = bstream[:length.value]
bstream = bstream[length.value:]
else:
module_bytes = bytes([])
bstream, args = _decode_deploy_argument_set(bstream)
return bstream, ModuleBytes(args, module_bytes)
def _decode_stored_contract_by_hash(bstream: bytes) -> typing.Tuple[bytes, StoredContractByHash]:
bstream = bstream[1:]
bstream, contract_hash = cl_value_from_bytes(bstream, cl_types.CL_Type_ByteArray(32))
bstream, entry_point = cl_value_from_bytes(bstream, cl_types.CL_Type_String())
bstream, args = _decode_deploy_argument_set(bstream)
return bstream, StoredContractByHash(
args=args,
entry_point=entry_point.value,
hash=contract_hash.value
)
def _decode_stored_contract_by_hash_versioned(
bstream: bytes
) -> typing.Tuple[bytes, StoredContractByHashVersioned]:
bstream = bstream[1:]
bstream, contract_hash = cl_value_from_bytes(bstream, cl_types.CL_Type_ByteArray(32))
bstream, contract_version = cl_value_from_bytes(bstream, cl_types.CL_Type_U32())
bstream, entry_point = cl_value_from_bytes(bstream, cl_types.CL_Type_String())
bstream, args = _decode_deploy_argument_set(bstream)
return bstream, StoredContractByHashVersioned(
args=args,
entry_point=entry_point.value,
hash=contract_hash.value,
version=contract_version.value
)
def _decode_stored_contract_by_name(bstream: bytes) -> typing.Tuple[bytes, StoredContractByName]:
bstream = bstream[1:]
bstream, contract_name = cl_value_from_bytes(bstream, cl_types.CL_Type_String())
bstream, entry_point = cl_value_from_bytes(bstream, cl_types.CL_Type_String())
bstream, args = _decode_deploy_argument_set(bstream)
return bstream, StoredContractByName(
args=args,
entry_point=entry_point.value,
name=contract_name.value
)
def _decode_stored_contract_by_name_versioned(
bstream: bytes
) -> typing.Tuple[bytes, StoredContractByNameVersioned]:
bstream = bstream[1:]
bstream, contract_name = cl_value_from_bytes(bstream, cl_types.CL_Type_String())
bstream, contract_version = cl_value_from_bytes(bstream, cl_types.CL_Type_U32())
bstream, entry_point = cl_value_from_bytes(bstream, cl_types.CL_Type_String())
bstream, args = _decode_deploy_argument_set(bstream)
return bstream, StoredContractByNameVersioned(
args=args,
entry_point=entry_point.value,
name=contract_name.value,
version=contract_version.value
)
def _decode_transfer(bstream: bytes) -> typing.Tuple[bytes, Transfer]:
bstream = bstream[1:]
bstream, args = _decode_deploy_argument_set(bstream)
return bstream, Transfer(args)
_DECODERS = {
Deploy: _decode_deploy,
DeployApproval: _decode_deploy_approval,
DeployArgument: _decode_deploy_argument,
DeployBody: _decode_deploy_body,
DeployExecutableItem: _decode_deploy_executable_item,
DeployHeader: _decode_deploy_header,
ModuleBytes: _decode_module_bytes,
StoredContractByHash: _decode_stored_contract_by_hash,
StoredContractByHashVersioned: _decode_stored_contract_by_hash_versioned,
StoredContractByName: _decode_stored_contract_by_name,
StoredContractByNameVersioned: _decode_stored_contract_by_name_versioned,
Transfer: _decode_transfer
}
|
77067
|
import re
from os import environ
import boto3
import pytest
from botocore.exceptions import ClientError
from moto import mock_efs
from tests.test_efs.junk_drawer import has_status_code
ARN_PATT = r"^arn:(?P<Partition>[^:\n]*):(?P<Service>[^:\n]*):(?P<Region>[^:\n]*):(?P<AccountID>[^:\n]*):(?P<Ignore>(?P<ResourceType>[^:\/\n]*)[:\/])?(?P<Resource>.*)$"
STRICT_ARN_PATT = r"^arn:aws:[a-z]+:[a-z]{2}-[a-z]+-[0-9]:[0-9]+:[a-z-]+\/[a-z0-9-]+$"
SAMPLE_1_PARAMS = {
"CreationToken": "myFileSystem1",
"PerformanceMode": "generalPurpose",
"Backup": True,
"Encrypted": True,
"Tags": [{"Key": "Name", "Value": "Test Group1"}],
}
SAMPLE_2_PARAMS = {
"CreationToken": "myFileSystem2",
"PerformanceMode": "generalPurpose",
"Backup": True,
"AvailabilityZoneName": "us-west-2b",
"Encrypted": True,
"ThroughputMode": "provisioned",
"ProvisionedThroughputInMibps": 60,
"Tags": [{"Key": "Name", "Value": "Test Group1"}],
}
@pytest.fixture(scope="function")
def aws_credentials():
"""Mocked AWS Credentials for moto."""
environ["AWS_ACCESS_KEY_ID"] = "testing"
environ["AWS_SECRET_ACCESS_KEY"] = "testing"
environ["AWS_SECURITY_TOKEN"] = "testing"
environ["AWS_SESSION_TOKEN"] = "testing"
@pytest.fixture(scope="function")
def efs(aws_credentials):
with mock_efs():
yield boto3.client("efs", region_name="us-east-1")
# Testing Create
# ==============
def test_create_file_system_correct_use(efs):
from datetime import datetime
creation_token = "<PASSWORD>"
create_fs_resp = efs.create_file_system(
CreationToken=creation_token,
Tags=[{"Key": "Name", "Value": "Test EFS Container"}],
)
# Check the response.
assert has_status_code(create_fs_resp, 201)
assert create_fs_resp["CreationToken"] == creation_token
assert "fs-" in create_fs_resp["FileSystemId"]
assert isinstance(create_fs_resp["CreationTime"], datetime)
assert create_fs_resp["LifeCycleState"] == "available"
assert create_fs_resp["Tags"][0] == {"Key": "Name", "Value": "Test EFS Container"}
assert create_fs_resp["ThroughputMode"] == "bursting"
assert create_fs_resp["PerformanceMode"] == "generalPurpose"
assert create_fs_resp["Encrypted"] == False
assert create_fs_resp["NumberOfMountTargets"] == 0
for key_name in ["Value", "ValueInIA", "ValueInStandard"]:
assert key_name in create_fs_resp["SizeInBytes"]
assert create_fs_resp["SizeInBytes"][key_name] == 0
assert re.match(STRICT_ARN_PATT, create_fs_resp["FileSystemArn"])
# Check the (lack of the) backup policy.
with pytest.raises(ClientError) as exc_info:
efs.describe_backup_policy(FileSystemId=create_fs_resp["FileSystemId"])
resp = exc_info.value.response
assert resp["ResponseMetadata"]["HTTPStatusCode"] == 404
assert "PolicyNotFound" in resp["Error"]["Message"]
# Check the arn in detail
match_obj = re.match(ARN_PATT, create_fs_resp["FileSystemArn"])
arn_parts = match_obj.groupdict()
assert arn_parts["ResourceType"] == "file-system"
assert arn_parts["Resource"] == create_fs_resp["FileSystemId"]
assert arn_parts["Service"] == "elasticfilesystem"
assert arn_parts["AccountID"] == create_fs_resp["OwnerId"]
def test_create_file_system_aws_sample_1(efs):
resp = efs.create_file_system(**SAMPLE_1_PARAMS)
resp_metadata = resp.pop("ResponseMetadata")
assert resp_metadata["HTTPStatusCode"] == 201
assert set(resp.keys()) == {
"OwnerId",
"CreationToken",
"Encrypted",
"PerformanceMode",
"FileSystemId",
"FileSystemArn",
"CreationTime",
"LifeCycleState",
"NumberOfMountTargets",
"SizeInBytes",
"Tags",
"ThroughputMode",
}
assert resp["Tags"] == [{"Key": "Name", "Value": "Test Group1"}]
assert resp["PerformanceMode"] == "generalPurpose"
assert resp["Encrypted"]
policy_resp = efs.describe_backup_policy(FileSystemId=resp["FileSystemId"])
assert policy_resp["BackupPolicy"]["Status"] == "ENABLED"
def test_create_file_system_aws_sample_2(efs):
resp = efs.create_file_system(**SAMPLE_2_PARAMS)
resp_metadata = resp.pop("ResponseMetadata")
assert resp_metadata["HTTPStatusCode"] == 201
assert set(resp.keys()) == {
"AvailabilityZoneId",
"AvailabilityZoneName",
"PerformanceMode",
"ProvisionedThroughputInMibps",
"SizeInBytes",
"Tags",
"ThroughputMode",
"CreationTime",
"CreationToken",
"Encrypted",
"LifeCycleState",
"FileSystemId",
"FileSystemArn",
"NumberOfMountTargets",
"OwnerId",
}
assert resp["ProvisionedThroughputInMibps"] == 60
assert resp["AvailabilityZoneId"] == "usw2-az1"
assert resp["AvailabilityZoneName"] == "us-west-2b"
assert resp["ThroughputMode"] == "provisioned"
policy_resp = efs.describe_backup_policy(FileSystemId=resp["FileSystemId"])
assert policy_resp["BackupPolicy"]["Status"] == "ENABLED"
def test_create_file_system_az_name_given_backup_default(efs):
resp = efs.create_file_system(AvailabilityZoneName="us-east-1e")
policy_resp = efs.describe_backup_policy(FileSystemId=resp["FileSystemId"])
assert policy_resp["BackupPolicy"]["Status"] == "ENABLED"
def test_create_file_system_no_creation_token_given(efs):
# Note that from the API docs, it would seem this should create an error. However it
# turns out that botocore just automatically assigns a UUID.
resp = efs.create_file_system()
assert resp["ResponseMetadata"]["HTTPStatusCode"] == 201
assert "CreationToken" in resp
def test_create_file_system_file_system_already_exists(efs):
efs.create_file_system(CreationToken="foo")
with pytest.raises(ClientError) as exc_info:
efs.create_file_system(CreationToken="foo")
resp = exc_info.value.response
assert resp["ResponseMetadata"]["HTTPStatusCode"] == 409
assert "FileSystemAlreadyExists" in resp["Error"]["Message"]
# Testing Describe
# ================
def test_describe_file_systems_minimal_case(efs):
# Create the file system.
create_fs_resp = efs.create_file_system(CreationToken="foobar")
create_fs_resp.pop("ResponseMetadata")
# Describe the file systems.
desc_fs_resp = efs.describe_file_systems()
desc_fs_resp_metadata = desc_fs_resp.pop("ResponseMetadata")
assert desc_fs_resp_metadata["HTTPStatusCode"] == 200
# Check the list results.
fs_list = desc_fs_resp["FileSystems"]
assert len(fs_list) == 1
file_system = fs_list[0]
assert set(file_system.keys()) == {
"CreationTime",
"CreationToken",
"Encrypted",
"LifeCycleState",
"PerformanceMode",
"SizeInBytes",
"Tags",
"ThroughputMode",
"FileSystemId",
"FileSystemArn",
"NumberOfMountTargets",
"OwnerId",
}
assert file_system["FileSystemId"] == create_fs_resp["FileSystemId"]
# Pop out the timestamps and see if the rest of the description is the same.
create_fs_resp["SizeInBytes"].pop("Timestamp")
file_system["SizeInBytes"].pop("Timestamp")
assert file_system == create_fs_resp
def test_describe_file_systems_aws_create_sample_2(efs):
efs.create_file_system(**SAMPLE_2_PARAMS)
# Describe the file systems.
desc_resp = efs.describe_file_systems()
desc_fs_resp_metadata = desc_resp.pop("ResponseMetadata")
assert desc_fs_resp_metadata["HTTPStatusCode"] == 200
# Check the list results.
fs_list = desc_resp["FileSystems"]
assert len(fs_list) == 1
file_system = fs_list[0]
assert set(file_system.keys()) == {
"AvailabilityZoneId",
"AvailabilityZoneName",
"CreationTime",
"CreationToken",
"Encrypted",
"LifeCycleState",
"PerformanceMode",
"ProvisionedThroughputInMibps",
"SizeInBytes",
"Tags",
"ThroughputMode",
"FileSystemId",
"FileSystemArn",
"NumberOfMountTargets",
"OwnerId",
}
assert file_system["ProvisionedThroughputInMibps"] == 60
assert file_system["AvailabilityZoneId"] == "usw2-az1"
assert file_system["AvailabilityZoneName"] == "us-west-2b"
assert file_system["ThroughputMode"] == "provisioned"
def test_describe_file_systems_paging(efs):
# Create several file systems.
for i in range(10):
efs.create_file_system(CreationToken="foobar_{}".format(i))
# First call (Start)
# ------------------
# Call the tested function
resp1 = efs.describe_file_systems(MaxItems=4)
# Check the response status
assert has_status_code(resp1, 200)
# Check content of the result.
resp1.pop("ResponseMetadata")
assert set(resp1.keys()) == {"NextMarker", "FileSystems"}
assert len(resp1["FileSystems"]) == 4
fs_id_set_1 = {fs["FileSystemId"] for fs in resp1["FileSystems"]}
# Second call (Middle)
# --------------------
# Get the next marker.
resp2 = efs.describe_file_systems(MaxItems=4, Marker=resp1["NextMarker"])
# Check the response status
resp2_metadata = resp2.pop("ResponseMetadata")
assert resp2_metadata["HTTPStatusCode"] == 200
# Check the response contents.
assert set(resp2.keys()) == {"NextMarker", "FileSystems", "Marker"}
assert len(resp2["FileSystems"]) == 4
assert resp2["Marker"] == resp1["NextMarker"]
fs_id_set_2 = {fs["FileSystemId"] for fs in resp2["FileSystems"]}
assert fs_id_set_1 & fs_id_set_2 == set()
# Third call (End)
# ----------------
# Get the last marker results
resp3 = efs.describe_file_systems(MaxItems=4, Marker=resp2["NextMarker"])
# Check the response status
resp3_metadata = resp3.pop("ResponseMetadata")
assert resp3_metadata["HTTPStatusCode"] == 200
# Check the response contents.
assert set(resp3.keys()) == {"FileSystems", "Marker"}
assert len(resp3["FileSystems"]) == 2
assert resp3["Marker"] == resp2["NextMarker"]
fs_id_set_3 = {fs["FileSystemId"] for fs in resp3["FileSystems"]}
assert fs_id_set_3 & (fs_id_set_1 | fs_id_set_2) == set()
def test_describe_file_systems_invalid_marker(efs):
with pytest.raises(ClientError) as exc_info:
efs.describe_file_systems(Marker="fiddlesticks")
resp = exc_info.value.response
assert has_status_code(resp, 400)
assert "BadRequest" in resp["Error"]["Message"]
def test_describe_file_systems_invalid_creation_token(efs):
resp = efs.describe_file_systems(CreationToken="fizzle")
assert has_status_code(resp, 200)
assert len(resp["FileSystems"]) == 0
def test_describe_file_systems_invalid_file_system_id(efs):
with pytest.raises(ClientError) as exc_info:
efs.describe_file_systems(FileSystemId="fs-29879313")
resp = exc_info.value.response
assert has_status_code(resp, 404)
assert "FileSystemNotFound" in resp["Error"]["Message"]
def test_describe_file_system_creation_token_and_file_system_id(efs):
with pytest.raises(ClientError) as exc_info:
efs.describe_file_systems(CreationToken="<PASSWORD>", FileSystemId="fs-07987987")
resp = exc_info.value.response
assert has_status_code(resp, 400)
assert "BadRequest" in resp["Error"]["Message"]
# Testing Delete
# ==============
def test_delete_file_system_minimal_case(efs):
# Create the file system
resp = efs.create_file_system()
# Describe the file system, prove it shows up.
desc1 = efs.describe_file_systems()
assert len(desc1["FileSystems"]) == 1
assert resp["FileSystemId"] in {fs["FileSystemId"] for fs in desc1["FileSystems"]}
# Delete the file system.
del_resp = efs.delete_file_system(FileSystemId=resp["FileSystemId"])
assert has_status_code(del_resp, 204)
# Check that the file system is no longer there.
desc2 = efs.describe_file_systems()
assert len(desc2["FileSystems"]) == 0
def test_delete_file_system_invalid_file_system_id(efs):
with pytest.raises(ClientError) as exc_info:
efs.delete_file_system(FileSystemId="fs-2394287")
resp = exc_info.value.response
assert has_status_code(resp, 404)
assert "FileSystemNotFound" in resp["Error"]["Message"]
|
77089
|
import numpy as np
from scipy.optimize import least_squares
from scipy.special import gamma
from scipy.stats import gengamma
from percentile_3_moments_first_guess import percentile_3_moments_first_guess
from tqdm import trange
import sys
def percentile_3_moments(trA,trAsq,trAcub,proba,MaxFunEvals):
""" percentile_3_moments returns the approximate percentiles of a normal form x'*A*x (where x is a multivariate standard normal distribution and A is a real symmetric matrix) by conserving its first 3 moments. The normal form is approximated with a generalized gamma distribution (which has 3 parameters).
Inputs:
- trA [1-dim numpy array of floats]: Trace of Matrix A. Several values are allowed (put them in a vector) in order to compute the percentiles of several normal forms.
- trAsq [1-dim numpy array of floats - size=trA.size]: Trace of A^2.
- trAcub [1-dim numpy array of floats - size=trA.size]: Trace of A^3.
N.B.: Remember that tr(A)=sum of the eigenvalues - Tr(A^2)=sum of squared eigenvalues - etc. It is usually quicker to compute trA, trAsq and trAcub from the eigenvalues of A.
- proba [1-dim numpy array of floats]: the percentage at which the percentile is computed (for ex. 0.95) -> 0<=proba<=1. Several values are allowed (put them in a vector) in order to compute the percentiles at different percentages.
- MaxFunEvals: "max_nfev" option for "least_squares" - see python help of "scipy.optimize.least_squares"
Outputs:
- alpha [numpy array of floats - size=trA.size]
- beta [numpy array of floats - size=trA.size]
- delta [numpy array of floats - size=trA.size]
alpha, beta and delta are the parameters of the generalized gamma distribution. More details in:
'A General Theory on Spectral Analysis for Irregularly Sampled Time Series. I. Frequency Analysis', <NAME> and <NAME>
- percentile [numpy array of floats - dim=(trA.size,proba.size)]: the percentiles.
-----------------------------
WARNING FOR EXTRA USES:
    If trA, trAsq and trAcub are vectors, the parameters alpha, beta and delta of the generalized gamma distribution are determined for the first entry of those vectors. They are then used as a first guess for the next entry of trA, trAsq and trAcub to determine the new values of alpha, beta and delta, and so on. We thus implicitly assume that alpha, beta and delta change slowly when going through the values of trA, trAsq and trAcub, which is quite realistic in our case (the confidence levels slowly vary along a frequency (periodogram) or along the translation time (scalogram)).
-----------------------------
This is part of WAVEPAL
(C) 2016 <NAME>"""
m=trA.size
nproba=proba.size
percentile=np.zeros((m,nproba))
l=0
while (trA[l]==0. and trAsq[l]==0. and trAcub[l]==0.):
l+=1
g=trAsq[l]/trA[l]
R=trA[l]**2/trAsq[l]
alpha0,beta0,delta0=percentile_3_moments_first_guess(g,R)
c0=np.zeros(3); c0[0]=alpha0; c0[1]=beta0; c0[2]=delta0 # first guess for the first frequency
alpha=np.zeros(m)
beta=np.zeros(m)
delta=np.zeros(m)
myfun1=lambda x: x[1]*gamma(x[0]+1.0/x[2])/gamma(x[0])
myfun2=lambda x: x[1]**2*gamma(x[0]+2.0/x[2])/gamma(x[0])
myfun3=lambda x: x[1]**3*gamma(x[0]+3.0/x[2])/gamma(x[0])
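    # myfun1..3 are the first three raw moments of a generalized gamma distribution with
    # shape alpha, scale beta and power delta: E[X^r] = beta^r * Gamma(alpha + r/delta) / Gamma(alpha).
    # They are matched below to the raw moments of the quadratic form Q = x'Ax (x standard normal),
    # whose cumulants are kappa_r = 2^(r-1) * (r-1)! * tr(A^r), giving
    # E[Q] = trA, E[Q^2] = 2*trAsq + trA^2, E[Q^3] = 8*trAcub + 6*trA*trAsq + trA^3.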
print "Root-searching for the coefficients of the generalized gamma distribution:"
for k in trange(m):
if (trA[k]==0. and trAsq[k]==0. and trAcub[k]==0.):
continue
else:
moment1=trA[k]
moment2=2.0*trAsq[k]+trA[k]**2
moment3=8.0*trAcub[k]+6.0*trA[k]*trAsq[k]+trA[k]**3
# NOTE: I must use a method which does not give negative parameters as a solution
# => "least_squares" is suitable for that, because It allows the user to provide bounds on the parameter values
# !!! absolute() for use with least_squares (don't forget we look for the zeros)
F=lambda x: [np.absolute(myfun1(x)-moment1),np.absolute(myfun2(x)-moment2),np.absolute(myfun3(x)-moment3)]
answ=least_squares(F,c0,bounds=(0.0*c0,1000.0*c0),ftol=1.e-15,xtol=1.e-09,max_nfev=MaxFunEvals)
try:
assert answ.status>0
except AssertionError:
print "Error in percentile_3_moments.py with function least_squares"
sys.exit(1)
alphak=answ.x[0]
betak=answ.x[1]
deltak=answ.x[2]
c0=answ.x # first guess for the next frequency (whose solution should be close to the current one)
percentile[k,:]=gengamma.ppf(proba,alphak,deltak,scale=betak)
alpha[k]=alphak
beta[k]=betak
delta[k]=deltak
return alpha,beta,delta,percentile
|
77104
|
from setuptools import setup, find_packages
VERSION_NUMBER = '0.1.0'
with open('requirements.txt', 'rb') as handle:
REQUIREMENTS = [
x.decode('utf8') for x in handle.readlines()
]
with open('dev_requirements.txt', 'rb') as handle:
TEST_REQUIREMENTS = [
x.decode('utf8') for x in handle.readlines()
]
setup(
name='apache_beam_example',
version=VERSION_NUMBER,
description="",
long_description=open("README.md").read(),
# Get more strings from
# http://pypi.python.org/pypi?:action=list_classifiers
classifiers=[
"Programming Language :: Python",
],
keywords='',
author='<NAME>',
author_email='<EMAIL>',
url='',
license='GPL',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
    install_requires=REQUIREMENTS,
    tests_require=TEST_REQUIREMENTS
)
|
77119
|
import json

# Invert the key/value pairs found in json.txt and write the resulting mapping
# to dict.txt as valid JSON.
with open("json.txt", "r", encoding="utf-8") as w:
    arr = json.load(w)
s = dict()
for item in arr:
    s[item["value"]] = item["key"]
print(s)
with open("dict.txt", "w", encoding="utf-8") as f:
    json.dump(s, f, ensure_ascii=False, indent=4)
|
77127
|
import ee
# Initialize the GEE API
# try:
# ee.Initialize()
# except Exception:
# ee.Authenticate()
# ee.Initialize()
# geemap:A Python package for interactive mapping with Google Earth Engine, ipyleaflet, and ipywidgets
# Documentation: https://geemap.org
import geemap
from geemap import ml
import pickle
# from geemap import ee_basemaps
import matplotlib.pyplot as plt
import numpy as np
#eemont
import eemont
# import developed utilities
# import Utilities as ut
from Utilities import *
# geetols: Google earth engine tools
# https://github.com/gee-community/gee_tools
import geetools
from geetools import tools
# hydrafloods: Hydrologic Remote Sensing Analysis for Floods
#https://github.com/Servir-Mekong/hydra-floods
import hydrafloods as hf
from hydrafloods import geeutils
# Ipywidgets for GUI design
import ipywidgets as ipw
from IPython.display import display
from ipywidgets import HBox, VBox, Layout
# A simple Python file chooser widget for use in Jupyter/IPython in conjunction with ipywidgets
# https://pypi.org/project/ipyfilechooser/
from ipyfilechooser import FileChooser
# Plotly Python API for interactive graphing
import plotly
import plotly.express as px
import plotly.graph_objects as go
# Pandas - Python Data Analysis Library for data analysis and manipulation
import pandas as pd
# Miscellaneous Python modules
from datetime import datetime, timedelta
import os
class Toolbox:
def __init__(self):
#----------------------------------------------------------------------------------------------------
""" UI Design"""
#---------------------------------------------------------------------------------------------------
# Program Title
Title_text = ipw.HTML(
"<h3 class= 'text-center'><font color = 'blue'>Python-GEE Surface Water Analysis Toolbox v.1.0.3</font>")
style = {'description_width': 'initial'}
# Image Processing Tab
#************************************************************************************************
# Image Parameters UI
dataset_description = ipw.HTML(value = f"<b><font color='blue'>{'Satellite Imagery Parameters:'}</b>")
dataset_Label = ipw.Label('Select Dataset:', layout=Layout(margin='5px 0 0 5px')) #top right bottom left
Platform_options = ['Landsat', 'Sentinel-1', 'Sentinel-2', 'USDA NAIP' ]
self.Platform_dropdown = ipw.Dropdown(options = Platform_options, value = None,
layout=Layout(width='150px', margin='5px 0 0 5px'))
filtering_Label = ipw.Label('Speckle filter:', layout=Layout(margin='5px 0 0 5px'))
filtering_options = ['Refined-Lee', 'Perona-Malik', 'P-median', 'Lee Sigma', 'Gamma MAP','Boxcar Convolution']
self.filter_dropdown = ipw.Dropdown(options = filtering_options, value = 'Refined-Lee',
layout=Layout(width='150px', margin='5px 0 0 15px'))
self.filter_dropdown.disabled = True
PlatformType = HBox([dataset_Label, self.Platform_dropdown])
FilterType = HBox([filtering_Label, self.filter_dropdown])
# Study period definition
#************************************************************************************************
# Start date picker
lbl_start_date = ipw.Label('Start Date:', layout=Layout(margin='5px 0 0 5px'))
self.start_date = ipw.DatePicker(value = datetime.now()-timedelta(7), disabled=False,
layout=Layout(width='150px', margin='5px 0 0 30px'))
start_date_box = HBox([lbl_start_date, self.start_date])
# End date picker
lbl_end_date = ipw.Label('End Date:', layout=Layout(margin='5px 0 0 5px'))
self.end_date = ipw.DatePicker(value = datetime.now(), disabled=False,
layout=Layout(width='150px', margin='5px 0 0 34px'))
end_date_box = HBox([lbl_end_date, self.end_date])
datePickers = VBox([start_date_box, end_date_box])
# Cloud threshold for filtering data
#************************************************************************************************
# Set cloud threshold
self.cloud_threshold = ipw.IntSlider(description = 'Cloud Threshold:', orientation = 'horizontal',
value = 50, step = 5, style = style)
imageParameters = VBox([dataset_description, PlatformType, FilterType, datePickers, self.cloud_threshold],
layout=Layout(width='305px', border='solid 2px black'))
# Study Area definition
#************************************************************************************************
# Option to use a map drawn boundary or upload shapefile
StudyArea_description = ipw.HTML(value = f"<b><font color='blue'>{'Study Area Definition:'}</b>")
self.user_preference = ipw.RadioButtons(options=['Map drawn boundary','Upload boundary'], value='Map drawn boundary')
self.file_selector = FileChooser(description = 'Upload', filter_pattern = ["*.shp"], use_dir_icons = True)
# Retrieve and process satellite images
#***********************************************************************************************
# Button to retrieve and process satellite images from the GEE platform
self.imageProcessing_Button = ipw.Button(description = 'Process images',
tooltip='Click to process images', button_style = 'info',
layout=Layout(width='150px', margin='5px 0 0 50px', border='solid 2px black'))
# Study area UI and process button container
# ************************************************************************************************
StudyArea = VBox(children = [StudyArea_description, self.user_preference, self.imageProcessing_Button],
layout=Layout(width='300px', border='solid 2px black', margin='0 0 0 10px'))
# Results UI for displaying number and list of files
#*****************************************************************************************************
lbl_results = ipw.HTML(value = f"<b><font color='blue'>{'Processing Results:'}</b>")
lbl_images = ipw.Label('No. of processed images:')
self.lbl_RetrievedImages = ipw.Label()
display_no_images = HBox([lbl_images, self.lbl_RetrievedImages])
lbl_files = ipw.Label('List of files:')
self.lst_files = ipw.Select(layout=Layout(width='360px', height='100px'))
image_Results = VBox([lbl_results, display_no_images, lbl_files, self.lst_files],
layout=Layout(width='400px', border='solid 2px black', margin='0 0 0 10px'))
# Container for Image Processing Tab
#************************************************************************************************
imageProcessing_tab = HBox([imageParameters, StudyArea, image_Results])
# Water Extraction Tab
#*************************************************************************************************
# Water extraction indices
water_index_options = ['NDWI','MNDWI','DSWE', 'AWEInsh', 'AWEIsh']
lbl_indices = ipw.Label('Water Index:', layout=Layout(margin='5px 0 0 5px'))
self.water_indices = ipw.Dropdown(options = water_index_options, value = 'NDWI',
layout=Layout(width='100px', margin='5px 0 0 63px'))
display_indices = HBox([lbl_indices, self.water_indices])
# Color widget for representing water
lbl_color = ipw.Label('Color:', layout=Layout(margin='5px 0 0 5px'))
self.index_color = ipw.ColorPicker(concise = False, value = 'blue',layout=Layout(width='100px', margin='5px 0 0 101px'))
display_color_widget = HBox([lbl_color, self.index_color])
# Water index threshold selection
threshold_options = ['Simple','Otsu']
lbl_threshold_method = ipw.Label('Thresholding Method:', layout=Layout(margin='5px 0 0 5px'))
self.threshold_dropdown = ipw.Dropdown(options = threshold_options,value = 'Simple',
layout=Layout(width='100px', margin='5px 0 0 10px'))
display_thresholds = HBox([lbl_threshold_method, self.threshold_dropdown])
lbl_threshold = ipw.Label('Threshold value:', layout=Layout(margin='5px 0 5px 5px'))
self.threshold_value = ipw.BoundedFloatText(value=0.000, min = -1.0, max = 1.0, step = 0.050,
layout=Layout(width='100px', margin='5px 0 0 40px'))
display_threshold_widget = HBox([lbl_threshold, self.threshold_value])
water_index_Box = VBox([display_indices, display_thresholds, display_threshold_widget, display_color_widget],
layout=Layout(width='250px', border='solid 2px black'))
self.extractWater_Button = ipw.Button(description = 'Extract Water', tooltip='Click to extract surface water',
button_style = 'info',
layout=Layout(width='150px', margin='5px 0 0 20px', border='solid 2px black'))
Extraction_tab = HBox([water_index_Box, self.extractWater_Button])
self.extractWater_Button.disabled = True
# Spatial Analysis Tab
#**************************************************************************************************
self.water_Frequency_button = ipw.Button(description = 'Compute Water Frequency',
                                                  tooltip='Click to compute water occurrence frequency',
button_style = 'info',
layout=Layout(width='200px', border='solid 2px black',margin='5 0 0 50px'))
self.water_Frequency_button.disabled = True
self.Depths_Button = ipw.Button(description = 'Compute Depth Map',
tooltip='Click to generate depth maps', button_style = 'info',
layout=Layout(width='200px', border='solid 2px black',margin='5 0 0 50px'))
self.Depths_Button.disabled = True
self.elevData_options = ipw.Dropdown(options=['NED','SRTM','User DEM'], value='NED', description='Elev. Dataset:',
layout=Layout(width='210px', margin='0 0 0 10px'), style = style)
self.elevData_options.disabled = True
self.elev_Methods = ipw.Dropdown(options=['Random Forest','Mod_Stumpf','Mod_Lyzenga','FwDET'], value='Random Forest',
description='Depth method:',
layout=Layout(width='210px', margin='0 0 0 10px'), style = style)
self.elev_Methods.disabled = True
self.userDEM = ipw.Dropdown(description='Select GEE asset:',
layout=Layout(width='300px', margin='0 0 0 10px'), style = style)
lbl_Elev = ipw.Label('Elevation Dataset:', layout=Layout(margin='0 0 0 10px'))
elev_Box = HBox([self.Depths_Button, self.elev_Methods, self.elevData_options])
self.zonalAnalysis_Button = ipw.Button(description = 'Zonal Analysis',
                                                tooltip='Click to perform zonal analysis', button_style = 'info',
layout=Layout(width='200px', border='solid 2px black',margin='5 0 0 50px'))
# Spatial_Analysis_Tab = VBox([water_Frequency_button, elev_Box, zonalAnalysis_Button])
Spatial_Analysis_Tab = VBox([self.water_Frequency_button, elev_Box])
# Ploting and Statistics Tab
#***************************************************************************************************
lbl_Area_Plotting = ipw.HTML(value = f"<b><font color='blue'>{'Surface Water Area Computation:'}</b>")
self.area_unit = ipw.Dropdown(options = ['Square m','Square Km', 'Hectares', 'Acre'], value = 'Square m',
description = 'Unit for water surface area:', style=style,
tooltip='Select unit for areas')
self.plot_button = ipw.Button(description = 'Compute and Plot Areas', tooltip='Click to plot graph', button_style = 'info',
layout=Layout(width='170px', margin='10 0 0 200px', border='solid 2px black'))
self.plot_button.disabled = True
# lbl_depth_Plotting = ipw.Label(value ='Plot depth hydrograph at a location:', layout=Layout(margin='10px 0 0 0'))
lbl_depth_Plotting = ipw.HTML(value = f"<b><font color='blue'>{'Plot depth hydrograph at a location:'}</b>")
self.point_preference = ipw.RadioButtons(options=['Map drawn point','Enter coordinates'],
value='Map drawn point')
self.coordinates_textbox = ipw.Text(layout=Layout(width='200px'))
lbl_coordinates = ipw.Label(value='Enter Long, Lat in decimal degrees')
# point_selector = FileChooser(description = 'Upload point', filter_pattern = ["*.shp"], use_dir_icons = True)
self.depth_plot_button = ipw.Button(description = 'Plot depths', tooltip='Click to plot depth hydrograph', button_style = 'info',
layout=Layout(width='170px', margin='10 0 0 100px', border='solid 2px black'))
self.depth_plot_button.disabled = True
depth_box = VBox(children = [lbl_depth_Plotting,self.point_preference, self.depth_plot_button])
plotting_box = VBox([lbl_Area_Plotting, self.area_unit, self.plot_button, depth_box],
layout=Layout(width='310px', border='solid 2px black'))
lbl_Stats = ipw.HTML(value = f"<b><font color='blue'>{'Summary Statistics:'}</b>")
self.lbl_Max_Area = ipw.Label(value ='', layout=Layout(width='100px')) #top right bottom left
self.lbl_Min_Area = ipw.Label(value ='', layout=Layout(width='100px'))
self.lbl_Avg_Area = ipw.Label(value ='', layout=Layout(width='100px'))
self.lbl_Max_Depth = ipw.Label(value ='', layout=Layout(width='100px'))
self.lbl_Min_Depth = ipw.Label(value ='', layout=Layout(width='100px'))
self.lbl_Avg_Depth = ipw.Label(value ='', layout=Layout(width='100px'))
self.cap_Max_Area = ipw.Label(value ='Max. Area:')
self.cap_Min_Area = ipw.Label(value ='Min. Area:')
self.cap_Avg_Area = ipw.Label(value ='Avg. Area:')
self.cap_Max_Depth = ipw.Label(value ='Max. Depth:')
self.cap_Min_Depth = ipw.Label(value ='Min. Depth:')
self.cap_Avg_Depth = ipw.Label(value ='Avg. Depth:')
max_box = HBox([self.cap_Max_Area,self.lbl_Max_Area, self.cap_Max_Depth,self.lbl_Max_Depth])
min_box = HBox([self.cap_Min_Area,self.lbl_Min_Area, self.cap_Min_Depth,self.lbl_Min_Depth])
avg_box = HBox([self.cap_Avg_Area,self.lbl_Avg_Area, self.cap_Avg_Depth,self.lbl_Avg_Depth])
stats_box = VBox([lbl_Stats, max_box, min_box, avg_box])
self.file_selector1 = FileChooser(description = 'Select folder and filename', filter_pattern = "*.csv", use_dir_icons = True)
self.file_selector1.title = 'Select Folder and Filename'
self.file_selector1.default_path = os.getcwd()
self.save_data_button = ipw.Button(description = 'Save Data',tooltip='Click to save computed areas to file',button_style = 'info',
layout=Layout(width='100px', border='solid 2px black',margin='5 0 0 50px'))
lbl_Save = ipw.HTML(value = f"<b><font color='blue'>{'Save Data:'}</b>")
stats_save_box = VBox(children=[stats_box, lbl_Save, self.file_selector1, self.save_data_button],
layout=Layout(width='550px', border='solid 2px black', margin='0 0 0 10px'))
plot_stats_tab = HBox(children=[plotting_box, stats_save_box])
# Downloads Tab
#***************************************************************************************************
self.files_to_download = ipw.RadioButtons(options=['Satellite Images', 'Water Mask', 'Water Frequency', 'Depth Maps',
'DSWE Images'], value='Satellite Images',
description='Files to download:', style = style)
self.download_location = ipw.RadioButtons(options=['Google Drive', 'Local Disk'],
value='Google Drive', description='Download Location:', style = style)
self.folder_name = ipw.Text(description='Folder Name:')
self.folder_selector = FileChooser(description = 'Select Folder', show_only_dirs = True, use_dir_icons = True)
self.folder_selector.title = '<b>Select a folder</b>'
self.folder_selector.default_path = os.getcwd()
self.download_button = ipw.Button(description = 'Download',
                                          tooltip='Click to download water images', button_style = 'info')
self.download_button.disabled = True
download_settings = VBox(children=[self.files_to_download, self.download_location, self.folder_name])
download_tab = HBox([download_settings, self.download_button])
# variable to hold the random forest classifier
self.rf_ee_classifier = None
# Functions to control UI changes and parameter settings
#****************************************************************************************************
def platform_index_change(change):
"""
            Function to set image visualization parameters, hide or show some UI components and
show water indices that are applicable to the type of satellite image selected
args:
None
returns:
None
"""
try:
global img_type
global visParams
global water_index_options
if self.Platform_dropdown.value == 'Landsat':
visParams = {'bands': ['red', 'green', 'blue'],
'min': 0,
'max': 3000,
}
self.cloud_threshold.disabled = False
self.water_indices.disabled = False
self.index_color.disabled = False
self.threshold_value.disabled = False
self.water_indices.options = ['NDWI','MNDWI','DSWE','AWEInsh', 'AWEIsh']
self.threshold_dropdown.options = ['Simple','Otsu']
self.filter_dropdown.disabled = True
elif self.Platform_dropdown.value == 'Sentinel-1':
visParams = {'min': -25,'max': 0}
self.cloud_threshold.disabled = True
self.water_indices.options = ['VV','VH']#,'NDPI','NVHI', 'NVVI']
self.index_color.disabled = False
self.threshold_value.disabled = True
self.threshold_dropdown.options = ['Otsu']
self.filter_dropdown.disabled = False
elif self.Platform_dropdown.value == 'Sentinel-2':
visParams = {'bands': ['red', 'green', 'blue'],
'min': 0.0,
'max': 3000}
self.cloud_threshold.disabled = False
self.water_indices.disabled = False
self.index_color.disabled = False
self.threshold_value.disabled = False
self.water_indices.options = ['NDWI','MNDWI']
self.threshold_dropdown.options = ['Simple','Otsu']
self.filter_dropdown.disabled = True
elif self.Platform_dropdown.value == 'USDA NAIP':
visParams = {'bands': ['R', 'G','B'],
'min': 0.0,
'max': 255.0}
self.threshold_value.disabled = False
self.water_indices.disabled = False
self.index_color.disabled = False
self.water_indices.options = ['NDWI']
self.threshold_dropdown.options = ['Simple','Otsu']
self.filter_dropdown.disabled = True
except Exception as e:
print(e)
# Link widget to function
self.Platform_dropdown.observe(platform_index_change, 'value')
def showFileSelector(button):
"""
Function to show or hide shapefile upload widget
args:
None
returns:
None
"""
if button['new']:
StudyArea.children = [StudyArea_description, self.user_preference, self.file_selector, self.imageProcessing_Button]
else:
StudyArea.children = [StudyArea_description, self.user_preference,self.imageProcessing_Button]
# Link widget to file selector function
self.user_preference.observe(showFileSelector, names='index')
def showLocationSelector(button):
"""
Function to show or hide folder selector
args:
None
returns:
None
"""
if button['new']:
download_settings.children = [self.files_to_download, self.download_location, self.folder_selector]
else:
download_settings.children = [self.files_to_download, self.download_location, self.folder_name]
# Link widget to folder selector function
self.download_location.observe(showLocationSelector, names='index')
def pointOptions_selector(button):
"""
            Function to show or hide the coordinate entry widgets for the depth hydrograph point
args:
None
returns:
None
"""
if button['new']:
depth_box.children = [lbl_depth_Plotting,self.point_preference, lbl_coordinates, self.coordinates_textbox,
self.depth_plot_button]
else:
depth_box.children = [lbl_depth_Plotting,self.point_preference, self.depth_plot_button]
# Link widget to folder selector function
self.point_preference.observe(pointOptions_selector, names='index')
def indexSelection(change):
if self.water_indices.value =='DSWE':
self.threshold_value.min = 1.0
self.threshold_value.max = 4.0
self.threshold_value.step = 1.0
self.threshold_value.value = 4.0
self.threshold_dropdown.options = ['Simple']
else:
self.threshold_value.min = -1.0
self.threshold_value.max = 1.0
self.threshold_value.step = 0.050
self.threshold_value.value = 0.0
self.threshold_dropdown.options = ['Simple','Otsu']
self.water_indices.observe(indexSelection, 'value')
def thresholdSelection(change):
if self.threshold_dropdown.value =='Otsu':
self.threshold_value.disabled = True
else:
self.threshold_value.disabled = False
# Link widget to threshold method selection function
self.threshold_dropdown.observe(thresholdSelection, 'value')
def depthMethodSelection(change):
if self.elev_Methods.value == 'FwDET':
self.elevData_options.disabled = False
else:
self.elevData_options.disabled = True
elev_Box.children = [self.Depths_Button, self.elev_Methods, self.elevData_options]
# Link widget to threshold method selection function
self.elev_Methods.observe(depthMethodSelection, 'value')
def demSelection(change):
if self.elevData_options.value == 'User DEM':
folder = ee.data.getAssetRoots()[0]['id']
assets = ee.data.listAssets({'parent':folder})
# filter only image assets
filtered_asset = list(filter(lambda asset: asset['type'] == 'IMAGE', assets['assets']))
# create a list of image assets
list_assets = [sub['id'] for sub in filtered_asset]
elev_Box.children = [self.Depths_Button, self.elev_Methods, self.elevData_options, self.userDEM]
self.userDEM.options = list_assets # set dropdown options to list of image assets
else:
elev_Box.children = [self.Depths_Button, self.elev_Methods, self.elevData_options]
# Link widget to function
self.elevData_options.observe(demSelection, 'value')
#****************************************************************************************************
# Full UI
#***************************************************************************************************
tab_children = [imageProcessing_tab, Extraction_tab, Spatial_Analysis_Tab, plot_stats_tab, download_tab]
tab = ipw.Tab()
tab.children = tab_children
# changing the title of the first and second window
tab.set_title(0, 'Image Processing')
tab.set_title(1, 'Water Extraction')
tab.set_title(2, 'Spatial Analysis')
tab.set_title(3, 'Plotting & Stats')
tab.set_title(4, 'Download & Export')
# Plotting outputs and feedback to user
#***************************************************************************************************
self.feedback = ipw.Output()
# OUTPUTS = VBox([self.feedback])
# create map instance
self.Map = geemap.Map()
self.Map.add_basemap('HYBRID')
GUI = VBox([Title_text,tab,self.Map])
display(GUI)
self.fig = go.FigureWidget()
self.fig.update_layout(title = '<b>Surface Water Area Hydrograph<b>',
title_x = 0.5, title_y = 0.90, title_font=dict(family="Arial",size=24),
template = "plotly_white",
xaxis =dict(title ='<b>Date<b>', linecolor = 'Black'),
yaxis=dict(title='Area (sq m)', linecolor = 'Black'),
font_family="Arial")
# display plotly figure
display(self.fig)
display(self.feedback)
# Widget-Function connections
self.imageProcessing_Button.on_click(self.process_images)
self.extractWater_Button.on_click(self.Water_Extraction)
self.plot_button.on_click(self.plot_areas)
self.save_data_button.on_click(self.save_data)
self.Depths_Button.on_click(self.calc_depths)
        self.download_button.on_click(self.download_images)
self.water_Frequency_button.on_click(self.water_frequency)
self.depth_plot_button.on_click(self.plot_depths)
# Function to clip images
def clipImages(self,img):
"""
Function to clip images
args:
Image
returns:
Clipped image
"""
orig = img
clipped_image = img.clip(site).copyProperties(orig, orig.propertyNames())
return clipped_image
def process_images(self, b):
"""
Function to retrieve and process satellite images from GEE platform
args:
None
returns:
None
"""
with self.feedback:
self.feedback.clear_output()
try:
global filtered_Collection
global filtered_landsat
global clipped_images
global imageType
global dates
global site
global img_scale
global file_list
global StartDate
global EndDate
self.fig.data = [] # clear existing plot
self.lbl_RetrievedImages.value = 'Processing....'
cloud_thresh = self.cloud_threshold.value
# Define study area based on user preference
if self.user_preference.index == 1:
file = self.file_selector.selected
site = load_boundary(file)
self.Map.addLayer(site, {}, 'AOI')
self.Map.center_object(site, 15)
# Map.zoom_to_object(site)
# Map.center_object(site, 15)
else:
site = ee.FeatureCollection(self.Map.draw_last_feature)
# get widget values
imageType = self.Platform_dropdown.value
filterType = self.filter_dropdown.value
StartDate = ee.Date.fromYMD(self.start_date.value.year,self.start_date.value.month,self.start_date.value.day)
EndDate = ee.Date.fromYMD(self.end_date.value.year,self.end_date.value.month,self.end_date.value.day)
boxcar = ee.Kernel.circle(**{'radius':3, 'units':'pixels', 'normalize':True})
def filtr(img):
return img.convolve(boxcar)
# filter image collection based on date, study area and cloud threshold(depends of datatype)
if imageType == 'Landsat':
filtered_landsat = load_Landsat(site, StartDate, EndDate, cloud_thresh)
filtered_Collection = filtered_landsat.map(maskLandsatclouds)
elif imageType == 'Sentinel-2':
Collection_before = load_Sentinel2(site, StartDate, EndDate, cloud_thresh)
filtered_Collection = Collection_before.map(maskS2clouds)
elif imageType == 'Sentinel-1':
Collection_before = load_Sentinel1(site, StartDate, EndDate)
# apply speckle filter algorithm or smoothing
if filterType == 'Gamma MAP':
corrected_Collection = Collection_before.map(slope_correction)
filtered_Collection = corrected_Collection.map(hf.gamma_map)
elif filterType == 'Refined-Lee':
corrected_Collection = Collection_before.map(slope_correction)
filtered_Collection = corrected_Collection.map(hf.refined_lee)
elif filterType == 'Perona-Malik':
corrected_Collection = Collection_before.map(slope_correction)
filtered_Collection = corrected_Collection.map(hf.perona_malik)
elif filterType == 'P-median':
corrected_Collection = Collection_before.map(slope_correction)
filtered_Collection = corrected_Collection.map(hf.p_median)
elif filterType == 'Boxcar Convolution':
corrected_Collection = Collection_before.map(slope_correction)
filtered_Collection = corrected_Collection.map(filtr)
elif filterType == 'Lee Sigma':
# corrected_Collection = Collection_before.map(ut.slope_correction) # slope correction before lee_sigma fails
filtered_Collection = Collection_before.map(hf.lee_sigma)
elif imageType == 'USDA NAIP':
filtered_Collection = load_NAIP(site, StartDate, EndDate)
# Clip images to study area
clipped_images = filtered_Collection.map(self.clipImages)
# Mosaic same day images
clipped_images = tools.imagecollection.mosaicSameDay(clipped_images)
# Add first image in collection to Map
first_image = clipped_images.first()
if imageType == 'Sentinel-1':
img_scale = first_image.select(0).projection().nominalScale().getInfo()
else:
bandNames = first_image.bandNames().getInfo()
img_scale = first_image.select(str(bandNames[0])).projection().nominalScale().getInfo()
self.Map.addLayer(clipped_images.first(), visParams, imageType)
# Get no. of processed images
no_of_images = filtered_Collection.size().getInfo()
# Display number of images
self.lbl_RetrievedImages.value = str(no_of_images)
# List of files
file_list = filtered_Collection.aggregate_array('system:id').getInfo()
# display list of files
self.lst_files.options = file_list
self.extractWater_Button.disabled = False # enable the water extraction button
self.download_button.disabled = False
except Exception as e:
print(e)
print('An error occurred during processing.')
def Water_Extraction(self, b):
"""
Function to extract surface water from satellite images
args:
None
returns:
None
"""
with self.feedback:
self.feedback.clear_output()
try:
global water_images
global dswe_images
global waterMasks
global index_images
global dswe_viz
color_palette = self.index_color.value
# Function to extract water using NDWI or MNDWI from multispectral images
def water_index(img):
"""
Function to extract surface water from Landsat and Sentinel-2 images using
water extraction indices: NDWI, MNDWI, and AWEI
args:
Image
returns:
Image with water mask
"""
index_image = ee.Image(1)
if self.water_indices.value == 'NDWI':
if imageType == 'Landsat' or imageType == 'Sentinel-2':
bands = ['green', 'nir']
elif imageType == 'USDA NAIP':
bands = ['G', 'N']
index_image = img.normalizedDifference(bands).rename('waterIndex')\
.copyProperties(img, ['system:time_start'])
elif self.water_indices.value == 'MNDWI':
if imageType == 'Landsat':
bands = ['green', 'swir1']
index_image = img.normalizedDifference(bands).rename('waterIndex')\
.copyProperties(img, ['system:time_start'])
elif imageType == 'Sentinel-2':
# Resample the swir bands from 20m to 10m
resampling_bands = img.select(['swir1','swir2'])
img = img.resample('bilinear').reproject(**
{'crs': resampling_bands.projection().crs(),
'scale':10
})
bands = ['green', 'swir1']
index_image = img.normalizedDifference(bands).rename('waterIndex')\
.copyProperties(img, ['system:time_start'])
elif self.water_indices.value == 'AWEInsh':
index_image = img.expression(
'(4 * (GREEN - SWIR1)) - ((0.25 * NIR)+(2.75 * SWIR2))', {
'NIR': img.select('nir'),
'GREEN': img.select('green'),
'SWIR1': img.select('swir1'),
'SWIR2': img.select('swir2')
}).rename('waterIndex').copyProperties(img, ['system:time_start'])
elif self.water_indices.value == 'AWEIsh':
index_image = img.expression(
'(BLUE + (2.5 * GREEN) - (1.5 * (NIR + SWIR1)) - (0.25 * SWIR2))', {
'BLUE':img.select('blue'),
'NIR': img.select('nir'),
'GREEN': img.select('green'),
'SWIR1': img.select('swir1'),
'SWIR2': img.select('swir2')
}).rename('waterIndex').copyProperties(img, ['system:time_start'])
return img.addBands(index_image)
def water_thresholding(img):
# Compute threshold
if self.threshold_dropdown.value == 'Simple': # Simple value no dynamic thresholding
nd_threshold = self.threshold_value.value
water_image = img.select('waterIndex').gt(nd_threshold).rename('water')\
.copyProperties(img, ['system:time_start'])
elif self.threshold_dropdown.value == 'Otsu':
reducers = ee.Reducer.histogram(255,2).combine(reducer2=ee.Reducer.mean(), sharedInputs=True)\
.combine(reducer2=ee.Reducer.variance(), sharedInputs= True)
histogram = img.select('waterIndex').reduceRegion(
reducer=reducers,
geometry=site.geometry(),
scale=img_scale,
bestEffort=True)
                        nd_threshold = otsu(histogram.get('waterIndex_histogram')) # get threshold from the water index histogram
                        water_image = img.select('waterIndex').gt(nd_threshold).rename('water')
                        water_image = water_image.copyProperties(img, ['system:time_start'])
return img.addBands(water_image)
# Function to extract water from SAR Sentinel 1 images
def add_S1_waterMask(band):
"""
                    Function to extract surface water from Sentinel-1 images using the Otsu algorithm
args:
Image
returns:
Image with water mask
"""
def wrap(img):
reducers = ee.Reducer.histogram(255,2).combine(reducer2=ee.Reducer.mean(), sharedInputs=True)\
.combine(reducer2=ee.Reducer.variance(), sharedInputs= True)
histogram = img.select(band).reduceRegion(
reducer=reducers,
geometry=site.geometry(),
scale=img_scale,
bestEffort=True)
# Calculate threshold via function otsu (see before)
threshold = otsu(histogram.get(band+'_histogram'))
# get watermask
waterMask = img.select(band).lt(threshold).rename('water')
# waterMask = waterMask.updateMask(waterMask) #Remove all pixels equal to 0
return img.addBands(waterMask)
return wrap
def maskDSWE_Water(img):
nd_threshold = self.threshold_value.value+1
waterImage = img.select('dswe').rename('water')
water = waterImage.gt(0).And(waterImage.lt(nd_threshold)).copyProperties(img, ['system:time_start'])
return img.addBands(water)
def mask_Water(img):
waterMask = img.select('water').selfMask().rename('waterMask').copyProperties(img, ['system:time_start'])
return img.addBands(waterMask)
if imageType == 'Sentinel-1':
band = self.water_indices.value
water_images = clipped_images.map(add_S1_waterMask(band)).select('water')
waterMasks = water_images.map(mask_Water)
visParams = {'min': 0,'max': 1, 'palette': color_palette}
self.Map.addLayer(waterMasks.select('waterMask').max(), visParams, 'Water')
elif imageType == 'Landsat':
if self.water_indices.value == 'DSWE':
dem = ee.Image('USGS/SRTMGL1_003')
dswe_images = DSWE(filtered_landsat, dem, site)
# Viz parameters: classes: 0, 1, 2, 3, 4, 9
dswe_viz = {'min':0, 'max': 9, 'palette': ['000000', '002ba1', '6287ec', '77b800', 'c1bdb6',
'000000', '000000', '000000', '000000', 'ffffff']}
water_images = dswe_images.map(maskDSWE_Water)
waterMasks = water_images.map(mask_Water)
# Map.addLayer(dswe_images.max(), dswe_viz, 'DSWE')
else:
index_images = clipped_images.map(water_index)
water_images = index_images.map(water_thresholding)
waterMasks = water_images.map(mask_Water)
self.Map.addLayer(waterMasks.select('waterMask').max(), {'palette': color_palette}, 'Water')
else:
index_images = clipped_images.map(water_index)
water_images = index_images.map(water_thresholding)
waterMasks = water_images.map(mask_Water)
self.Map.addLayer(waterMasks.select('waterMask').max(), {'palette': color_palette}, 'Water')
self.water_Frequency_button.disabled = False
self.Depths_Button.disabled = False
# self.elevData_options.disabled = False
self.elev_Methods.disabled = False
self.plot_button.disabled = False
except Exception as e:
print(e)
print('An error occurred during computation.')
def calc_area(self, img):
"""
Function to calculate area of water pixels
args:
Water mask image
returns:
Water image with calculated total area of water pixels
"""
global unit_symbol
unit = self.area_unit.value
divisor = 1
if unit =='Square Km':
divisor = 1e6
unit_symbol = 'Sq km'
elif unit =='Hectares':
divisor = 1e4
unit_symbol = 'Ha'
elif unit =='Square m':
divisor = 1
unit_symbol = 'Sq m'
else:
divisor = 4047
unit_symbol = 'acre'
pixel_area = img.select('waterMask').multiply(ee.Image.pixelArea()).divide(divisor)
img_area = pixel_area.reduceRegion(**{
'geometry': site.geometry(),
'reducer': ee.Reducer.sum(),
'scale': img_scale,
'maxPixels': 1e13
})
return img.set({'water_area': img_area})
def plot_areas(self, b):
"""
        Function to plot a time series of the calculated water area for each water image
        and to display the corresponding image on the map when a point on the graph is clicked
args:
None
returns:
None
"""
with self.feedback:
self.feedback.clear_output()
try:
global df
global save_water_data
save_water_data = True
# Compute water areas
water_areas = waterMasks.map(self.calc_area)
water_stats = water_areas.aggregate_array('water_area').getInfo()
dates = waterMasks.aggregate_array('system:time_start')\
.map(lambda d: ee.Date(d).format('YYYY-MM-dd')).getInfo()
dates_lst = [datetime.strptime(i, '%Y-%m-%d') for i in dates]
y = [item.get('waterMask') for item in water_stats]
df = pd.DataFrame(list(zip(dates_lst,y)), columns=['Date','Area'])
self.fig.data = []
self.fig.add_trace(go.Scatter(x=df['Date'], y=df['Area'], name='Water Hydrograph',
mode='lines+markers', line=dict(dash = 'solid', color ='Blue', width = 0.5)))
self.fig.layout.title = '<b>Surface Water Area Hydrograph<b>'
self.fig.layout.titlefont = dict(family="Arial",size=24)
self.fig.layout.title.x = 0.5
self.fig.layout.title.y = 0.9
self.fig.layout.yaxis.title = 'Area ('+unit_symbol+')'
scatter = self.fig.data[0] # set figure data to scatter for click function
color_palette = self.index_color.value
max_Area_value = df['Area'].max()
min_Area_value = df['Area'].min()
avg_Area_value = df['Area'].mean()
self.lbl_Max_Area.value = str(round(max_Area_value, 3))
self.lbl_Min_Area.value = str(round(min_Area_value, 3))
self.lbl_Avg_Area.value = str(round(avg_Area_value, 3))
# Function to select and show images on clicking the graph
def update_point(trace, points, selector):
global wImage
global selected_sat
date = df['Date'].iloc[points.point_inds].values[0]
date = pd.to_datetime(str(date))
selected_image = waterMasks.closest(date).first()
wImage = selected_image.select('waterMask')
self.Map.addLayer(selected_image, visParams, imageType)
if self.water_indices.value == 'DSWE':
selected_DWSE = dswe_images.closest(date).first()
self.Map.addLayer(selected_DWSE.select('dswe'), dswe_viz, 'DSWE')
# Map.addLayer(wImage, {'palette': color_palette}, 'Water')
self.Map.addLayer(wImage, {'palette': color_palette}, 'Water')
scatter.on_click(update_point)
except Exception as e:
print(e)
print('An error occurred during computation.')
def save_data(self, b):
"""
Function to save time series to CSV file
args:
None
returns:
None
"""
with self.feedback:
self.feedback.clear_output()
try:
if save_water_data==True:
filename = self.file_selector1.selected
water_df = df
water_df = water_df.rename(columns={'Area':'Area, '+unit_symbol})
water_df.to_csv(filename, index=False)
elif save_water_data==False:
filename = self.file_selector1.selected
filtered_df = depths_df.drop(columns=['reducer'])
filtered_df = filtered_df[['date','Depth']]
filtered_df = filtered_df.rename(columns={'Depth':'Depth, m'})
filtered_df.to_csv(filename, index=False)
except Exception as e:
print(e)
print('Data save error')
    def download_images(self, b):
with self.feedback:
self.feedback.clear_output()
try:
path = self.folder_selector.selected_path
folder = self.folder_name.value
name_Pattern = '{sat}_{system_date}_{imgType}'
date_pattern = 'YYYY-MM-dd'
extra = dict(sat=imageType, imgType = 'Water')
if self.files_to_download.index == 0:
download_images = clipped_images
extra = dict(sat=imageType, imgType = 'Satellite')
elif self.files_to_download.index == 1:
download_images = water_images
extra = dict(sat=imageType, imgType = 'Water')
elif self.files_to_download.index == 2:
download_images = ee.ImageCollection([water_occurence])
name_Pattern = '{sat}_{start}_{end}_{imgType}'
extra = dict(sat=imageType, imgType = 'Frequency', start=self.start_date.value.strftime("%x"),
end=self.end_date.value.strftime("%x"))
elif self.files_to_download.index == 3:
download_images = depth_maps
extra = dict(sat=imageType, imgType = 'Depth')
else:
download_images = dswe_images
extra = dict(sat=imageType, imgType = 'DSWE')
if self.download_location.index == 0:
task = geetools.batch.Export.imagecollection.toDrive(
collection = download_images,
folder = folder,
region = site.geometry(),
namePattern = name_Pattern,
scale = img_scale,
datePattern=date_pattern,
extra = extra,
verbose=True,
maxPixels = int(1e13))
task
else:
export_image_collection_to_local(download_images,path,name_Pattern,date_pattern,extra,img_scale,region=site)
print('Download complete!!')
except Exception as e:
print(e)
print('Download could not be completed')
def water_frequency(self, b):
with self.feedback:
self.feedback.clear_output()
try:
global water_frequency
global water_occurence
water_occurence = water_images.select('water').reduce(ee.Reducer.sum())
water_frequency = water_occurence.divide(water_images.size()).multiply(100)
Max_Water_Map = waterMasks.select('waterMask').max()
water_frequency = water_frequency.updateMask(Max_Water_Map)
visParams = {'min':0, 'max':100, 'palette': ['orange','yellow','blue','darkblue']}
self.Map.addLayer(water_frequency, visParams, 'Water Frequency')
colors = visParams['palette']
vmin = visParams['min']
vmax = visParams['max']
self.Map.add_colorbar_branca(colors=colors, vmin=vmin, vmax=vmax, layer_name="Water Frequency")
except Exception as e:
print(e)
print('Frequency computation could not be completed')
def get_dates(self, col):
dates = ee.List(col.toList(col.size()).map(lambda img: ee.Image(img).date().format()))
return dates
# Function to count water pixels for each image
def CountWaterPixels(self, img):
count = img.select('waterMask').reduceRegion(ee.Reducer.sum(), site).values().get(0)
return img.set({'pixel_count': count})
def calc_depths(self, b):
with self.feedback:
self.feedback.clear_output()
try:
global depth_maps
global filtered_Water_Images
global depthParams
if self.elevData_options.value =='NED':
demSource = 'USGS/NED'
band = 'elevation'
elif self.elevData_options.value =='SRTM':
demSource = 'USGS/SRTMGL1_003'
band = 'elevation'
else:
demSource = str(self.userDEM.value)
band = 'b1'
dem = ee.Image(demSource).select(band).clip(site)
# get water pixel count per image
countImages = waterMasks.map(self.CountWaterPixels)
# Filter out only images containing water pixels to avoid error in depth estimation
filtered_Water_Images = countImages.filter(ee.Filter.gt('pixel_count', 0))
if self.elev_Methods.value == 'Random Forest':
if self.rf_ee_classifier is not None:
collection_with_depth_variables = waterMasks.map(add_depth_variables)
depth_maps = collection_with_depth_variables.map(RF_Depth_Estimate(self.rf_ee_classifier))
else:
filename = 'ML_models/Landsat_RF_model.sav'
feature_names = ['mod_green','mod_swir1']
loaded_model = pickle.load(open(filename, 'rb'))
trees = ml.rf_to_strings(loaded_model,feature_names)
self.rf_ee_classifier = ml.strings_to_classifier(trees)
collection_with_depth_variables = waterMasks.map(add_depth_variables)
depth_maps = collection_with_depth_variables.map(RF_Depth_Estimate(self.rf_ee_classifier))
elif self.elev_Methods.value == 'Mod_Stumpf':
collection_with_depth_variables = waterMasks.map(add_depth_variables)
depth_maps = collection_with_depth_variables.map(Mod_Stumpf_Depth_Estimate)
elif self.elev_Methods.value == 'Mod_Lyzenga':
collection_with_depth_variables = waterMasks.map(add_depth_variables)
depth_maps = collection_with_depth_variables.map(Mod_Lyzenga_Depth_Estimate)
else:
depth_maps = filtered_Water_Images.map(FwDET_Depth_Estimate(dem))
max_depth_map = depth_maps.select('Depth').max()
maxVal = max_depth_map.reduceRegion(ee.Reducer.max(),site, img_scale).values().get(0).getInfo()
depthParams = {'min':0, 'max':round(maxVal,1), 'palette': ['1400f7','00f4e8','f4f000','f40000','960424']}
#['006633', 'E5FFCC', '662A00', 'D8D8D8', 'F5F5F5']
self.Map.addLayer(max_depth_map, depthParams, 'Depth')
colors = depthParams['palette']
self.Map.add_colorbar_branca(colors=colors, vmin=0, vmax=round(maxVal,1), layer_name='Depth')
self.depth_plot_button.disabled = False # enable depth plotting
except Exception as e:
print(e)
def plot_depths(self, b):
with self.feedback:
self.feedback.clear_output()
try:
global depths_df
global save_water_data
save_water_data = False
if self.point_preference.index == 0:
point = ee.FeatureCollection(self.Map.draw_last_feature)
else:
coordinates = self.coordinates_textbox.value
xy = coordinates.split(',')
floated_xy = [float(i) for i in xy]
point = ee.Geometry.Point(floated_xy)
self.Map.addLayer(point, {}, 'Depth Point')
ts_1 = depth_maps.getTimeSeriesByRegion(geometry = point,
bands = ['Depth'],
reducer = [ee.Reducer.mean()],
scale = img_scale)
depths_df = geemap.ee_to_pandas(ts_1)
depths_df[depths_df == -9999] = np.nan
depths_df = depths_df.fillna(0)
depths_df['date'] = pd.to_datetime(depths_df['date'],infer_datetime_format = True)
self.fig.data = []
self.fig.add_trace(go.Scatter(x=depths_df['date'], y=depths_df['Depth'], name='Depth Hydrograph',
mode='lines+markers', line=dict(dash = 'solid', color ='Red', width = 0.5)))
self.fig.layout.yaxis.title = '<b>Depth (m)<b>'
self.fig.layout.title = '<b>Water Depth Hydrograph<b>'
self.fig.layout.titlefont = dict(family="Arial",size=24)
self.fig.layout.title.x = 0.5
self.fig.layout.title.y = 0.9
scatter = self.fig.data[0] # set figure data to scatter for click function
max_Depth_value = depths_df['Depth'].max()
min_Depth_value = depths_df['Depth'].min()
avg_Depth_value = depths_df['Depth'].mean()
self.lbl_Max_Depth.value = str(round(max_Depth_value, 3))
self.lbl_Min_Depth.value = str(round(min_Depth_value, 3))
self.lbl_Avg_Depth.value = str(round(avg_Depth_value, 3))
color_palette = self.index_color.value
# Function to select and show water image on clicking the graph
def update_point(trace, points, selector):
global wImage
global selected_sat
date = depths_df['date'].iloc[points.point_inds].values[0]
date = pd.to_datetime(str(date))
selected_image = depth_maps.closest(date)
wImage = selected_image.select('waterMask')
# selected_sat = clipped_images.closest(date).first()
depthImage = selected_image.select('Depth')
self.Map.addLayer(selected_image, visParams, imageType)
self.Map.addLayer(wImage, {'palette': color_palette}, 'Water')
self.Map.addLayer(depthImage, depthParams, 'Depth')
scatter.on_click(update_point)
except Exception as e:
print(e)
print('Please draw a point or enter coordinates')
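# Hedged usage sketch (not part of the original file): the class is intended for
# a Jupyter notebook in which the Earth Engine API has already been initialized;
# instantiating it builds and displays the GUI, the map and the plot area.
#     ee.Initialize()
#     toolbox = Toolbox()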
|
77141
|
from __future__ import absolute_import, division, print_function, unicode_literals
import fractions
import math
from six.moves import xrange
# http://stackoverflow.com/questions/4798654/modular-multiplicative-inverse-function-in-python
# from https://en.wikibooks.org/wiki/Algorithm_Implementation/Mathematics/Extended_Euclidean_algorithm
def egcd(a, b):
x, y, u, v = 0, 1, 1, 0
while a:
q, r = b // a, b % a
m, n = x - u*q, y - v*q
b, a, x, y, u, v = a, r, u, v, m, n
return b, x, y
def modinv(a, m):
g, x, y = egcd(a, m)
if g == 1:
return x % m
raise Exception('modular inverse does not exist')
def largest_invertible(x):
"""In the ring Mod(x), returns the invertible number nearest to x / 2, and
its inverse."""
if x >= 5:
for i in xrange(int(x / 2), 1, -1):
try:
ii = (i if i < (x / 2) else x - i)
return ii, modinv(ii, x)
            except Exception:
pass
return 1, 1
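# Minimal usage sketch (my addition, not part of the original module):
# modinv(a, m) returns the multiplicative inverse of a modulo m, and
# largest_invertible(x) returns an invertible element near x / 2 together
# with its inverse.
if __name__ == '__main__':
    assert modinv(3, 11) == 4                  # 3 * 4 = 12 = 1 (mod 11)
    assert largest_invertible(10) == (3, 7)    # 3 * 7 = 21 = 1 (mod 10)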
|
77165
|
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from app import errors
if TYPE_CHECKING:
from app.database.database import Database
class Autoredeem:
def __init__(self, db: "Database"):
self.db = db
async def get(
self, guild_id: int, user_id: int
) -> Optional[Dict[str, Any]]:
return await self.db.fetchrow(
"""SELECT * FROM autoredeem
WHERE guild_id=$1 AND user_id=$2""",
guild_id,
user_id,
)
async def find_valid(self, guild_id: int) -> List[Dict[str, Any]]:
return await self.db.fetch(
"""SELECT * FROM autoredeem
WHERE guild_id=$1
AND EXISTS (
SELECT * FROM users
WHERE id=user_id
AND credits >= 3
)
ORDER BY enabled_on DESC""",
guild_id,
)
async def get_user_guilds(
self,
user_id: int,
) -> List[Dict[str, Any]]:
return await self.db.fetch(
"""SELECT * FROM autoredeem WHERE user_id=$1""",
user_id,
)
async def create(self, guild_id: int, user_id: int):
if await self.get(guild_id, user_id):
raise errors.AutoRedeemAlreadyOn()
await self.db.execute(
"""INSERT INTO autoredeem (guild_id, user_id)
VALUES ($1, $2)""",
guild_id,
user_id,
)
async def delete(self, guild_id: int, user_id: int):
await self.db.execute(
"""DELETE FROM autoredeem
WHERE guild_id=$1
AND user_id=$2""",
guild_id,
user_id,
)
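# Hedged usage sketch (not part of the original file; `db` is assumed to be an
# instance of the project's Database wrapper, and every call is a coroutine):
#     ar = Autoredeem(db)
#     await ar.create(guild_id, user_id)     # raises AutoRedeemAlreadyOn if a row already exists
#     rows = await ar.find_valid(guild_id)   # members of the guild with at least 3 credits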
|
77192
|
import os,sys
from scipy.stats.stats import pearsonr
import numpy as np
try:
bacteriaF = sys.argv[1]
phageF = sys.argv[2]
except:
sys.exit(sys.argv[0] + " <bacterial file> <phage file>")
bact={}
with open(bacteriaF, 'r') as bin:
l=bin.readline()
bactheaders = l.strip().split("\t")
for l in bin:
p=l.strip().split("\t")
taxonomy=p.pop()
bact[p[0]]=map(float, p[1:])
phage={}
with open(phageF, 'r') as bin:
l=bin.readline()
phageheaders = l.strip().split("\t")
# check that the columns are in the same order (hopefully sorted?)
reorderCols = False
for i in xrange(len(phageheaders)):
if phageheaders[i] != bactheaders[i]:
reorderCols = True
colorder=[]
if reorderCols:
for i in xrange(len(bactheaders)):
if bactheaders[i] not in phageheaders:
sys.exit('FATAL column ' + bactheaders[i] + ' was not found in the phages')
colorder.append(phageheaders.index(bactheaders[i]))
for l in bin:
p=l.strip().split("\t")
taxonomy=p.pop()
if reorderCols:
            temp=[]
            for i in xrange(len(colorder)):
                temp.append(p[colorder[i]])
            p=temp
phage[p[0]]=map(float, p[1:])
allbact = bact.keys()
allbact.sort()
sys.stderr.write("Found " + str(len(phage)) + " phages\n")
sys.stderr.write("Found " + str(len(allbact)) + " bacteria\n")
## calculate pearson correlations
with open("pearson_ncnc.tsv", 'w') as out:
out.write("Phage\tBacteria\tDistance\n")
for ph in phage:
for ba in allbact:
pearson, p = pearsonr(phage[ph], bact[ba])
            if np.isnan(pearson):
pearson = 0
out.write(ph + "\t" + ba + "\t" + str(pearson) + "\n")
|
77218
|
import sys
sys.path.append('../')
import tensorflow as tf
from google.cloud import storage
from tempfile import TemporaryDirectory
import os
from mreserve.lowercase_encoder import get_encoder
import argparse
from PIL import Image
import numpy as np
from io import BytesIO
import random
encoder = get_encoder()
class GCSTFRecordWriter(object):
def __init__(self, fn, auto_close=False, options=None):
"""
        Write serialized examples to a TFRecord file.
        If `fn` starts with 'gs://', records are written to a local temporary file
        that is uploaded to Google Cloud Storage when close() is called.
        :param fn: output filename (local path or gs:// URI)
        :param auto_close: if True, close (and upload) when leaving a "with" block
        :param options: passed through to tf.io.TFRecordWriter
"""
self.fn = fn
if fn.startswith('gs://'):
self.gclient = storage.Client()
self.storage_dir = TemporaryDirectory()
self.writer = tf.io.TFRecordWriter(os.path.join(self.storage_dir.name, 'temp.tfrecord'), options=options)
self.bucket_name, self.file_name = self.fn.split('gs://', 1)[1].split('/', 1)
else:
self.gclient = None
self.bucket_name = None
self.file_name = None
self.storage_dir = None
self.writer = tf.io.TFRecordWriter(fn, options=options)
self.auto_close=auto_close
def write(self, x):
self.writer.write(x)
def close(self):
self.writer.close()
if self.gclient is not None:
print("UPLOADING!!!!!", flush=True)
bucket = self.gclient.get_bucket(self.bucket_name)
blob = bucket.blob(self.file_name)
blob.upload_from_filename(os.path.join(self.storage_dir.name, 'temp.tfrecord'))
self.storage_dir.cleanup()
def __enter__(self):
# Called when entering "with" context.
return self
def __exit__(self, *_):
# Called when exiting "with" context.
        # Upload the temporary file to GCS if needed
if self.auto_close:
print("CALLING CLOSE")
self.close()
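# Hedged usage sketch (not part of the original file): records are written to a
# local file, or to a temporary file that is uploaded to GCS on close() when the
# path starts with 'gs://'. `example` below is a hypothetical tf.train.Example.
#     with GCSTFRecordWriter('gs://some-bucket/shard0.tfrecord', auto_close=True) as writer:
#         writer.write(example.SerializeToString())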
def int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def int64_list_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def bytes_list_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
def float_list_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
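# Illustrative sketch (my addition): the helpers above wrap raw values into
# tf.train.Feature protos so they can be assembled into a tf.train.Example;
# the feature names here are hypothetical.
#     feature = {
#         'image/encoded': bytes_feature(jpg_string),
#         'image/height': int64_feature(height),
#         'labels': int64_list_feature(label_ids),
#     }
#     example = tf.train.Example(features=tf.train.Features(feature=feature))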
def get_size_for_resize(image_size, shorter_size_trg=384, longer_size_max=512):
"""
Gets a new size for the image. We will try to make it such that the bigger size is less than
longer_size_max. However, we won't resize it if its shortest side is <= shorter_size_trg.
:param image_size:
:param shorter_size_trg:
:param longer_size_max:
:return:
"""
w, h = image_size
size = shorter_size_trg # Try [size, size]
if min(w, h) <= size:
return w, h
min_original_size = float(min((w, h)))
max_original_size = float(max((w, h)))
if max_original_size / min_original_size * size > longer_size_max:
size = int(round(longer_size_max * min_original_size / max_original_size))
if (w <= h and w == size) or (h <= w and h == size):
return w, h
if w < h:
ow = size
oh = int(size * h / w)
else:
oh = size
ow = int(size * w / h)
return ow, oh
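# Worked example of the rule above (my addition): for a 1000x500 image the
# shorter side (500) exceeds shorter_size_trg (384), but scaling it down to 384
# would push the longer side past longer_size_max (512), so the longer side is
# capped at 512 instead:
#     get_size_for_resize((1000, 500))  ->  (512, 256)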
def resize_image(image, shorter_size_trg=384, longer_size_max=512):
"""
Resize image such that the longer size is <= longer_size_max.
Gets a new size for the image. We will try to make it such that the bigger size is less than
longer_size_max. However, we won't resize it if its shortest side is <= shorter_size_trg.
:param image:
:param shorter_size_trg:
:param longer_size_max:
"""
trg_size = get_size_for_resize(image.size, shorter_size_trg=shorter_size_trg,
longer_size_max=longer_size_max)
if trg_size != image.size:
return image.resize(trg_size, resample=Image.BICUBIC)
return image
def pil_image_to_jpgstring(image: Image.Image, quality=95):
"""
:param image: PIL image
:return: it, as a jpg string
"""
with BytesIO() as output:
image.save(output, format='JPEG', quality=quality, optimize=True)
return output.getvalue()
def create_base_parser():
parser = argparse.ArgumentParser(description='SCRAPE!')
parser.add_argument(
'-fold',
dest='fold',
default=0,
type=int,
help='which fold we are on'
)
parser.add_argument(
'-num_folds',
dest='num_folds',
default=1,
type=int,
help='Number of folds (corresponding to both the number of training files and the number of testing files)',
)
parser.add_argument(
'-seed',
dest='seed',
default=1337,
type=int,
help='which seed to use'
)
parser.add_argument(
'-split',
dest='split',
default='train',
type=str,
help='which split to use'
)
parser.add_argument(
'-base_fn',
dest='base_fn',
default='gs://replace_with_your_path/',
type=str,
help='Base filename to use. You can start this with gs:// and we\'ll put it on google cloud.'
)
return parser
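# Hedged usage sketch (my addition; the output-filename pattern is hypothetical):
#     parser = create_base_parser()
#     args = parser.parse_args()
#     out_fn = os.path.join(args.base_fn, '{}_{:04d}of{:04d}.tfrecord'.format(args.split, args.fold, args.num_folds))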
|
77222
|
import os.path as osp
import random
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal, assert_array_equal
from mmaction.core import (ActivityNetLocalization,
average_recall_at_avg_proposals, confusion_matrix,
get_weighted_score, mean_average_precision,
mean_class_accuracy, mmit_mean_average_precision,
pairwise_temporal_iou, top_k_accuracy)
from mmaction.core.evaluation.ava_utils import ava_eval
def gt_confusion_matrix(gt_labels, pred_labels, normalize=None):
"""Calculate the ground truth confusion matrix."""
max_index = max(max(gt_labels), max(pred_labels))
confusion_mat = np.zeros((max_index + 1, max_index + 1), dtype=np.int64)
for gt, pred in zip(gt_labels, pred_labels):
confusion_mat[gt][pred] += 1
del_index = []
for i in range(max_index):
if sum(confusion_mat[i]) == 0 and sum(confusion_mat[:, i]) == 0:
del_index.append(i)
confusion_mat = np.delete(confusion_mat, del_index, axis=0)
confusion_mat = np.delete(confusion_mat, del_index, axis=1)
if normalize is not None:
        confusion_mat = np.array(confusion_mat, dtype=np.float64)
m, n = confusion_mat.shape
if normalize == 'true':
for i in range(m):
s = np.sum(confusion_mat[i], dtype=float)
if s == 0:
continue
confusion_mat[i, :] = confusion_mat[i, :] / s
print(confusion_mat[i, :])
elif normalize == 'pred':
for i in range(n):
s = sum(confusion_mat[:, i])
if s == 0:
continue
confusion_mat[:, i] = confusion_mat[:, i] / s
elif normalize == 'all':
s = np.sum(confusion_mat)
if s != 0:
confusion_mat /= s
return confusion_mat
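# Worked example of the helper above (my addition): with gt_labels = [0, 0, 1]
# and pred_labels = [0, 1, 1] the unnormalized matrix is [[1, 1], [0, 1]]; with
# normalize='true' each row is divided by its sum, giving [[0.5, 0.5], [0, 1]].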
def test_activitynet_localization():
data_prefix = osp.normpath(
osp.join(osp.dirname(__file__), '../data/eval_localization'))
gt_path = osp.join(data_prefix, 'gt.json')
result_path = osp.join(data_prefix, 'result.json')
localization = ActivityNetLocalization(gt_path, result_path)
results = localization.evaluate()
mAP = np.array([
0.71428571, 0.71428571, 0.71428571, 0.6875, 0.6875, 0.59722222,
0.52083333, 0.52083333, 0.52083333, 0.5
])
average_mAP = 0.6177579365079365
assert_array_almost_equal(results[0], mAP)
assert_array_almost_equal(results[1], average_mAP)
def test_ava_detection():
data_prefix = osp.normpath(
osp.join(osp.dirname(__file__), '../data/eval_detection'))
gt_path = osp.join(data_prefix, 'gt.csv')
result_path = osp.join(data_prefix, 'pred.csv')
label_map = osp.join(data_prefix, 'action_list.txt')
# eval bbox
detection = ava_eval(result_path, 'mAP', label_map, gt_path, None)
assert_array_almost_equal(detection['[email protected]'], 0.09385522)
def test_confusion_matrix():
# custom confusion_matrix
gt_labels = [np.int64(random.randint(0, 9)) for _ in range(100)]
pred_labels = np.random.randint(10, size=100, dtype=np.int64)
for normalize in [None, 'true', 'pred', 'all']:
cf_mat = confusion_matrix(pred_labels, gt_labels, normalize)
gt_cf_mat = gt_confusion_matrix(gt_labels, pred_labels, normalize)
assert_array_equal(cf_mat, gt_cf_mat)
with pytest.raises(ValueError):
# normalize must be in ['true', 'pred', 'all', None]
confusion_matrix([1], [1], 'unsupport')
with pytest.raises(TypeError):
# y_pred must be list or np.ndarray
confusion_matrix(0.5, [1])
with pytest.raises(TypeError):
# y_real must be list or np.ndarray
confusion_matrix([1], 0.5)
with pytest.raises(TypeError):
# y_pred dtype must be np.int64
confusion_matrix([0.5], [1])
with pytest.raises(TypeError):
# y_real dtype must be np.int64
confusion_matrix([1], [0.5])
def test_topk():
scores = [
np.array([-0.2203, -0.7538, 1.8789, 0.4451, -0.2526]),
np.array([-0.0413, 0.6366, 1.1155, 0.3484, 0.0395]),
np.array([0.0365, 0.5158, 1.1067, -0.9276, -0.2124]),
np.array([0.6232, 0.9912, -0.8562, 0.0148, 1.6413])
]
# top1 acc
k = (1, )
top1_labels_0 = [3, 1, 1, 1]
top1_labels_25 = [2, 0, 4, 3]
top1_labels_50 = [2, 2, 3, 1]
top1_labels_75 = [2, 2, 2, 3]
top1_labels_100 = [2, 2, 2, 4]
res = top_k_accuracy(scores, top1_labels_0, k)
assert res == [0]
res = top_k_accuracy(scores, top1_labels_25, k)
assert res == [0.25]
res = top_k_accuracy(scores, top1_labels_50, k)
assert res == [0.5]
res = top_k_accuracy(scores, top1_labels_75, k)
assert res == [0.75]
res = top_k_accuracy(scores, top1_labels_100, k)
assert res == [1.0]
# top1 acc, top2 acc
k = (1, 2)
top2_labels_0_100 = [3, 1, 1, 1]
top2_labels_25_75 = [3, 1, 2, 3]
res = top_k_accuracy(scores, top2_labels_0_100, k)
assert res == [0, 1.0]
res = top_k_accuracy(scores, top2_labels_25_75, k)
assert res == [0.25, 0.75]
# top1 acc, top3 acc, top5 acc
k = (1, 3, 5)
top5_labels_0_0_100 = [1, 0, 3, 2]
top5_labels_0_50_100 = [1, 3, 4, 0]
top5_labels_25_75_100 = [2, 3, 0, 2]
res = top_k_accuracy(scores, top5_labels_0_0_100, k)
assert res == [0, 0, 1.0]
res = top_k_accuracy(scores, top5_labels_0_50_100, k)
assert res == [0, 0.5, 1.0]
res = top_k_accuracy(scores, top5_labels_25_75_100, k)
assert res == [0.25, 0.75, 1.0]
def test_mean_class_accuracy():
scores = [
np.array([-0.2203, -0.7538, 1.8789, 0.4451, -0.2526]),
np.array([-0.0413, 0.6366, 1.1155, 0.3484, 0.0395]),
np.array([0.0365, 0.5158, 1.1067, -0.9276, -0.2124]),
np.array([0.6232, 0.9912, -0.8562, 0.0148, 1.6413])
]
# test mean class accuracy in [0, 0.25, 1/3, 0.75, 1.0]
mean_cls_acc_0 = np.int64([1, 4, 0, 2])
mean_cls_acc_25 = np.int64([2, 0, 4, 3])
mean_cls_acc_33 = np.int64([2, 2, 2, 3])
mean_cls_acc_75 = np.int64([4, 2, 2, 4])
mean_cls_acc_100 = np.int64([2, 2, 2, 4])
assert mean_class_accuracy(scores, mean_cls_acc_0) == 0
assert mean_class_accuracy(scores, mean_cls_acc_25) == 0.25
assert mean_class_accuracy(scores, mean_cls_acc_33) == 1 / 3
assert mean_class_accuracy(scores, mean_cls_acc_75) == 0.75
assert mean_class_accuracy(scores, mean_cls_acc_100) == 1.0
def test_mmit_mean_average_precision():
# One sample
y_true = [np.array([0, 0, 1, 1])]
y_scores = [np.array([0.1, 0.4, 0.35, 0.8])]
map = mmit_mean_average_precision(y_scores, y_true)
precision = [2.0 / 3.0, 0.5, 1., 1.]
recall = [1., 0.5, 0.5, 0.]
target = -np.sum(np.diff(recall) * np.array(precision)[:-1])
assert target == map
def test_pairwise_temporal_iou():
target_segments = np.array([])
candidate_segments = np.array([])
with pytest.raises(ValueError):
pairwise_temporal_iou(target_segments, candidate_segments)
# test temporal iou
target_segments = np.array([[1, 2], [2, 3]])
candidate_segments = np.array([[2, 3], [2.5, 3]])
temporal_iou = pairwise_temporal_iou(candidate_segments, target_segments)
assert_array_equal(temporal_iou, [[0, 0], [1, 0.5]])
# test temporal overlap_self
target_segments = np.array([[1, 2], [2, 3]])
candidate_segments = np.array([[2, 3], [2.5, 3]])
temporal_iou, temporal_overlap_self = pairwise_temporal_iou(
candidate_segments, target_segments, calculate_overlap_self=True)
assert_array_equal(temporal_overlap_self, [[0, 0], [1, 1]])
# test temporal overlap_self when candidate_segments is 1d
target_segments = np.array([[1, 2], [2, 3]])
candidate_segments = np.array([2.5, 3])
temporal_iou, temporal_overlap_self = pairwise_temporal_iou(
candidate_segments, target_segments, calculate_overlap_self=True)
assert_array_equal(temporal_overlap_self, [0, 1])
def test_average_recall_at_avg_proposals():
ground_truth1 = {
'v_test1': np.array([[0, 1], [1, 2]]),
'v_test2': np.array([[0, 1], [1, 2]])
}
ground_truth2 = {'v_test1': np.array([[0, 1]])}
proposals1 = {
'v_test1': np.array([[0, 1, 1], [1, 2, 1]]),
'v_test2': np.array([[0, 1, 1], [1, 2, 1]])
}
proposals2 = {
'v_test1': np.array([[10, 11, 0.6], [11, 12, 0.4]]),
'v_test2': np.array([[10, 11, 0.6], [11, 12, 0.4]])
}
proposals3 = {
'v_test1': np.array([[i, i + 1, 1 / (i + 1)] for i in range(100)])
}
recall, avg_recall, proposals_per_video, auc = (
average_recall_at_avg_proposals(ground_truth1, proposals1, 4))
assert_array_equal(recall, [[0.] * 49 + [0.5] * 50 + [1.]] * 10)
assert_array_equal(avg_recall, [0.] * 49 + [0.5] * 50 + [1.])
assert_array_almost_equal(
proposals_per_video, np.arange(0.02, 2.02, 0.02), decimal=10)
assert auc == 25.5
recall, avg_recall, proposals_per_video, auc = (
average_recall_at_avg_proposals(ground_truth1, proposals2, 4))
assert_array_equal(recall, [[0.] * 100] * 10)
assert_array_equal(avg_recall, [0.] * 100)
assert_array_almost_equal(
proposals_per_video, np.arange(0.02, 2.02, 0.02), decimal=10)
assert auc == 0
recall, avg_recall, proposals_per_video, auc = (
average_recall_at_avg_proposals(ground_truth2, proposals3, 100))
assert_array_equal(recall, [[1.] * 100] * 10)
assert_array_equal(avg_recall, ([1.] * 100))
assert_array_almost_equal(
proposals_per_video, np.arange(1, 101, 1), decimal=10)
assert auc == 99.0
def test_get_weighted_score():
score_a = [
np.array([-0.2203, -0.7538, 1.8789, 0.4451, -0.2526]),
np.array([-0.0413, 0.6366, 1.1155, 0.3484, 0.0395]),
np.array([0.0365, 0.5158, 1.1067, -0.9276, -0.2124]),
np.array([0.6232, 0.9912, -0.8562, 0.0148, 1.6413])
]
score_b = [
np.array([-0.0413, 0.6366, 1.1155, 0.3484, 0.0395]),
np.array([0.0365, 0.5158, 1.1067, -0.9276, -0.2124]),
np.array([0.6232, 0.9912, -0.8562, 0.0148, 1.6413]),
np.array([-0.2203, -0.7538, 1.8789, 0.4451, -0.2526])
]
weighted_score = get_weighted_score([score_a], [1])
assert np.all(np.isclose(np.array(score_a), np.array(weighted_score)))
coeff_a, coeff_b = 2., 1.
weighted_score = get_weighted_score([score_a, score_b], [coeff_a, coeff_b])
ground_truth = [
x * coeff_a + y * coeff_b for x, y in zip(score_a, score_b)
]
assert np.all(np.isclose(np.array(ground_truth), np.array(weighted_score)))
def test_mean_average_precision():
def content_for_unittest(scores, labels, result):
gt = mean_average_precision(scores, labels)
assert gt == result
scores = [
np.array([0.1, 0.2, 0.3, 0.4]),
np.array([0.2, 0.3, 0.4, 0.1]),
np.array([0.3, 0.4, 0.1, 0.2]),
np.array([0.4, 0.1, 0.2, 0.3])
]
label1 = np.array([[1, 1, 0, 0], [1, 0, 1, 1], [1, 0, 1, 0], [1, 1, 0, 1]])
result1 = 2 / 3
label2 = np.array([[0, 1, 0, 1], [0, 1, 1, 0], [1, 0, 1, 0], [0, 0, 1, 1]])
result2 = np.mean([0.5, 0.5833333333333333, 0.8055555555555556, 1.0])
content_for_unittest(scores, label1, result1)
content_for_unittest(scores, label2, result2)
|
77224
|
import numpy as np
import sys
import os
import pytest
sys.path.append(os.getcwd())
from knnFeat import _distance
@pytest.mark.success
def test_distance():
a = np.array([0, 0])
b = np.array([3, 4])
expected = 5
actual = _distance(a, b)
assert expected == actual
|
77292
|
class BaseSubmission:
def __init__(self, team_name, player_names):
self.team_name = team_name
self.player_names = player_names
def get_actions(self, obs):
'''
Overview:
You must implement this function.
'''
raise NotImplementedError
|
77404
|
from plex_database.core import db
from plex_database.models.directory import Directory
from plex_database.models.media_item import MediaItem
from peewee import *
class MediaPart(Model):
class Meta:
database = db
db_table = 'media_parts'
media_item = ForeignKeyField(MediaItem, null=True, related_name='media_parts')
directory = ForeignKeyField(Directory, null=True, related_name='media_parts')
hash = CharField(null=True)
open_subtitle_hash = CharField(null=True)
file = CharField(null=True)
index = IntegerField(null=True)
size = BigIntegerField(null=True)
duration = IntegerField(null=True)
created_at = DateTimeField(null=True)
updated_at = DateTimeField(null=True)
deleted_at = DateTimeField(null=True)
extra_data = CharField(null=True)
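# Hedged usage sketch (added; illustrative only). With peewee models, the table can be queried as:
#
#   parts = (MediaPart
#            .select(MediaPart, MediaItem)
#            .join(MediaItem)
#            .where(MediaPart.deleted_at >> None))
#
# i.e. all non-deleted parts joined to their owning media items ('>> None' is peewee's IS NULL).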
|
77487
|
import numpy as np
import time
import math
# from cassie_env import CassieEnv
from cassiemujoco import *
from trajectory.trajectory import CassieTrajectory
import matplotlib.pyplot as plt
from matplotlib import style
from matplotlib.animation import FuncAnimation
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
from IPython import display
def visualise_sim_graph(file_path, freq_of_sim):
traj = np.load(file_path)
# env = CassieEnv("walking")
# csim = CassieSim("./cassie/cassiemujoco/cassie.xml")
# vis = CassieVis(csim, "./cassie/cassiemujoco/cassie.xml")
u = pd_in_t()
# pelvisXYZ = traj.f.qpos_replay[:, 0:3]
# render_state = vis.draw(csim)
# saved_time = traj.f.time[:]
#################Graphing###########
log_time = traj.f.time[:]
y_val = traj.f.qpos_replay[:,2] #z - height
x_data= log_time
y_data = y_val
delt_x = (x_data[1] - x_data[0]) * 1000 #convert seconds to ms
num_frames = math.ceil(len(x_data) / 10)
Writer = animation.writers['ffmpeg']
writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)
output = plt.plot([])
plt.close()
print(output[0])
x = np.linspace(0,2*np.pi, 100)
fig = plt.figure()
lines = plt.plot([])
line = lines[0]
#other setup //set x and y lims
plt.xlim(x_data.min(), x_data.max())
plt.ylim(y_data.min(), y_data.max())
def animate(frame):
#update
x = x_data[:frame*10]
y = y_data[:frame*10]
# y = np.sin(x + 2*np.pi * frame/100)
line.set_data((x,y))
anim = FuncAnimation(fig, animate, frames=num_frames, interval=(1/freq_of_sim * 1000 + (10 * delt_x))) #20 is 50 fps
anim.save('lines.mp4', writer=writer)
# html = display.HTML(video)
# display.display(html)
plt.close()
visualise_sim_graph("./outfile8.npz", 30)
|
77540
|
import pytest
from wemake_python_styleguide.violations.complexity import (
TooDeepAccessViolation,
)
from wemake_python_styleguide.visitors.ast.complexity.access import (
AccessVisitor,
)
# boundary expressions
subscript_access = 'my_matrix[0][0][0][0]'
attribute_access = 'self.attr.inner.wrapper.value'
mixed_access = 'self.attr[0].wrapper[0]'
mixed_with_calls_access = 'self.attr[0]().wrapper[0][0].bar().foo[0]()'
# correct expressions
call_chain = 'manager.filter().exclude().annotate().values().first()'
# incorrect expressions
deep_access = 'self.some.other.attr().first.second.third.fourth.boom'
@pytest.mark.parametrize('code', [
subscript_access,
attribute_access,
mixed_access,
mixed_with_calls_access,
call_chain,
])
def test_correct_access(
assert_errors,
parse_ast_tree,
code,
options,
mode,
):
"""Testing that expressions with correct access level work well."""
tree = parse_ast_tree(mode(code))
option_values = options(max_access_level=4)
visitor = AccessVisitor(option_values, tree=tree)
visitor.run()
assert_errors(visitor, [])
@pytest.mark.parametrize(('code', 'access_level'), [
(subscript_access, 4),
(attribute_access, 4),
(mixed_access, 4),
(mixed_with_calls_access, 4),
(deep_access, 5),
])
def test_incorrect_access(
assert_errors,
assert_error_text,
parse_ast_tree,
code,
access_level,
options,
mode,
):
"""Testing that violations are raised when reaching too deep access."""
tree = parse_ast_tree(mode(code))
option_values = options(max_access_level=3)
visitor = AccessVisitor(option_values, tree=tree)
visitor.run()
assert_errors(visitor, [TooDeepAccessViolation])
assert_error_text(
visitor,
access_level,
option_values.max_access_level,
)
|
77615
|
import logging
import numpy as np
from numpy.linalg import norm
from scipy.stats import moment
from scipy.special import cbrt
def common_usr(molecule, ctd=None, cst=None, fct=None, ftf=None, atoms_type=None):
"""Function used in USR and USRCAT function
Parameters
----------
molecule : oddt.toolkit.Molecule
Molecule to compute USR shape descriptor
ctd : numpy array or None (default = None)
Coordinates of the molecular centroid
If 'None', the point is calculated
cst : numpy array or None (default = None)
Coordinates of the closest atom to the molecular centroid
If 'None', the point is calculated
fct : numpy array or None (default = None)
Coordinates of the farthest atom to the molecular centroid
If 'None', the point is calculated
ftf : numpy array or None (default = None)
Coordinates of the farthest atom
to the farthest atom to the molecular centroid
If 'None', the point is calculated
atoms_type : str or None (default None)
Type of atoms to be selected from atom_dict
If 'None', all atoms are used to calculate shape descriptor
Returns
-------
shape_descriptor : numpy array, shape = (12)
Array describing shape of molecule
"""
if atoms_type is None:
atoms = molecule.atom_dict['coords']
else:
if atoms_type == 'ishydrophobe':
mask = (molecule.atom_dict['ishalogen'] |
molecule.atom_dict['ishydrophobe'] |
(molecule.atom_dict['atomicnum'] == 16))
else:
mask = molecule.atom_dict[atoms_type]
atoms = molecule.atom_dict[mask]['coords']
if len(atoms) == 0:
return np.zeros(12), ((0., 0., 0.),) * 4
if ctd is None:
ctd = atoms.mean(0)
distances_ctd = norm(atoms - ctd, axis=1)
if cst is None:
cst = atoms[distances_ctd.argmin()]
distances_cst = norm(atoms - cst, axis=1)
if fct is None:
fct = atoms[distances_ctd.argmax()]
distances_fct = norm(atoms - fct, axis=1)
if ftf is None:
ftf = atoms[distances_fct.argmax()]
distances_ftf = norm(atoms - ftf, axis=1)
distances_list = [distances_ctd, distances_cst, distances_fct, distances_ftf]
shape_descriptor = np.zeros(12)
for i, distances in enumerate(distances_list):
shape_descriptor[i * 3 + 0] = np.mean(distances)
shape_descriptor[i * 3 + 1] = np.var(distances)
shape_descriptor[i * 3 + 2] = moment(distances, moment=3)
return shape_descriptor, (ctd, cst, fct, ftf)
def usr(molecule):
"""Computes USR shape descriptor based on
<NAME>, <NAME> (2007). Ultrafast shape recognition to search
compound databases for similar molecular shapes. Journal of
computational chemistry, 28(10):1711-23.
http://dx.doi.org/10.1002/jcc.20681
Parameters
----------
molecule : oddt.toolkit.Molecule
Molecule to compute USR shape descriptor
Returns
-------
shape_descriptor : numpy array, shape = (12)
Array describing shape of molecule
"""
return common_usr(molecule)[0]
def usr_cat(molecule):
"""Computes USRCAT shape descriptor based on
<NAME>, <NAME> (2012). USRCAT: real-time ultrafast
shape recognition with pharmacophoric constraints. Journal of
Cheminformatics, 2012 4:27.
http://dx.doi.org/10.1186/1758-2946-4-27
Parameters
----------
molecule : oddt.toolkit.Molecule
Molecule to compute USRCAT shape descriptor
Returns
-------
shape_descriptor : numpy array, shape = (60)
Array describing shape of molecule
"""
all_atoms_shape, points = common_usr(molecule)
ctd, cst, fct, ftf = points
hydrophobic_shape = common_usr(
molecule, ctd, cst, fct, ftf, 'ishydrophobe')[0]
aromatic_shape = common_usr(molecule, ctd, cst, fct, ftf, 'isaromatic')[0]
acceptor_shape = common_usr(molecule, ctd, cst, fct, ftf, 'isacceptor')[0]
donor_shape = common_usr(molecule, ctd, cst, fct, ftf, 'isdonor')[0]
cat_shape = np.hstack((all_atoms_shape, hydrophobic_shape,
aromatic_shape, acceptor_shape, donor_shape))
return np.nan_to_num(cat_shape)
def electroshape(mol):
"""Computes shape descriptor based on
<NAME> al. ElectroShape: fast molecular similarity
calculations incorporating shape, chirality and electrostatics.
J Comput Aided Mol Des 24, 789-801 (2010).
http://dx.doi.org/doi:10.1007/s10822-010-9374-0
Aside from spatial coordinates, atoms' charges are also used
as the fourth dimension to describe shape of the molecule.
Parameters
----------
mol : oddt.toolkit.Molecule
Molecule to compute Electroshape descriptor
Returns
-------
shape_descriptor : numpy array, shape = (15)
Array describing shape of molecule
"""
if (mol.atom_dict['coords'] == 0).all():
raise Exception('Molecule needs 3D coordinates')
if (mol.atom_dict['charge'] == 0).all():
logging.warning('All partial charges are zero. ElectroShape strongly relies on them.')
if np.isnan(mol.atom_dict['charge']).any():
logging.warning('Nan values in charge values of molecule ' + mol.title)
charge = np.nan_to_num(mol.atom_dict['charge'])
mi = 25 # scaling factor converting electron charges to Angstroms
four_dimensions = np.column_stack((mol.atom_dict['coords'], charge * mi))
c1 = four_dimensions.mean(0) # geometric centre of the molecule
distances_c1 = norm(four_dimensions - c1, axis=1)
c2 = four_dimensions[distances_c1.argmax()] # atom position furthest from c1
distances_c2 = norm(four_dimensions - c2, axis=1)
c3 = four_dimensions[distances_c2.argmax()] # atom position furthest from c2
distances_c3 = norm(four_dimensions - c3, axis=1)
vector_a = c2 - c1
vector_b = c3 - c1
vector_as = vector_a[:3] # spatial parts of these vectors -
vector_bs = vector_b[:3] # the first three coordinates
vector_c = ((norm(vector_a) /
(2 * norm(np.cross(vector_as, vector_bs))))
* np.cross(vector_as, vector_bs))
vector_c1s = c1[:3]
max_charge = np.array(np.amax(charge) * mi)
min_charge = np.array(np.amin(charge) * mi)
c4 = np.append(vector_c1s + vector_c, max_charge)
c5 = np.append(vector_c1s + vector_c, min_charge)
distances_c4 = norm(four_dimensions - c4, axis=1)
distances_c5 = norm(four_dimensions - c5, axis=1)
distances_list = [distances_c1, distances_c2, distances_c3,
distances_c4, distances_c5]
shape_descriptor = np.zeros(15)
i = 0
for distances in distances_list:
mean = np.mean(distances)
shape_descriptor[0 + i] = mean
shape_descriptor[1 + i] = np.std(distances)
shape_descriptor[2 + i] = cbrt(np.sum(((distances - mean) ** 3) / distances.size))
i += 3
return shape_descriptor
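# Illustrative note (added): the 15 components are the mean, standard deviation and cube root of
# the third central moment of the distance distributions to the five reference points c1..c5
# (5 centres x 3 statistics = 15).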
def usr_similarity(mol1_shape, mol2_shape, ow=1., hw=1., rw=1., aw=1., dw=1.):
"""Computes similarity between molecules
Parameters
----------
mol1_shape : numpy array
USR shape descriptor
mol2_shape : numpy array
USR shape descriptor
ow : float (default = 1.)
Scaling factor for all atoms
Only used for USRCAT, ignored for other types
hw : float (default = 1.)
Scaling factor for hydrophobic atoms
Only used for USRCAT, ignored for other types
rw : float (default = 1.)
Scaling factor for aromatic atoms
Only used for USRCAT, ignored for other types
aw : float (default = 1.)
Scaling factor for acceptors
Only used for USRCAT, ignored for other types
dw : float (default = 1.)
Scaling factor for donors
Only used for USRCAT, ignored for other types
Returns
-------
similarity : float from 0 to 1
Similarity between shapes of molecules,
1 indicates identical molecules
"""
if mol1_shape.shape[0] == 12 and mol2_shape.shape[0] == 12:
sim = 1. / (1. + (1. / 12) * np.sum(np.fabs(mol1_shape - mol2_shape)))
elif mol1_shape.shape[0] == 60 and mol2_shape.shape[0] == 60:
w = np.array([ow, hw, rw, aw, dw])
# Normalize weights
w = w / w.sum()
shape_diff = np.abs(mol1_shape - mol2_shape).reshape(-1, 12)
sim = 1. / (1 + (w * (1. / 12) * shape_diff.sum(axis=1)).sum())
elif mol1_shape.shape[0] == 15 and mol2_shape.shape[0] == 15:
sim = 1. / (1 + (1. / 15) * np.sum(np.fabs(mol1_shape - mol2_shape)))
else:
raise Exception('Given vectors are not valid USR shape descriptors '
'or come from different methods. Correct vector lengths '
'are: 12 for USR, 60 for USRCAT, 15 for Electroshape')
return sim
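# Hedged usage sketch (added; not part of the original module). usr()/usr_cat()/electroshape()
# need an oddt.toolkit.Molecule, but usr_similarity() only needs the descriptor vectors, so a
# minimal self-contained check can use synthetic 12-element arrays:
if __name__ == '__main__':
    _a = np.zeros(12)
    _b = np.full(12, 0.5)
    # identical descriptors -> similarity 1.0; diverging descriptors -> a value in (0, 1)
    assert usr_similarity(_a, _a) == 1.0
    print(usr_similarity(_a, _b))  # ~0.667 for this synthetic pair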
|
77653
|
import ui, console
import os
import math
def save_action(sender):
with open('image_file.png', 'wb') as fp:
fp.write(img.to_png())
console.hud_alert('image saved in the file image_file.png')
def showimage_action(sender):
img.show()
def make_polygon(num_sides, x=0, y=0, radius=100, phase=0, line_width=5):
path = ui.Path()
path.move_to(x,y)
path.line_width = line_width
for i in range(num_sides):
t = 2*math.pi*i/num_sides
x1, y1 = radius+radius*math.cos(t+phase), radius+radius*math.sin(t+phase)
if i:
path.line_to(x+x1, y+y1)
else:
path.move_to(x+x1,y+y1)
path.close()
return path
def create_image():
img = None
with ui.ImageContext(500, 500) as ctx:
ui.Image('test:Mandrill').draw(0,0,500,500)
path = make_polygon(6, 20,20, 225, math.pi/2)
rect = ui.Path.rect(0,0,500,500)
path.append_path(rect)
path.eo_fill_rule = True
path.add_clip()
ui.set_color('lightgreen')
rect.fill()
img = ctx.get_image()
return img
img = create_image()
#img.show()
main_view = ui.View(frame=(0,0,500,500))
imgview = ui.ImageView(frame=(0,0,500,500))
imgview.image = img
main_view.add_subview(imgview)
save_button = ui.ButtonItem()
save_button.title = 'Save'
save_button.action = save_action
show_button = ui.ButtonItem()
show_button.title = 'Show'
show_button.action = showimage_action
main_view.right_button_items = [save_button, show_button]
main_view.present('sheet')
|
77675
|
import torch
from .. import utils
MODULE = torch
FP16_FUNCS = [
# Low level functions wrapped by torch.nn layers.
# The wrapper layers contain the weights which are then passed in as a parameter
# to these functions.
'conv1d',
'conv2d',
'conv3d',
'conv_transpose1d',
'conv_transpose2d',
'conv_transpose3d',
'conv_tbc',
'prelu',
# BLAS
'addmm',
'addmv',
'addr',
'matmul',
'mm',
'mv',
]
FP32_FUNCS = [
# Pointwise
'acos',
'asin',
'cosh',
'erfinv',
'exp',
'expm1',
'log',
'log10',
'log2',
'reciprocal',
'rsqrt',
'sinh',
'tan',
# Other math
'pow',
# Reduction
'cumprod',
'cumsum',
'dist',
# 'mean',
'norm',
'prod',
'std',
'sum',
'var',
# Misc
'renorm'
]
version_strings = torch.__version__.split('.')
version_major = version_strings[0]
version_minor = version_strings[1]
version_num = float(version_major + "." + version_minor)
# Before torch 1.1, mean must be blacklisted.
if version_num < 1.1:
FP32_FUNCS.append('mean')
# Before CUDA 9.1, batched matmul was missing fast FP16 kernels. We
# check the CUDA version -- if at least 9.1, then put the bmm
# functions on the fp16 list. Otherwise, put them on the fp32 list.
_bmms = ['addbmm',
'baddbmm',
'bmm']
if utils.is_cuda_enabled():
# workaround https://github.com/facebookresearch/maskrcnn-benchmark/issues/802
if utils.get_cuda_version() >= (9, 1, 0):
FP16_FUNCS.extend(_bmms)
else:
FP32_FUNCS.extend(_bmms)
# Multi-tensor fns that may need type promotion
CASTS = [
# Multi-tensor math
'addcdiv',
'addcmul',
'atan2',
'cross',
'bilinear',
'dot',
# Element-wise _or_ tensor-wise math
'add',
'div',
'mul',
# Comparison
'eq',
'equal',
'ge',
'gt',
'le',
'lt',
'ne'
]
# Functions that take sequence arguments. We need to inspect the whole
# sequence and cast to the widest type.
SEQUENCE_CASTS = [
'cat',
'stack'
]
|
77712
|
import re
import uuid
from subprocess import run
from tempfile import NamedTemporaryFile
from typing import List, Optional
import conda_pack
import yaml
from ...utils import logger
from ..constants import MLServerEnvDeps, MLServerRuntimeEnvDeps
from ..metadata import ModelFramework
def _get_env(conda_env_file_path: str = None, env_name: str = None, platform: ModelFramework = None) -> dict:
if conda_env_file_path:
with open(conda_env_file_path) as file:
logger.info(f"Using found conda env: {conda_env_file_path}")
env = yaml.safe_load(file)
env = _add_required_deps_if_missing(env, platform)
else:
env = _get_environment(env_name=env_name)
env = _add_required_deps_if_missing(env, platform)
return env
def _add_required_deps_if_missing(env: dict, platform: Optional[ModelFramework]) -> dict:
if not _has_required_deps(env, platform):
logger.info(f"conda.yaml does not contain {MLServerEnvDeps}, adding them")
env = _add_required_deps(env, platform)
return env
def save_environment(
conda_pack_file_path: str, conda_env_file_path: str = None, env_name: str = None, platform: ModelFramework = None
) -> None:
if env_name:
# TODO: add mlserver deps here if not present?
_pack_environment(env_name, conda_pack_file_path)
else:
env = _get_env(conda_env_file_path, env_name, platform)
_create_and_pack_environment(env=env, file_path=conda_pack_file_path)
def _get_environment(env_name: str = None) -> dict:
cmd = "conda env export"
if env_name:
cmd += f" --name {env_name}"
proc = run(cmd, shell=True, check=True, capture_output=True)
return yaml.safe_load(proc.stdout)
def _has_required_deps(env: dict, platform: ModelFramework = None) -> bool:
if "dependencies" not in env:
return False
dependencies = env["dependencies"]
pip_deps = _get_pip_deps(dependencies)
if not pip_deps:
return False
deps = _get_mlserver_deps(platform)
for dep in deps:
if _is_dep_not_defined(dep, pip_deps["pip"]):
return False
return True
def _get_mlserver_deps(platform: Optional[ModelFramework]) -> List[str]:
runtime_deps = MLServerRuntimeEnvDeps.get(platform) # type: ignore
deps = MLServerEnvDeps
if runtime_deps:
return deps + runtime_deps
return deps
def _add_required_deps(env: dict, platform: ModelFramework = None) -> dict:
if "dependencies" not in env:
env["dependencies"] = []
dependencies = env["dependencies"]
pip_deps = _get_pip_deps(dependencies)
if not pip_deps:
pip_deps = {"pip": []}
dependencies.append(pip_deps)
deps = _get_mlserver_deps(platform)
for dep in deps:
if _is_dep_not_defined(dep, pip_deps["pip"]):
pip_deps["pip"].append(dep)
return env
def _is_dep_not_defined(dep: str, deps: List[str]) -> bool:
parts = re.split(r"==|>=|<=|~=|!=|>|<|==:", dep)
module = parts[0]
r = re.compile(fr"{module}$|({module}((==|>=|<=|~=|!=|>|<|==:)[0-9]+\.[0-9]+.[0-9]+))")
newlist = list(filter(r.match, deps))
return len(newlist) == 0
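# Illustrative note (added): _is_dep_not_defined treats both bare and version-pinned entries as
# "already defined". Assuming "mlserver" is one of the required deps:
#   _is_dep_not_defined("mlserver", ["mlserver==1.1.0"])  -> False (already pinned, nothing to add)
#   _is_dep_not_defined("mlserver", ["numpy", "pandas"])  -> True  (missing, will be appended)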
def _get_pip_deps(dependencies: dict) -> Optional[dict]:
for dep in dependencies:
if isinstance(dep, dict) and "pip" in dep:
# If entry is a dict, and has a `pip` key that's the one
return dep
return None
def _pack_environment(env_name: str, file_path: str):
logger.info(f"packing conda environment from {env_name} to {file_path}")
# Pack environment
conda_pack.pack(
name=env_name,
output=file_path,
force=True,
verbose=True,
ignore_editable_packages=False,
ignore_missing_files=True,
)
def _create_and_pack_environment(env: dict, file_path: str):
with NamedTemporaryFile(mode="w", suffix=".yml") as file:
# TODO: Save copy of environment.yaml alongside tarball
yaml.safe_dump(env, file)
# Create env
tmp_env_path = file.name
tmp_env_name = f"tempo-{uuid.uuid4()}"
cmd = f"conda env create --name {tmp_env_name} --file {tmp_env_path}"
logger.info("Creating conda env with: %s", cmd)
run(cmd, shell=True, check=True)
try:
_pack_environment(tmp_env_name, file_path)
finally:
# Remove environment
cmd = f"conda remove --name {tmp_env_name} --all --yes"
logger.info("Removing conda env with: %s", cmd)
run(cmd, shell=True, check=True)
|
77716
|
from insightconnect_plugin_runtime.exceptions import PluginException
from json import JSONDecodeError
import requests
from requests.auth import HTTPBasicAuth
from urllib.parse import urlsplit
class EasyVistaApi:
def __init__(self, client_login: dict, account: int, url: str):
self.base_url = f"{self.split_url(url)}/api/v1/{account}/"
self.username = client_login.get("username")
self.password = client_login.get("password")
def _call_api(self, method: str, endpoint: str, json: dict = None):
response = requests.request(
url=self.base_url + endpoint,
method=method,
json=json,
auth=HTTPBasicAuth(self.username, self.password),
)
if response.status_code == 401:
raise PluginException(preset=PluginException.Preset.USERNAME_PASSWORD)
if response.status_code == 403:
raise PluginException(preset=PluginException.Preset.UNAUTHORIZED)
if response.status_code == 404:
raise PluginException(
cause="No results found. Invalid or unreachable endpoint provided.",
assistance="Please provide valid inputs or verify the endpoint/URL/hostname configured in your plugin"
" connection is correct.",
)
if 400 <= response.status_code < 500:
raise PluginException(preset=PluginException.Preset.UNKNOWN, data=response.text)
if response.status_code >= 500:
raise PluginException(preset=PluginException.Preset.SERVER_ERROR, data=response.text)
try:
return response.json()
except JSONDecodeError:
raise PluginException(preset=PluginException.Preset.INVALID_JSON, data=response.text)
def ticket_action(self, method: str, payload: dict, rfc_number: str = None) -> dict:
if method == "POST":
endpoint = "requests"
else:
endpoint = f"requests/{rfc_number}"
return self.get_reference_number_and_href(self._call_api(method, endpoint, json=payload))
def search_tickets(self, query: str) -> dict:
return self._call_api("GET", f"requests?search={query}")
@staticmethod
def get_reference_number_and_href(response: dict) -> dict:
try:
href = response.get("HREF")
return {"href_hyperlink": href, "reference_number": href.split("/requests/")[1]}
except (AttributeError, IndexError) as e:
raise PluginException(
cause="EasyVista returned unexpected response.",
assistance="Please check that the provided inputs are correct and try again.",
data=e,
)
@staticmethod
def split_url(url: str) -> str:
scheme, netloc, paths, queries, fragments = urlsplit(url.strip())
return f"{scheme}://{netloc}"
|
77751
|
from unittesting import DeferrableTestCase
from GitSavvy.tests.parameterized import parameterized as p
from GitSavvy.core.commands.log_graph import describe_graph_line
examples = [
(
"|",
{},
None
),
(
"● a3062b2 (HEAD -> optimize-graph-render, origin/optimize-graph-render) Abort .. | Thu 21:07, herr kaste",
{"origin"},
{
"commit": "a3062b2",
"HEAD": "optimize-graph-render",
"branches": ["optimize-graph-render", "origin/optimize-graph-render"],
"local_branches": ["optimize-graph-render"]
}
),
(
"● a3062b2 (HEAD, origin/optimize-graph-render) Abort re.. | Thu 21:07, herr kaste",
{"origin"},
{
"commit": "a3062b2",
"HEAD": "a3062b2",
"branches": ["origin/optimize-graph-render"]
}
),
(
"● a3062b2 (HEAD -> optimize-graph-render, feat/optimize-graph-render) Abort .. | Thu 21:07, herr kaste",
{"origin"},
{
"commit": "a3062b2",
"HEAD": "optimize-graph-render",
"branches": ["optimize-graph-render", "feat/optimize-graph-render"],
"local_branches": ["optimize-graph-render", "feat/optimize-graph-render"]
}
),
(
"● ad6d88c (HEAD) Use view from the argument instead of on self | Thu 20:56, herr kaste",
{"origin"},
{
"commit": "ad6d88c",
"HEAD": "ad6d88c",
}
),
(
"● ad6d88c Use view from the argument instead of on self | Thu 20:56, herr kaste",
{"origin"},
{
"commit": "ad6d88c",
}
),
(
"| ● 153dca0 (HEAD, tag: 2.20.0) Merge branch 'dev' (2 months ago) <<NAME>>",
{"origin"},
{
"commit": "153dca0",
"HEAD": "153dca0",
"tags": ["2.20.0"]
}
),
]
class TestDescribeGraphLine(DeferrableTestCase):
@p.expand(examples)
def test_a(self, input_line, remotes, output):
self.assertEqual(output, describe_graph_line(input_line, remotes))
|
77772
|
from __future__ import annotations
from typing import TYPE_CHECKING
import random
from enum import Enum
from configuration import config
from src.genotype.mutagen.option import Option
from src.genotype.neat.gene import Gene
if TYPE_CHECKING:
pass
class NodeType(Enum):
INPUT = 0
HIDDEN = 1
OUTPUT = 2
class Node(Gene):
"""General neat node"""
def __init__(self, id, type: NodeType = NodeType.HIDDEN):
super().__init__(id)
self.node_type: NodeType = type
# TODO
self.lossy_aggregation = Option('lossy', False, True,
current_value=random.choices([False, True], weights=[1-config.lossy_chance,
config.lossy_chance])[0],
mutation_chance=0.3 if config.mutate_lossy_values else 0)
self.try_conv_aggregation = Option('conv_aggregation', False, True, current_value=random.choice([False, True]))
mult_chance = config.element_wise_multiplication_chance
mult_weights = [1-mult_chance, mult_chance]
self.element_wise_multiplication_aggregation = \
Option('element_wise_multiplication_aggregation', False, True, current_value=
random.choices([False, True], weights=mult_weights)[0],
mutation_chance= 0.2 if mult_chance > 0 else 0,
probability_weighting=mult_weights)
def is_output_node(self):
return self.node_type == NodeType.OUTPUT
def is_input_node(self):
return self.node_type == NodeType.INPUT
def get_all_mutagens(self):
return [self.lossy_aggregation, self.try_conv_aggregation]
def convert_node(self, **kwargs):
raise NotImplementedError()
|
77781
|
import random
import pygame
from . import map_generator, traps
from .pathfinder import Pathfinder
from .tile import Tile
TILE_SIZE = 16
BARRIER_SIZE = 10
def grid_walk(start, end):
start = list(start)
dx = end[0] - start[0]
dy = end[1] - start[1]
nx = abs(dx)
ny = abs(dy)
sign_x = 1 if dx > 0 else -1
sign_y = 1 if dy > 0 else -1
points = []
ix = iy = 0
points.append((start[0], start[1]))
while (ix < nx) or (iy < ny):
if nx == 0:
iy += 1
start[1] += sign_y
continue
if ny == 0:
ix += 1
start[0] += sign_x
continue
if (0.5 + ix) / nx < (0.5 + iy) / ny:
ix += 1
start[0] += sign_x
else:
iy += 1
start[1] += sign_y
points.append((start[0], start[1]))
return points
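# Illustrative note (added): grid_walk() enumerates the grid cells a straight segment passes
# through, e.g. grid_walk((0, 0), (2, 1)) -> [(0, 0), (1, 0), (1, 1), (2, 1)]; sight_line() below
# walks these cells and fails as soon as one of them contains a sight-blocking tile.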
class Terrain:
def __init__(self, game):
self.game = game
self.terrain = {}
self.tile_size = TILE_SIZE
self.barrier_size = BARRIER_SIZE
self.size = (20, 20)
self.pixel_size = (self.size[0] * TILE_SIZE, self.size[1] * TILE_SIZE)
self.boundaries = pygame.Rect(0, 0, 2, 2)
self.pathfinder = Pathfinder(self)
self.traps = []
self.trap_density = 0.02
self.trap_types = ["spinning_blades", "spinning_blades", "pit"]
def debug_map(self, overlay_data=[]):
return "\n".join(
[
"".join(
[("=" if v else "%") if (x, y) in overlay_data else ("-" if v else "/") for x, v in enumerate(row)]
)
for y, row in enumerate(self.pathfinding_array)
]
)
def gen_pathfinding_map(self):
x_coords = [t[0] for t in self.terrain]
y_coords = [t[1] for t in self.terrain]
x_tile_boundaries = (min(x_coords), max(x_coords))
y_tile_boundaries = (min(y_coords), max(y_coords))
self.tile_boundaries = (x_tile_boundaries, y_tile_boundaries)
self.pathfinding_array = []
for i in range(y_tile_boundaries[1] - y_tile_boundaries[0] + 1):
self.pathfinding_array.append([1] * (x_tile_boundaries[1] - x_tile_boundaries[0] + 1))
for loc in self.terrain:
for tile in self.terrain[loc]:
if tile.config["solid"]:
path_loc = self.loc_to_path(loc)
self.pathfinding_array[path_loc[1]][path_loc[0]] = 0
self.pathfinder.set_map(self.pathfinding_array)
def sight_line(self, start, end):
start_loc = self.px_to_loc(start)
end_loc = self.px_to_loc(end)
points = grid_walk(start_loc, end_loc)
for p in points:
if p in self.terrain:
for tile in self.terrain[p]:
if tile.config["sight_block"]:
return False
else:
return False
return True
def loc_to_path(self, loc):
return (loc[0] - self.tile_boundaries[0][0], loc[1] - self.tile_boundaries[1][0])
def px_to_loc(self, pos):
loc = (int(pos[0] // self.tile_size), int(pos[1] // self.tile_size))
return loc
def check_tile_solid(self, pos):
loc = self.px_to_loc(pos)
if loc in self.terrain:
for tile in self.terrain[loc]:
if tile.config["solid"]:
return True
return False
def check_tile_hoverable(self, pos):
loc = self.px_to_loc(pos)
if loc in self.terrain:
for tile in self.terrain[loc]:
if not tile.config["hoverable"]:
return False
else:
return False
return True
def tile_rect(self, loc):
if loc in self.terrain:
return pygame.Rect(loc[0] * self.tile_size, loc[1] * self.tile_size, self.tile_size, self.tile_size)
return None
def tile_rect_px(self, pos):
loc = self.px_to_loc(pos)
return self.tile_rect(loc)
def generate(self, biome):
map_generator.generate(self.game, self, biome)
self.gen_pathfinding_map()
def update(self, dt):
for trap in self.traps:
trap.update(dt)
def render(self, surface: pygame.Surface, offset=(0, 0)):
for loc in self.terrain:
for tile in self.terrain[loc]:
tile.render(self.game, surface, self.game.combat.camera.pos)
for trap in self.traps:
trap.render(surface, self.game.combat.camera.render_offset())
|
77841
|
import cv2
import tensorflow as tf
import numpy as np
OUTPUT_PATH = "../events/"
NUM_FILTERS = 10
FILTER_SIZE = (3, 3)
STRIDES = (1, 1)
def nn(input_node):
with tf.variable_scope('nn'):
w = tf.get_variable(
name='weight',
shape=[FILTER_SIZE[0], FILTER_SIZE[1], 3, NUM_FILTERS],
dtype=tf.float32)
b = tf.get_variable(
name='bias',
shape=[NUM_FILTERS],
dtype=tf.float32)
out = tf.nn.conv2d(input_node, filter=w, strides=(1, 1),
padding='SAME')
out = out + b
return out
def layer(input_node):
out = tf.layers.conv2d(input_node, NUM_FILTERS, FILTER_SIZE, strides=STRIDES, padding='same', name='layer')
return out
def slim(input_node):
out = tf.contrib.slim.conv2d(input_node, NUM_FILTERS, FILTER_SIZE, stride=STRIDES, padding='SAME',
activation_fn=None, scope='slim')
return out
def keras(input_node):
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(NUM_FILTERS, FILTER_SIZE, strides=STRIDES, padding='same')
], name='keras')
return model(input_node)
if __name__ == '__main__':
node = tf.placeholder(shape=[None, 100, 100, 3], dtype=tf.float32)
nn_out = nn(node)
layer_out = layer(node)
slim_out = slim(node)
keras_out = keras(node)
tf.summary.FileWriter(OUTPUT_PATH, graph=tf.get_default_graph())
image = cv2.imread('ithome.jpg')
image = np.expand_dims(image, 0)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
nn_result, layer_result, slim_result, keras_result = \
sess.run([nn_out, layer_out, slim_out, keras_out], feed_dict={node: image})
print(f'nn shape: {nn_result.shape}')
print(f'layer shape: {layer_result.shape}')
print(f'slim shape: {slim_result.shape}')
print(f'keras shape: {keras_result.shape}')
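# Illustrative note (added): assuming 'ithome.jpg' is 100x100 (it must match the placeholder
# shape), all four variants apply 10 filters of size 3x3 with stride 1 and 'same' padding, so
# every printed shape should be (1, 100, 100, 10); the graphs differ only in how the convolution
# is declared, not in the resulting shape.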
|
77847
|
import csv
class Characters:
def getBrawlersID():
BrawlersID = []
with open('Logic/Files/assets/csv_logic/characters.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count == 0 or line_count == 1:
line_count += 1
else:
if row[23] == 'Hero' and row[1].lower() != 'true' and row[2].lower() != 'true' and row[0] != "MechaDudeBig":
BrawlersID.append(line_count - 2)
line_count += 1
return BrawlersID
|
77856
|
from io import BytesIO
from PIL import Image, ImageDraw
from flask import send_file
from utils.endpoint import Endpoint, setup
from utils.textutils import wrap, render_text_with_emoji
@setup
class SneakyFox(Endpoint):
params = ['text']
def generate(self, avatars, text, usernames, kwargs):
base = Image.open(self.assets.get('assets/sneakyfox/sneakyfox.bmp'))
font = self.assets.get_font('assets/fonts/arimobold.ttf', size=36)
canv = ImageDraw.Draw(base)
try:
fox, otherthing = text.replace(' ,', ',', 1).split(',', 1)
except ValueError:
fox = 'Text that is not split with a comma'
otherthing = 'the bot'
fox = wrap(font, fox, 500)
otherthing = wrap(font, otherthing, 450)
render_text_with_emoji(base, canv, (300, 350), fox[:180], font=font, fill='Black')
render_text_with_emoji(base, canv, (670, 120), otherthing[:180], font=font, fill='Black')
base = base.convert('RGB')
b = BytesIO()
base.save(b, format='jpeg')
b.seek(0)
return send_file(b, mimetype='image/jpeg')
|
77858
|
from unittest import TestCase
from pypika import (
Table,
functions as fn,
)
import fireant as f
from fireant.tests.dataset.mocks import test_database
test_table = Table("test")
ds = f.DataSet(
table=test_table,
database=test_database,
fields=[
f.Field("date", definition=test_table.date, data_type=f.DataType.date),
f.Field("text", definition=test_table.text, data_type=f.DataType.text),
f.Field("number", definition=test_table.number, data_type=f.DataType.number),
f.Field("boolean", definition=test_table.boolean, data_type=f.DataType.boolean),
f.Field(
"aggr_number",
definition=fn.Sum(test_table.number),
data_type=f.DataType.number,
),
],
)
# noinspection SqlDialectInspection,SqlNoDataSourceInspection
class ResultSetTests(TestCase):
maxDiff = None
def test_no_metric_is_removed_when_result_set_metric_filter_is_present(self):
queries = ds.query.widget(f.Pandas(ds.fields.aggr_number)).filter(f.ResultSet(ds.fields.aggr_number > 10)).sql
self.assertEqual(len(queries), 1)
self.assertEqual(
"SELECT "
"CASE WHEN SUM(\"number\")>10 THEN 'set(SUM(number)>10)' "
"ELSE 'complement(SUM(number)>10)' END \"$set(SUM(number)>10)\","
'SUM("number") "$aggr_number" '
'FROM "test" '
'ORDER BY 1 '
'LIMIT 200000',
str(queries[0]),
)
def test_dimension_is_replaced_by_default_when_result_set_filter_is_present(self):
queries = (
ds.query.widget(f.Pandas(ds.fields.aggr_number))
.dimension(ds.fields.text)
.filter(f.ResultSet(ds.fields.text == "abc"))
.sql
)
self.assertEqual(len(queries), 1)
self.assertEqual(
"SELECT "
"CASE WHEN \"text\"='abc' THEN 'set(text=''abc'')' ELSE 'complement(text=''abc'')' END \"$text\","
'SUM("number") "$aggr_number" '
'FROM "test" '
"GROUP BY \"$text\" "
"ORDER BY \"$text\" "
"LIMIT 200000",
str(queries[0]),
)
def test_dimension_is_replaced_by_default_in_the_target_dimension_place_when_result_set_filter_is_present(
self,
):
queries = (
ds.query.widget(f.Pandas(ds.fields.aggr_number))
.dimension(ds.fields.date)
.dimension(ds.fields.text)
.dimension(ds.fields.boolean)
.filter(f.ResultSet(ds.fields.text == "abc"))
.sql
)
self.assertEqual(len(queries), 1)
self.assertEqual(
"SELECT "
'"date" "$date",'
"CASE WHEN \"text\"='abc' THEN 'set(text=''abc'')' ELSE 'complement(text=''abc'')' END \"$text\","
'"boolean" "$boolean",'
'SUM("number") "$aggr_number" '
'FROM "test" '
'GROUP BY "$date","$text","$boolean" '
'ORDER BY "$date","$text","$boolean" '
'LIMIT 200000',
str(queries[0]),
)
def test_dimension_with_dimension_modifier_is_replaced_by_default_when_result_set_filter_is_present(
self,
):
queries = (
ds.query.widget(f.Pandas(ds.fields.aggr_number))
.dimension(ds.fields.date)
.dimension(f.Rollup(ds.fields.boolean))
.filter(f.ResultSet(ds.fields.boolean == True))
.sql
)
self.assertEqual(len(queries), 2)
with self.subTest('base query is the same as without totals'):
self.assertEqual(
"SELECT "
'"date" "$date",'
"CASE WHEN \"boolean\"=true THEN 'set(boolean=true)' ELSE 'complement(boolean=true)' END \"$boolean\","
'SUM("number") "$aggr_number" '
'FROM "test" '
'GROUP BY "$date","$boolean" '
'ORDER BY "$date","$boolean" '
'LIMIT 200000',
str(queries[0]),
)
with self.subTest('totals dimension is replaced with _FIREANT_ROLLUP_VALUE_'):
self.assertEqual(
"SELECT "
'"date" "$date",'
'\'_FIREANT_ROLLUP_VALUE_\' "$boolean",'
'SUM("number") "$aggr_number" '
'FROM "test" '
'GROUP BY "$date" '
'ORDER BY "$date","$boolean" '
'LIMIT 200000',
str(queries[1]),
)
def test_dimension_is_inserted_before_conditional_dimension_when_result_set_filter_wont_ignore_dimensions(
self,
):
queries = (
ds.query.widget(f.Pandas(ds.fields.aggr_number))
.dimension(ds.fields.text)
.filter(f.ResultSet(ds.fields.text == "abc", will_replace_referenced_dimension=False))
.sql
)
self.assertEqual(len(queries), 1)
self.assertEqual(
"SELECT "
"CASE WHEN \"text\"='abc' THEN 'set(text=''abc'')' ELSE 'complement(text=''abc'')' END \"$set(text='abc')\","
'"text" "$text",'
'SUM("number") "$aggr_number" '
'FROM "test" '
'GROUP BY "$set(text=\'abc\')","$text" '
'ORDER BY "$set(text=\'abc\')","$text" '
'LIMIT 200000',
str(queries[0]),
)
def test_dimension_breaks_complement_down_when_result_set_filter_wont_group_complement(
self,
):
queries = (
ds.query.widget(f.Pandas(ds.fields.aggr_number))
.dimension(ds.fields.text)
.filter(f.ResultSet(ds.fields.text == "abc", will_group_complement=False))
.sql
)
self.assertEqual(len(queries), 1)
self.assertEqual(
"SELECT "
"CASE WHEN \"text\"='abc' THEN 'set(text=''abc'')' ELSE \"text\" END \"$text\","
'SUM("number") "$aggr_number" '
'FROM "test" '
"GROUP BY \"$text\" "
"ORDER BY \"$text\" "
"LIMIT 200000",
str(queries[0]),
)
def test_dimension_is_inserted_in_dimensions_even_when_not_selected(self):
queries = ds.query.widget(f.Pandas(ds.fields.aggr_number)).filter(f.ResultSet(ds.fields.text == "abc")).sql
self.assertEqual(len(queries), 1)
self.assertEqual(
"SELECT "
"CASE WHEN \"text\"='abc' THEN 'set(text=''abc'')' ELSE 'complement(text=''abc'')' END \"$text\","
'SUM("number") "$aggr_number" '
'FROM "test" '
"GROUP BY \"$text\" "
"ORDER BY \"$text\" "
"LIMIT 200000",
str(queries[0]),
)
def test_dimension_is_inserted_as_last_dimension_when_not_selected(self):
queries = (
ds.query.widget(f.Pandas(ds.fields.aggr_number))
.dimension(ds.fields.date)
.dimension(ds.fields.boolean)
.filter(f.ResultSet(ds.fields.text == "abc"))
.sql
)
self.assertEqual(len(queries), 1)
self.assertEqual(
"SELECT "
'"date" "$date",'
'"boolean" "$boolean",'
"CASE WHEN \"text\"='abc' THEN 'set(text=''abc'')' ELSE 'complement(text=''abc'')' END \"$text\","
'SUM("number") "$aggr_number" '
'FROM "test" '
'GROUP BY "$date","$boolean","$text" '
'ORDER BY "$date","$boolean","$text" '
'LIMIT 200000',
str(queries[0]),
)
def test_dimension_uses_set_label_kwarg_and_None_for_complement(self):
queries = (
ds.query.widget(f.Pandas(ds.fields.aggr_number))
.dimension(ds.fields.text)
.filter(f.ResultSet(ds.fields.text == "abc", set_label="Text is ABC"))
.sql
)
self.assertEqual(len(queries), 1)
self.assertEqual(
"SELECT "
"CASE WHEN \"text\"='abc' THEN 'Text is ABC' ELSE NULL END "
"\"$text\","
'SUM("number") "$aggr_number" '
'FROM "test" '
"GROUP BY \"$text\" "
"ORDER BY \"$text\" "
"LIMIT 200000",
str(queries[0]),
)
def test_dimension_breaks_complement_down_even_when_set_label_is_set_when_result_set_filter_wont_group_complement(
self,
):
queries = (
ds.query.widget(f.Pandas(ds.fields.aggr_number))
.dimension(ds.fields.text)
.filter(
f.ResultSet(
ds.fields.text == "abc",
set_label="IS ABC",
will_group_complement=False,
)
)
.sql
)
self.assertEqual(len(queries), 1)
self.assertEqual(
"SELECT "
"CASE WHEN \"text\"='abc' THEN 'IS ABC' ELSE \"text\" END \"$text\","
'SUM("number") "$aggr_number" '
'FROM "test" '
"GROUP BY \"$text\" "
"ORDER BY \"$text\" "
"LIMIT 200000",
str(queries[0]),
)
def test_dimension_breaks_complement_down_even_when_both_labels_are_set_but_wont_group_complement(
self,
):
queries = (
ds.query.widget(f.Pandas(ds.fields.aggr_number))
.dimension(ds.fields.text)
.filter(
f.ResultSet(
ds.fields.text == "abc",
set_label="IS ABC",
complement_label="OTHERS",
will_group_complement=False,
)
)
.sql
)
self.assertEqual(len(queries), 1)
self.assertEqual(
"SELECT "
"CASE WHEN \"text\"='abc' THEN 'IS ABC' ELSE \"text\" END \"$text\","
'SUM("number") "$aggr_number" '
'FROM "test" '
"GROUP BY \"$text\" "
"ORDER BY \"$text\" "
"LIMIT 200000",
str(queries[0]),
)
def test_dimension_uses_complement_label_kwarg_and_None_for_set(self):
queries = (
ds.query.widget(f.Pandas(ds.fields.aggr_number))
.dimension(ds.fields.text)
.filter(f.ResultSet(ds.fields.text == "abc", complement_label="Text is NOT ABC"))
.sql
)
self.assertEqual(len(queries), 1)
self.assertEqual(
"SELECT "
"CASE WHEN \"text\"='abc' THEN NULL ELSE 'Text is NOT ABC' END "
"\"$text\","
'SUM("number") "$aggr_number" '
'FROM "test" '
"GROUP BY \"$text\" "
"ORDER BY \"$text\" "
"LIMIT 200000",
str(queries[0]),
)
def test_dimension_uses_both_set_and_complement_label_kwargs_when_available(self):
queries = (
ds.query.widget(f.Pandas(ds.fields.aggr_number))
.dimension(ds.fields.text)
.filter(
f.ResultSet(
ds.fields.text == "abc",
set_label="Text is ABC",
complement_label="Text is NOT ABC",
)
)
.sql
)
self.assertEqual(len(queries), 1)
self.assertEqual(
"SELECT "
"CASE WHEN \"text\"='abc' THEN 'Text is ABC' ELSE 'Text is NOT ABC' END "
"\"$text\","
'SUM("number") "$aggr_number" '
'FROM "test" '
"GROUP BY \"$text\" "
"ORDER BY \"$text\" "
"LIMIT 200000",
str(queries[0]),
)
def test_dimension_is_replaced_when_references_are_present(self):
queries = (
ds.query.widget(f.Pandas(ds.fields.aggr_number))
.dimension(ds.fields.date)
.dimension(ds.fields.boolean)
.reference(f.WeekOverWeek(ds.fields.date))
.filter(f.ResultSet(ds.fields.text == "abc"))
.sql
)
self.assertEqual(len(queries), 2)
with self.subTest("base query"):
self.assertEqual(
"SELECT "
'"date" "$date",'
'"boolean" "$boolean",'
"CASE WHEN \"text\"='abc' THEN 'set(text=''abc'')' ELSE 'complement(text=''abc'')' END \"$text\","
'SUM("number") "$aggr_number" '
'FROM "test" '
'GROUP BY "$date","$boolean","$text" '
'ORDER BY "$date","$boolean","$text" '
'LIMIT 200000',
str(queries[0]),
)
with self.subTest("ref query"):
self.assertEqual(
"SELECT "
'TIMESTAMPADD(week,1,"date") "$date",'
'"boolean" "$boolean",'
"CASE WHEN \"text\"='abc' THEN 'set(text=''abc'')' ELSE 'complement(text=''abc'')' END \"$text\","
'SUM("number") "$aggr_number_wow" '
'FROM "test" '
'GROUP BY "$date","$boolean","$text" '
'ORDER BY "$date","$boolean","$text" '
'LIMIT 200000',
str(queries[1]),
)
def test_dimension_filter_variations_with_sets(self):
for field_alias, fltr in [
('text', ds.fields.text.like("%abc%")),
('text', ds.fields.text.not_like("%abc%")),
('text', ds.fields.text.like("%abc%", "%cde%")),
('text', ds.fields.text.not_like("%abc%", "%cde%")),
('text', ds.fields.text.isin(["abc"])),
('text', ds.fields.text.notin(["abc"])),
('date', ds.fields.date.between('date1', 'date2')),
('number', ds.fields.number.between(5, 15)),
('number', ds.fields.number.isin([1, 2, 3])),
('number', ds.fields.number.notin([1, 2, 3])),
]:
fltr_sql = fltr.definition.get_sql(quote_char="")
with self.subTest(fltr_sql):
queries = (
ds.query.widget(f.Pandas(ds.fields.aggr_number))
.dimension(ds.fields[field_alias])
.filter(f.ResultSet(fltr, set_label='set_A', complement_label='set_B'))
.sql
)
self.assertEqual(len(queries), 1)
self.assertEqual(
"SELECT "
f"CASE WHEN {fltr} THEN 'set_A' ELSE 'set_B' END \"${field_alias}\","
'SUM("number") "$aggr_number" '
'FROM "test" '
f"GROUP BY \"${field_alias}\" "
f"ORDER BY \"${field_alias}\" "
"LIMIT 200000",
str(queries[0]),
)
def test_deeply_nested_dimension_filter_with_sets(self):
field_alias = 'text'
fltr = ds.fields.text.like(
fn.Concat(
fn.Upper(fn.Trim(fn.Concat('%ab', ds.fields.number))),
ds.fields.aggr_number,
fn.Concat(ds.fields.date.between('date1', 'date2'), 'c%'),
)
)
queries = (
ds.query.widget(f.Pandas(ds.fields.aggr_number))
.dimension(ds.fields[field_alias])
.filter(f.ResultSet(fltr, set_label='set_A', complement_label='set_B'))
.sql
)
self.assertEqual(len(queries), 1)
self.assertEqual(
"SELECT "
f"CASE WHEN {fltr} THEN 'set_A' ELSE 'set_B' END \"${field_alias}\","
'SUM("number") "$aggr_number" '
'FROM "test" '
f"GROUP BY \"${field_alias}\" "
f"ORDER BY \"${field_alias}\" "
"LIMIT 200000",
str(queries[0]),
)
|
77918
|
import torch
import json
from os import PathLike
from typing import List, Tuple, Union, Optional
from allennlp.common.file_utils import cached_path
from allennlp.data import Vocabulary
from allennlp.data.tokenizers.tokenizer import Tokenizer
def _convert_word_to_ids_tensor(word, tokenizer, vocab, namespace, all_cases):
# function does NOT strip special tokens if tokenizer adds them
if all_cases:
words_list = [word.lower(), word.title(), word.upper()]
else:
words_list = [word]
ids = []
for w in words_list:
# if vocab is None, use tokenizer vocab (only works for Huggingface PreTrainedTokenizer)
if vocab:
tokens = tokenizer.tokenize(w)
ids.append(torch.tensor([vocab.get_token_index(t.text, namespace) for t in tokens]))
else:
ids.append(torch.tensor(tokenizer.tokenizer(w)["input_ids"]))
return ids
def load_words(
fname: Union[str, PathLike],
tokenizer: Tokenizer,
vocab: Optional[Vocabulary] = None,
namespace: str = "tokens",
all_cases: bool = True,
) -> List[torch.Tensor]:
"""
This function loads a list of words from a file,
tokenizes each word into subword tokens, and converts the
tokens into IDs.
# Parameters
fname : `Union[str, PathLike]`
Name of file containing list of words to load.
tokenizer : `Tokenizer`
Tokenizer to tokenize words in file.
vocab : `Vocabulary`, optional (default=`None`)
Vocabulary of tokenizer. If `None`, assumes tokenizer is of
type `PreTrainedTokenizer` and uses tokenizer's `vocab` attribute.
namespace : `str`
Namespace of vocab to use when tokenizing.
all_cases : `bool`, optional (default=`True`)
Whether to tokenize lower, title, and upper cases of each word.
# Returns
word_ids : `List[torch.Tensor]`
List of tensors containing the IDs of subword tokens for
each word in the file.
"""
word_ids = []
with open(cached_path(fname)) as f:
words = json.load(f)
for w in words:
word_ids.extend(_convert_word_to_ids_tensor(w, tokenizer, vocab, namespace, all_cases))
return word_ids
def load_word_pairs(
fname: Union[str, PathLike],
tokenizer: Tokenizer,
vocab: Optional[Vocabulary] = None,
namespace: str = "token",
all_cases: bool = True,
) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
"""
This function loads a list of pairs of words from a file,
tokenizes each word into subword tokens, and converts the
tokens into IDs.
# Parameters
fname : `Union[str, PathLike]`
Name of file containing list of pairs of words to load.
tokenizer : `Tokenizer`
Tokenizer to tokenize words in file.
vocab : `Vocabulary`, optional (default=`None`)
Vocabulary of tokenizer. If `None`, assumes tokenizer is of
type `PreTrainedTokenizer` and uses tokenizer's `vocab` attribute.
namespace : `str`
Namespace of vocab to use when tokenizing.
all_cases : `bool`, optional (default=`True`)
Whether to tokenize lower, title, and upper cases of each word.
# Returns
word_ids : `Tuple[List[torch.Tensor], List[torch.Tensor]]`
Pair of lists of tensors containing the IDs of subword tokens for
words in the file.
"""
word_ids1 = []
word_ids2 = []
with open(cached_path(fname)) as f:
words = json.load(f)
for w1, w2 in words:
word_ids1.extend(
_convert_word_to_ids_tensor(w1, tokenizer, vocab, namespace, all_cases)
)
word_ids2.extend(
_convert_word_to_ids_tensor(w2, tokenizer, vocab, namespace, all_cases)
)
return word_ids1, word_ids2
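# Hedged usage sketch (added; the file name and tokenizer choice are assumptions, not part of the
# original module):
#
#   from allennlp.data.tokenizers import PretrainedTransformerTokenizer
#   tokenizer = PretrainedTransformerTokenizer("bert-base-uncased")
#   # "pairs.json" would contain e.g. [["man", "woman"], ["boy", "girl"]]
#   ids_a, ids_b = load_word_pairs("pairs.json", tokenizer, vocab=None)
#   # ids_a[i] / ids_b[i] are 1-D tensors of subword IDs; with all_cases=True each input word
#   # contributes three entries (lower, title, upper case).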
|
77921
|
from util.fsm import StateMachine
if __name__ == "__main__":
machine = StateMachine('status', ["off", "fleft", "left", "bleft",
"fright", "right", "bright"], "left")
machine.create_trans("left", "fleft", "otherleft")
print(machine.process("otherleft"))
#>>> states = ["off", "fleft", "left", "bleft", "fright", "right", "bright"]
#>>> ops = ["buddy_icon_disabled", "buddy_icon_enabled"]
#>>> ops2 = ["enabled", "disabled", "off", "fleft", "left", "bleft", "fright", "right", "bright"]
#>>> ["status_" + op for op in ops2]
#['status_enabled', 'status_disabled', 'status_off', 'status_fleft', 'status_left', 'status_bleft', 'status_fright', 'status_right', 'status_bright']
#>>> ops3 = ["status_" + op for op in ops2]
#>>> for state in states:
#... for op in ops + ops3:
#... print "machine.create_transition('%s', '%s', None)" % (state, op)
import sys
sys.exit(0)
"""
Below is what I believe to be a complete state machine definition for
a status or service icon, neglecting the off states.
So far, my idea for the implementation is incomplete: it lacks
the interaction between state machines necessary for the whole thing
to work. One can see, however, that the number of needed transitions
is much smaller than the number of possible ones.
"""
machine = None
to_state = None
'buddy_icon_disabled'
'buddy_icon_left'
'buddy_icon_right'
states = ['fleft', 'left', 'bleft_l', 'bright_l', 'bleft_r', 'bright_r', 'right', 'fright']
#definition of simple transitions (someone else wants my spot)
#I'm in the far left, other bumps me
machine.create_trans('fleft', 'left', 'other_fleft')
#I'm in the left, other bumps me
machine.create_trans('left', 'fleft', 'other_left')
#buddy icon is on the left
#badge on left
machine.create_trans('bleft_l', 'bright_l', 'other_bleft_l')
#badge on right
machine.create_trans('bright_l', 'bleft_l', 'other_bright_l')
#buddy icon is on the right
#badge on left
machine.create_trans('bleft_r', 'bright_r', 'other_bleft_r')
#badge on right
machine.create_trans('bright_r', 'bleft_r', 'other_bright_r')
#I'm in the far right, other bumps me
machine.create_trans('fright', 'right', 'other_fright')
#I'm in the right, other bumps me
machine.create_trans('right', 'fright', 'other_right')
#definition of buddy icon translation
#badge on left
machine.create_trans('bleft_l', 'bleft_r', 'buddy_icon_right')
#badge on right
machine.create_trans('bright_l', 'bright_r', 'buddy_icon_right')
#badge on left
machine.create_trans('bleft_r', 'bleft_l', 'buddy_icon_left')
#badge on right
machine.create_trans('bright_r', 'bright_l', 'buddy_icon_left')
#these are the hard ones
#buddy icon disabled. where to go
#ok, the definition is easy; the trouble is that you have to declare them in the
#correct order. If you do, the state machines do the heavy lifting for you;
#otherwise, you get the wrong result
machine.create_trans('bleft_l', 'left', 'buddy_icon_disabled')
machine.create_trans('bright_l', 'left', 'buddy_icon_disabled')
machine.create_trans('bleft_r', 'right', 'buddy_icon_disabled')
machine.create_trans('bright_r', 'right', 'buddy_icon_disabled')
#example
states1 = ['off', 'fleft', 'left', 'bleft_l', 'bright_l',
'bleft_r', 'bright_r', 'right', 'fright']
states2 = ['off', 'left', 'right']
manager = StateManager()
status_machine = StateMachine("status", states1, "off")
service_machine = StateMachine("service", states1, "off")
buddy_icon_machine = StateMachine("buddy_icon", states2, "off")
manager.add_machine(status_machine)
manager.add_machine(service_machine)
manager.add_machine(buddy_icon_machine)
status_machine.create_trans('fleft', 'left', 'service_fleft')
status_machine.create_trans('left', 'fleft', 'service_left')
status_machine.create_trans('bleft_l', 'bright_l', 'service_bleft_l')
status_machine.create_trans('bright_l', 'bleft_l', 'service_bright_l')
status_machine.create_trans('bleft_r', 'bright_r', 'service_bleft_r')
status_machine.create_trans('bright_r', 'bleft_r', 'service_bright_r')
status_machine.create_trans('fright', 'right', 'service_fright')
status_machine.create_trans('right', 'fright', 'service_right')
status_machine.create_trans('bleft_l', 'bleft_r', 'buddy_icon_right')
status_machine.create_trans('bright_l', 'bright_r', 'buddy_icon_right')
status_machine.create_trans('bleft_r', 'bleft_l', 'buddy_icon_left')
status_machine.create_trans('bright_r', 'bright_l', 'buddy_icon_left')
status_machine.create_trans('bleft_l', 'left', 'buddy_icon_off')
status_machine.create_trans('bright_l', 'left', 'buddy_icon_off')
status_machine.create_trans('bleft_r', 'right', 'buddy_icon_off')
status_machine.create_trans('bright_r', 'right', 'buddy_icon_off')
service_machine.create_trans('fleft', 'left', 'status_fleft')
service_machine.create_trans('left', 'fleft', 'status_left')
service_machine.create_trans('bleft_l', 'bright_l', 'status_bleft_l')
service_machine.create_trans('bright_l', 'bleft_l', 'status_bright_l')
service_machine.create_trans('bleft_r', 'bright_r', 'status_bleft_r')
service_machine.create_trans('bright_r', 'bleft_r', 'status_bright_r')
service_machine.create_trans('fright', 'right', 'status_fright')
service_machine.create_trans('right', 'fright', 'status_right')
service_machine.create_trans('bleft_l', 'bleft_r', 'buddy_icon_right')
service_machine.create_trans('bright_l', 'bright_r', 'buddy_icon_right')
service_machine.create_trans('bleft_r', 'bleft_l', 'buddy_icon_left')
service_machine.create_trans('bright_r', 'bright_l', 'buddy_icon_left')
service_machine.create_trans('bleft_l', 'left', 'buddy_icon_off')
service_machine.create_trans('bright_l', 'left', 'buddy_icon_off')
service_machine.create_trans('bleft_r', 'right', 'buddy_icon_off')
service_machine.create_trans('bright_r', 'right', 'buddy_icon_off')
|
77945
|
import torch
import numpy as np
from onnx import numpy_helper
from thop.vision.basic_hooks import zero_ops
from .counter import counter_matmul, counter_zero_ops,\
counter_conv, counter_mul, counter_norm, counter_pow,\
counter_sqrt, counter_div, counter_softmax, counter_avgpool
def onnx_counter_matmul(diction, node):
input1 = node.input[0]
input2 = node.input[1]
input1_dim = diction[input1]
input2_dim = diction[input2]
out_size = np.append(input1_dim[0:-1], input2_dim[-1])
output_name = node.output[0]
macs = counter_matmul(input1_dim, out_size[-2:])
return macs, out_size, output_name
def onnx_counter_add(diction, node):
if np.array(diction[node.input[1]]).size >= np.array(diction[node.input[0]]).size:
out_size = diction[node.input[1]]
else:
out_size = diction[node.input[0]]
output_name = node.output[0]
macs = counter_zero_ops()
# if '140' in diction:
# print(diction['140'],output_name)
return macs, out_size, output_name
def onnx_counter_conv(diction, node):
    # bias, kernel size, output size
    # A third input means the convolution has a bias term.
    dim_bias = 1 if len(node.input) == 3 else 0
    dim_weight = diction[node.input[1]]
for attr in node.attribute:
# print(attr)
if(attr.name == 'kernel_shape'):
dim_kernel = attr.ints # kw,kh
if(attr.name == 'strides'):
dim_stride = attr.ints
if(attr.name == 'pads'):
dim_pad = attr.ints
if(attr.name == 'dilations'):
dim_dil = attr.ints
if(attr.name == 'group'):
group = attr.i
# print(dim_dil)
dim_input = diction[node.input[0]]
output_size = np.append(
dim_input[0:-np.array(dim_kernel).size-1], dim_weight[0])
hw = np.array(dim_input[-np.array(dim_kernel).size:])
for i in range(hw.size):
hw[i] = int((hw[i]+2*dim_pad[i]-dim_dil[i] *
(dim_kernel[i]-1)-1)/dim_stride[i]+1)
output_size = np.append(output_size, hw)
macs = counter_conv(dim_bias, np.prod(dim_kernel),
np.prod(output_size), dim_weight[1], group)
output_name = node.output[0]
# if '140' in diction:
# print("conv",diction['140'],output_name)
return macs, output_size, output_name
def onnx_counter_constant(diction, node):
# print("constant",node)
macs = counter_zero_ops()
output_name = node.output[0]
output_size = [1]
#print(macs, output_size, output_name)
return macs, output_size, output_name
def onnx_counter_mul(diction, node):
if np.array(diction[node.input[1]]).size >= np.array(diction[node.input[0]]).size:
input_size = diction[node.input[1]]
else:
input_size = diction[node.input[0]]
macs = counter_mul(np.prod(input_size))
    output_size = input_size  # the broadcast output follows the larger input shape
output_name = node.output[0]
return macs, output_size, output_name
def onnx_counter_bn(diction, node):
input_size = diction[node.input[0]]
macs = counter_norm(np.prod(input_size))
output_name = node.output[0]
output_size = input_size
return macs, output_size, output_name
def onnx_counter_relu(diction, node):
input_size = diction[node.input[0]]
macs = counter_zero_ops()
output_name = node.output[0]
output_size = input_size
#print(macs, output_size, output_name)
# if '140' in diction:
# print("relu",diction['140'],output_name)
return macs, output_size, output_name
def onnx_counter_reducemean(diction, node):
keep_dim = 0
for attr in node.attribute:
if('axes' in attr.name):
dim_axis = np.array(attr.ints)
elif('keepdims' in attr.name):
keep_dim = attr.i
input_size = diction[node.input[0]]
macs = counter_zero_ops()
output_name = node.output[0]
if (keep_dim == 1):
output_size = input_size
else:
output_size = np.delete(input_size, dim_axis)
#output_size = input_size
return macs, output_size, output_name
def onnx_counter_sub(diction, node):
input_size = diction[node.input[0]]
macs = counter_zero_ops()
output_name = node.output[0]
output_size = input_size
return macs, output_size, output_name
def onnx_counter_pow(diction, node):
if np.array(diction[node.input[1]]).size >= np.array(diction[node.input[0]]).size:
input_size = diction[node.input[1]]
else:
input_size = diction[node.input[0]]
macs = counter_pow(np.prod(input_size))
output_name = node.output[0]
output_size = input_size
return macs, output_size, output_name
def onnx_counter_sqrt(diction, node):
input_size = diction[node.input[0]]
macs = counter_sqrt(np.prod(input_size))
output_name = node.output[0]
output_size = input_size
return macs, output_size, output_name
def onnx_counter_div(diction, node):
if np.array(diction[node.input[1]]).size >= np.array(diction[node.input[0]]).size:
input_size = diction[node.input[1]]
else:
input_size = diction[node.input[0]]
macs = counter_div(np.prod(input_size))
output_name = node.output[0]
output_size = input_size
return macs, output_size, output_name
def onnx_counter_instance(diction, node):
input_size = diction[node.input[0]]
macs = counter_norm(np.prod(input_size))
output_name = node.output[0]
output_size = input_size
return macs, output_size, output_name
def onnx_counter_softmax(diction, node):
input_size = diction[node.input[0]]
dim = node.attribute[0].i
nfeatures = input_size[dim]
batch_size = np.prod(input_size) / nfeatures
macs = counter_softmax(nfeatures, batch_size)
output_name = node.output[0]
output_size = input_size
return macs, output_size, output_name
def onnx_counter_pad(diction, node):
# # TODO add constant name and output real vector
# if
# if (np.array(diction[node.input[1]]).size >= np.array(diction[node.input[0]]).size):
# input_size = diction[node.input[1]]
# else:
# input_size = diction[node.input[0]]
input_size = diction[node.input[0]]
macs = counter_zero_ops()
output_name = node.output[0]
output_size = input_size
return macs, output_size, output_name
def onnx_counter_averagepool(diction, node):
# TODO add support of ceil_mode and floor
macs = counter_avgpool(np.prod(diction[node.input[0]]))
output_name = node.output[0]
dim_pad = None
for attr in node.attribute:
# print(attr)
if(attr.name == 'kernel_shape'):
dim_kernel = attr.ints # kw,kh
elif(attr.name == 'strides'):
dim_stride = attr.ints
elif(attr.name == 'pads'):
dim_pad = attr.ints
elif(attr.name == 'dilations'):
dim_dil = attr.ints
# print(dim_dil)
dim_input = diction[node.input[0]]
hw = dim_input[-np.array(dim_kernel).size:]
if dim_pad is not None:
for i in range(hw.size):
hw[i] = int((hw[i]+2*dim_pad[i]-dim_kernel[i])/dim_stride[i]+1)
output_size = np.append(dim_input[0:-np.array(dim_kernel).size], hw)
else:
for i in range(hw.size):
hw[i] = int((hw[i]-dim_kernel[i])/dim_stride[i]+1)
output_size = np.append(dim_input[0:-np.array(dim_kernel).size], hw)
#print(macs, output_size, output_name)
return macs, output_size, output_name
def onnx_counter_flatten(diction, node):
# print(node)
macs = counter_zero_ops()
output_name = node.output[0]
axis = node.attribute[0].i
input_size = diction[node.input[0]]
output_size = np.append(input_size[axis-1], np.prod(input_size[axis:]))
# print("flatten",output_size)
return macs, output_size, output_name
def onnx_counter_gemm(diction, node):
# print(node)
# Compute Y = alpha * A' * B' + beta * C
input_size = diction[node.input[0]]
dim_weight = diction[node.input[1]]
# print(input_size,dim_weight)
macs = np.prod(input_size) * dim_weight[1] + dim_weight[0]
output_size = np.append(input_size[0:-1], dim_weight[0])
output_name = node.output[0]
return macs, output_size, output_name
def onnx_counter_maxpool(diction, node):
# TODO add support of ceil_mode and floor
# print(node)
macs = counter_zero_ops()
output_name = node.output[0]
dim_pad = None
for attr in node.attribute:
# print(attr)
if(attr.name == 'kernel_shape'):
dim_kernel = attr.ints # kw,kh
elif(attr.name == 'strides'):
dim_stride = attr.ints
elif(attr.name == 'pads'):
dim_pad = attr.ints
elif(attr.name == 'dilations'):
dim_dil = attr.ints
# print(dim_dil)
dim_input = diction[node.input[0]]
hw = dim_input[-np.array(dim_kernel).size:]
if dim_pad is not None:
for i in range(hw.size):
hw[i] = int((hw[i]+2*dim_pad[i]-dim_kernel[i])/dim_stride[i]+1)
output_size = np.append(dim_input[0:-np.array(dim_kernel).size], hw)
else:
for i in range(hw.size):
hw[i] = int((hw[i]-dim_kernel[i])/dim_stride[i]+1)
output_size = np.append(dim_input[0:-np.array(dim_kernel).size], hw)
#print(macs, output_size, output_name)
return macs, output_size, output_name
def onnx_counter_globalaveragepool(diction, node):
macs = counter_zero_ops()
output_name = node.output[0]
input_size = diction[node.input[0]]
output_size = input_size
return macs, output_size, output_name
def onnx_counter_concat(diction, node):
# print(node)
# print(diction[node.input[0]])
axis = node.attribute[0].i
input_size = diction[node.input[0]]
    # The concatenated dimension is the sum of that dimension over all inputs.
    dim_concat = 0
    for i in node.input:
        dim_concat += diction[i][axis]
    output_size = input_size
    output_size[axis] = dim_concat
output_name = node.output[0]
macs = counter_zero_ops()
return macs, output_size, output_name
def onnx_counter_clip(diction, node):
macs = counter_zero_ops()
output_name = node.output[0]
input_size = diction[node.input[0]]
output_size = input_size
return macs, output_size, output_name
onnx_operators = {
'MatMul': onnx_counter_matmul,
'Add': onnx_counter_add,
'Conv': onnx_counter_conv,
'Mul': onnx_counter_mul,
'Constant': onnx_counter_constant,
'BatchNormalization': onnx_counter_bn,
'Relu': onnx_counter_relu,
'ReduceMean': onnx_counter_reducemean,
'Sub': onnx_counter_sub,
'Pow': onnx_counter_pow,
'Sqrt': onnx_counter_sqrt,
'Div': onnx_counter_div,
'InstanceNormalization': onnx_counter_instance,
'Softmax': onnx_counter_softmax,
'Pad': onnx_counter_pad,
'AveragePool': onnx_counter_averagepool,
'MaxPool': onnx_counter_maxpool,
'Flatten': onnx_counter_flatten,
'Gemm': onnx_counter_gemm,
'GlobalAveragePool': onnx_counter_globalaveragepool,
'Concat': onnx_counter_concat,
'Clip': onnx_counter_clip,
None: None,
}
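# A minimal usage sketch (not part of this module): the dispatch table above maps
# ONNX op types to counter functions that all share the signature
# fn(diction, node) -> (macs, output_size, output_name), where `diction` maps
# tensor names to their shapes. A hypothetical driver loop could look like this,
# assuming `model` is an onnx.ModelProto whose input and initializer shapes have
# already been recorded in `diction`:
#
#     total_macs = 0
#     for node in model.graph.node:
#         counter_fn = onnx_operators.get(node.op_type)
#         if counter_fn is None:
#             raise KeyError("Unsupported op type: %s" % node.op_type)
#         macs, out_size, out_name = counter_fn(diction, node)
#         diction[out_name] = out_size   # make the output shape visible to later nodes
#         total_macs += macs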
|
77960
|
from torch import nn
class FeedForwardNet(nn.Module):
def __init__(self, inp_dim, hidden_dim, outp_dim, n_layers, nonlinearity, dropout=0):
super().__init__()
layers = []
d_in = inp_dim
for i in range(n_layers):
module = nn.Linear(d_in, hidden_dim)
self.reset_parameters(module)
layers.append(module)
if dropout > 0:
layers.append(nn.Dropout(dropout))
if nonlinearity == 'relu':
nonlin = nn.ReLU(inplace=True)
elif nonlinearity == 'tanh':
nonlin = nn.Tanh()
elif nonlinearity == 'elu':
nonlin = nn.ELU(inplace=True)
elif nonlinearity != 'none':
raise NotImplementedError('only relu, tanh, and elu nonlinearities have been implemented')
if nonlinearity != 'none':
layers.append(nonlin)
d_in = hidden_dim
module = nn.Linear(d_in, outp_dim)
self.reset_parameters(module)
layers.append(module)
self.network = nn.Sequential(*layers)
def reset_parameters(self, module):
init_range = 0.07
module.weight.data.uniform_(-init_range, init_range)
module.bias.data.zero_()
def forward(self, x):
return self.network(x)
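# A minimal usage sketch (hypothetical dimensions), assuming `import torch` at the top:
#
#     net = FeedForwardNet(inp_dim=16, hidden_dim=64, outp_dim=1,
#                          n_layers=2, nonlinearity='relu', dropout=0.1)
#     y = net(torch.randn(8, 16))   # -> shape (8, 1)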
|
77964
|
from toga_winforms.libs import WinForms
from .base import Widget
class Tree(Widget):
def create(self):
self.native = WinForms.TreeView()
def row_data(self, item):
self.interface.factory.not_implemented('Tree.row_data()')
def on_select(self, selection):
self.interface.factory.not_implemented('Tree.on_select()')
def change_source(self, source):
self.interface.factory.not_implemented('Tree.change_source()')
def insert(self, parent, index, item):
self.interface.factory.not_implemented('Tree.insert()')
def change(self, item):
self.interface.factory.not_implemented('Tree.change()')
def remove(self, parent, index, item):
self.interface.factory.not_implemented('Tree.remove()')
def clear(self):
self.interface.factory.not_implemented('Tree.clear()')
def get_selection(self):
self.interface.factory.not_implemented('Tree.get_selection()')
def set_on_select(self, handler):
self.interface.factory.not_implemented('Tree.set_on_select()')
def set_on_double_click(self, handler):
self.interface.factory.not_implemented('Table.set_on_double_click()')
def scroll_to_node(self, node):
self.interface.factory.not_implemented('Tree.scroll_to_node()')
|
78001
|
import logging
from ...util import none_or
from .collection import Collection
logger = logging.getLogger("mw.database.collections.pages")
class Pages(Collection):
def get(self, page_id=None, namespace_title=None, rev_id=None):
"""
Gets a single page based on a legitimate identifier of the page. Note
that namespace_title expects a tuple of namespace ID and title.
:Parameters:
page_id : int
Page ID
namespace_title : ( int, str )
the page's namespace ID and title
rev_id : int
a revision ID included in the page's history
:Returns:
            the first matching row, or None if no page was found
"""
page_id = none_or(page_id, int)
namespace_title = none_or(namespace_title, tuple)
rev_id = none_or(rev_id, int)
query = """
SELECT page.*
FROM page
"""
values = []
if page_id is not None:
query += """
WHERE page_id = %s
"""
values.append(page_id)
        elif namespace_title is not None:
namespace, title = namespace_title
query += " WHERE page_namespace = %s and page_title = %s "
values.extend([int(namespace), str(title)])
elif rev_id is not None:
query += """
WHERE page_id = (SELECT rev_page FROM revision WHERE rev_id = %s)
"""
values.append(rev_id)
else:
raise TypeError("Must specify a page identifier.")
cursor = self.db.shared_connection.cursor()
cursor.execute(
query,
values
)
for row in cursor:
return row
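# A minimal usage sketch (hypothetical `pages` collection wired to a MediaWiki
# database); exactly one identifier is expected per call:
#
#     row = pages.get(page_id=12345)
#     row = pages.get(namespace_title=(0, "Main_Page"))
#     row = pages.get(rev_id=987654)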
|
78033
|
from rxbp.flowables.controlledzipflowable import ControlledZipFlowable
from rxbp.indexed.selectors.bases.numericalbase import NumericalBase
from rxbp.subscriber import Subscriber
from rxbp.testing.testcasebase import TestCaseBase
from rxbp.testing.testflowable import TestFlowable
from rxbp.testing.tobserver import TObserver
from rxbp.testing.tscheduler import TScheduler
class TestControlledZipFlowable(TestCaseBase):
"""
"""
def setUp(self):
self.scheduler = TScheduler()
self.sink = TObserver()
def test_selector_dictionary(self):
b1 = NumericalBase(1)
b2 = NumericalBase(2)
b3 = NumericalBase(3)
b4 = NumericalBase(4)
b5 = NumericalBase(5)
s1 = TestFlowable(base=b1, selectors={b3: None, b5: None})
s2 = TestFlowable(base=b2, selectors={b4: None})
flowable = ControlledZipFlowable(
left=s1,
right=s2,
request_left=lambda l, r: True,
request_right=lambda l, r: True,
match_func=lambda l, r: True,
)
subscription = flowable.unsafe_subscribe(Subscriber(
scheduler=self.scheduler,
subscribe_scheduler=self.scheduler,
))
assert b1 in subscription.info.selectors
assert b2 in subscription.info.selectors
assert b3 in subscription.info.selectors
assert b4 in subscription.info.selectors
assert b5 in subscription.info.selectors
|
78063
|
from dataclasses import dataclass
from apischema.json_schema import deserialization_schema
@dataclass
class Bar:
baz: str
@dataclass
class Foo:
bar1: Bar
bar2: Bar
assert deserialization_schema(Foo, all_refs=False) == {
"$schema": "http://json-schema.org/draft/2020-12/schema#",
"$defs": {
"Bar": {
"additionalProperties": False,
"properties": {"baz": {"type": "string"}},
"required": ["baz"],
"type": "object",
}
},
"additionalProperties": False,
"properties": {"bar1": {"$ref": "#/$defs/Bar"}, "bar2": {"$ref": "#/$defs/Bar"}},
"required": ["bar1", "bar2"],
"type": "object",
}
assert deserialization_schema(Foo, all_refs=True) == {
"$schema": "http://json-schema.org/draft/2020-12/schema#",
"$defs": {
"Bar": {
"additionalProperties": False,
"properties": {"baz": {"type": "string"}},
"required": ["baz"],
"type": "object",
},
"Foo": {
"additionalProperties": False,
"properties": {
"bar1": {"$ref": "#/$defs/Bar"},
"bar2": {"$ref": "#/$defs/Bar"},
},
"required": ["bar1", "bar2"],
"type": "object",
},
},
"$ref": "#/$defs/Foo",
}
|
78065
|
import grpc
import time
from bettermq_pb2 import *
from bettermq_pb2_grpc import *
host = '127.0.0.1:8404'
with grpc.insecure_channel(host) as channel:
client = PriorityQueueStub(channel)
i = 1
while True:
req = DequeueRequest(
topic = "root",
count = 1,
)
i+=1
rsps = client.Dequeue(req)
if len(rsps.items) == 0:
print("nothing...")
time.sleep(5)
print(rsps)
|
78070
|
import numpy as np
def prefix_search(mat: np.ndarray, chars: str) -> str:
"""Prefix search decoding.
See dissertation of Graves, p63-66.
Args:
mat: Output of neural network of shape TxC.
chars: The set of characters the neural network can recognize, excluding the CTC-blank.
Returns:
The decoded text.
"""
blank_idx = len(chars)
max_T, max_C = mat.shape
# g_n and g_b: gamma in paper
g_n = []
g_b = []
# p(y|x) and p(y...|x), where y is a prefix (not p as in paper to avoid confusion with probability)
prob = {}
prob_ext = {}
# Init: 1-6
for t in range(max_T):
g_n.append({'': 0})
last = g_b[t - 1][''] if t > 0 else 1
g_b.append({'': last * mat[t, blank_idx]})
# init for empty prefix
prob[''] = g_b[max_T - 1]['']
prob_ext[''] = 1 - prob['']
l_star = y_star = ''
Y = {''}
# Algorithm: 8-31
while prob_ext[y_star] > prob[l_star]:
prob_remaining = prob_ext[y_star]
# for all chars
for k in range(max_C - 1):
y = y_star + chars[k]
g_n[0][y] = mat[0, k] if len(y_star) == 0 else 0
g_b[0][y] = 0
prefix_prob = g_n[0][y]
# for all time steps
for t in range(1, max_T):
new_label_prob = g_b[t - 1][y_star] + (
0 if y_star != '' and y_star[-1] == chars[k] else g_n[t - 1][y_star])
g_n[t][y] = mat[t, k] * (new_label_prob + g_n[t - 1][y])
g_b[t][y] = mat[t, blank_idx] * (g_b[t - 1][y] + g_n[t - 1][y])
prefix_prob += mat[t, k] * new_label_prob
prob[y] = g_n[max_T - 1][y] + g_b[max_T - 1][y]
prob_ext[y] = prefix_prob - prob[y]
prob_remaining -= prob_ext[y]
if prob[y] > prob[l_star]:
l_star = y
if prob_ext[y] > prob[l_star]:
Y.add(y)
if prob_remaining <= prob[l_star]:
break
# 30
Y.remove(y_star)
# 31
best_y = None
best_prob_ext = 0
for y in Y:
if prob_ext[y] > best_prob_ext:
best_prob_ext = prob_ext[y]
best_y = y
y_star = best_y
# terminate if no more prefix exists
if best_y is None:
break
# Termination: 33-34
return l_star
def prefix_search_heuristic_split(mat: np.ndarray, chars: str) -> str:
"""Prefix search decoding with heuristic to speed up the algorithm.
Speed up prefix computation by splitting sequence into subsequences as described by Graves (p66).
Args:
mat: Output of neural network of shape TxC.
chars: The set of characters the neural network can recognize, excluding the CTC-blank.
Returns:
The decoded text.
"""
blank_idx = len(chars)
max_T, _ = mat.shape
# split sequence into 3 subsequences, splitting points should be roughly placed at 1/3 and 2/3
split_targets = [int(max_T * 1 / 3), int(max_T * 2 / 3)]
best = [{'target': s, 'bestDist': max_T, 'bestIdx': s} for s in split_targets]
# find good splitting points (blanks above threshold)
thres = 0.9
for t in range(max_T):
for b in best:
if mat[t, blank_idx] > thres and abs(t - b['target']) < b['bestDist']:
b['bestDist'] = abs(t - b['target'])
b['bestIdx'] = t
break
# splitting points plus begin and end of sequence
ranges = [0] + [b['bestIdx'] for b in best] + [max_T]
# do prefix search for each subsequence and concatenate results
res = ''
for i in range(len(ranges) - 1):
beg = ranges[i]
end = ranges[i + 1]
res += prefix_search(mat[beg: end, :], chars)
return res
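# A minimal usage sketch (toy probabilities, not real network output): two
# characters 'ab' plus the CTC-blank in the last column, T=4 time steps, each
# row a probability distribution over {a, b, blank}:
#
#     mat = np.array([[0.8, 0.1, 0.1],
#                     [0.7, 0.1, 0.2],
#                     [0.1, 0.1, 0.8],
#                     [0.1, 0.8, 0.1]])
#     print(prefix_search(mat, 'ab'))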
|
78096
|
from struct import Struct
def read_records(format, f):
record_struct = Struct(format)
chunks = iter(lambda: f.read(record_struct.size), b'')
return (record_struct.unpack(chunk) for chunk in chunks)
# Example
if __name__ == '__main__':
with open('data.b','rb') as f:
for rec in read_records('<idd', f):
# Process rec
print(rec)
|
78107
|
from typing import Dict, List
from dataclasses import dataclass, field
import tvm
from tvm import relay
import pickle
import random
import numpy as np
import random
from copy import deepcopy
from .tvmpass import PassDependenceGraph, PassNode
# TODO: Add parameters.
# TODO: Add more passes.
_RELAY_FUNCTION_HARD_PASSES_ = [ # Note these are types.
relay.transform.RemoveUnusedFunctions,
relay.transform.Inline,
relay.transform.PartitionGraph,
relay.transform.ToGraphNormalForm,
relay.transform.SimplifyInference,
relay.transform.FoldConstant,
relay.transform.AnnotateSpans,
relay.transform.DefuseOps,
relay.transform.FuseOps,
relay.transform.SimplifyExpr,
# relay.transform.ToBasicBlockNormalForm,
relay.transform.BatchingOps,
relay.transform.AlterOpLayout,
relay.transform.FoldScaleAxis,
relay.transform.CanonicalizeOps,
relay.transform.CanonicalizeCast,
relay.transform.DeadCodeElimination,
relay.transform.EliminateCommonSubexpr,
relay.transform.CombineParallelConv2D,
relay.transform.CombineParallelDense,
relay.transform.CombineParallelBatchMatmul,
relay.transform.FastMath,
relay.transform.DynamicToStatic,
relay.transform.FoldExplicitPadding,
]
_RANDOM_WALK_MAP_ = np.ones((len(_RELAY_FUNCTION_HARD_PASSES_), len(_RELAY_FUNCTION_HARD_PASSES_)))
_RANDOM_WALK_MAP_[_RELAY_FUNCTION_HARD_PASSES_.index(relay.transform.AnnotateSpans)][_RELAY_FUNCTION_HARD_PASSES_.index(relay.transform.FuseOps)] = 0
graph = PassDependenceGraph(tvm.target.Target('llvm'))
_ALL_DIR_PASS_NODES_ = list(graph.tir_pass_nodes.values())
@dataclass
class CompileConfig:
target :tvm.target.Target = None
relay_pass_types :List[relay.transform.FunctionPass] = None # actually, there're some module passes...
tir_pass_nodes :List[PassNode] = None
def mutate(self):
# TODO: Think about better mutation strategies.
# Target
self.target = random.choice(self._target_space())
# Passes
n_pass = random.randint(1, len(_RELAY_FUNCTION_HARD_PASSES_) - 1)
self.relay_pass_types = []
pidx = random.randint(1, len(_RELAY_FUNCTION_HARD_PASSES_) - 1)
for _ in range(n_pass):
self.relay_pass_types.append(_RELAY_FUNCTION_HARD_PASSES_[pidx])
candidates_idx = _RANDOM_WALK_MAP_[pidx].nonzero()[0]
if len(candidates_idx) == 0:
break
pidx = candidates_idx[random.randint(1, len(candidates_idx) - 1)]
self.tir_pass_nodes = graph.random_tir_passes(n_pass)
    @staticmethod
    def hard_relay_passes() -> List[relay.transform.FunctionPass]:
        """Passes that do not rely on (significant) approximation."""
        return _RELAY_FUNCTION_HARD_PASSES_
def get_device(self):
if self.target.export()['kind'] == 'cuda':
return tvm.cuda()
if self.target.export()['kind'] == 'rocm':
return tvm.rocm()
return tvm.cpu()
def check(self):
assert self.target != None
assert self.relay_pass_types != None
@staticmethod
def _target_space():
# To get "-mcpu=?", do "cat /proc/cpuinfo". Then search the `model name` on ark.intel.com
        # There can be more targets... Let's forget it for a while.
# tvm.target.Target('c') is too weak...
_targets = [tvm.target.Target('llvm')]
# TODO: Allow devices.
# if tvm.cuda().exist:
# _targets.append(tvm.target.cuda())
# if cudnn.exists():
# _targets.append(tvm.target.Target('cuda -libs=cudnn'))
# if tvm.rocm().exist:
# _targets.append(tvm.target.rocm())
return _targets
# When using CHI distribution on [0, +inf)
_SAMPLE_CHI_DIST_DF_ = 3
_MAX_SAMPLE_SIZE_ = 64
_MAX_TEST_BATCH_ = _MAX_SAMPLE_SIZE_
_MIN_TEST_HW_ = 128
_MAX_TEST_HW_ = 1024
_HW_NORMAL_DIST_MU_ = (_MIN_TEST_HW_ + _MAX_TEST_HW_ * 3 // 5) // 2
# 3 sigma is hard... we make it 4...
_HW_NORMAL_DIST_SIGMA_ = _HW_NORMAL_DIST_MU_ // 4
@dataclass
class ExecutionConfig:
module :tvm.IRModule
params :Dict
n_inp_node :int
exe_mode :str = None
inputs :List[List[tvm.nd.array]] = field(default_factory=list)
oracle :List[List[tvm.nd.array]] = None # None if not required.
oracle_name :str = "NOT_SET"
def from_keras(self, model, shape=None, layout="NCHW"):
self.module, self.params = relay.frontend.from_keras(model, shape, layout)
@staticmethod
def exe_mode_space(dynamic_shape=False):
if dynamic_shape:
return ['vm', 'debug']
else:
return ['vm', 'graph', 'debug']
def check(self):
assert isinstance(self.module, tvm.IRModule)
assert self.params is not None
assert self.n_inp_node > 0
assert self.exe_mode != None
assert self.inputs
def mutate(self):
# TODO: Think about better mutation strategies.
# Create some inputs...
input_shapes = self.module['main'].checked_type.arg_types[:self.n_inp_node]
dynamic_batch_input_id = []
dynamic_input_ids = []
for i, s in enumerate(input_shapes):
if relay.ty.is_dynamic(s):
dynamic_input_ids.append(i)
if isinstance(s.shape[0], tvm.tir.Any):
dynamic_batch_input_id.append(i)
dy_batch_size_list = [] # if we support dynamic batch.
n_sample = 1 # if len(dynamic_input_ids) == 0
# else: np.random.chisquare
# We use chisquare dist which give more probability on small samples (faster).
# See: https://en.wikipedia.org/wiki/Chi-square_distribution
# Normal dist: \mu and \sigma
# Chi dist: \mu, \sigma, v
if len(dynamic_input_ids) != 0:
n_sample = max(1, int(np.random.chisquare(3)))
n_sample = min(n_sample, _MAX_SAMPLE_SIZE_)
if len(dynamic_batch_input_id) != 0:
start = 0
for _ in range(n_sample):
start += int(np.random.chisquare(_SAMPLE_CHI_DIST_DF_))
if start <= _MAX_TEST_BATCH_:
dy_batch_size_list.append(start)
else:
dynamic_input_ids.append(1)
# From small to big. Crash in small batch is fast path.
dynamic_input_ids.sort()
# We assume there's a batch dim
        # TODO: Make it more general...
def _concretize_non_batch_dim(shape :relay.TensorType):
concrete_shape = []
for idx, x in enumerate(shape.shape):
if isinstance(x, tvm.tir.Any):
if idx == 0:
concrete_shape.append(tvm.tir.Any())
else:
                        dim = int(np.random.normal(_HW_NORMAL_DIST_MU_, _HW_NORMAL_DIST_SIGMA_))  # sample around the target H/W per the mu/sigma constants above
dim = min(dim, _MAX_TEST_HW_)
dim = max(dim, _MIN_TEST_HW_)
concrete_shape.append(dim)
else:
concrete_shape.append(int(x))
return relay.TensorType(shape=concrete_shape, dtype=shape.dtype)
# clear inputs
self.inputs = []
for i in range(n_sample):
this_input = []
for shape in input_shapes:
shape_type = _concretize_non_batch_dim(shape)
shape_ = list(shape_type.shape)
dtype_ = shape_type.dtype
if relay.ty.is_dynamic(shape_type):
# Still dynamic means batch dim is dynamic
shape_[0] = dy_batch_size_list[i]
# nd.array empty is dangerous! (causing inf)
shape_ = [int(x) for x in shape_]
data = np.zeros(shape=shape_, dtype=dtype_)
this_input.append(tvm.nd.array(data))
self.inputs.append(this_input)
self.exe_mode = 'graph' # TODO: Test more runtimes.
# random.choice(self.exe_mode_space(len(dynamic_input_ids) != 0))
    def __deepcopy__(self, memo):
        module = tvm.parser.parse(self.module.astext())
        params = {k: tvm.nd.array(v.numpy()) for k, v in self.params.items()}
        n_inp_node = self.n_inp_node
        exe_mode = deepcopy(self.exe_mode, memo)
        inputs = [[tvm.nd.array(i.numpy()) for i in inp] for inp in self.inputs]
        oracle = None if self.oracle is None else [[tvm.nd.array(i.numpy()) for i in inp] for inp in self.oracle]
        oracle_name = deepcopy(self.oracle_name, memo)
return ExecutionConfig(
module, params, n_inp_node, exe_mode, inputs, oracle, oracle_name
)
@dataclass
class Context:
"""Top-level configuration of fuzzer.
"""
runtime :ExecutionConfig
compile :CompileConfig
def dump(self, path): # Fix this ...
to_store_params = {}
for k, v in self.runtime.params.items():
to_store_params[k] = v.numpy()
with open(path, 'wb') as f:
runtime_conf = {
'module': self.runtime.module.astext(),
'params': to_store_params,
'n_inp_node': self.runtime.n_inp_node,
'exe_mode': self.runtime.exe_mode,
'inputs': [[x.numpy() for x in inp] for inp in self.runtime.inputs],
'oracle': self.runtime.oracle,
'oracle_name': self.runtime.oracle_name
}
compile_conf = {
'target': self.compile.target,
'relay_pass_types': self.compile.relay_pass_types,
'tir_pass_nodes': graph.export_name(self.compile.tir_pass_nodes)
}
pickle.dump({
'runtime': runtime_conf,
'compile': compile_conf
}, f, protocol=pickle.HIGHEST_PROTOCOL)
def load(self, path):
with open(path, 'rb') as f:
data = pickle.load(f)
self.compile.target = data['compile']['target']
self.compile.relay_pass_types = data['compile']['relay_pass_types']
self.compile.tir_pass_nodes = graph.recover(data['compile']['tir_pass_nodes'])
for k, v in data['runtime'].items():
if k == 'module':
self.runtime.module = tvm.parser.fromtext(v)
elif k == 'params':
self.runtime.params = {}
for k_, v_ in v.items():
self.runtime.params[k_] = tvm.nd.array(v_)
elif k == 'inputs':
                    self.runtime.inputs = [[tvm.nd.array(x) for x in inp] for inp in v]
else:
setattr(self.runtime, k, v)
def mutate(self):
self.runtime.mutate()
self.compile.mutate()
def check(self):
self.runtime.check()
self.compile.check()
|
78113
|
import json
import os
import unittest
import shutil
from satstac import __version__, Catalog, STACError, Item
testpath = os.path.dirname(__file__)
class Test(unittest.TestCase):
path = os.path.join(testpath, 'test-catalog')
@classmethod
def tearDownClass(cls):
""" Remove test files """
if os.path.exists(cls.path):
shutil.rmtree(cls.path)
@classmethod
def get_catalog(cls):
""" Open existing test catalog """
return Catalog.open(os.path.join(testpath, 'catalog/catalog.json'))
@classmethod
def create_catalog(cls, name):
path = os.path.join(cls.path, name)
return Catalog.create(path)
def test_init(self):
with open(os.path.join(testpath, 'catalog/catalog.json')) as f:
data = json.loads(f.read())
cat = Catalog(data)
assert(cat.id == 'stac-catalog')
def test_open(self):
""" Initialize Catalog with a file """
cat = self.get_catalog()
assert(len(cat._data.keys()) == 4)
assert(cat.id == 'stac-catalog')
assert(len(cat.links())==3)
def test_properties(self):
cat = self.get_catalog()
assert(cat.stac_version == '1.0.0-beta.1')
assert(cat.description == 'An example STAC catalog')
def test_create(self):
""" Create new catalog file """
cat = Catalog.create()
assert(cat.id == 'stac-catalog')
def test_create_with_keywords(self):
path = os.path.join(testpath, 'test-catalog', 'create_with_keywords')
desc = 'this is a catalog'
cat = Catalog.create(path, description=desc)
assert(cat.description == desc)
def test_links(self):
root = self.get_catalog()
child = [c for c in root.children()][0]
assert(child.parent().id == root.id)
def test_get_catalogs(self):
catalogs = [i for i in self.get_catalog().catalogs()]
assert(len(catalogs) == 4)
def test_get_collections(self):
collections = [i for i in self.get_catalog().collections()]
assert(len(collections) == 2)
assert(collections[0].id in ['landsat-8-l1', 'sentinel-s2-l1c'])
assert(collections[1].id in ['landsat-8-l1', 'sentinel-s2-l1c'])
def test_get_items(self):
items = [i for i in self.get_catalog().items()]
assert(len(items) == 2)
def test_add_catalog(self):
cat = Catalog.create(root='http://my.cat').save(os.path.join(self.path, 'catalog.json'))
col = Catalog.open(os.path.join(testpath, 'catalog/eo/landsat-8-l1/catalog.json'))
cat.add_catalog(col)
child = [c for c in cat.children()][0]
assert(child.id == col.id)
def test_add_catalog_without_saving(self):
cat = Catalog.create()
with self.assertRaises(STACError):
cat.add_catalog({})
|
78172
|
load("@bazel_skylib//lib:dicts.bzl", _dicts = "dicts")
load(
"//rules/scala_proto:private/core.bzl",
_scala_proto_library_implementation = "scala_proto_library_implementation",
_scala_proto_library_private_attributes = "scala_proto_library_private_attributes",
)
scala_proto_library = rule(
attrs = _dicts.add(
_scala_proto_library_private_attributes,
{
"deps": attr.label_list(
doc = "The proto_library targets you wish to generate Scala from",
providers = [ProtoInfo],
),
"_zipper": attr.label(cfg = "host", default = "@bazel_tools//tools/zip:zipper", executable = True),
},
),
doc = """
Generates Scala code from proto sources. The output is a `.srcjar` that can be passed into other rules for compilation.
See example use in [/tests/proto/BUILD](/tests/proto/BUILD)
""",
toolchains = [
"@rules_scala_annex//rules/scala_proto:compiler_toolchain_type",
],
outputs = {
"srcjar": "%{name}.srcjar",
},
implementation = _scala_proto_library_implementation,
)
def _scala_proto_toolchain_implementation(ctx):
return [platform_common.ToolchainInfo(
compiler = ctx.attr.compiler,
compiler_supports_workers = ctx.attr.compiler_supports_workers,
)]
scala_proto_toolchain = rule(
attrs = {
"compiler": attr.label(
doc = "The compiler to use to generate Scala form proto sources",
allow_files = True,
executable = True,
cfg = "host",
),
"compiler_supports_workers": attr.bool(default = False),
},
doc = """
Specifies a toolchain of the `@rules_scala_annex//rules/scala_proto:compiler_toolchain_type` toolchain type.
This rule should be used with an accompanying `toolchain` that binds it and specifies constraints
(See the official documentation for more info on [Bazel Toolchains](https://docs.bazel.build/versions/master/toolchains.html))
For example:
```python
scala_proto_toolchain(
name = "scalapb_toolchain_example",
compiler = ":worker",
compiler_supports_workers = True,
visibility = ["//visibility:public"],
)
toolchain(
name = "scalapb_toolchain_example_linux",
toolchain = ":scalapb_toolchain_example",
toolchain_type = "@rules_scala_annex//rules/scala_proto:compiler_toolchain_type",
exec_compatible_with = [
"@bazel_tools//platforms:linux",
"@bazel_tools//platforms:x86_64",
],
target_compatible_with = [
"@bazel_tools//platforms:linux",
"@bazel_tools//platforms:x86_64",
],
visibility = ["//visibility:public"],
)
```
""",
implementation = _scala_proto_toolchain_implementation,
)
|
78194
|
import math
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
"""
Turtle drawings
Once the functions reset(), turn(), turnTo() and forw() are defined, it is possible
to program a path. In essence this is very similar to using polar coordinates relative
to the last set point, meaning you define the angle and the length over which a line
should be drawn. First a complete example is given containing the full source; after
that we will only focus on the display function, since the same primitives are used.
http://www.de-brauwer.be/wiki/wikka.php?wakka=PyOpenGLTurtle
"""
curX = 0.0
curY = 0.0
angle = 0.0
def reset():
""" Reset the position to the origin """
global curX
global curY
global angle
curX = 0.0
curY = 0.0
angle = 0.0
def turnTo(deg):
""" Turn to a certain angle """
global angle
angle = deg
def turn(deg):
""" Turn a certain number of degrees """
global angle
angle += deg
def forw(len, visible):
""" Move forward over a certain distance """
global curX
global curY
tmpX = curX
tmpY = curY
curX = curX + len * math.cos(math.radians(angle))
curY = curY + len * math.sin(math.radians(angle))
if visible:
glBegin(GL_LINE_STRIP)
glVertex2f(tmpX, tmpY)
glVertex2f(curX, curY)
glEnd()
def initFun():
glClearColor(1.0, 1.0, 1.0, 0.0)
glColor3f(0.0, 0.0, 0.0)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluOrtho2D(-100, 100, -100, 100)
def reshapeFun(w, h):
glViewport(0, 0, w, h)
# if w > h:
# glViewport((w-h)/2,0,h,h)
# else:
# glViewport(0,(h-w)/2,w,w)
def turtle_1():
glClear(GL_COLOR_BUFFER_BIT)
reset()
glColor3f(0.0, 0.0, 1.0)
L = 30
turnTo(0)
for i in range(0, 4):
forw(3 * L, True)
turn(90)
forw(L, True)
turn(90)
forw(L, True)
turn(90)
glFlush()
def turtle_2():
glClear(GL_COLOR_BUFFER_BIT)
glColor3f(0.0, 0.0, 1.0)
reset()
length = 0
increment = 1
for i in range(0, 100):
forw(length, True)
turn(60)
length += increment
glFlush()
def turtle_3():
glClear(GL_COLOR_BUFFER_BIT)
glColor3f(0.0, 0.0, 1.0)
reset()
length = 0
increment = 1
for i in range(0, 200):
forw(length, True)
turn(89.5)
length += increment
glFlush()
def turtle_4():
glClear(GL_COLOR_BUFFER_BIT)
glColor3f(0.0, 0.0, 1.0)
reset()
length = 0
increment = 1
for i in range(0, 200):
forw(length, True)
turn(-144)
length += increment
glFlush()
def turtle_5():
glClear(GL_COLOR_BUFFER_BIT)
glColor3f(0.0, 0.0, 1.0)
reset()
length = 0
increment = 1
for i in range(0, 200):
forw(length, True)
turn(170)
length += increment
glFlush()
def turtle_6():
glClear(GL_COLOR_BUFFER_BIT)
glColor3f(0.0, 0.0, 1.0)
reset()
L = 10
length = L
for i in range(0, 10):
for j in range(0, 4):
forw(length, True)
turn(90)
length += L
glFlush()
def turtle_7():
glClear(GL_COLOR_BUFFER_BIT)
glColor3f(0.0, 0.0, 1.0)
reset()
L = 3
length = L
for i in range(0, 100):
forw(length, True)
turn(90)
length += L
glFlush()
def turtle_8():
glClear(GL_COLOR_BUFFER_BIT)
glColor3f(0.0, 0.0, 1.0)
reset()
forw(100, True)
turn(120)
forw(100, True)
turn(120)
forw(50, True)
turn(120)
forw(50, True)
turn(-120)
forw(50, True)
turn(-120)
forw(50, True)
turn(120)
forw(50, True)
glFlush()
def turtle_9():
glClear(GL_COLOR_BUFFER_BIT)
glColor3f(0.0, 0.0, 1.0)
reset()
L = 50
for i in range(0, 3):
forw(L, True)
turn(-60)
forw(L, True)
turn(-120)
forw(L, True)
turn(-60)
forw(L, True)
glFlush()
def turtle_10():
glClear(GL_COLOR_BUFFER_BIT)
glColor3f(0.0, 0.0, 1.0)
reset()
L = 30
for i in range(0, 3):
forw(L, True)
turn(60)
forw(L, True)
turn(60)
forw(L, True)
turn(60)
forw(L, True)
turn(-60)
glFlush()
if __name__ == '__main__':
glutInit()
glutInitWindowSize(400, 400)
glutCreateWindow(b"Turtle")
glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB)
glutDisplayFunc(turtle_1)
# glutDisplayFunc(turtle_2)
# glutDisplayFunc(turtle_3)
# glutDisplayFunc(turtle_4)
# glutDisplayFunc(turtle_5)
# glutDisplayFunc(turtle_6)
# glutDisplayFunc(turtle_7)
# glutDisplayFunc(turtle_8)
# glutDisplayFunc(turtle_9)
# glutDisplayFunc(turtle_10)
glutReshapeFunc(reshapeFun)
initFun()
glutMainLoop()
|
78201
|
import os
from unittest import mock
import pytest
from kubernetes.client.exceptions import ApiException
from task_processing.plugins.kubernetes.kube_client import ExceededMaxAttempts
from task_processing.plugins.kubernetes.kube_client import KubeClient
def test_KubeClient_no_kubeconfig():
with mock.patch(
"task_processing.plugins.kubernetes.kube_client.kube_config.load_kube_config",
autospec=True
), mock.patch(
"task_processing.plugins.kubernetes.kube_client.kube_client",
autospec=True
), pytest.raises(ValueError):
KubeClient()
def test_KubeClient_kubeconfig_init():
with mock.patch(
"task_processing.plugins.kubernetes.kube_client.kube_config.load_kube_config",
autospec=True
), mock.patch(
"task_processing.plugins.kubernetes.kube_client.kube_client",
autospec=True
) as mock_kube_client:
client = KubeClient(kubeconfig_path="/some/kube/config.conf")
assert client.core == mock_kube_client.CoreV1Api()
def test_KubeClient_kubeconfig_env_var():
with mock.patch(
"task_processing.plugins.kubernetes.kube_client.kube_config.load_kube_config",
autospec=True
), mock.patch(
"task_processing.plugins.kubernetes.kube_client.kube_client",
autospec=True
) as mock_kube_client, mock.patch.dict(os.environ, {"KUBECONFIG": "/another/kube/config.conf"}):
client = KubeClient()
assert client.core == mock_kube_client.CoreV1Api()
def test_KubeClient_kubeconfig_init_overrides_env_var():
with mock.patch(
"task_processing.plugins.kubernetes.kube_client.kube_config.load_kube_config",
autospec=True
) as mock_load_config, mock.patch(
"task_processing.plugins.kubernetes.kube_client.kube_client",
autospec=True
) as mock_kube_client, mock.patch.dict(os.environ, {"KUBECONFIG": "/another/kube/config.conf"}):
mock_config_path = "/OVERRIDE.conf"
client = KubeClient(kubeconfig_path=mock_config_path)
assert client.core == mock_kube_client.CoreV1Api()
mock_load_config.assert_called_once_with(config_file=mock_config_path, context=None)
def test_KubeClient_get_pod_too_many_failures():
with mock.patch(
"task_processing.plugins.kubernetes.kube_client.kube_config.load_kube_config",
autospec=True
), mock.patch(
"task_processing.plugins.kubernetes.kube_client.kube_client",
autospec=True
) as mock_kube_client, mock.patch.dict(
os.environ, {"KUBECONFIG": "/another/kube/config.conf"}
), pytest.raises(ExceededMaxAttempts):
mock_config_path = "/OVERRIDE.conf"
mock_kube_client.CoreV1Api().read_namespaced_pod.side_effect = [ApiException, ApiException]
client = KubeClient(kubeconfig_path=mock_config_path)
client.get_pod(namespace='ns', pod_name='pod-name', attempts=2)
assert mock_kube_client.CoreV1Api().read_namespaced_pod.call_count == 2
def test_KubeClient_get_pod_unknown_exception():
with mock.patch(
"task_processing.plugins.kubernetes.kube_client.kube_config.load_kube_config",
autospec=True
), mock.patch(
"task_processing.plugins.kubernetes.kube_client.kube_client",
autospec=True
) as mock_kube_client, mock.patch.dict(
os.environ, {"KUBECONFIG": "/another/kube/config.conf"}
), pytest.raises(Exception):
mock_config_path = "/OVERRIDE.conf"
mock_kube_client.CoreV1Api().read_namespaced_pod.side_effect = [Exception]
client = KubeClient(kubeconfig_path=mock_config_path)
client.get_pod(namespace='ns', pod_name='pod-name', attempts=2)
def test_KubeClient_get_pod():
with mock.patch(
"task_processing.plugins.kubernetes.kube_client.kube_config.load_kube_config",
autospec=True
), mock.patch(
"task_processing.plugins.kubernetes.kube_client.kube_client",
autospec=True
) as mock_kube_client, mock.patch.dict(
os.environ, {"KUBECONFIG": "/another/kube/config.conf"}
):
mock_config_path = "/OVERRIDE.conf"
mock_kube_client.CoreV1Api().read_namespaced_pod.return_value = mock.Mock()
client = KubeClient(kubeconfig_path=mock_config_path)
client.get_pod(namespace='ns', pod_name='pod-name', attempts=1)
mock_kube_client.CoreV1Api().read_namespaced_pod.assert_called_once_with(
namespace='ns', name='pod-name'
)
|
78223
|
from joerd.util import BoundingBox
import joerd.download as download
import joerd.check as check
import joerd.srs as srs
import joerd.mask as mask
from joerd.mkdir_p import mkdir_p
from shutil import copyfileobj
import os.path
import os
import requests
import logging
import re
import tempfile
import sys
import traceback
import subprocess
import glob
from osgeo import gdal
class GMTEDTile(object):
def __init__(self, parent, x, y):
self.url = parent.url
self.download_options = parent.download_options
self.base_dir = parent.base_dir
self.x = x
self.y = y
def __key(self):
return (self.x, self.y)
def __eq__(a, b):
return isinstance(b, type(a)) and \
a.__key() == b.__key()
def __hash__(self):
return hash(self.__key())
def _res(self):
return '300' if self.y == -90 else '075'
def _file_name(self):
res = self._res()
xname = "%03d%s" % (abs(self.x), "E" if self.x >= 0 else "W")
yname = "%02d%s" % (abs(self.y), "N" if self.y >= 0 else "S")
return "%(y)s%(x)s_20101117_gmted_mea%(res)s.tif" % \
dict(res=res, x=xname, y=yname)
def urls(self):
dir = "%s%03d" % ("E" if self.x >= 0 else "W", abs(self.x))
res = self._res()
dname = "/%(res)sdarcsec/mea/%(dir)s/" % dict(res=res, dir=dir)
return [self.url + dname + self._file_name()]
def verifier(self):
return check.is_gdal
def options(self):
return self.download_options
def output_file(self):
fname = self._file_name()
return os.path.join(self.base_dir, fname)
def unpack(self, store, tmp):
with store.upload_dir() as target:
mkdir_p(os.path.join(target, self.base_dir))
output_file = os.path.join(target, self.output_file())
mask.negative(tmp.name, "GTiff", output_file)
def freeze_dry(self):
return dict(type='gmted', x=self.x, y=self.y)
class GMTED(object):
def __init__(self, options={}):
self.num_download_threads = options.get('num_download_threads')
self.base_dir = options.get('base_dir', 'gmted')
self.url = options['url']
self.xs = options['xs']
self.ys = options['ys']
self.download_options = options
def get_index(self):
# GMTED is a static set of files - there's no need for an index, but we
# do need a directory to store stuff in.
if not os.path.isdir(self.base_dir):
os.makedirs(self.base_dir)
def existing_files(self):
for base, dirs, files in os.walk(self.base_dir):
for f in files:
if f.endswith('tif'):
yield os.path.join(base, f)
def rehydrate(self, data):
assert data.get('type') == 'gmted', \
"Unable to rehydrate %r from GMTED." % data
return GMTEDTile(self, data['x'], data['y'])
def downloads_for(self, tile):
tiles = set()
# if the tile scale is greater than 20x the GMTED scale, then there's no
# point in including GMTED, it'll be far too fine to make a difference.
# GMTED is 7.5 arc seconds at best (30 at the poles).
if tile.max_resolution() > 20 * 7.5 / 3600:
return tiles
        # buffer by 0.1 degrees (48px) to grab neighbouring tiles to ensure
        # that there are no tile edge artefacts.
tile_bbox = tile.latlon_bbox().buffer(0.1)
for y in self.ys:
for x in self.xs:
bbox = BoundingBox(x, y, x + 30, y + 20)
if tile_bbox.intersects(bbox):
tiles.add(GMTEDTile(self, x, y))
return tiles
def vrts_for(self, tile):
"""
Returns a list of sets of tiles, with each list element intended as a
separate VRT for use in GDAL.
The reason for this is that GDAL doesn't do any compositing _within_
a single VRT, so if there are multiple overlapping source rasters in
the VRT, only one will be chosen. This isn't often the case - most
raster datasets are non-overlapping apart from deliberately duplicated
margins.
"""
return [self.downloads_for(tile)]
def srs(self):
return srs.wgs84()
def filter_type(self, src_res, dst_res):
# seems like GRA_Lanczos has trouble with nodata, which is causing
# "ringing" near the edges of the data.
return gdal.GRA_Bilinear if src_res > dst_res else gdal.GRA_Cubic
def _parse_bbox(self, ns_deg, is_ns, ew_deg, is_ew, res):
bottom = int(ns_deg)
left = int(ew_deg)
if is_ns == 'S':
bottom = -bottom
if is_ew == 'W':
left = -left
b = BoundingBox(left, bottom, left + 30, bottom + 20)
return b
def create(options):
return GMTED(options)
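# A minimal configuration sketch (hypothetical URL; the real value comes from the
# Joerd configuration). GMTED tiles cover 30x20 degree cells, matching the bounding
# boxes built in downloads_for():
#
#     gmted = create({
#         'url': 'https://example.com/gmted2010',
#         'xs': list(range(-180, 180, 30)),
#         'ys': list(range(-90, 90, 20)),
#         'base_dir': 'gmted',
#     })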
|
78243
|
import logging
from functools import wraps
from celery import states
from . import events
from .application import celery_application
from .events import RUNTIME_METADATA_ATTR
winnow_task_logger = logging.getLogger(__name__)
class WinnowTask(celery_application.Task):
def update_metadata(self, meta=None, task_id=None):
"""Update task runtime metadata."""
if task_id is None and self.request is not None:
task_id = self.request.id
if task_id is None:
raise RuntimeError("task_id is None")
fields = {RUNTIME_METADATA_ATTR: meta.asdict()}
self.send_event(type_=events.TASK_METADATA, **fields)
def winnow_task(*args, base=None, **opts):
"""Decorator to declare winnow Celery tasks.
The winnow_task decorator ensures that the correct celery application and
correct task base class (WinnowTask) are used. The WinnowTask API is
available for @winnow_task() tasks.
"""
if base is not None:
raise ValueError("Winnow tasks cannot override base class.")
def create_winnow_task_decorator(*arguments, **options):
"""Create an actual decorator."""
def decorator(task):
"""Wrap task routine."""
@wraps(task)
def wrapper(self, *task_args, **task_kwargs):
"""Task wrapper that ensures the bootstrapping behavior."""
if options.get("bind", False):
task_args = (self,) + task_args
self.update_state(state=states.STARTED)
# Ensure correct logging setup
logging.getLogger("winnow").setLevel(logging.INFO)
winnow_task_logger.info(
f"Initiating task '{self.name}[{self.request.id}]': args={task_args}, kwargs={task_kwargs}"
)
try:
return task(*task_args, **task_kwargs)
except Exception as exc:
winnow_task_logger.exception(f"Task raised exception: {exc}")
raise
return celery_application.task(base=WinnowTask, *arguments, **options)(wrapper)
return decorator
if len(args) == 1 and callable(args[0]):
return create_winnow_task_decorator(**opts)(args[0])
return create_winnow_task_decorator(*args, **opts)
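# A minimal usage sketch (hypothetical task, not part of the pipeline), showing how
# the decorator is applied; with bind=True the task receives `self` and can call
# WinnowTask helpers such as update_metadata():
#
#     @winnow_task(bind=True)
#     def example_task(self, value):
#         return value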
|
78283
|
import os
from gitaccount.gitaccounthelpers.gitaccounthelpers import (
get_repos_from_url, clone, pull, already_cloned,
get_gists_from_url)
class GitAccount:
"""GitAccount class provides clone_repos and update_repos methods"""
def __init__(self, account_type, userName):
self._userName = userName
if not (account_type == 'user' or account_type == 'org'):
raise ValueError('Invalid accountType argument: {}'.format(
account_type))
self._account_type = account_type
self._repo_url = self._get_repo_url()
self._gist_url = self._get_gist_url()
self._repos_dir = os.path.join(self._userName, 'repos')
self._gists_dir = os.path.join(self._userName, 'gists')
try:
os.mkdir(self._userName)
except FileExistsError:
print('{0} folder exists. Changing working directory to {0}'.
format(self._userName))
os.chdir(self._userName)
def _get_repo_url(self):
urlPlaceholder = self._account_type + 's'
url = 'https://api.github.com/{}/{}/repos'.format(
urlPlaceholder, self._userName)
return url
def _get_gist_url(self):
url = 'https://api.github.com/users/{}/gists'.format(self._userName)
return url
def get_repos(self):
self._repos = get_repos_from_url(self._repo_url)
return self._repos
def get_gists(self):
self._gists = get_gists_from_url(self._gist_url)
return self._gists
def clone_repos(self):
repos = self.get_repos()
try:
os.mkdir('repos')
os.chdir('repos')
except:
            print('Failed to create directory {}.'.format(self._repos_dir))
print('Make sure you have the right permissions.')
raise
print('{} repositories to clone'.format(len(repos)))
print('Private repos have been excluded!')
for index, repo in enumerate(repos, start=1):
print('%2d - %s' % (index, repo['full_name']))
print() # Empty line print to improve readability
for index, repo in enumerate(repos, start=1):
print('Cloning {} - {}'.format(index, repo['full_name']))
clone(repo['clone_url'])
print('All repositories have been cloned successfully to {}!'.format(
os.path.abspath('.')))
# Back to default directory after getting result
os.chdir('..')
def clone_gists(self):
gists = self.get_gists()
try:
os.mkdir('gists')
except FileExistsError:
pass
except:
print('Failed to create directory {}.'.format(self._gists_dir))
print('Make sure you have the right permissions.')
raise
os.chdir('gists')
print('{} gists to clone'.format(len(gists)))
# print('Private repos have been excluded!')
for index, gist in enumerate(gists, start=1):
print('%2d - %s' % (index, gist["id"]))
print() # Empty line print to improve readability
for index, gist in enumerate(gists, start=1):
print('Cloning {} - {}'.format(index, gist["id"]))
clone(gist["git_pull_url"])
print('All gists have been cloned successfully to {}!'.format(
os.path.abspath('.')))
# Back to default directory after getting result
os.chdir('..')
def get_already_cloned_repos(self):
self._already_cloned = already_cloned('repos')
return self._already_cloned
def get_already_cloned_gists(self):
self._already_cloned = already_cloned('gists')
return self._already_cloned
def pull_repos(self):
already_cloned = self.get_already_cloned_repos()
try:
os.chdir('repos')
except FileNotFoundError:
print('You have not cloned any repositories from {} yet'.format(
self._userName))
print('Follow app instructions and clone first!')
return
except:
raise
print('{} repos to pull/update\n'.format(len(already_cloned)))
for index, reponame in enumerate(already_cloned, start=1):
print('{} - {}'.format(index, reponame))
pull(reponame)
os.chdir('..')
def pull_gists(self):
already_cloned = self.get_already_cloned_gists()
try:
os.chdir('gists')
except FileNotFoundError:
print('You have not cloned any gists from {} yet'.format(
self._userName))
print('Follow app instructions and clone first!')
return
except:
raise
print('{} gists to pull/update\n'.format(len(already_cloned)))
for index, reponame in enumerate(already_cloned, start=1):
print('{} - {}'.format(index, reponame))
pull(reponame)
os.chdir('..')
|
78307
|
from backend.serializers.user_model_serializer import UserModelDetailSerializer
def jwt_response_payload_handler(token, user=None, request=None):
return {
'token': token,
'user': UserModelDetailSerializer(user, context={'request':request}).data
}
|
78309
|
from lyrebird import application
from .. import checker
class CustomDecoder:
def __call__(self, rules=None, *args, **kw):
def func(origin_func):
func_type = checker.TYPE_DECODER
if not checker.scripts_tmp_storage.get(func_type):
checker.scripts_tmp_storage[func_type] = []
checker.scripts_tmp_storage[func_type].append({
'name': origin_func.__name__,
'func': origin_func,
'rules': rules
})
return origin_func
return func
@staticmethod
def register(func_info):
application.decoder.append(func_info)
@staticmethod
def unregister(func_info):
if func_info in application.decoder:
application.decoder.remove(func_info)
decoder = CustomDecoder()
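# A minimal usage sketch (hypothetical handler; the exact arguments Lyrebird passes
# to a registered decoder are an assumption here, not defined in this module):
#
#     @decoder(rules={'request.url': 'example.com'})
#     def my_decoder(flow):
#         # transform flow data in place
#         pass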
|
78314
|
from mongoengine import Document, IntField, DoesNotExist, MultipleObjectsReturned, StringField
class SwapTrackerObject(Document):
nonce = IntField(required=True)
src = StringField(required=True, unique=True)
@classmethod
def last_processed(cls, src: str):
"""
        Returns the last processed contract tx sequence number
        :param src: string identifying the source network (e.g. secret20, eth)
"""
try:
doc = cls.objects.get(src=src)
except DoesNotExist:
doc = cls(nonce=-1, src=src).save()
except MultipleObjectsReturned as e: # Corrupted DB
raise e
return doc.nonce
@classmethod
def get_or_create(cls, src: str):
"""
        Returns the tracker document for the given source, creating it if it does not exist
        :param src: string identifying the source network (e.g. secret20, eth)
"""
try:
doc = cls.objects.get(src=src)
except DoesNotExist:
doc = cls(nonce=-1, src=src).save()
except MultipleObjectsReturned as e: # Corrupted DB
raise e
return doc
@classmethod
def update_last_processed(cls, src: str, update_val: int):
doc = cls.objects.get(src=src)
doc.nonce = update_val
doc.save()
# class Source(Enum):
# ETH = 1
# SCRT = 2
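# A minimal usage sketch (hypothetical source name), assuming a mongoengine
# connection has been established elsewhere:
#
#     SwapTrackerObject.get_or_create('eth')
#     nonce = SwapTrackerObject.last_processed('eth')   # -1 if nothing processed yet
#     SwapTrackerObject.update_last_processed('eth', nonce + 1)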
|
78412
|
from bge import logic
class HeartContainer:
def __init__(self, startHeart, maxHeart):
self.isLow = False
        # Initialize the heart container if it does not exist yet
if not 'heartContainer' in logic.globalDict['Player']:
logic.globalDict['Player']['heartContainer'] = {'heart' : startHeart, 'maxHeart' : maxHeart}
def calculLow(self):
heartContainer = logic.globalDict['Player']['heartContainer']
percent = (heartContainer['heart']/heartContainer['maxHeart']) * 100
if (percent < 40):
self.isLow = True
else:
self.isLow = False
logic.playerHUD.low_healt(self.isLow)
def load(self, startHeart, maxHeart):
heartContainer = logic.globalDict['Player']['heartContainer']
heartContainer['heart'] = startHeart
heartContainer['maxHeart'] = maxHeart
def gainHeart(self, qte):
"""
        Gain hearts; if the gain exceeds the maximum, clamp to maxHeart
"""
heartContainer = logic.globalDict['Player']['heartContainer']
next_heart = heartContainer['heart'] + qte
if (next_heart > heartContainer['maxHeart']):
heartContainer['heart'] = heartContainer['maxHeart']
else:
heartContainer['heart'] = next_heart
# Update hud and lowHeart state
self.calculLow()
logic.playerHUD.updateHeart()
def loseHeart(self, qte):
"""
        Lose hearts; if the loss goes below zero, clamp to zero
"""
        heartContainer = logic.globalDict['Player']['heartContainer']
        next_heart = heartContainer['heart'] - qte
        if (next_heart < 0):
            heartContainer['heart'] = 0
        else:
            heartContainer['heart'] = next_heart
        # Update hud and lowHeart state
        self.calculLow()
        logic.playerHUD.updateHeart()
def notHaveHeart(self):
heartContainer = logic.globalDict['Player']['heartContainer']
if ( heartContainer['heart'] == 0 ) :
return True
else:
return False
class RupeeContainer:
def __init__(self, startRupee, maxRupee):
        # Initialize the rupee container if it does not exist yet
if not 'rupeeContainer' in logic.globalDict['Player']:
logic.globalDict['Player']['rupeeContainer'] = {'rupee' : startRupee, 'maxRupee' : maxRupee}
def gainRupee(self, qte):
"""
        Gain rupees; if the gain exceeds the maximum, clamp to maxRupee
"""
rupeeContainer = logic.globalDict['Player']['rupeeContainer']
next_rupee = rupeeContainer['rupee'] + qte
if (next_rupee > rupeeContainer['maxRupee']):
rupeeContainer['rupee'] = rupeeContainer['maxRupee']
else:
rupeeContainer['rupee'] = next_rupee
# Update hud
logic.playerHUD.updateRupee()
|
78457
|
class Token:
TOP_LEFT = "┌"
TOP_RIGHT = "┐"
BOTTOM_LEFT = "└"
BOTTOM_RIGHT = "┘"
HORIZONTAL = "─"
BOX_START = TOP_LEFT + HORIZONTAL
VERTICAL = "│"
INPUT_PORT = "┼"
OUTPUT_PORT = "┼"
FUNCTION = "ƒ"
COMMENT = "/*...*/"
SINGLE_QUOTE = "'"
DOUBLE_QUOTE = '"'
LEFT_PAREN = "("
FUNCTION_START = FUNCTION + LEFT_PAREN
RIGHT_PAREN = ")"
KEYWORD_BRANCH = "[Branch]"
KEYWORD_FOR_LOOP = "[For Loop]"
KEYWORD_FOR_EACH = "[For Each]"
KEYWORD_WHILE_LOOP = "[While Loop]"
KEYWORD_RETURN = "[Return]"
KEYWORD_BREAK = "[Break]"
KEYWORD_CONTINUE = "[Continue]"
KEYWORD_SET = "[Set]"
DATA_FLOW_PORT = "○"
CONTROL_FLOW_PORT = "►"
|
78509
|
class IndexingError(Exception):
"""Exception raised for errors in the indexing flow.
Attributes:
type -- One of 'user', 'user_replica_set', 'user_library', 'tracks', 'social_features', 'playlists'
blocknumber -- block number of error
blockhash -- block hash of error
txhash -- transaction hash of error
message -- error message
"""
def __init__(self, type, blocknumber, blockhash, txhash, message):
super().__init__(message)
self.type = type
self.blocknumber = blocknumber
self.blockhash = blockhash
self.txhash = txhash
self.message = message
|
78516
|
import itertools
import logging
import numpy as np
import pandas as pd
import scipy.stats
def create_regression_dataset(metafeatures, experiments):
X = []
X_indices = []
Y = []
for dataset_name in experiments:
experiment = experiments[dataset_name]
mf = metafeatures.loc[dataset_name]
for i, run in enumerate(experiment):
x1 = pd.Series(
data=[run.params[param] for param in run.params],
index=run.params.keys())
x2 = mf
X.append(x1.append(x2))
X_indices.append('%s_%d' % (dataset_name, i))
Y.append(run.result)
X = pd.DataFrame(X, index=X_indices)
Y = pd.DataFrame(Y, index=X_indices)
logging.info('X.shape %s', X.shape)
logging.info('Y.shape %s', Y.shape)
return X, Y
def create_predict_spearman_rank(metafeatures, experiments, iterator):
X = []
Y = []
Y_names = []
# Calculate the pairwise ranks between datasets
dataset_names = [name for name in metafeatures.index]
cross_product = []
if iterator == 'combination':
for cross in itertools.combinations_with_replacement(
dataset_names, r=2):
cross_product.append(cross)
elif iterator == 'permutation':
for cross in itertools.permutations(dataset_names, r=2):
cross_product.append(cross)
else:
raise NotImplementedError(iterator)
logging.info('Create spearman rank dataset without CV data and %s',
iterator)
logging.info('Using %d datasets', len(dataset_names))
    logging.info('This will result in %d training points', len(cross_product))
# Create inputs and targets
for cross in cross_product:
name = '%s_%s' % (cross[0], cross[1])
mf_1 = metafeatures.loc[cross[0]]
mf_2 = metafeatures.loc[cross[1]]
assert mf_1.dtype == np.float64
assert mf_2.dtype == np.float64
x = np.hstack((mf_1, mf_2))
columns = metafeatures.columns.values
index = np.hstack(('0_' + columns, '1_' + columns))
x = pd.Series(data=x, name=name, index=index)
X.append(x)
experiments_1 = experiments[cross[0]]
experiments_2 = experiments[cross[1]]
assert len(experiments_1) == len(experiments_2), name
responses_1 = np.zeros((len(experiments_1)), dtype=np.float64)
responses_2 = np.zeros((len(experiments_1)), dtype=np.float64)
for idx, zipped in enumerate(
zip(
sorted(experiments_1, key=lambda t: str(t.configuration)),
sorted(experiments_2,
key=lambda t: str(t.configuration)))):
# Test if the order of the params is the same
exp_1, exp_2 = zipped
print(exp_1.configuration, exp_2.configuration)
assert exp_1.configuration == exp_2.configuration,\
(experiments_1, experiments_2)
responses_1[idx] = exp_1.result if np.isfinite(exp_1.result) else 1
responses_2[idx] = exp_2.result if np.isfinite(exp_2.result) else 1
rho, p = scipy.stats.spearmanr(responses_1, responses_2)
# rho, p = scipy.stats.kendalltau(responses_1, responses_2)
if not np.isfinite(rho):
rho = 0
Y.append(rho)
Y_names.append(name)
X = pd.DataFrame(X)
Y = pd.Series(Y, index=Y_names)
logging.info('Metafeatures %s', metafeatures.shape)
logging.info('X.shape %s', X.shape)
logging.info('Y.shape %s', Y.shape)
assert X.shape == (len(cross_product), metafeatures.shape[1] * 2), \
(X.shape, (len(cross_product), metafeatures.shape[1] * 2))
assert Y.shape == (len(cross_product), )
# train sklearn regressor (tree) with 10fold CV
indices = np.arange(len(X))
np_rs = np.random.RandomState(42)
np_rs.shuffle(indices)
X = X.iloc[indices]
Y = Y.iloc[indices]
return X, Y
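# Minimal illustrative sketch (not part of the original module): the target
# built above is the Spearman rank correlation between the results that the
# same configurations obtained on two datasets, with a non-finite rho mapped
# to 0 (e.g. when one response vector is constant). The response values below
# are made up.
def _example_spearman_target():
    responses_1 = np.array([0.10, 0.20, 0.30, 0.40])
    responses_2 = np.array([0.15, 0.25, 0.28, 0.50])
    rho, _ = scipy.stats.spearmanr(responses_1, responses_2)
    if not np.isfinite(rho):
        rho = 0
    return rho  # 1.0 here, because both result vectors induce the same ranking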
def create_predict_spearman_rank_with_cv(cv_metafeatures, cv_experiments,
iterator):
X = []
Y = []
Y_names = []
# Calculate the pairwise ranks between datasets
dataset_names = [name for name in cv_metafeatures]
cross_product = []
folds_product = []
if iterator == 'combination':
for cross in itertools.combinations_with_replacement(
dataset_names, r=2):
cross_product.append(cross)
for folds in itertools.combinations_with_replacement(range(10), r=2):
folds_product.append(folds)
elif iterator == 'permutation':
for cross in itertools.permutations(dataset_names, r=2):
cross_product.append(cross)
for folds in itertools.permutations(range(10), r=2):
folds_product.append(folds)
else:
raise NotImplementedError()
logging.info('Create spearman rank dataset with CV data %s', iterator)
logging.info('Using %d datasets', len(dataset_names))
logging.info('This will result in %d training points',
len(cross_product) * len(folds_product))
logging.info('Length of dataset crossproduct %s', len(cross_product))
logging.info('Length of folds crossproduct %s', len(folds_product))
# Create inputs and targets
for i, cross in enumerate(cross_product):
print('%d/%d: %s' % (i, len(cross_product), cross), )
for folds in folds_product:
name = '%s-%d_%s-%d' % (cross[0], folds[0], cross[1], folds[1])
mf_1 = cv_metafeatures[cross[0]][folds[0]]
mf_2 = cv_metafeatures[cross[1]][folds[1]]
assert mf_1.dtype == np.float64
assert mf_2.dtype == np.float64
x = np.hstack((mf_1, mf_2))
columns = cv_metafeatures[cross[0]][folds[0]].index.values
index = np.hstack(('0_' + columns, '1_' + columns))
x = pd.Series(data=x, name=name, index=index)
X.append(x)
experiments_1 = cv_experiments[cross[0]][folds[0]]
experiments_2 = cv_experiments[cross[1]][folds[1]]
assert len(experiments_1) == len(experiments_2)
responses_1 = np.zeros((len(experiments_1)), dtype=np.float64)
responses_2 = np.zeros((len(experiments_1)), dtype=np.float64)
for idx, zipped in enumerate(zip(experiments_1, experiments_2)):
# Test if the order of the params is the same
exp_1, exp_2 = zipped
assert exp_1.params == exp_2.params
responses_1[idx] = exp_1.result
responses_2[idx] = exp_2.result
rho, p = scipy.stats.spearmanr(responses_1, responses_2)
# A nan is produced if all values of one of the response lists
# are equal. This results in a division by zero. Because there is
# no correlation if all values are the same, rho is replaced by
# zero...
# It would probably be better to assign random ranks for equal
# values, but scipy doesn't support this...
if not np.isfinite(rho):
rho = 0
Y.append(rho)
Y_names.append(name)
X = pd.DataFrame(X)
Y = pd.Series(Y, index=Y_names)
logging.info('CV_Metafeatures %s', len(cv_metafeatures))
logging.info('X.shape %s', X.shape)
logging.info('Y.shape %s', Y.shape)
# train sklearn regressor (tree) with 10fold CV
indices = np.arange(len(X))
np_rs = np.random.RandomState(42)
np_rs.shuffle(indices)
X = X.iloc[indices]
Y = Y.iloc[indices]
return X, Y
"""
def create_smac_warmstart_files(context, dataset, output_dir, num_warmstarts):
runs_and_results = StringIO.StringIO()
runs_and_results.write("Run Number,Run History Configuration ID,Instance ID,"
"Response Value (y),Censored?,Cutoff Time Used,Seed,"
"Runtime,Run Length,Run Result Code,Run Quality,SMAC"
" Iteration,SMAC Cumulative Runtime,Run Result,"
"Additional Algorithm Run Data,Wall Clock Time,\n")
paramstrings = StringIO.StringIO()
best_hyperparameters, distances = metalearner.metalearn_base(context)
hp_list, name_list, dist_list = metalearner.assemble_best_hyperparameters_list(
best_hyperparameters, distances)
for i in range(len(hp_list)):
print hp_list[i], name_list[i], dist_list[i]
def create_smac_files_file(cv_metafeatures, cv_experiments, dataset,
output_dir):
runs_and_results = StringIO.StringIO()
runs_and_results.write("Run Number,Run History Configuration ID,Instance ID,"
"Response Value (y),Censored?,Cutoff Time Used,Seed,"
"Runtime,Run Length,Run Result Code,Run Quality,SMAC"
" Iteration,SMAC Cumulative Runtime,Run Result,"
"Additional Algorithm Run Data,Wall Clock Time,\n")
paramstrings = StringIO.StringIO()
train_instances_file = StringIO.StringIO()
feature_file = StringIO.StringIO()
scenario_file = StringIO.StringIO()
run_number = 1
instance_number = 1
# TODO: is it possible to get_value the openml dataset id?
for dataset_number, name in enumerate(cv_experiments):
for fold in cv_experiments[name]:
configuration_id = 1
iteration = int(run_number/2)
# if name == dataset, we don't want to put the rundata in there
# because we want to optimize for name
if name != dataset:
for exp in cv_experiments[name][fold]:
str = "%s,%s,%s,%f,0,108000,-1,%f,1,1,%f,%d,%f,SAT,Aditional data,%f" \
% (run_number, configuration_id, instance_number, exp.result, 1.0,
exp.result, iteration, float(run_number), 1.0)
runs_and_results.write(str + "\n")
run_number += 1
configuration_id += 1
train_instances_file.write("%d-%d\n" % (dataset_number, fold))
instance_number += 1
if instance_number > 100:
break
configuration_id = 1
for exp in cv_experiments[name][0]:
paramstring = ", ".join(["%s='%s'" % (re.sub("^-", "",param),
exp.params[param]) for param in exp.params])
paramstrings.write("%d: %s\n" % (configuration_id, paramstring))
with open(os.path.join(output_dir, "runs_and_results-it%d.csv" %
iteration), "w") as fh:
runs_and_results.seek(0)
for line in runs_and_results:
fh.write(line)
with open(os.path.join(output_dir, "paramstrings-it%d.txt" % iteration),
"w") as fh:
paramstrings.seek(0)
for line in paramstrings:
fh.write(line)
with open(os.path.join(output_dir, "instances-train.txt"),
"w") as fh:
train_instances_file.seek(0)
for line in train_instances_file:
fh.write(line)
"""
if __name__ == '__main__':
pass
"""
# TODO: right now, this is only done for one split, namely the split of
# the directory we're inside...
# TODO: this only works in a directory, in which a metaexperiment was
# already run...
parser = ArgumentParser()
parser.add_argument("target_directory", type=str)
args = parser.parse_args()
target_directory = args.target_directory
if not os.path.exists(target_directory):
raise ValueError("Target directory %s does not exist." % target_directory)
# Important, change into some directory in which an experiment was already
# performed...
context = metalearner.setup(None)
metafeatures = context["metafeatures"]
#cv_metafeatures = context["cv_metafeatures"]
meta_base = context["meta_base"]
#cv_meta_base = context["cv_meta_base"]
savefile_prefix = "testfold_%d-%d" % (context["test_fold"],
context["test_folds"])
# Use only the pfahringer subset of the available metafeatures
#columns = list()
#columns.extend(mf.subsets["pfahringer_2000_experiment1"])
#print columns
#metafeatures = metafeatures.loc[:,columns]
#for key in cv_metafeatures:
# cv_metafeatures[key] = cv_metafeatures[key].loc[columns,:]
# savefile_prefix += "_pfahringer"
# Remove class_probability_max from the set of metafeatures
# columns = list()
# metafeature_list = mf.subsets["all"]
# metafeature_list.remove("class_probability_max")
# metafeature_list.remove("class_probability_min")
# metafeature_list.remove("class_probability_mean")
# metafeature_list.remove("class_probability_std")
# columns.extend(metafeature_list)
# metafeatures = metafeatures.loc[:,columns]
# for key in cv_metafeatures:
# cv_metafeatures[key] = cv_metafeatures[key].loc[columns,:]
# savefile_prefix += "_woclassprobs"
# Experiment is an OrderedDict, which has dataset names as keys
# The values are lists of experiments(OrderedDict of params, response)
experiments = meta_base.experiments
#cv_experiments = cv_meta_base.experiments
"""
"""
# Build the warmstart directory for SMAC, can be called with
# ./smac --scenario-file <file> --seed 0 --warmstart <foldername>
# needs paramstrings.txt and runs_and_results.txt
# plain
smac_bootstrap_output = "smac_bootstrap_plain"
for dataset in cv_metafeatures:
bootstraps = (2, 5, 10)
distance = ("l1", "l2", "learned_distance")
metafeature_subset = mf.subsets
for num_bootstrap, dist, subset in itertools.product(
bootstraps, distance, metafeature_subset, repeat=1):
context["distance_measure"] = dist
# TODO: somehow only get_value a metafeature subset
dataset_output_dir = os.path.join(target_directory,
smac_bootstrap_output, dataset +
"_bootstrapped%d_%s_%s" % (num_bootstrap, dist, subset))
if not os.path.exists(dataset_output_dir):
os.makedirs(dataset_output_dir)
create_smac_warmstart_files(context, dataset, dataset_output_dir,
num_warmstarts=num_bootstrap)
break
# with the adjustment of Yogotama and Mann
"""
# X, Y = create_regression_dataset(metafeatures, experiments)
# with open("regression_dataset.pkl", "w") as fh:
# cPickle.dump((X, Y, metafeatures), fh, -1)
"""
# Calculate the metafeatures without the 10fold CV
X, Y = create_predict_spearman_rank(metafeatures, experiments,
iterator="permutation")
spearman_rank_file = os.path.join(target_directory,
savefile_prefix + "_spearman_rank_perm.pkl")
with open(spearman_rank_file, "w") as fh:
cPickle.dump((X, Y, metafeatures), fh, -1)
X, Y = create_predict_spearman_rank(metafeatures, experiments,
iterator="combination")
spearman_rank_file = os.path.join(target_directory,
savefile_prefix + "_spearman_rank_comb.pkl")
with open(spearman_rank_file, "w") as fh:
cPickle.dump((X, Y, metafeatures), fh, -1)
print
# Calculate the metafeatures for the 10fold CV...
X, Y = create_predict_spearman_rank_with_cv(cv_metafeatures,
cv_experiments,
iterator="combination")
spearman_rank_file = os.path.join(target_directory,
savefile_prefix + "_spearman_rank_cv_comb.pkl")
with open(spearman_rank_file, "w") as fh:
cPickle.dump((X, Y, metafeatures), fh, -1)
X, Y = create_predict_spearman_rank_with_cv(cv_metafeatures,
cv_experiments,
iterator="permutation")
spearman_rank_file = os.path.join(target_directory,
savefile_prefix + "_spearman_rank_cv_perm.pkl")
with open(spearman_rank_file, "w") as fh:
cPickle.dump((X, Y, metafeatures), fh, -1)
"""
|
78604
|
from dataclasses import dataclass
from typing import Tuple
from manim import config
from manim import DEFAULT_MOBJECT_TO_EDGE_BUFFER
from manim import DEFAULT_MOBJECT_TO_MOBJECT_BUFFER
from manim import DOWN
from manim import LEFT
from manim import Mobject
from manim import np
from manim import ORIGIN
from manim import Polygon
from manim import RIGHT
from manim import UP
from wrapt import ObjectProxy
WIDTH_THIRD = (config["frame_x_radius"] * 2) / 3
@dataclass
class Bounds:
ul: Tuple[float, float, float] = None
ur: Tuple[float, float, float] = None
dr: Tuple[float, float, float] = None
dl: Tuple[float, float, float] = None
@property
def width(self):
return abs(self.ul[0] - self.ur[0])
@property
def height(self):
return abs(self.ur[1] - self.dr[1])
def as_mobject(self) -> Polygon:
return Polygon(self.ul, self.ur, self.dr, self.dl)
class AutoScaled(ObjectProxy):
"""
Autoscales whatever it wraps on changes in placement including:
* `next_to`
* `to_edge`
* `set_x`
* `set_y`
* `move_to`
"""
def __init__(self, delegate: Mobject, rescale: bool = True):
"""
Args:
delegate: The object to scale
rescale: Whether to rescale the object immediately or not
"""
super().__init__(delegate)
self._overall_scale_factor: float = 1
self._bounds = Bounds()
self.reset_bounds()
if rescale:
self.autoscale(ORIGIN)
def scale(self, scale_factor, **kwargs):
self._overall_scale_factor *= scale_factor
self.__wrapped__.scale(scale_factor, **kwargs)
return self
def copy(self):
result = self.__wrapped__.copy()
wrapper = AutoScaled(result, False)
wrapper._bounds = self._bounds
wrapper._overall_scale_factor = self._overall_scale_factor
return wrapper
def next_to(self, mobject_or_point, direction=RIGHT, **kwargs):
self.__wrapped__.next_to(mobject_or_point, direction, **kwargs)
self._update_bounds_to_direction(direction * -1)
self.autoscale(direction * -1)
return self
def move_to(self, point_or_mobject, aligned_edge=ORIGIN, coor_mask=np.array([1, 1, 1])):
self.__wrapped__.move_to(point_or_mobject, aligned_edge, coor_mask)
self._update_bounds_to_direction(aligned_edge)
self.autoscale(aligned_edge)
return self
def set_x(self, x, direction=ORIGIN):
self.__wrapped__.set_x(x, direction)
self._update_bounds_to_direction(direction)
self.autoscale(direction)
return self
def fill_between_x(self, x_left: float, x_right: float):
"""
Autoscales between two X values
"""
self._bounds.ur = np.array((x_right, self._bounds.ur[1], self._bounds.ur[2]))
self._bounds.dr = np.array((x_right, self._bounds.dr[1], self._bounds.dr[2]))
self.set_x(x_left, LEFT)
self._update_bounds_to_direction(LEFT)
self.autoscale(LEFT)
return self
def set_y(self, y, direction=ORIGIN):
self.__wrapped__.set_y(y)
self._update_bounds_to_direction(direction)
self.autoscale(direction)
return self
def full_size(self):
"""
Resets the scaling to full screen
"""
self.reset_bounds()
self.__wrapped__.center()
self.autoscale(ORIGIN)
return self
def reset_bounds(self):
x_rad = config["frame_x_radius"]
y_rad = config["frame_y_radius"]
buff = DEFAULT_MOBJECT_TO_MOBJECT_BUFFER
self._bounds.ul = np.array((x_rad * -1 + buff, y_rad - buff, 0))
self._bounds.ur = np.array((x_rad - buff, y_rad - buff, 0))
self._bounds.dr = np.array((x_rad - buff, y_rad * -1 + buff, 0))
self._bounds.dl = np.array((x_rad * -1 + buff, y_rad * -1 + buff, 0))
return self
def to_edge(self, edge=LEFT, buff=DEFAULT_MOBJECT_TO_EDGE_BUFFER):
self.__wrapped__.to_edge(edge, buff)
self._update_bounds_to_direction(edge)
self.autoscale(edge)
return self
def autoscale(self, direction: np.array):
"""
Manually autoscales in a given direction
Args:
direction: The direction to scale in
"""
if not self.__wrapped__.get_width() or not self.__wrapped__.get_height():
return
x_scale = self._bounds.width / self.__wrapped__.get_width()
y_scale = self._bounds.height / self.__wrapped__.get_height()
self.scale(min(x_scale, y_scale), about_point=self._bounds.as_mobject().get_critical_point(direction))
def _update_bounds_to_direction(self, direction: np.array):
if direction[0] == -1:
new_x = self.__wrapped__.get_x(LEFT)
self._bounds.ul = np.array((new_x, self._bounds.ul[1], self._bounds.ul[2]))
self._bounds.dl = np.array((new_x, self._bounds.dl[1], self._bounds.dl[2]))
elif direction[0] == 1:
new_x = self.__wrapped__.get_x(RIGHT)
self._bounds.ur = np.array((new_x, self._bounds.ur[1], self._bounds.ur[2]))
self._bounds.dr = np.array((new_x, self._bounds.dr[1], self._bounds.dr[2]))
if direction[1] == -1:
new_y = self.__wrapped__.get_y(DOWN)
self._bounds.dr = np.array((self._bounds.dr[0], new_y, self._bounds.dr[2]))
self._bounds.dl = np.array((self._bounds.dl[0], new_y, self._bounds.dl[2]))
elif direction[1] == 1:
new_y = self.__wrapped__.get_y(UP)
self._bounds.ur = np.array((self._bounds.ur[0], new_y, self._bounds.ur[2]))
self._bounds.ul = np.array((self._bounds.ul[0], new_y, self._bounds.ul[2]))
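# Illustrative usage sketch (not part of the original module): wrapping a
# mobject in AutoScaled so that placement calls such as to_edge() and next_to()
# rescale it against the remaining frame bounds, as described in the class
# docstring above. Scene and Text are standard manim classes; the demo scene
# itself is hypothetical and only renders if invoked via manim.
from manim import Scene, Text


class AutoScaledDemo(Scene):
    def construct(self):
        title = AutoScaled(Text("Title"))
        title.to_edge(UP)          # pinned to the top edge, then rescaled to fit
        body = AutoScaled(Text("Body"))
        body.next_to(title, DOWN)  # placed below the title, rescaled again
        self.add(title, body)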
|
78612
|
import unittest
import numpy as np
from audiomentations.augmentations.transforms import Mp3Compression
from audiomentations.core.composition import Compose
class TestMp3Compression(unittest.TestCase):
def test_apply_mp3_compression_pydub(self):
sample_len = 44100
samples_in = np.random.normal(0, 1, size=sample_len).astype(np.float32)
sample_rate = 44100
augmenter = Compose(
[Mp3Compression(p=1.0, min_bitrate=48, max_bitrate=48, backend="pydub")]
)
samples_out = augmenter(samples=samples_in, sample_rate=sample_rate)
self.assertEqual(samples_out.dtype, np.float32)
self.assertGreaterEqual(len(samples_out), sample_len)
self.assertLess(len(samples_out), sample_len + 2500)
def test_apply_mp3_compression_lameenc(self):
sample_len = 44100
samples_in = np.random.normal(0, 1, size=sample_len).astype(np.float32)
sample_rate = 44100
augmenter = Compose(
[Mp3Compression(p=1.0, min_bitrate=48, max_bitrate=48, backend="lameenc")]
)
samples_out = augmenter(samples=samples_in, sample_rate=sample_rate)
self.assertEqual(samples_out.dtype, np.float32)
self.assertGreaterEqual(len(samples_out), sample_len)
self.assertLess(len(samples_out), sample_len + 2500)
def test_apply_mp3_compression_low_bitrate_pydub(self):
sample_len = 16000
samples_in = np.random.normal(0, 1, size=sample_len).astype(np.float32)
sample_rate = 16000
augmenter = Compose(
[Mp3Compression(p=1.0, min_bitrate=8, max_bitrate=8, backend="pydub")]
)
samples_out = augmenter(samples=samples_in, sample_rate=sample_rate)
self.assertEqual(samples_out.dtype, np.float32)
self.assertGreaterEqual(len(samples_out), sample_len)
self.assertLess(len(samples_out), sample_len + 2500)
def test_apply_mp3_compression_low_bitrate_lameenc(self):
sample_len = 16000
samples_in = np.random.normal(0, 1, size=sample_len).astype(np.float32)
sample_rate = 16000
augmenter = Compose(
[Mp3Compression(p=1.0, min_bitrate=8, max_bitrate=8, backend="lameenc")]
)
samples_out = augmenter(samples=samples_in, sample_rate=sample_rate)
self.assertEqual(samples_out.dtype, np.float32)
self.assertGreaterEqual(len(samples_out), sample_len)
self.assertLess(len(samples_out), sample_len + 2500)
def test_invalid_argument_combination(self):
with self.assertRaises(AssertionError):
_ = Mp3Compression(min_bitrate=400, max_bitrate=800)
with self.assertRaises(AssertionError):
_ = Mp3Compression(min_bitrate=2, max_bitrate=4)
with self.assertRaises(AssertionError):
_ = Mp3Compression(min_bitrate=64, max_bitrate=8)
|
78614
|
from . import mysqldb
from physical.models import Instance
class MySQLPercona(mysqldb.MySQL):
def get_default_instance_type(self):
return Instance.MYSQL_PERCONA
@classmethod
def topology_name(cls):
return ['mysql_percona_single']
class MySQLPerconaFOXHA(mysqldb.MySQLFOXHA):
def get_default_instance_type(self):
return Instance.MYSQL_PERCONA
@classmethod
def topology_name(cls):
return ['mysql_percona_foxha']
|
78641
|
import argparse
import configparser
import torch
import os
import numpy as np
import matplotlib.animation as animation
import matplotlib.pyplot as plt
from model import ValueNetwork
from env import ENV
from train import run_one_episode
def visualize(model_config, env_config, weight_path, case, save):
state_dim = model_config.getint('model', 'state_dim')
gamma = model_config.getfloat('model', 'gamma')
bxmin = env_config.getfloat('sim', 'xmin')
bxmax = env_config.getfloat('sim', 'xmax')
bymin = env_config.getfloat('sim', 'ymin')
bymax = env_config.getfloat('sim', 'ymax')
xmin = env_config.getfloat('visualization', 'xmin')
xmax = env_config.getfloat('visualization', 'xmax')
ymin = env_config.getfloat('visualization', 'ymin')
ymax = env_config.getfloat('visualization', 'ymax')
crossing_radius = env_config.getfloat('sim', 'crossing_radius')
kinematic = env_config.getboolean('agent', 'kinematic')
radius = env_config.getfloat('agent', 'radius')
device = torch.device('cpu')
test_env = ENV(config=env_config, phase='test')
test_env.reset(case)
model = ValueNetwork(state_dim=state_dim, fc_layers=[100, 100, 100], kinematic=kinematic)
model.load_state_dict(torch.load(weight_path, map_location=lambda storage, loc: storage))
_, state_sequences, _, _ = run_one_episode(model, 'test', test_env, gamma, None, kinematic, device)
positions = list()
colors = list()
counter = list()
line_positions = list()
for i in range(len(state_sequences[0])):
counter.append(i)
if state_sequences[0][i] is None:
p0 = positions[-4][0]
c0 = 'tab:red'
h0 = 0
else:
p0 = (state_sequences[0][i].px, state_sequences[0][i].py)
c0 = 'tab:blue'
h0 = state_sequences[0][i].theta
xdata0 = [p0[0], p0[0]+radius*np.cos(h0)]
ydata0 = [p0[1], p0[1]+radius*np.sin(h0)]
if state_sequences[1][i] is None:
p1 = positions[-4][1]
c1 = 'tab:red'
h1 = 0
else:
p1 = (state_sequences[1][i].px, state_sequences[1][i].py)
c1 = 'tab:gray'
h1 = state_sequences[1][i].theta
xdata1 = [p1[0], p1[0]+radius*np.cos(h1)]
ydata1 = [p1[1], p1[1]+radius*np.sin(h1)]
if i == len(state_sequences[0])-1:
c0 = c1 = 'tab:red'
positions.append([p0, p1])
colors.append([c0, c1])
line_positions.append([[xdata0, ydata0], [xdata1, ydata1]])
fig, ax = plt.subplots(figsize=(7, 7))
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
ax.add_artist(plt.Circle((0, 0), crossing_radius, fill=False, edgecolor='g', lw=1))
ax.add_artist(plt.Rectangle((bxmin, bymin), bxmax-bxmin, bymax-bymin, fill=False, linestyle='dashed', lw=1))
agent0 = plt.Circle(positions[0][0], radius, fill=True, color='b')
agent1 = plt.Circle(positions[0][1], radius, fill=True, color='c')
line0 = plt.Line2D(line_positions[0][0][0], line_positions[0][0][1], color='tab:red')
line1 = plt.Line2D(line_positions[0][1][0], line_positions[0][1][1], color='tab:red')
text = plt.text(0, 8, 'Step: {}'.format(counter[0]), fontsize=12)
ax.add_artist(agent0)
ax.add_artist(agent1)
ax.add_artist(line0)
ax.add_artist(line1)
ax.add_artist(text)
def update(frame_num):
agent0.center = positions[frame_num][0]
agent1.center = positions[frame_num][1]
agent0.set_color(colors[frame_num][0])
agent1.set_color(colors[frame_num][1])
line0.set_xdata(line_positions[frame_num][0][0])
line0.set_ydata(line_positions[frame_num][0][1])
line1.set_xdata(line_positions[frame_num][1][0])
line1.set_ydata(line_positions[frame_num][1][1])
text.set_text('Step: {}'.format(counter[frame_num]))
anim = animation.FuncAnimation(fig, update, frames=len(positions), interval=800)
if save:
Writer = animation.writers['ffmpeg']
writer = Writer(fps=1, metadata=dict(artist='Me'), bitrate=1800)
output_file = 'data/output.mp4'
anim.save(output_file, writer=writer)
plt.show()
def main():
parser = argparse.ArgumentParser('Parse configuration file')
parser.add_argument('--output_dir', type=str, default='data/model/')
parser.add_argument('--init', default=False, action='store_true')
parser.add_argument('--case', default=0, type=int)
parser.add_argument('--save', default=False, action='store_true')
args = parser.parse_args()
config_file = os.path.join(args.output_dir, 'model.config')
if args.init:
weight_file = os.path.join(args.output_dir, 'initialized_model.pth')
else:
weight_file = os.path.join(args.output_dir, 'trained_model.pth')
model_config = configparser.RawConfigParser()
model_config.read(config_file)
env_config = configparser.RawConfigParser()
env_config.read('configs/env.config')
visualize(model_config, env_config, weight_file, args.case, args.save)
if __name__ == '__main__':
main()
|
78660
|
import ast
from amqp import ChannelError
from kombu import Connection
from kombu.common import uuid, maybe_declare
from mock import patch, ANY
from cell.actors import Actor, ActorProxy
from cell.exceptions import WrongNumberOfArguments
from cell.results import AsyncResult
from cell.tests.utils import Case, Mock, with_in_memory_connection
from cell.actors import ACTOR_TYPE
from kombu.compression import compress
from kombu.entity import Exchange
from kombu.messaging import Consumer
from cell.utils import qualname
class A(Actor):
pass
class RRActor(Actor):
type = (ACTOR_TYPE.RR, )
class ScatterActor(Actor):
type = (ACTOR_TYPE.RR, )
class MyCustomException(Exception):
pass
def clean_up_consumers(consumers):
for c in consumers:
for q in c.queues:
q.purge()
def get_next_msg(consumer):
for q in consumer.queues:
next_msg = q.get()
if next_msg:
consumer.channel.basic_ack(next_msg.delivery_tag)
return next_msg
def get_test_message(method='foo', args={'bar': 'foo_arg'},
class_name=None, reply_to=None, delivery_tag=None):
with Connection('memory://') as conn:
ch = conn.channel()
body = {'method': method, 'args': args, 'class': class_name}
data = ch.prepare_message(body)
data['properties']['reply_to'] = reply_to
delivery_tag = delivery_tag or uuid()
data['properties']['delivery_tag'] = delivery_tag
return body, ch.message_to_python(data)
def get_encoded_test_message(method='foo', args={'bar': 'foo_arg'},
class_name=A.__class__.__name__,
reply_to=None, delivery_tag=None):
with Connection('memory://') as conn:
ch = conn.channel()
body = {'method': method, 'args': args, 'class': class_name}
c_body, compression = compress(str(body), 'gzip')
data = ch.prepare_message(c_body, content_type='application/json',
content_encoding='utf-8',
headers={'compression': compression})
data['properties']['reply_to'] = reply_to
delivery_tag = delivery_tag or uuid()
data['properties']['delivery_tag'] = delivery_tag
return body, ch.message_to_python(data)
class test_Actor(Case):
def assertNextMsgDataEqual(self, consumer, expected_data):
next_msg = get_next_msg(consumer)
msg = next_msg.decode()
self.assertDictContainsSubset(expected_data, msg)
def test_init(self):
"""test that __init__ sets fields"""
a1 = A()
self.assertIsNotNone(a1.exchange)
self.assertIsNotNone(a1.outbox_exchange)
self.assertTrue(a1.type_to_queue)
self.assertTrue(a1.type_to_exchange)
self.assertIsNotNone(a1.state)
self.assertIsNotNone(a1.log)
def test_init_construct(self):
"""test that __init__ calls construct callback"""
class Constructed(Actor):
construct_called = False
def construct(self):
self.construct_called = True
self.assertTrue(Constructed().construct_called)
def test_bind(self):
"""test when Actor.bind(connection)"""
a = A()
self.assertTrue(a.id)
with Connection('memory://') as conn:
bound = a.bind(conn)
self.assertIsNot(a, bound, 'bind returns new instance')
self.assertIs(bound.connection, conn)
self.assertEqual(bound.id, a.id)
self.assertEqual(bound.exchange, a.exchange)
self.assertEqual(bound.name, a.name)
self.assertIsNone(bound.agent)
# ---------------------------------------------------------------------
# Test all API send-like methods call cast with the correct arguments
# ---------------------------------------------------------------------
def test_bind_with_agent(self):
"""test when Actor.bind(connection, agent)"""
a = A()
agent = Mock(id=uuid())
with Connection('memory://') as conn:
bound = a.bind(conn, agent=agent)
self.assertIs(bound.agent, agent)
self.assertIs(bound.state.agent, agent)
def test_contributes_to_state(self):
"""test that Actor.contributes_to_state"""
class Stateful(Actor):
class state(object):
foo = 3
class OverridesStateful(Actor):
class state(object):
def contribute_to_state(self, actor):
self.contributed = 1, actor
return self
a1 = Stateful()
self.assertIsNotNone(a1.state)
self.assertIsInstance(a1.state, Stateful.state)
self.assertEqual(a1.state.foo, 3)
self.assertIs(a1.state.actor, a1)
self.assertIsNone(a1.state.agent)
self.assertIs(a1.state.connection, a1.connection)
self.assertIs(a1.state.log, a1.log)
self.assertIs(a1.state.Next, a1.Next)
self.assertIs(a1.state.NoRouteError, a1.NoRouteError)
self.assertIs(a1.state.NoReplyError, a1.NoReplyError)
self.assertTrue(callable(a1.state.add_binding))
self.assertTrue(a1.state.add_binding.__self__, a1)
self.assertTrue(callable(a1.state.remove_binding))
self.assertTrue(a1.state.remove_binding.__self__, a1)
a2 = OverridesStateful()
self.assertIsNotNone(a2.state)
self.assertIsInstance(a2.state, OverridesStateful.state)
self.assertTupleEqual(a2.state.contributed, (1, a2))
with self.assertRaises(AttributeError):
a2.state.actor
# -----------------------------------------------------------------
# Test the API for invoking a remote method
# -----------------------------------------------------------------
def test_throw(self):
# Set Up
method, args, return_val = 'foo', {'args': 'foo_args'}, 'result'
a = A()
a.call_or_cast = Mock(return_value=Mock())
a.call_or_cast.return_value.get = Mock(return_value=return_val)
a.call_or_cast.return_value.result = Mock(return_value=return_val)
# when throw is invoked,
# all its arguments are passed to call_or_cast and the result is returned
result = a.throw(method, args)
a.call_or_cast.assert_called_once_with(method, args,
type=ACTOR_TYPE.RR,
nowait=False)
self.assertEquals(result.result(), return_val)
a.call_or_cast.reset_mock()
# when throw is invoked with nowait=True, no result is returned
result = a.throw(method, args, nowait=True)
a.call_or_cast.assert_called_once_with(method, args,
type=ACTOR_TYPE.RR,
nowait=True)
self.assertIsNone(result)
a.call_or_cast.reset_mock()
# when throw is invoked without arguments
# an empty dict is passed to call_or_cast
a.throw(method)
a.call_or_cast.assert_called_once_with(method, {},
type=ACTOR_TYPE.RR,
nowait=False)
def test_send(self):
# Set Up
method, args, return_val = 'foo', {'args': 'foo_args'}, 'bar'
a = A()
a.call_or_cast = Mock(return_value=Mock())
a.call_or_cast.return_value.get = Mock(return_value=return_val)
# when send is invoked, all its arguments are passed to call_or_cast
result = a.send(method, args)
a.call_or_cast.assert_called_once_with(method, args,
nowait=False,
routing_key=a.id)
self.assertIs(result, return_val)
a.call_or_cast.reset_mock()
# when send is invoked with nowait=False, the result is returned
result = a.send(method, args, nowait=False)
a.call_or_cast.assert_called_with(method, args,
nowait=False,
routing_key=a.id)
self.assertIs(result, return_val)
a.call_or_cast.reset_mock()
# when send is invoked with nowait=True, no result is returned
result = a.send(method, args, nowait=True)
a.call_or_cast.assert_called_once_with(method, args,
nowait=True,
routing_key=a.id)
self.assertIsNone(result)
a.call_or_cast.reset_mock()
# when send is invoked without arguments
# an empty dict is passed to call_or_cast
result = a.send(method, nowait=True)
a.call_or_cast.assert_called_with(method, {},
nowait=True,
routing_key=a.id)
self.assertIsNone(result)
def test_scatter(self):
# Set Up
method, args, = 'foo', {'args': 'foo_args'}
return_val, timeout, default_timeout = 'res', 1, 2
a = A()
a.default_timeout = default_timeout
a.call_or_cast = Mock(return_value=Mock())
a.call_or_cast.return_value.gather = Mock(return_value=return_val)
# when scatter is invoked with default arguments
result = a.scatter(method, args)
a.call_or_cast.assert_called_once_with(method, args,
type=ACTOR_TYPE.SCATTER,
nowait=False,
timeout=default_timeout)
self.assertEquals(result, return_val)
a.call_or_cast.reset_mock()
# when scatter is invoked with explicit nowait and timeout
nowait = False
result = a.scatter(method, args, nowait, timeout=timeout)
a.call_or_cast.assert_called_once_with(method, args,
type=ACTOR_TYPE.SCATTER,
nowait=nowait,
timeout=timeout)
self.assertEquals(result, return_val)
a.call_or_cast.reset_mock()
# when scatter is invoked with explicit nowait set to True
nowait = True
result = a.scatter(method, args, nowait)
a.call_or_cast.assert_called_once_with(method, args,
type=ACTOR_TYPE.SCATTER,
nowait=nowait,
timeout=default_timeout)
self.assertIsNone(result)
a.call_or_cast.reset_mock()
# when scatter is invoked without args param set
result = a.scatter(method)
a.call_or_cast.assert_called_once_with(method, {},
type=ACTOR_TYPE.SCATTER,
nowait=False,
timeout=default_timeout)
def test_emit(self):
method, args, retry = 'foo', {'args': 'foo_args'}, True
a = A()
a.cast = Mock()
# when emit is invoked with default arguments
a.emit(method)
result = a.cast.assert_called_once_with(method, {},
retry=None,
exchange=a.outbox)
self.assertIsNone(result)
a.cast.reset_mock()
# when emit is invoked with explicit arguments
a.emit(method, args, retry)
result = a.cast.assert_called_once_with(method, args,
retry=retry,
exchange=a.outbox)
self.assertIsNone(result)
def test_call_or_cast(self):
a = A()
method, args, return_val = 'foo', {'args': 'foo_args'}, 'bar'
a.call = Mock(return_value=return_val)
a.cast = Mock()
# when call_or_cast is invoked with default arguments:
# call is invoked, result is returned
result = a.call_or_cast(method, args)
a.call.assert_called_once_with(method, args)
self.assertEquals(result, return_val)
a.call.reset_mock()
# when call_or_cast is invoked with nowait=False:
# call is invoked and the result is returned
result = a.call_or_cast(method, args, nowait=False)
a.call.assert_called_once_with(method, args)
self.assertEquals(result, return_val)
a.call.reset_mock()
# when call_or_cast is invoked with nowait=True:
# cast is invoked, no result is returned
result = a.call_or_cast(method, args, nowait=True)
a.cast.assert_called_once_with(method, args)
@patch('cell.actors.uuid')
def test_call(self, new_uuid):
dummy_method, dummy_args, ticket = 'foo', {'foo': 1}, '12345'
new_uuid.return_value = ticket
a = A()
a.cast = Mock()
# when call is invoked:
# cast is invoked with correct reply_to argument
res = a.call(dummy_method, dummy_args)
self.assertTrue(a.cast.called)
(method, args), kwargs = a.cast.call_args
self.assertEqual(method, dummy_method)
self.assertEqual(args, dummy_args)
self.assertDictContainsSubset({'reply_to': ticket}, kwargs)
# returned result is correct
self.assertIsInstance(res, AsyncResult)
self.assertEquals(res.ticket, ticket)
# -----------------------------------------------------------------
# Test the cast method
# -----------------------------------------------------------------
@patch('kombu.transport.memory.Channel.basic_publish')
def assert_cast_calls_basic_publish_with(self, a, routing_key,
exchange, type, mocked_publish):
method, args = 'foo', {'bar': 'foo_arg'}
type = type or ACTOR_TYPE.DIRECT
ticket = uuid()
expected_body, _ = get_test_message(
method, args, a.__class__.__name__, reply_to=ticket)
a.cast(method, args, type=type)
mocked_publish.assert_called_once_with(
ANY, immediate=ANY, mandatory=ANY,
exchange=exchange, routing_key=routing_key)
(message, ), _ = mocked_publish.call_args
body = ast.literal_eval(message.get('body'))
self.assertDictEqual(body, expected_body)
@with_in_memory_connection
def test_cast_calls_basic_publish_with_correct_exchange(self, conn):
a = A(conn)
rk = a.routing_key
direct_exchange = a.type_to_exchange[ACTOR_TYPE.DIRECT]()
self.assert_cast_calls_basic_publish_with(
a, rk, direct_exchange.name, ACTOR_TYPE.DIRECT)
fanout_exchange = a.type_to_exchange[ACTOR_TYPE.SCATTER]()
self.assert_cast_calls_basic_publish_with(
a, rk, fanout_exchange.name, ACTOR_TYPE.SCATTER)
rr_exchange = a.type_to_exchange[ACTOR_TYPE.RR]()
self.assert_cast_calls_basic_publish_with(
a, rk, rr_exchange.name, ACTOR_TYPE.RR)
@with_in_memory_connection
def test_cast_calls_basic_publish_with_correct_rk(self, conn):
a = A(conn)
exch = a.type_to_exchange[ACTOR_TYPE.DIRECT]().name
a = A(conn)
rk = a.routing_key
self.assert_cast_calls_basic_publish_with(a, rk, exch, None)
agent = Mock(id=uuid())
a = A(conn, agent=agent)
rk = a.routing_key
self.assertNotEqual(a.routing_key, agent.id)
self.assert_cast_calls_basic_publish_with(a, rk, exch, None)
agent = Mock(id=uuid())
id = '1234'
a = A(conn, agent=agent, id=id)
rk = a.routing_key
self.assertEqual(a.routing_key, id)
self.assert_cast_calls_basic_publish_with(a, rk, exch, None)
self.assertEquals(a.routing_key, rk)
self.assert_cast_calls_basic_publish_with(a, rk, exch, None)
a = A(conn)
a.default_routing_key = 'fooooooooo'
rk = a.default_routing_key
self.assertEquals(a.routing_key, rk)
self.assert_cast_calls_basic_publish_with(a, rk, exch, None)
@with_in_memory_connection
def test_cast_not_supported_type(self, conn):
a = A(conn)
with self.assertRaises(Exception):
a.cast(method='foo', args={}, type='my_type')
@with_in_memory_connection
def test_cast_direct(self, conn):
a = A(conn)
b = A(conn)
data_no_args = {'method': 'foo', 'args': {},
'class': a.__class__.__name__}
data_with_args = {'method': 'foo', 'args': {'foo': 'foo_arg'},
'class': a.__class__.__name__}
a_con = a.Consumer(conn.channel())
b_con = b.Consumer(conn.channel())
# when cast is invoked with default arguments:
# the message is delivered only to its actor and to no one else,
a.cast(method=data_no_args['method'], args=data_no_args['args'])
self.assertNextMsgDataEqual(a_con, data_no_args)
self.assertIsNone(get_next_msg(b_con))
# when cast is invoked with type = ACTOR_TYPE_DIRECT:
# the message is delivered only to its actor and to no one else,
a.cast(method=data_with_args['method'], args=data_with_args['args'],
type=ACTOR_TYPE.DIRECT)
self.assertNextMsgDataEqual(a_con, data_with_args)
self.assertIsNone(get_next_msg(b_con))
clean_up_consumers([a_con, b_con])
@with_in_memory_connection
def test_cast_scatter(self, conn):
class AnotherRRActor(Actor):
type = (ACTOR_TYPE.RR, )
a = ScatterActor(conn)
data_with_args = {'method': 'foo', 'args': {'foo': 'foo_arg'},
'class': a.__class__.__name__}
b, c = ScatterActor(conn), A(conn)
d, e = RRActor(conn), AnotherRRActor(conn)
a_con = a.Consumer(conn.channel())
b_con = b.Consumer(conn.channel())
c_con = c.Consumer(conn.channel())
d_con = d.Consumer(conn.channel())
e_con = e.Consumer(conn.channel())
# when cast is invoked for broadcast:
# all consumers for that actor class get the message and
# the message is not consumed by consumers for other actor classes
a.cast(method=data_with_args['method'], args=data_with_args['args'],
type=ACTOR_TYPE.SCATTER)
self.assertNextMsgDataEqual(a_con, data_with_args)
self.assertNextMsgDataEqual(b_con, data_with_args)
self.assertIsNone(get_next_msg(c_con))
self.assertIsNone(get_next_msg(d_con))
self.assertIsNone(get_next_msg(e_con))
clean_up_consumers([a_con, b_con, c_con, d_con, e_con])
@with_in_memory_connection
def test_cast_round_robin_send_once(self, conn):
# when cast is invoked once,
# exactly one consumer should receive the message
a, b, c = RRActor(conn), RRActor(conn), A()
data_with_args = {'method': 'foo', 'args': {'foo': 'foo_arg'},
'class': a.__class__.__name__}
a_con = a.Consumer(conn.channel())
b_con = b.Consumer(conn.channel())
c_con = c.Consumer(conn.channel())
# when cast is invoked for round-robin:
# only one consumer for that actor class receives the message and
# no messages are consumed by consumers for other actor classes
a.cast(method=data_with_args['method'], args=data_with_args['args'],
type=ACTOR_TYPE.RR)
a_msg = get_next_msg(a_con)
b_msg = get_next_msg(b_con)
self.assertTrue((a_msg or b_msg) and (not(a_msg and b_msg)))
self.assertIsNone(get_next_msg(c_con))
clean_up_consumers([a_con, b_con, c_con])
@with_in_memory_connection
def test_cast_round_robin_send_repeatedly(self, conn):
# when cast is invoked many times,
# eventually all consumers should consume at least one message
a, b, c = RRActor(conn), RRActor(conn), A()
data_with_args = {'method': 'foo', 'args': {'foo': 'foo_arg'},
'class': a.__class__.__name__}
a_con = a.Consumer(conn.channel())
b_con = b.Consumer(conn.channel())
c_con = c.Consumer(conn.channel())
for i in range(1, 5):
a.cast(method=data_with_args['method'],
args=data_with_args['args'],
type=ACTOR_TYPE.RR)
self.assertNextMsgDataEqual(a_con, data_with_args)
self.assertNextMsgDataEqual(b_con, data_with_args)
self.assertIsNone(get_next_msg(c_con))
clean_up_consumers([a_con, b_con, c_con])
# -----------------------------------------------------------------
# Test functionality for correct dispatch of method calls
# --------------------------------------------------------------------------
def test_on_message_when_reply_to_is_set(self):
class Foo(Actor):
class state():
foo_called = False
def foo(self, bar):
self.foo_called = True
return (bar, ret_val)
args, ret_val = {'bar': 'foo_arg'}, 'foooo'
ticket = uuid()
body, message = get_test_message(
'foo', args, Foo.__class__.__name__, reply_to=[ticket])
a = Foo()
a.reply = Mock()
# when the property reply_to is set, reply is called
a._on_message(body, message)
self.assertTrue(a.state.foo_called)
self.assertEqual(a.reply.call_count, 1)
def test_on_message_when_reply_to_not_set(self):
ret_val = 'fooo'
class Foo(Actor):
class state():
foo_called = False
def foo(self, bar):
self.foo_called = True
return (bar, ret_val)
# when the property reply_to is not set, reply is not called
body, message = get_test_message(
'foo', {'bar': 'foo_arg'}, Foo.__class__.__name__)
message.ack = Mock()
a = Foo()
a.reply = Mock()
result = a._on_message(body, message)
self.assertTrue(a.state.foo_called)
self.assertEquals(a.reply.call_count, 0)
# message should be acknowledged after the method is executed
message.ack.assert_called_once()
# no result should be returned
self.assertIsNone(result)
def test_on_message_invokes_on_dispatch_when_reply_to_not_set(self):
ret_val = 'fooo'
body, message = get_test_message('foo', {'bar': 'foo_arg'},
A.__class__.__name__)
a = A()
a.reply = Mock()
a._DISPATCH = Mock(return_value=ret_val)
# when reply_to is not set:
# dispatch result should be ignored
result = a._on_message(body, message)
self.assertEqual(a._DISPATCH.call_count, 1)
self.assertIsNone(result)
self.assertEqual(a.reply.call_count, 0)
def test_on_message_invokes_on_dispatch_when_reply_to_set(self):
ret_val = 'fooo'
ticket = uuid()
body, message = get_test_message('foo', {'bar': 'foo_arg'},
A.__class__.__name__,
reply_to=ticket)
a = A()
a.reply = Mock()
a._DISPATCH = Mock(return_value=ret_val)
# when reply_to is set:
# dispatch result should be ignored
a._on_message(body, message)
a._DISPATCH.assert_called_once_with(body, ticket=ticket)
a.reply.assert_called_once_with(message, ret_val)
def test_on_message_when_no_method_is_passed(self):
args, ret_val = {'bar': 'foo_arg'}, 'fooo'
class Foo(Actor):
class state():
def foo(self, bar):
self.foo_called = True
return (bar, ret_val)
body, message = get_test_message('', {'bar': 'foo_arg'},
Foo.__class__.__name__)
message.ack = Mock()
a = Foo()
a.default_receive = Mock()
result = a._on_message(body, message)
self.assertEqual(a.default_receive.call_count, 1)
# message should be acknowledged even when the method does not exist
message.ack.assert_called_once_with()
self.assertIsNone(result)
def test_on_message_when_private_method_is_passed(self):
body, message = get_test_message('_foo', {},
A.__class__.__name__)
message.ack = Mock()
a = A()
a.state._foo = Mock()
a._on_message(body, message)
self.assertEqual(a.state._foo.call_count, 0)
# message should be acknowledged even when method is not invoked
message.ack.assert_called_once_with()
def test_on_message_when_unexisted_method_is_passed(self):
body, message = get_test_message('bar', {'bar': 'foo_arg'},
A.__class__.__name__)
message.ack = Mock()
a = A()
a.default_receive = Mock()
result = a._on_message(body, message)
# message should be acknowledged even when the method does not exist
message.ack.assert_called_once_with()
self.assertIsNone(result)
def test_on_message_delegated_to_agent(self):
body, message = get_test_message('bar', {'bar': 'foo_arg'},
A.__class__.__name__)
a = A()
a.agent = Mock()
a.on_message(body, message)
a.agent.process_message.assert_called_once_with(a, body, message)
def assert_on_message_exception_raise(self, exception_cls, ack_count):
body, message = get_test_message('bar', {'bar': 'foo_arg'},
A.__class__.__name__)
a = A()
message.ack = Mock()
a.handle_cast = Mock(side_effect=exception_cls('Boom'))
with self.assertRaises(exception_cls):
a._on_message(body, message)
self.assertEquals(message.ack.call_count, ack_count)
a.handle_cast.reset_mock()
message.ack.reset_mock()
message.ack = Mock()
a.handle_call = Mock(side_effect=exception_cls('Boom'))
body, message = get_test_message('bar', {'bar': 'foo_arg'},
A.__class__.__name__,
reply_to=[uuid])
with self.assertRaises(exception_cls):
a._on_message(body, message)
self.assertEquals(message.ack.call_count, ack_count)
def test_on_message_when_base_exception_occurs(self):
# Do not ack the message if an exceptional error occurs,
self.assert_on_message_exception_raise(Exception, 0)
# but do ack the message if BaseException
# (SystemExit or KeyboardInterrupt)
# is raised, as this is probably intended.
self.assert_on_message_exception_raise(BaseException, 1)
def test_dispatch_return_values(self):
"""In the case of a successful call the return value will
be::
{'ok': return_value, **default_fields}
If the method raised an exception the return value
will be::
{'nok': [repr exc, str traceback], **default_fields}
:raises KeyError: if the method specified is unknown
or is a special method (name starting with underscore).
"""
# when result is correct
ret_val = 'foooo'
a = A()
body, message = get_test_message('bar', {'bar': 'foo_arg'},
a.__class__.__name__)
expected_result = {'ok': ret_val}
a.state.bar = Mock(return_value=ret_val)
result = a._DISPATCH(body)
self.assertDictContainsSubset(expected_result, result)
self.assertNotIn('nok', result)
# when method called does not return a result
a.state.bar.reset_mock()
a.state.bar = Mock(return_value=None)
expected_result = {'ok': None}
result = a._DISPATCH(body)
self.assertDictContainsSubset(expected_result, result)
self.assertNotIn('nok', result)
# when method does not exist
body, message = get_test_message(
'foo', {'bar': 'foo_arg'}, a.__class__.__name__)
result = a._DISPATCH(body)
self.assertIn('nok', result)
self.assertIn("KeyError('foo',)", result['nok'])
# when calling a private method
body, message = get_test_message(
'_foo', {'bar': 'foo_arg'}, a.__class__.__name__)
a._foo = Mock()
result = a._DISPATCH(body)
self.assertIn('nok', result)
self.assertIn("KeyError('_foo',)", result['nok'])
# when calling a private method
body, message = get_test_message(
'__foo', {'bar': 'foo_arg'}, a.__class__.__name__)
a.__foo = Mock()
result = a._DISPATCH(body)
self.assertIn('nok', result)
self.assertIn("KeyError('__foo',)", result['nok'])
# when method called raises an exception
body, message = get_test_message('foo_with_exception',
{'bar': 'foo_arg'},
a.__class__.__name__)
a.foo_with_exception = Mock(side_effect=Exception('FooError'))
result = a._DISPATCH(body)
self.assertIn('nok', result)
self.assertIn("KeyError('foo_with_exception',)", result['nok'])
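    # Illustrative note (not part of the original tests): the two reply body
    # shapes documented in test_dispatch_return_values look like, e.g.,
    #     {'ok': 'foooo'}                                    # successful call
    #     {'nok': ["KeyError('foo',)", '<traceback text>']}  # failed/unknown method
    # (the concrete values here are made-up examples).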
@with_in_memory_connection
def test_on_message_is_sending_to_reply_queue(self, conn):
ret_result = 'foooo'
class Foo(A):
class state:
def bar(self, my_bar):
return ret_result
a = Foo(conn)
ticket = uuid()
delivery_tag = uuid()
body, message = get_encoded_test_message('bar', {'my_bar': 'bar_arg'},
A.__class__.__name__,
reply_to=ticket,
delivery_tag=delivery_tag)
# Set up a reply queue to read from
# reply_q and reply_exchange should be set by the sender
a.reply_exchange = a.reply_exchange.bind(a.connection.default_channel)
maybe_declare(a.reply_exchange)
reply_q = a.get_reply_queue(ticket)
reply_q(a.connection.default_channel).declare()
a._on_message(body, message)
a_con = Consumer(conn.channel(), reply_q)
self.assertNextMsgDataEqual(a_con, {'ok': ret_result})
@with_in_memory_connection
def test_reply_queue_is_declared_after_call(self, conn):
ticket = uuid()
with patch('cell.actors.uuid') as new_uuid:
new_uuid.return_value = ticket
a = A(conn)
reply_q = a.get_reply_queue(ticket)
a.get_reply_queue = Mock(return_value=reply_q)
with self.assertRaises(ChannelError):
reply_q(conn.channel()).queue_declare(passive=True)
a.call(method='foo', args={}, type=ACTOR_TYPE.DIRECT)
a.get_reply_queue.assert_called_once_with(ticket)
self.assertTrue(
reply_q(conn.channel()).queue_declare(passive=True))
@with_in_memory_connection
def test_reply_send_correct_msg_body_to_the_reply_queue(self, conn):
a = A(conn)
ticket = uuid()
delivery_tag = 2
body, message = get_encoded_test_message('bar', {'my_bar': 'bar_arg'},
a.__class__.__name__,
reply_to=ticket,
delivery_tag=delivery_tag)
# Set up a reply queue to read from
# reply_q and reply_exchange should be set by the sender
a.reply_exchange.maybe_bind(a.connection.default_channel)
maybe_declare(a.reply_exchange)
reply_q = a.get_reply_queue(ticket)
reply_q(a.connection.default_channel).declare()
a.reply(message, body)
a_con = Consumer(conn.channel(), reply_q)
reply_msg = get_next_msg(a_con)
reply_body = reply_msg.decode()
self.assertEquals(reply_body, body)
# -----------------------------------------------------------------
# Test actor to actor binding functionality (add_binding, remove_binding)
# ----------------------------------------------------------------
def mock_exchange(self, actor, type):
exchange = actor.type_to_exchange[type]()
exchange.bind_to = Mock()
exchange.exchange_unbind = Mock()
exchange.declare = Mock()
actor.type_to_exchange[type] = Mock(return_value=exchange)
return exchange
def mock_queue(self, actor, type):
queue = actor.type_to_queue[type]()
queue.bind_to = Mock()
queue.unbind_from = Mock()
queue.declare = Mock()
actor.type_to_queue[type] = Mock(return_value=queue)
return queue
@with_in_memory_connection
def test_add_remove_binding_for_direct_type(self, conn):
# Add binding between the inbox queue
# of one actor to the outbox exchange of another
a, b = A(conn), A(conn)
routing_key = 'foooo'
mock_entity_type = ACTOR_TYPE.DIRECT
inbox_queue = self.mock_queue(a, mock_entity_type)
source_ex = b.outbox
a._add_binding(source_ex.as_dict(), routing_key, mock_entity_type)
inbox_queue.bind_to.assert_called_with(
exchange=b.outbox, routing_key=routing_key)
a._remove_binding(source_ex.as_dict(), routing_key, mock_entity_type)
inbox_queue.unbind_from.assert_called_with(
exchange=source_ex, routing_key=routing_key)
@with_in_memory_connection
def test_add_remove_binding_for_scatter_type(self, conn):
a, b = A(conn), A(conn)
routing_key, mock_entity_type = 'foooo', ACTOR_TYPE.SCATTER
dest_ex = self.mock_exchange(a, mock_entity_type)
source_ex = b.outbox
a._add_binding(source_ex.as_dict(),
routing_key=routing_key,
inbox_type=mock_entity_type)
dest_ex.bind_to.assert_called_with(exchange=source_ex,
routing_key=routing_key)
a._remove_binding(source_ex.as_dict(), routing_key, mock_entity_type)
dest_ex.exchange_unbind.assert_called_with(
exchange=source_ex, routing_key=routing_key)
@with_in_memory_connection
def test_add_remove_binding_for_rr_type(self, conn):
a, b = A(conn), A(conn)
routing_key, mock_entity_type = 'foooo', ACTOR_TYPE.RR
dest_exchange = self.mock_exchange(a, mock_entity_type)
source_ex = b.outbox
a._add_binding(source_ex.as_dict(), routing_key, mock_entity_type)
dest_exchange.bind_to.assert_called_with(
exchange=source_ex, routing_key=routing_key)
a._remove_binding(source_ex.as_dict(), routing_key, mock_entity_type)
dest_exchange.exchange_unbind.assert_called_with(
exchange=source_ex, routing_key=routing_key)
@with_in_memory_connection
def test_add_binding_when_actor_for_not_supported_type(self, conn):
a, b = A(conn), A(conn)
entity_type = 'test'
self.assertNotIn(entity_type, a.types)
with self.assertRaises(Exception):
a._add_binding(b.outbox.as_dict(),
routing_key=b.routing_key, inbox_type=entity_type)
@with_in_memory_connection
def test_add_remove_binding_when_routing_key_is_empty(self, conn):
a = A(conn)
routing_key, mock_entity_type = "", ACTOR_TYPE.SCATTER
source_ex = Exchange('bar.foo.bar', mock_entity_type)
exchange = self.mock_exchange(a, mock_entity_type)
a._add_binding(source_ex.as_dict(), routing_key, mock_entity_type)
exchange.bind_to.assert_called_with(exchange=source_ex,
routing_key=routing_key)
a._remove_binding(source_ex.as_dict(), routing_key, mock_entity_type)
exchange.exchange_unbind.assert_called_with(exchange=source_ex,
routing_key=routing_key)
class As(Actor):
class state():
def foo(self, who=None):
pass
def meth(self):
pass
class test_ActorProxy(Case):
@with_in_memory_connection
def test_init(self, conn):
"""test that __init__ sets fields"""
id = uuid()
ag, res = Mock(), Mock()
# first, create a proxy without an async start result
a1 = ActorProxy(qualname(A), id, connection=conn, agent=ag)
self.assertEqual(a1.id, id)
self.assertIsNone(a1.async_start_result)
self.assertIsInstance(a1._actor, A)
self.assertEqual(a1._actor.name, A().__class__.__name__)
self.assertEqual(a1._actor.agent, ag)
self.assertEqual(a1._actor.id, a1.id)
self.assertEqual(a1._actor.connection, conn)
a1 = ActorProxy(qualname(A), id, res, connection=conn, agent=ag)
self.assertEqual(a1.id, id)
self.assertEqual(a1.async_start_result, res)
self.assertEqual(a1._actor.id, a1.id)
self.assertIsInstance(a1._actor, A)
self.assertEqual(a1._actor.name, A().__class__.__name__)
self.assertEqual(a1._actor.agent, ag)
self.assertEqual(a1._actor.connection, conn)
def assert_actor_method_called(self, meth, func):
args = ['foo', {'who': 'the quick brown...'}]
kwargs = {'nowait': True}
func(*args, **kwargs)
meth.assert_called_once_with(*args, **kwargs)
args = ['bar', {'who': 'the quick brown...'}]
kwargs = {'nowait': True}
# bar method is not supported, so an error is thrown
with self.assertRaises(AttributeError):
func(*args, **kwargs)
with self.assertRaises(WrongNumberOfArguments):
func()
def assert_actor_method_called_with_par_foo(
self, mock_meth, func):
args, kwargs = [{'who': 'the quick brown...'}], {'nowait': True}
func.foo(*args, **kwargs)
mock_meth.assert_called_once_with('foo', *args, **kwargs)
with self.assertRaises(AttributeError):
func.bar(*args, **kwargs)
@patch.object(Actor, 'call', return_value=None)
def test_call_dot(self, call):
a1 = ActorProxy(qualname(As), uuid())
self.assert_actor_method_called_with_par_foo(call, a1.call)
@patch.object(Actor, 'call', return_value=None)
def test_call(self, call):
a1 = ActorProxy(qualname(As), uuid())
self.assert_actor_method_called(call, a1.call)
@patch.object(Actor, 'send', return_value=None)
def test_send(self, send):
a1 = ActorProxy(qualname(As), uuid())
self.assert_actor_method_called(send, a1.send)
@patch.object(Actor, 'send', return_value=None)
def test_send_dot(self, send):
a1 = ActorProxy(qualname(As), uuid())
self.assert_actor_method_called_with_par_foo(send, a1.send)
@patch.object(Actor, 'throw', return_value=None)
def test_throw(self, throw):
a1 = ActorProxy(qualname(As), uuid())
self.assert_actor_method_called(throw, a1.throw)
@patch.object(Actor, 'throw', return_value=None)
def test_throw_dot(self, throw):
a1 = ActorProxy(qualname(As), uuid())
self.assert_actor_method_called_with_par_foo(throw, a1.throw)
@patch.object(Actor, 'scatter', return_value=None)
def test_scatter(self, scatter):
a1 = ActorProxy(qualname(As), uuid())
self.assert_actor_method_called(scatter, a1.scatter)
@patch.object(Actor, 'scatter', return_value=None)
def test_scatter_dot(self, scatter):
a1 = ActorProxy(qualname(As), uuid())
self.assert_actor_method_called_with_par_foo(scatter, a1.scatter)
@patch.object(As, 'meth', return_value=None)
def test_arbitrary_actor_method(self, meth):
a1 = ActorProxy(qualname(As), uuid())
a1.meth()
meth.assert_called_once_with()
meth.reset_mock()
args = ['bar']
a1.meth(*args)
meth.assert_called_once_with(*args)
def test_non_existing_actor_method(self):
a1 = ActorProxy(qualname(As), uuid())
with self.assertRaises(AttributeError):
a1.bar()
|
78707
|
from datetime import datetime
from mongoengine import DateTimeField, Document, UUIDField
from mongoengine.fields import DictField, ListField, ReferenceField
from vim_adaptor.models.vims import BaseVim
class ServiceInstance(Document):
"""
Document class to store data related to a service instance
"""
id = UUIDField(primary_key=True)
created_at = DateTimeField(default=datetime.utcnow)
vims = ListField(ReferenceField(BaseVim))
# A dict that maps VIM ids to their details dicts
vim_details = DictField(required=True)
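# Illustrative sketch (not part of the original module): creating and saving a
# service instance record. The UUID and detail values are made up, and an
# active mongoengine connection is assumed; BaseVim documents would normally
# be created elsewhere in the project.
def _example_create_service_instance():
    from uuid import uuid4

    instance = ServiceInstance(
        id=uuid4(),
        vim_details={"vim-1": {"status": "ready"}},
    )
    instance.save()
    return instance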
|
78756
|
import base64
import urllib2
import json
import os
import logging
from celery import task
from django.conf import settings
from django.utils.timezone import now
from github.GithubObject import NotSet
from github import Github, GithubException, InputGitTreeElement
from ide.git import git_auth_check, get_github
from ide.models.build import BuildResult
from ide.models.project import Project
from ide.tasks import do_import_archive, run_compile
from ide.utils.git import git_sha, git_blob
from ide.utils.project import find_project_root_and_manifest, BaseProjectItem, InvalidProjectArchiveException
from ide.utils.sdk import generate_manifest_dict, generate_manifest, generate_wscript_file, manifest_name_for_project
from utils.td_helper import send_td_event
__author__ = 'katharine'
logger = logging.getLogger(__name__)
@task(acks_late=True)
def do_import_github(project_id, github_user, github_project, github_branch, delete_project=False):
try:
url = "https://github.com/%s/%s/archive/%s.zip" % (github_user, github_project, github_branch)
if file_exists(url):
u = urllib2.urlopen(url)
return do_import_archive(project_id, u.read())
else:
raise Exception("The branch '%s' does not exist." % github_branch)
except Exception as e:
try:
project = Project.objects.get(pk=project_id)
user = project.owner
except:
project = None
user = None
if delete_project and project is not None:
try:
project.delete()
except:
pass
send_td_event('cloudpebble_github_import_failed', data={
'data': {
'reason': e.message,
'github_user': github_user,
'github_project': github_project,
'github_branch': github_branch
}
}, user=user)
raise
def file_exists(url):
request = urllib2.Request(url)
request.get_method = lambda: 'HEAD'
try:
urllib2.urlopen(request)
except:
return False
else:
return True
@git_auth_check
def github_push(user, commit_message, repo_name, project):
g = Github(user.github.token, client_id=settings.GITHUB_CLIENT_ID, client_secret=settings.GITHUB_CLIENT_SECRET)
repo = g.get_repo(repo_name)
try:
branch = repo.get_branch(project.github_branch or repo.master_branch)
except GithubException:
raise Exception("Unable to get branch.")
commit = repo.get_git_commit(branch.commit.sha)
tree = repo.get_git_tree(commit.tree.sha, recursive=True)
next_tree = {x.path: InputGitTreeElement(path=x.path, mode=x.mode, type=x.type, sha=x.sha) for x in tree.tree}
try:
root, manifest_item = find_project_root_and_manifest([GitProjectItem(repo, x) for x in tree.tree])
except InvalidProjectArchiveException:
root = ''
manifest_item = None
expected_paths = set()
def update_expected_paths(new_path):
# This adds the path *and* its parent directories to the list of expected paths.
# The parent directories are already keys in next_tree, so if they aren't present in expected_paths
# then, when iterating over next_tree to see which files have been deleted, we would have to treat
# directories as special cases.
split_path = new_path.split('/')
expected_paths.update('/'.join(split_path[:p]) for p in range(2, len(split_path) + 1))
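    # Illustrative example (not in the original): for a repo path such as
    # 'src/js/app.js', update_expected_paths adds 'src/js' and 'src/js/app.js'
    # to expected_paths, so the directory nodes already present in next_tree are
    # not mistaken for deleted files later on.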
project_sources = project.source_files.all()
has_changed = False
for source in project_sources:
repo_path = os.path.join(root, source.project_path)
update_expected_paths(repo_path)
if repo_path not in next_tree:
has_changed = True
next_tree[repo_path] = InputGitTreeElement(path=repo_path, mode='100644', type='blob',
content=source.get_contents())
logger.debug("New file: %s", repo_path)
else:
sha = next_tree[repo_path]._InputGitTreeElement__sha
our_content = source.get_contents()
expected_sha = git_sha(our_content)
if expected_sha != sha:
logger.debug("Updated file: %s", repo_path)
next_tree[repo_path]._InputGitTreeElement__sha = NotSet
next_tree[repo_path]._InputGitTreeElement__content = our_content
has_changed = True
# Now try handling resource files.
resources = project.resources.all()
resource_root = project.resources_path
for res in resources:
for variant in res.variants.all():
repo_path = os.path.join(resource_root, variant.path)
update_expected_paths(repo_path)
if repo_path in next_tree:
content = variant.get_contents()
if git_sha(content) != next_tree[repo_path]._InputGitTreeElement__sha:
logger.debug("Changed resource: %s", repo_path)
has_changed = True
blob = repo.create_git_blob(base64.b64encode(content), 'base64')
logger.debug("Created blob %s", blob.sha)
next_tree[repo_path]._InputGitTreeElement__sha = blob.sha
else:
logger.debug("New resource: %s", repo_path)
has_changed = True
blob = repo.create_git_blob(base64.b64encode(variant.get_contents()), 'base64')
logger.debug("Created blob %s", blob.sha)
next_tree[repo_path] = InputGitTreeElement(path=repo_path, mode='100644', type='blob', sha=blob.sha)
# Manage deleted files
src_root = os.path.join(root, 'src')
worker_src_root = os.path.join(root, 'worker_src')
for path in next_tree.keys():
if not (any(path.startswith(root+'/') for root in (src_root, resource_root, worker_src_root))):
continue
if path not in expected_paths:
del next_tree[path]
logger.debug("Deleted file: %s", path)
has_changed = True
# Compare the resource dicts
remote_manifest_path = root + manifest_name_for_project(project)
remote_wscript_path = root + 'wscript'
if manifest_item:
their_manifest_dict = json.loads(manifest_item.read())
their_res_dict = their_manifest_dict.get('resources', their_manifest_dict.get('pebble', their_manifest_dict).get('resources', {'media': []}))
# If the manifest needs a new path (e.g. it is now package.json), delete the old one
if manifest_item.path != remote_manifest_path:
del next_tree[manifest_item.path]
else:
their_manifest_dict = {}
their_res_dict = {'media': []}
our_manifest_dict = generate_manifest_dict(project, resources)
our_res_dict = our_manifest_dict.get('resources', our_manifest_dict.get('pebble', our_manifest_dict).get('resources', {'media': []}))
if our_res_dict != their_res_dict:
logger.debug("Resources mismatch.")
has_changed = True
# Try removing things that we've deleted, if any
to_remove = set(x['file'] for x in their_res_dict['media']) - set(x['file'] for x in our_res_dict['media'])
for path in to_remove:
repo_path = resource_root + path
if repo_path in next_tree:
logger.debug("Deleted resource: %s", repo_path)
del next_tree[repo_path]
# This one is separate because there's more than just the resource map changing.
if their_manifest_dict != our_manifest_dict:
has_changed = True
if remote_manifest_path in next_tree:
next_tree[remote_manifest_path]._InputGitTreeElement__sha = NotSet
next_tree[remote_manifest_path]._InputGitTreeElement__content = generate_manifest(project, resources)
else:
next_tree[remote_manifest_path] = InputGitTreeElement(path=remote_manifest_path, mode='100644', type='blob',
content=generate_manifest(project, resources))
if project.project_type == 'native' and remote_wscript_path not in next_tree:
next_tree[remote_wscript_path] = InputGitTreeElement(path=remote_wscript_path, mode='100644', type='blob',
content=generate_wscript_file(project, True))
has_changed = True
# Commit the new tree.
if has_changed:
logger.debug("Has changed; committing")
# GitHub seems to choke if we pass the raw directory nodes off to it,
# so we delete those.
for x in next_tree.keys():
if next_tree[x]._InputGitTreeElement__mode == '040000':
del next_tree[x]
logger.debug("removing subtree node %s", x)
logger.debug([x._InputGitTreeElement__mode for x in next_tree.values()])
git_tree = repo.create_git_tree(next_tree.values())
logger.debug("Created tree %s", git_tree.sha)
git_commit = repo.create_git_commit(commit_message, git_tree, [commit])
logger.debug("Created commit %s", git_commit.sha)
git_ref = repo.get_git_ref('heads/%s' % (project.github_branch or repo.master_branch))
git_ref.edit(git_commit.sha)
logger.debug("Updated ref %s", git_ref.ref)
project.github_last_commit = git_commit.sha
project.github_last_sync = now()
project.save()
return True
send_td_event('cloudpebble_github_push', data={
'data': {
'repo': project.github_repo
}
}, user=user)
return False
def get_root_path(path):
path, extension = os.path.splitext(path)
return path.split('~', 1)[0] + extension
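# Illustrative example (not in the original): get_root_path strips a '~tag'
# variant suffix while keeping the extension, e.g.
# get_root_path('resources/images/logo~color.png') -> 'resources/images/logo.png'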
class GitProjectItem(BaseProjectItem):
def __init__(self, repo, tree_item):
self.repo = repo
self.git_item = tree_item
def read(self):
return git_blob(self.repo, self.git_item.sha)
@property
def path(self):
return self.git_item.path
@git_auth_check
def github_pull(user, project):
g = get_github(user)
repo_name = project.github_repo
if repo_name is None:
raise Exception("No GitHub repo defined.")
repo = g.get_repo(repo_name)
# If somehow we don't have a branch set, this will use the "master_branch"
branch_name = project.github_branch or repo.master_branch
try:
branch = repo.get_branch(branch_name)
except GithubException:
raise Exception("Unable to get the branch.")
if project.github_last_commit == branch.commit.sha:
# Nothing to do.
return False
commit = repo.get_git_commit(branch.commit.sha)
tree = repo.get_git_tree(commit.tree.sha, recursive=True)
paths = {x.path: x for x in tree.tree}
paths_notags = {get_root_path(x) for x in paths}
# First try finding the resource map so we don't fail out part-done later.
try:
root, manifest_item = find_project_root_and_manifest([GitProjectItem(repo, x) for x in tree.tree])
except ValueError as e:
raise ValueError("In manifest file: %s" % str(e))
resource_root = root + project.resources_path + '/'
manifest = json.loads(manifest_item.read())
media = manifest.get('resources', {}).get('media', [])
project_type = manifest.get('projectType', 'native')
for resource in media:
path = resource_root + resource['file']
if project_type == 'pebblejs' and resource['name'] in {
'MONO_FONT_14', 'IMAGE_MENU_ICON', 'IMAGE_LOGO_SPLASH', 'IMAGE_TILE_SPLASH'}:
continue
if path not in paths_notags:
raise Exception("Resource %s not found in repo." % path)
# Now we grab the zip.
zip_url = repo.get_archive_link('zipball', branch_name)
u = urllib2.urlopen(zip_url)
# And wipe the project!
# TODO: transaction support for file contents would be nice...
project.source_files.all().delete()
project.resources.all().delete()
# This must happen before do_import_archive or we'll stamp on its results.
project.github_last_commit = branch.commit.sha
project.github_last_sync = now()
project.save()
import_result = do_import_archive(project.id, u.read())
send_td_event('cloudpebble_github_pull', data={
'data': {
'repo': project.github_repo
}
}, user=user)
return import_result
@task
def do_github_push(project_id, commit_message):
project = Project.objects.select_related('owner__github').get(pk=project_id)
return github_push(project.owner, commit_message, project.github_repo, project)
@task
def do_github_pull(project_id):
project = Project.objects.select_related('owner__github').get(pk=project_id)
return github_pull(project.owner, project)
@task
def hooked_commit(project_id, target_commit):
project = Project.objects.select_related('owner__github').get(pk=project_id)
did_something = False
logger.debug("Comparing %s versus %s", project.github_last_commit, target_commit)
if project.github_last_commit != target_commit:
github_pull(project.owner, project)
did_something = True
if project.github_hook_build:
build = BuildResult.objects.create(project=project)
run_compile(build.id)
did_something = True
return did_something
|
78792
|
import json
import sys
def main():
argv_file = sys.argv[1]
with open(argv_file, "wt") as fp:
json.dump(sys.argv, fp)
sys.exit(0)
if __name__ == "__main__":
main()
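# Usage sketch (illustrative; the script and output file names are arbitrary):
#   python dump_argv.py /tmp/argv.json
# writes the full sys.argv list as JSON to /tmp/argv.json and exits with code 0.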
|
78793
|
import asyncio
import json
import signal
from callosum.rpc import Peer
from callosum.ordering import (
KeySerializedAsyncScheduler,
)
from callosum.lower.zeromq import ZeroMQAddress, ZeroMQRPCTransport
async def handle_echo(request):
print('echo start')
await asyncio.sleep(1)
print('echo done')
return {
'received': request.body['sent'],
}
async def handle_add(request):
print('add start')
await asyncio.sleep(0.5)
print('add done')
return {
'result': request.body['a'] + request.body['b'],
}
async def handle_delimeter(request):
print('------')
async def serve() -> None:
peer = Peer(
bind=ZeroMQAddress('tcp://127.0.0.1:5010'),
transport=ZeroMQRPCTransport,
scheduler=KeySerializedAsyncScheduler(),
serializer=lambda o: json.dumps(o).encode('utf8'),
deserializer=lambda b: json.loads(b))
peer.handle_function('echo', handle_echo)
peer.handle_function('add', handle_add)
peer.handle_function('print_delim', handle_delimeter)
print('echo() will take 1 second and add() will take 0.5 second.')
print('You can confirm the effect of scheduler '
'and the ordering key by the console logs.\n')
loop = asyncio.get_running_loop()
forever = loop.create_future()
loop.add_signal_handler(signal.SIGINT, forever.cancel)
loop.add_signal_handler(signal.SIGTERM, forever.cancel)
async with peer:
try:
print('server started')
await forever
except asyncio.CancelledError:
pass
print('server terminated')
if __name__ == '__main__':
asyncio.run(serve())
|
78828
|
from collections import OrderedDict as odict
import time
import numpy as np
from .json_encoder import JsonNumEncoder
import os
class Profiler:
"""This class provides a very simple yet light implementation of function profiling.
It is very easy to use:
>>> profiler.reset()
>>> profiler.start("loop")
>>> for i in range(100000):
... print(i)
...
>>> profiler.lapse("loop")
>>> print(profiler)
>> loop [1x, 27.1s]
Alternatively, you may use ``profiler`` with :class:`KeepTime`:
>>> with KeepTime("loop2"):
... for i in range(100000):
... print(i)
...
>>> print(profiler)
>> loop2 [1x, 0.0s]
Note:
The number of callings to :func:`start` and :func:`lapse` should be the same.
"""
def __init__(self):
self.filename = None
self.reset()
def reset(self):
self.data = odict()
def start(self, name):
if name in self.data:
            assert self.data[name]["starts"] is None, "The previous start should be lapsed first for [{:s}]".format(name)
else:
self.data[name] = {"starts":None, "occurs":None, "totals":None}
self.data[name]["starts"] = time.time()
def lapse(self, name):
assert name in self.data and self.data[name]["starts"] is not None, "You should first start for [{:s}]".format(name)
elapsed = time.time() - self.data[name]["starts"]
if self.data[name]["totals"] is None:
self.data[name]["totals"] = elapsed
self.data[name]["occurs"] = 1
else:
self.data[name]["totals"] += elapsed
self.data[name]["occurs"] += 1
self.data[name]["starts"] = None
def get_time_average(self, name):
assert name in self.data
return self.data[name]["totals"]/self.data[name]["occurs"]
def get_time_overall(self, name):
assert name in self.data
return self.data[name]["totals"]
def get_occurence(self, name):
assert name in self.data
return self.data[name]["occurs"]
def get_keys(self):
return list(self.data.keys())
def __repr__(self):
res = ""
for k in self.get_keys():
res += (">> {:s} [{:d}x, {:.1f}s]\n".format(k, self.get_occurence(k), self.get_time_overall(k)))
return res
def set_output_file(self, path):
self.filename = path
def dump(self, meta = odict()):
if self.filename:
f = open(self.filename, 'a')
out = {"meta":meta,"data":self.data}
jsonstring = JsonNumEncoder(out)
print(jsonstring, flush=True, file=f)
f.close()
# Global profiler object (use KeepTime to interact with this object):
profiler = Profiler()
class KeepTime(object):
##################
# Static Methods #
##################
_stack = []
_level = -1
    @staticmethod
    def set_level(level):
        KeepTime._level = level
    @staticmethod
    def get_level():
        return KeepTime._level
    @staticmethod
    def get_full_path():
        return os.path.join(*KeepTime._stack)
    @staticmethod
    def get_current_level():
        path = KeepTime.get_full_path()
        if path == "/":
            return 0
        return path.count("/")
    @staticmethod
    def add_name(name):
        KeepTime._stack.append(name)
    @staticmethod
    def pop_name():
        KeepTime._stack.pop()
######################
# Non-static Methods #
######################
def __init__(self, name):
self.name = name
self.enabled = False
def __enter__(self):
KeepTime.add_name(self.name)
if (KeepTime.get_current_level() <= KeepTime.get_level()) or (KeepTime.get_level() == -1):
name = KeepTime.get_full_path()
profiler.start(name)
self.enabled = True
return self
def __exit__(self, exc_type, exc_val, exc_tb):
name = KeepTime.get_full_path()
KeepTime.pop_name()
if self.enabled:
profiler.lapse(name)
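# Minimal usage sketch (illustrative, not part of the original module): nested
# KeepTime blocks record hierarchical keys such as "outer/inner" in the global
# profiler, and KeepTime.set_level(0) would restrict timing to top-level blocks.
#
#   with KeepTime("outer"):
#       with KeepTime("inner"):
#           sum(x * x for x in range(100000))
#   print(profiler)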
|
78829
|
import math
import hashlib
import zlib
from bitarray import bitarray
from globals import G
class BFsignature():
def __init__(self, total_chunks):
self.total_chunks = total_chunks
if self.total_chunks > 0:
self.cal_m()
#print("bf size = ",self.m)
else:
#log.error("Non-positive total_chunks")
print("Non-positive total_chunks")
    def cal_m(self):
        # Standard Bloom filter sizing for a target false-positive rate p = 0.001:
        # number of hash functions k = -ln(p) / ln(2), and bit-array size
        # m = -n * ln(p) / (ln(2))^2 with n = total_chunks.
        self.k = - math.log(0.001) / math.log(2)
        self.k = int(self.k)
        self.m = - self.total_chunks * math.log(0.001) / (math.log(2)**2)
        self.m = int(self.m)
        self.bitarray = bitarray(self.m)
        self.bitarray.setall(False)
def insert_item(self, key):
positions = self.cal_positions(key)
for pos in positions:
self.bitarray[pos] = True
def or_bf(self, other_bitarray):
self.bitarray = self.bitarray | other_bitarray
def gen_signature(self):
#print(self.bitarray)
h = hashlib.sha1()
# if bitarray is too large, might do this in sections
h.update(self.bitarray.tobytes())
return h.hexdigest()
def cal_positions(self, key):
positions = []
for i in range (self.k):
hashValue = zlib.crc32(key, i) % self.m
positions.append(hashValue)
return positions
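# Minimal usage sketch (illustrative, not part of the original module): build a
# Bloom-filter signature over a few chunk keys. Keys are passed as bytes because
# zlib.crc32 hashes byte strings.
if __name__ == "__main__":
    bf = BFsignature(total_chunks=1000)
    for chunk_id in range(3):
        bf.insert_item(("chunk-%d" % chunk_id).encode())
    print(bf.gen_signature())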
|
78857
|
from typing import List, TYPE_CHECKING
from cloudfoundry_client.json_object import JsonObject
if TYPE_CHECKING:
from cloudfoundry_client.client import CloudFoundryClient
class ResourceManager(object):
def __init__(self, target_endpoint: str, client: "CloudFoundryClient"):
self.target_endpoint = target_endpoint
self.client = client
def match(self, items: List[dict]) -> List[JsonObject]:
response = self.client.put("%s/v2/resource_match" % self.client.info.api_endpoint, json=items)
return response.json(object_pairs_hook=JsonObject)
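# Usage sketch (illustrative, not part of the original module): ask the API which
# resources it already knows about. The item shape ({"sha1": ..., "size": ...}) is
# an assumption based on the v2 resource_match endpoint; `client` is assumed to be
# an authenticated CloudFoundryClient.
#
#   manager = ResourceManager(client.info.api_endpoint, client)
#   cached = manager.match([{"sha1": "002d760bea1be268e27077412e11a320d0f164d3", "size": 36}])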
|
78935
|
from aiogram import types
from .dataset import PHOTO
photo = types.PhotoSize(**PHOTO)
def test_export():
exported = photo.to_python()
assert isinstance(exported, dict)
assert exported == PHOTO
def test_file_id():
assert isinstance(photo.file_id, str)
assert photo.file_id == PHOTO['file_id']
def test_file_size():
assert isinstance(photo.file_size, int)
assert photo.file_size == PHOTO['file_size']
def test_size():
assert isinstance(photo.width, int)
assert isinstance(photo.height, int)
assert photo.width == PHOTO['width']
assert photo.height == PHOTO['height']
|
78954
|
from yo_fluq_ds__tests.common import *
import numpy as np
class MiscMethodsTests(TestCase):
def test_pairwise(self):
result = Query.args(1,2,3).feed(fluq.pairwise()).to_list()
self.assertListEqual([(1,2),(2,3)],result)
def test_strjoin(self):
result = Query.args(1,2,3).feed(fluq.strjoin(','))
self.assertEqual("1,2,3",result)
def test_countby(self):
result = Query.args(1,1,1,2,2,3).feed(fluq.count_by(lambda z: z)).to_series()
self.assertListEqual([1,2,3],list(result.index))
self.assertListEqual([3,2,1],list(result))
def test_shuffle(self):
arg = Query.en(range(5)).feed(fluq.shuffle(1)).to_list()
self.assertListEqual([2,1,4,0,3],arg)
def test_shuffle_rstate(self):
arg = Query.en(range(5)).feed(fluq.shuffle(np.random.RandomState(1))).to_list()
self.assertListEqual([2,1,4,0,3],arg)
def test_shuffle_true(self):
arg = Query.en(range(5)).feed(fluq.shuffle(True)).to_set()
self.assertSetEqual({0,1,2,3,4}, arg)
self.assertEqual(5,len(arg))
def test_shuffle_false(self):
res = Query.en(range(5)).feed(fluq.shuffle(False)).to_list()
self.assertListEqual([0,1,2,3,4],res)
def test_shuffle_none(self):
res = Query.en(range(5)).feed(fluq.shuffle(None)).to_list()
self.assertListEqual([0,1,2,3,4],res)
def test_shuffle_raises(self):
self.assertRaises(
TypeError,
lambda: Query.en(range(5)).feed(fluq.shuffle('a')).to_list()
)
|
78961
|
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectUD import DistributedObjectUD
class AwardManagerUD(DistributedObjectUD):
notify = DirectNotifyGlobal.directNotify.newCategory('AwardManagerUD')
|
78971
|
from motion import *
if __name__ == '__main__':
from time import sleep
buf = []
buf_len = 5
last_avg = 0
threshold = 0.02
lsm = accelcomp()
while True:
lsm.getAccel()
buf.append(lsm.accel[X] + lsm.accel[Y] + lsm.accel[Z])
buf = buf[-buf_len:]
        avg = sum(buf) / len(buf)
diff = abs(avg - last_avg)
if diff > threshold and diff < 1:
print("MOTION!", abs(avg-last_avg))
last_avg = avg
        sleep(0.1)
|
79012
|
from typing import List
class Solution:
def numPairsDivisibleBy60(self, time: List[int]) -> int:
        # Count how many values fall into each remainder class modulo 60.
        mods = [0] * 60
        for t in time:
            mods[t % 60] += 1
        # Remainders i and 60 - i sum to a multiple of 60 (i = 1..29).
        cnt = sum(mods[i] * mods[60 - i] for i in range(1, 30))
        # Remainders 0 and 30 pair within their own class: choose 2 of mods[i].
        for i in [0, 30]:
            cnt += mods[i] * (mods[i] - 1) // 2
        return cnt
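# Quick self-check (illustrative, not part of the original solution):
# 30+150, 20+100 and 20+40 are the three pairs whose sums are divisible by 60.
if __name__ == "__main__":
    assert Solution().numPairsDivisibleBy60([30, 20, 150, 100, 40]) == 3
    assert Solution().numPairsDivisibleBy60([60, 60, 60]) == 3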
|
79025
|
from abc import abstractmethod
import numpy as np
from pymoo.core.population import Population
# ---------------------------------------------------------------------------------------------------------
# Survival
# ---------------------------------------------------------------------------------------------------------
class Survival:
def __init__(self, filter_infeasible=True):
super().__init__()
self.filter_infeasible = filter_infeasible
def do(self,
problem,
pop,
*args,
n_survive=None,
return_indices=False,
**kwargs):
# make sure the population has at least one individual
if len(pop) == 0:
return pop
if n_survive is None:
n_survive = len(pop)
n_survive = min(n_survive, len(pop))
# if the split should be done beforehand
if self.filter_infeasible and problem.n_constr > 0:
# split feasible and infeasible solutions
feas, infeas = split_by_feasibility(pop, eps=0.0, sort_infeasbible_by_cv=True)
if len(feas) == 0:
survivors = Population()
else:
survivors = self._do(problem, pop[feas], *args, n_survive=min(len(feas), n_survive), **kwargs)
# calculate how many individuals are still remaining to be filled up with infeasible ones
n_remaining = n_survive - len(survivors)
# if infeasible solutions needs to be added
if n_remaining > 0:
survivors = Population.merge(survivors, pop[infeas[:n_remaining]])
else:
survivors = self._do(problem, pop, *args, n_survive=n_survive, **kwargs)
if return_indices:
H = {}
for k, ind in enumerate(pop):
H[ind] = k
return [H[survivor] for survivor in survivors]
else:
return survivors
@abstractmethod
def _do(self, problem, pop, *args, n_survive=None, **kwargs):
pass
def split_by_feasibility(pop, eps=0.0, sort_infeasbible_by_cv=True):
CV = pop.get("CV")
b = (CV <= eps)
feasible = np.where(b)[0]
infeasible = np.where(~b)[0]
if sort_infeasbible_by_cv:
infeasible = infeasible[np.argsort(CV[infeasible, 0])]
return feasible, infeasible
def calc_adapt_eps(pop):
cv = pop.get("CV")[:, 0]
cv_mean = np.median(cv)
fr = (cv <= 0).sum() / len(cv)
return cv_mean * fr
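# Minimal sketch (illustrative, not part of the original module) of a concrete
# Survival operator: keep the n_survive individuals with the smallest first
# objective value. Assumes a single-objective population whose "F" attribute has
# already been evaluated.
class SimpleFitnessSurvival(Survival):
    def _do(self, problem, pop, *args, n_survive=None, **kwargs):
        F = pop.get("F")
        return pop[np.argsort(F[:, 0])[:n_survive]]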
|
79078
|
import json
import logging
from pprint import pformat
from typing import Any, Dict, Set
import pydantic
from models_library.projects_nodes import NodeID
from models_library.utils.nodes import compute_node_hash
from packaging import version
from ..node_ports_common.dbmanager import DBManager
from ..node_ports_common.exceptions import InvalidProtocolError
from .nodeports_v2 import Nodeports
# NOTE: Keeps backwards compatibility with pydantic
# - from 1.8, it DOES NOT need __root__ specifiers when nested
_PYDANTIC_NEEDS_ROOT_SPECIFIED = version.parse(pydantic.VERSION) < version.parse("1.8")
log = logging.getLogger(__name__)
NODE_REQUIRED_KEYS: Set[str] = {
"schema",
"inputs",
"outputs",
}
async def load(
db_manager: DBManager,
user_id: int,
project_id: str,
node_uuid: str,
auto_update: bool = False,
) -> Nodeports:
"""creates a nodeport object from a row from comp_tasks"""
log.debug(
"creating node_ports_v2 object from node %s with auto_uptate %s",
node_uuid,
auto_update,
)
port_config_str: str = await db_manager.get_ports_configuration_from_node_uuid(
project_id, node_uuid
)
port_cfg = json.loads(port_config_str)
if any(k not in port_cfg for k in NODE_REQUIRED_KEYS):
raise InvalidProtocolError(
port_cfg, "nodeport in comp_task does not follow protocol"
)
# convert to our internal node ports
if _PYDANTIC_NEEDS_ROOT_SPECIFIED:
_PY_INT = "__root__"
node_ports_cfg: Dict[str, Dict[str, Any]] = {
"inputs": {_PY_INT: {}},
"outputs": {_PY_INT: {}},
}
for port_type in ["inputs", "outputs"]:
# schemas first
node_ports_cfg.update(
{
port_type: {_PY_INT: port_cfg["schema"][port_type]},
}
)
# add the key and the payload
for key, port_value in node_ports_cfg[port_type][_PY_INT].items():
port_value["key"] = key
port_value["value"] = port_cfg[port_type].get(key, None)
else:
node_ports_cfg: Dict[str, Dict[str, Any]] = {}
for port_type in ["inputs", "outputs"]:
# schemas first
node_ports_cfg[port_type] = port_cfg["schema"][port_type]
# add the key and the payload
for key, port_value in node_ports_cfg[port_type].items():
port_value["key"] = key
port_value["value"] = port_cfg[port_type].get(key, None)
ports = Nodeports(
**node_ports_cfg,
db_manager=db_manager,
user_id=user_id,
project_id=project_id,
node_uuid=node_uuid,
save_to_db_cb=dump,
node_port_creator_cb=load,
auto_update=auto_update,
)
log.debug(
"created node_ports_v2 object %s",
pformat(ports, indent=2),
)
return ports
async def dump(nodeports: Nodeports) -> None:
log.debug(
"dumping node_ports_v2 object %s",
pformat(nodeports, indent=2),
)
_nodeports_cfg = nodeports.dict(
include={"internal_inputs", "internal_outputs"},
by_alias=True,
exclude_unset=True,
)
async def get_node_io_payload_cb(node_id: NodeID) -> Dict[str, Any]:
ports = (
nodeports
if str(node_id) == nodeports.node_uuid
else await load(
db_manager=nodeports.db_manager,
user_id=nodeports.user_id,
project_id=nodeports.project_id,
node_uuid=str(node_id),
)
)
return {
"inputs": {
port_key: port.value for port_key, port in ports.internal_inputs.items()
},
"outputs": {
port_key: port.value
for port_key, port in ports.internal_outputs.items()
},
}
run_hash = await compute_node_hash(
NodeID(nodeports.node_uuid), get_node_io_payload_cb
)
# convert to DB
port_cfg = {
"schema": {"inputs": {}, "outputs": {}},
"inputs": {},
"outputs": {},
"run_hash": run_hash,
}
for port_type in ["inputs", "outputs"]:
for port_key, port_values in _nodeports_cfg[port_type].items():
# schemas
key_schema = {
k: v
for k, v in _nodeports_cfg[port_type][port_key].items()
if k not in ["key", "value"]
}
port_cfg["schema"][port_type][port_key] = key_schema
# payload (only if default value was not used)
# pylint: disable=protected-access
if (
port_values["value"] is not None
and not getattr(nodeports, f"internal_{port_type}")[
port_key
]._used_default_value
):
port_cfg[port_type][port_key] = port_values["value"]
await nodeports.db_manager.write_ports_configuration(
json.dumps(port_cfg),
nodeports.project_id,
nodeports.node_uuid,
)
|
79101
|
import warnings
from random import randint
from unittest import TestCase
from .models import (
ColumnFamilyTestModel,
ColumnFamilyIndexedTestModel,
ClusterPrimaryKeyModel,
ForeignPartitionKeyModel,
DictFieldModel
)
from .util import (
connect_db,
destroy_db,
create_model
)
class ColumnFamilyModelTestCase(TestCase):
def setUp(self):
self.connection = connect_db()
self.cached_rows = {}
'''
Let's create some simple data.
'''
create_model(
self.connection,
ColumnFamilyTestModel
)
field_names = [
field.name if field.get_internal_type() != 'AutoField' else None
for field in ColumnFamilyTestModel._meta.fields
]
field_values = ['foo', 'bar', 'raw', 'awk', 'lik', 'sik', 'dik', 'doc']
self.total_rows = 100
value_index = 0
for x in xrange(self.total_rows):
test_data = {}
for name in field_names:
if not name:
continue
test_data[name] = field_values[value_index % len(field_values)]
value_index += 1
test_data['field_1'] = test_data['field_1'] + str(
randint(1000, 9999)
)
if test_data['field_1'] in self.cached_rows.keys():
continue
created_instance = ColumnFamilyTestModel.objects.create(
**test_data
)
self.cached_rows[created_instance.pk] = created_instance
self.created_instances = len(self.cached_rows)
import django
django.setup()
def tearDown(self):
destroy_db(self.connection)
def test_token_partition_key_field_value_to_string(self):
first_instance = ColumnFamilyTestModel.objects.all()[:1][0]
token_field, _, _, _ = ColumnFamilyTestModel._meta.get_field_by_name(
'pk_token'
)
result = token_field.value_to_string(first_instance)
self.assertIsNotNone(result)
class ColumnFamilyTestIndexedQueriesTestCase(TestCase):
def setUp(self):
self.connection = connect_db()
self.cached_rows = {}
'''
Let's create some simple data.
'''
create_model(
self.connection,
ColumnFamilyIndexedTestModel
)
field_names = [
field.name if field.get_internal_type() != 'AutoField' else None
for field in ColumnFamilyIndexedTestModel._meta.fields
]
field_values = [
'foo',
'bar',
'raw',
'awk',
'lik',
'sik',
'dik',
'doc',
'dab'
]
high_cardinality_field_values = ['yes', 'no']
self.total_rows = 400
value_index = 0
for x in xrange(self.total_rows):
test_data = {}
for name in field_names:
if not name:
continue
test_data[name] = field_values[value_index % len(field_values)]
test_data['field_4'] = (
high_cardinality_field_values[
value_index % len(
high_cardinality_field_values
)
]
)
value_index += 1
test_data['field_1'] = test_data['field_1'] + str(
randint(1000, 9999)
)
if test_data['field_1'] in self.cached_rows.keys():
continue
created_instance = ColumnFamilyIndexedTestModel.objects.create(
**test_data
)
self.cached_rows[created_instance.pk] = created_instance
self.created_instances = len(self.cached_rows)
import django
django.setup()
def tearDown(self):
destroy_db(self.connection)
def test_partial_inefficient_get_query(self):
all_results = ColumnFamilyIndexedTestModel.objects.all()
all_results = [x for x in all_results]
last_result = all_results[-1]
last_result.field_3 = 'tool'
last_result_indexed_value = last_result.field_4
last_result.save()
partial_inefficient_get = (
ColumnFamilyIndexedTestModel.objects.get(
field_3='tool',
field_4=last_result_indexed_value
)
)
self.assertIsNotNone(partial_inefficient_get)
self.assertTrue(partial_inefficient_get.pk in self.cached_rows.keys())
class ForeignPartitionKeyModelTestCase(TestCase):
def setUp(self):
import django
django.setup()
self.connection = connect_db()
create_model(
self.connection,
ClusterPrimaryKeyModel
)
create_model(
self.connection,
ForeignPartitionKeyModel
)
def tearDown(self):
destroy_db(self.connection)
def test_order_by_efficient(self):
rel_instance = ClusterPrimaryKeyModel()
rel_instance.auto_populate()
rel_instance.save()
instances = []
for i in xrange(10):
instances.append(ForeignPartitionKeyModel.objects.create(
related=rel_instance
))
with warnings.catch_warnings(record=True) as w:
ordered_query = ForeignPartitionKeyModel.objects.filter(
related=rel_instance
).order_by('-created')
results = list(ordered_query)
self.assertEqual(
0,
len(w)
)
self.assertEqual(
10,
len(results)
)
for i in instances:
i.delete()
all_instances = ForeignPartitionKeyModel.objects.all()
self.assertEqual(
0,
len(all_instances)
)
class TestDictFieldModel(TestCase):
def setUp(self):
import django
django.setup()
self.connection = connect_db()
create_model(
self.connection,
DictFieldModel
)
def tearDown(self):
destroy_db(self.connection)
def test_creation(self):
instance = DictFieldModel.objects.create(
parameters={'key0': 'value0', 'key1': 'value1'}
)
self.assertIsNotNone(instance)
|