ext | sha | content
---|---|---
py | 1a30bc6d2b8411848efd48e48f9dc4590ba994ae | import pandas as pd
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.utilities.cloud_io import load as pl_load
import argparse
import json
import pytorch_lightning as pl
import sklearn
from ray import tune
import numpy as np
import matplotlib.pyplot as plt
import seaborn
import os
from ray.tune import CLIReporter
from ray.tune.schedulers import ASHAScheduler, PopulationBasedTraining
from ray.tune.integration.pytorch_lightning import TuneReportCallback
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from torch.optim import SGD, Adam
from torchvision import transforms
import MLmodels as m
from ray.tune.integration.pytorch_lightning import TuneReportCheckpointCallback
from ray.tune.suggest.bayesopt import BayesOptSearch
class ResNetClassifier(pl.LightningModule):
def __init__(self, config, num_classes, resnet_version,
test_path=None,
optimizer='adam',
transfer=True):
super().__init__()
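        # Stash all constructor arguments (config, num_classes, resnet_version, optimizer, ...) as attributes in one go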
self.__dict__.update(locals())
resnets = {
18: models.resnet18, 34: models.resnet34,
50: models.resnet50, 101: models.resnet101,
152: models.resnet152
}
optimizers = {'adam': Adam, 'sgd': SGD}
self.optimizer = optimizers[optimizer]
# hyperparameters
self.lr = config['lr']
self.batch_size = config['batch_size']
# for importing different versions of the data
self.datatype = config['datatype']
if 'B' in self.datatype and '20' not in self.datatype:
self.data_length = 40
else:
self.data_length = 20
self.training_data = None
self.validation_data = None
# Using a pretrained ResNet backbone
self.resnet_model = resnets[resnet_version](pretrained=transfer)
# Replace old FC layer with Identity so we can train our own
linear_size = list(self.resnet_model.children())[-1].in_features
# replace final layer for fine tuning
fcn = [
nn.Dropout(config['dr']),
nn.Linear(linear_size, linear_size),
]
fcn2 = [
nn.Linear(linear_size, num_classes)
]
if num_classes > 1:
fcn2.append(torch.nn.LogSoftmax(dim=1))
self.fcn1 = nn.Sequential(*fcn)
self.d1 = m.drelu(linear_size)
self.fcn2 = nn.Sequential(*fcn2)
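        # Swap the stock 3-channel RGB stem for a single-channel stem so the one-channel encoded sequence images can be fed in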
        self.resnet_model.conv1 = torch.nn.Conv2d(1, 64, (7, 7), (2, 2), (3, 3), bias=False)
modules = list(self.resnet_model.children())[:-1] # delete the last fc layer.
self.resnet_model = nn.Sequential(*modules)
def forward(self, X):
x = self.resnet_model(X)
x = x.view(x.size(0), -1) # flatten
x = self.fcn1(x)
x = self.d1(x)
x = self.fcn2(x)
return x
def configure_optimizers(self):
return self.optimizer(self.parameters(), lr=self.lr)
def prepare_data(self):
# import our data
train, validate, weights = m.get_rawdata(self.datatype, 10, 5, round=8)
_train = train.copy()
_validate = validate.copy()
# Assigns labels for learning
_train["binary"] = _train["affinity"].apply(m.bi_labelM)
_validate["binary"] = _validate["affinity"].apply(m.bi_labelM)
_weights = torch.FloatTensor(weights)
        # instantiate the loss criterion here because it needs the class weights computed above
self.criterion = m.SmoothCrossEntropyLoss(weight=_weights, smoothing=0.01)
self.training_data = _train
self.validation_data = _validate
def train_dataloader(self):
# Data Loading
train_reader = m.NAReader(self.training_data, shuffle=True, max_length=self.data_length)
train_loader = torch.utils.data.DataLoader(
train_reader,
batch_size=self.batch_size,
collate_fn=m.my_collate,
num_workers=4,
# pin_memory=True,
shuffle=True
)
return train_loader
def training_step(self, batch, batch_idx):
seq, x, y = batch
softmax = self(x)
train_loss = self.criterion(softmax, y)
# Convert to labels
preds = torch.argmax(softmax, 1).clone().double() # convert to torch float 64
predcpu = list(preds.detach().cpu().numpy())
ycpu = list(y.detach().cpu().numpy())
train_acc = sklearn.metrics.balanced_accuracy_score(ycpu, predcpu)
# perform logging
self.log("ptl/train_loss", train_loss, on_step=True, on_epoch=True, prog_bar=True, logger=True)
self.log("ptl/train_accuracy", train_acc, on_step=True, on_epoch=True, prog_bar=True, logger=True)
return train_loss
def val_dataloader(self):
# Data Loading
val_reader = m.NAReader(self.validation_data, shuffle=False)
val_loader = torch.utils.data.DataLoader(
val_reader,
batch_size=self.batch_size,
collate_fn=m.my_collate,
num_workers=4,
# pin_memory=True,
shuffle=False
)
return val_loader
def validation_step(self, batch, batch_idx):
seq, x, y = batch
softmax = self(x)
val_loss = self.criterion(softmax, y)
# Convert to labels
preds = torch.argmax(softmax, 1).clone().double() # convert to torch float 64
predcpu = list(preds.detach().cpu().numpy())
ycpu = list(y.detach().cpu().numpy())
val_acc = sklearn.metrics.balanced_accuracy_score(ycpu, predcpu)
# perform logging
self.log("ptl/val_loss", val_loss, on_epoch=True, prog_bar=True, logger=True)
self.log("ptl/val_accuracy", val_acc, on_epoch=True, prog_bar=True, logger=True)
return {"val_loss": val_loss, "val_acc": val_acc}
def train_resnet(config, checkpoint_dir=None, num_epochs=10, num_gpus=0):
trainer = pl.Trainer(
# default_root_dir="./checkpoints/",
max_epochs=num_epochs,
gpus=num_gpus,
logger=TensorBoardLogger(
save_dir=tune.get_trial_dir(), name="", version="."),
progress_bar_refresh_rate=0,
callbacks=[
TuneReportCheckpointCallback(
metrics={
"loss": "ptl/val_loss",
"acc": "ptl/val_accuracy"
},
filename="checkpoint",
on="validation_end")
]
)
if checkpoint_dir:
# Currently, this leads to errors:
# model = LightningMNISTClassifier.load_from_checkpoint(
# os.path.join(checkpoint, "checkpoint"))
# Workaround:
ckpt = pl_load(
os.path.join(checkpoint_dir, "checkpoint"),
map_location=lambda storage, loc: storage)
model = ResNetClassifier._load_model_state(
ckpt, config=config)
trainer.current_epoch = ckpt["epoch"]
else:
model = ResNetClassifier(config, 2, 18, optimizer='adam')
trainer.fit(model)
def tune_asha(datatype, num_samples=10, num_epochs=10, gpus_per_trial=0, cpus_per_trial=1):
config = {
"lr": tune.loguniform(1e-4, 1e-1),
"batch_size": tune.choice([32, 64, 128]),
"dr": tune.loguniform(0.005, 0.5),
"datatype": datatype
}
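    # ASHA aggressively terminates under-performing trials once the grace period has passed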
scheduler = ASHAScheduler(
max_t=num_epochs,
grace_period=5,
reduction_factor=2)
reporter = CLIReporter(
parameter_columns=["lr", "batch_size"],
metric_columns=["loss", "acc", "training_iteration"])
analysis = tune.run(
tune.with_parameters(
train_resnet,
num_epochs=num_epochs,
num_gpus=gpus_per_trial),
resources_per_trial={
"cpu": cpus_per_trial,
"gpu": gpus_per_trial
},
metric="acc",
mode="max",
local_dir="./ray_results/",
config=config,
num_samples=num_samples,
scheduler=scheduler,
progress_reporter=reporter,
name="tune_res_drelu_asha")
print("Best hyperparameters found were: ", analysis.best_config)
# analysis.to_csv('~/ray_results/' + config['datatype'])
def tune_asha_search(datatype, num_samples=10, num_epochs=10, gpus_per_trial=0, cpus_per_trial=1):
config = {
"lr": tune.uniform(1e-4, 1e-1),
"batch_size": 64,
"dr": tune.uniform(0.005, 0.5),
"datatype": datatype
}
scheduler = ASHAScheduler(
max_t=num_epochs,
grace_period=5,
reduction_factor=2)
reporter = CLIReporter(
parameter_columns=["lr", "batch_size"],
metric_columns=["loss", "acc", "training_iteration"])
    bayesopt = BayesOptSearch()  # metric/mode come from tune.run below ("acc"/"max"); "mean_loss" is never reported by the trainable
analysis = tune.run(
tune.with_parameters(
train_resnet,
num_epochs=num_epochs,
num_gpus=gpus_per_trial),
resources_per_trial={
"cpu": cpus_per_trial,
"gpu": gpus_per_trial
},
metric="acc",
mode="max",
local_dir="./ray_results/",
config=config,
num_samples=num_samples,
search_alg=bayesopt,
scheduler=scheduler,
progress_reporter=reporter,
name="tune_res_drelu_bayopt")
print("Best hyperparameters found were: ", analysis.best_config)
# analysis.to_csv('~/ray_results/' + config['datatype'])
def exp_results_check(checkpoint_path, result_path, title):
# example
# checkpoint_file = './ray_results/tune_vae_asha/train_vae_a45d1_00000_0_batch_size=64,dr=0.029188,lr=0.0075796,z_dim=10_2021-07-13_12-50-57/checkpoints/epoch=28-step=15891.ckpt'
checkpoint_file = checkpoint_path
param_file = open(result_path, 'r')
check_epoch = int(checkpoint_file.split("epoch=", 1)[1].split('-', 1)[0])
resultjsons = param_file.read().split('\n')
results = json.loads(resultjsons[check_epoch + 1])
params = results['config']
lr = params['lr']
dr = params['dr']
batch_size = params['batch_size']
datatype = params['datatype']
con = {'lr': lr, 'dr': dr, 'batch_size': batch_size, 'datatype': datatype}
model = ResNetClassifier(con, 2, 18, optimizer='adam')
checkpoint = torch.load(checkpoint_file)
model.prepare_data()
    model.criterion.weight = torch.tensor([0., 0.])  # dummy weight so the key exists before loading the checkpointed state_dict
model.load_state_dict(checkpoint['state_dict'])
model.eval()
test_set = m.test_set_corr
verdict = {'sequence':list(test_set.keys()), 'binary':list(test_set.values())}
_verification = pd.DataFrame(verdict)
ver_reader = m.NAReader(_verification, shuffle=False)
ver_loader = torch.utils.data.DataLoader(
ver_reader,
batch_size=len(test_set.keys()),
collate_fn=m.my_collate,
# num_workers=4,
# pin_memory=True,
shuffle=False
)
for i, batch in enumerate(ver_loader):
seqs, ohe, labels = batch
softmax = model(ohe)
preds = torch.argmax(softmax, 1).clone().double() # convert to torch float 64
predcpu = list(preds.detach().cpu().numpy())
ycpu = labels
# ver_acc = sklearn.metrics.balanced_accuracy_score(ycpu, predcpu)
# Make confusion Matrix
y_true = ycpu.detach().cpu().numpy().astype('bool').tolist()
        y_pred = np.asarray(predcpu, dtype=bool).tolist()
score = np.asarray([1 if x == y_pred[xid] else 0 for xid, x in enumerate(y_true)])
ver_acc = np.mean(score)
f1 = sklearn.metrics.f1_score(y_true, y_pred)
cm = sklearn.metrics.confusion_matrix(y_true, y_pred, normalize='true')
df_cm = pd.DataFrame(cm, index=[0, 1], columns=[0, 1])
plt.figure(figsize=(10, 7))
ax = plt.subplot()
seaborn.set(font_scale=3.0)
seaborn.heatmap(df_cm, annot=True, ax=ax)
label_font = {'size': '26'}
ax.tick_params(axis='both', which='major', labelsize=40)
ax.xaxis.set_ticklabels(["0", "1"])
ax.yaxis.set_ticklabels(["0", "1"])
# plt.title(title)
plt.savefig('../../Dropbox (ASU)/Projects/Aptamer_ML/' + title)
o = open('../../Dropbox (ASU)/Projects/Aptamer_ML/' + title + 'results.txt', "w+")
print("Validation Loss", results['loss'], file=o)
print("Validation Accuracy", results['acc'], file=o)
print("Verification Accuracy:", ver_acc, "of dataset size:", len(test_set.keys()), file=o)
print("F1-score", f1, file=o)
o.close()
def exp_results_check_progress(checkpoint_path, hparams, progress, title):
# example
checkpoint_file = checkpoint_path
# param_file = open(result_path, 'r')
# check_epoch = int(checkpoint_file.split("epoch=", 1)[1].split('-', 1)[0])
# resultjsons = param_file.read().split('\n')
o = open(hparams, 'r')
params = json.load(o)
# params = results['config']
lr = params['lr']
dr = params['dr']
batch_size = params['batch_size']
datatype = params['datatype']
progress = pd.read_csv(progress)
loss = progress.iloc[-1].loss
acc = progress.iloc[-1].acc
con = {'lr': lr, 'dr': dr, 'batch_size': batch_size, 'datatype': datatype}
model = ResNetClassifier(con, 2, 18, optimizer='adam')
checkpoint = torch.load(checkpoint_file)
model.prepare_data()
    model.criterion.weight = torch.tensor([0., 0.])  # dummy weight so the key exists before loading the checkpointed state_dict
model.load_state_dict(checkpoint['state_dict'])
model.eval()
test_set = m.test_set_corr
verdict = {'sequence': list(test_set.keys()), 'binary': list(test_set.values())}
_verification = pd.DataFrame(verdict)
ver_reader = m.NAReader(_verification, shuffle=False)
ver_loader = torch.utils.data.DataLoader(
ver_reader,
batch_size=len(test_set.keys()),
collate_fn=m.my_collate,
# num_workers=4,
# pin_memory=True,
shuffle=False
)
for i, batch in enumerate(ver_loader):
seqs, ohe, labels = batch
softmax = model(ohe)
preds = torch.argmax(softmax, 1).clone().double() # convert to torch float 64
predcpu = list(preds.detach().cpu().numpy())
ycpu = labels
# ver_acc = sklearn.metrics.balanced_accuracy_score(ycpu, predcpu)
# Make confusion Matrix
y_true = ycpu.detach().cpu().numpy().astype('bool').tolist()
        y_pred = np.asarray(predcpu, dtype=bool).tolist()
score = np.asarray([1 if x == y_pred[xid] else 0 for xid, x in enumerate(y_true)])
ver_acc = np.mean(score)
f1 = sklearn.metrics.f1_score(y_true, y_pred)
cm = sklearn.metrics.confusion_matrix(y_true, y_pred, normalize='true')
df_cm = pd.DataFrame(cm, index=[0, 1], columns=[0, 1])
plt.figure(figsize=(10, 7))
ax = plt.subplot()
seaborn.set(font_scale=3.0)
seaborn.heatmap(df_cm, annot=True, ax=ax)
label_font = {'size': '26'}
ax.tick_params(axis='both', which='major', labelsize=40)
ax.xaxis.set_ticklabels(["0", "1"])
ax.yaxis.set_ticklabels(["0", "1"])
# plt.title(title)
plt.savefig('../../Dropbox (ASU)/Projects/Aptamer_ML/' + title)
o = open('../../Dropbox (ASU)/Projects/Aptamer_ML/' + title + 'results.txt', "w+")
print("Validation Loss", loss, file=o)
print("Validation Accuracy", acc, file=o)
print("Verification Accuracy:", ver_acc, "of dataset size:", len(test_set.keys()), file=o)
print("F1-score", f1, file=o)
o.close()
def val_results_check(checkpoint_path, hparams, progress, result_path, title, r=True):
# example
# checkpoint_file = './ray_results/tune_vae_asha/train_vae_a45d1_00000_0_batch_size=64,dr=0.029188,lr=0.0075796,z_dim=10_2021-07-13_12-50-57/checkpoints/epoch=28-step=15891.ckpt'
checkpoint_file = checkpoint_path
if r:
param_file = open(result_path, 'r')
check_epoch = int(checkpoint_file.split("epoch=", 1)[1].split('-', 1)[0])
resultjsons = param_file.read().split('\n')
results = json.loads(resultjsons[check_epoch + 1])
params = results['config']
lr = params['lr']
dr = params['dr']
batch_size = params['batch_size']
datatype = params['datatype']
loss = results['loss']
acc = results['acc']
else:
o = open(hparams, 'r')
params = json.load(o)
# params = results['config']
lr = params['lr']
dr = params['dr']
batch_size = params['batch_size']
datatype = params['datatype']
progress = pd.read_csv(progress)
loss = progress.iloc[-1].loss
acc = progress.iloc[-1].acc
con = {'lr': lr, 'dr': dr, 'batch_size': batch_size, 'datatype': datatype}
model = ResNetClassifier(con, 2, 18, optimizer='adam')
checkpoint = torch.load(checkpoint_file)
model.prepare_data()
    model.criterion.weight = torch.tensor([0., 0.])  # dummy weight so the key exists before loading the checkpointed state_dict
model.load_state_dict(checkpoint['state_dict'])
model.eval()
vd = model.val_dataloader()
yt, yp = [], []
for i, batch in enumerate(vd):
seqs, ohe, labels = batch
softmax = model(ohe)
preds = torch.argmax(softmax, 1).clone().double() # convert to torch float 64
predcpu = list(preds.detach().cpu().numpy())
ycpu = labels
# Make confusion Matrix
y_true = ycpu.detach().cpu().numpy().astype('bool').tolist()
        y_pred = np.asarray(predcpu, dtype=bool).tolist()
yt += y_true
yp += y_pred
ver_acc = np.mean(np.asarray([1 if x == yp[xid] else 0 for xid, x in enumerate(yt)]))
# ver_acc = sklearn.metrics.balanced_accuracy_score(yt, yp)
cm = sklearn.metrics.confusion_matrix(yt, yp, normalize='true')
df_cm = pd.DataFrame(cm, index=[0, 1], columns=[0, 1])
plt.figure(figsize=(10, 7))
ax = plt.subplot()
seaborn.set(font_scale=3.0)
seaborn.heatmap(df_cm, annot=True, ax=ax)
label_font = {'size': '26'}
ax.tick_params(axis='both', which='major', labelsize=40)
ax.xaxis.set_ticklabels(["0", "1"])
ax.yaxis.set_ticklabels(["0", "1"])
# plt.title(title)
plt.savefig('../../Dropbox (ASU)/Projects/Aptamer_ML/' + title + "_VER")
o = open('../../Dropbox (ASU)/Projects/Aptamer_ML/' + title + 'results_ver.txt', "w+")
print("Validation Loss", loss, file=o)
print("Validation Accuracy", acc, file=o)
print("Verification Accuracy:", ver_acc, "of dataset size:", len(yt), file=o)
o.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Resnet Training on Aptamer Dataset")
parser.add_argument('dataset', type=str, help="3-7 letter/number abbreviation describing subset of the data to use")
parser.add_argument('cpus_per_trial', type=str, help="Number of cpus available to each trial in Ray Tune")
parser.add_argument('gpus_per_trial', type=str, help="Number of gpus available to each trial in Ray Tune")
parser.add_argument('samples', type=str, help="Number of Ray Tune Samples")
args = parser.parse_args()
os.environ["SLURM_JOB_NAME"] = "bash"
tune_asha(args.dataset, int(args.samples), 30, gpus_per_trial=int(args.gpus_per_trial), cpus_per_trial=int(args.cpus_per_trial))
# tune_asha_search(args.dataset, int(args.samples), 50, gpus_per_trial=int(args.gpus_per_trial), cpus_per_trial=int(args.cpus_per_trial))
### Debugging
# con = {'lr': 1e-4, 'dr': 0.1, 'batch_size': 32, 'datatype': 'HCL'}
#
# model = ResNetClassifier(con, 2, 18)
#
## Single Loop debugging
# model.prepare_data()
# d = model.train_dataloader()
# for i, batch in enumerate(d):
# if i > 0:
# break
# else:
# model.training_step(batch, i)
# pytorch lightning loop
# rn = ResNetClassifier(con, 2, 18, optimizer='adam')
# plt = pl.Trainer(gpus=1)
# plt.fit(rn)
|
py | 1a30bda7eaa99fc763bc21d8b15291df971feba6 | ##### For testing the original Keras model, which is saved in .hdf5 format.
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
import numpy as np
import h5py
import scipy.io
import pandas as pd
import librosa
import soundfile as sound
import keras
import tensorflow
from sklearn.metrics import confusion_matrix
from sklearn.metrics import log_loss
import sys
sys.path.append("..")
from utils import *
from funcs import *
from tensorflow import ConfigProto
from tensorflow import InteractiveSession
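# TF1-style session setup: let GPU memory allocation grow on demand instead of pre-allocating the whole device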
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
val_csv = 'data_2020/evaluation_setup/fold1_evaluate.csv'
feat_path = 'features/logmel128_scaled_d_dd/'
model_path = '../pretrained_models/smallfcnn-model-0.9618.hdf5'
num_freq_bin = 128
num_classes = 3
data_val, y_val = load_data_2020(feat_path, val_csv, num_freq_bin, 'logmel')
y_val_onehot = keras.utils.to_categorical(y_val, num_classes)
print(data_val.shape)
print(y_val.shape)
best_model = keras.models.load_model(model_path)
preds = best_model.predict(data_val)
y_pred_val = np.argmax(preds,axis=1)
over_loss = log_loss(y_val_onehot, preds)
overall_acc = np.sum(y_pred_val==y_val) / data_val.shape[0]
print(y_val_onehot.shape, preds.shape)
np.set_printoptions(precision=3)
print("\n\nVal acc: ", "{0:.3f}".format(overall_acc))
print("Val log loss:", "{0:.3f}".format(over_loss))
conf_matrix = confusion_matrix(y_val,y_pred_val)
print("\n\nConfusion matrix:")
print(conf_matrix)
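# Row-normalize the confusion matrix; its diagonal then gives per-class recall (i.e. per-class accuracy)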
conf_mat_norm_recall = conf_matrix.astype('float32')/conf_matrix.sum(axis=1)[:,np.newaxis]
recall_by_class = np.diagonal(conf_mat_norm_recall)
mean_recall = np.mean(recall_by_class)
dev_test_df = pd.read_csv(val_csv,sep='\t', encoding='ASCII')
ClassNames = np.unique(dev_test_df['scene_label'])
print("Class names:", ClassNames)
print("Per-class val acc: ",recall_by_class, "\n\n")
|
py | 1a30be050f88378838d51722197d6a1666cd1271 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 2 23:26:08 2017
@author: Shashwat Sridhar
"""
# system imports
from pyqtgraph.Qt import QtCore, QtGui, QtWidgets
from os import sep
# swan-specific imports
from swan.views.mean_waveforms_view import PgWidget2d
from swan.views.virtual_units_view import VirtualUnitsView
from swan.widgets.plot_grid import MyPlotGrid
from swan.views.isi_histograms_view import PgWidgetISI
from swan.views.pca_3d_view import PgWidgetPCA
from swan.views.rate_profile_view import PgWidgetRateProfile
from swan.widgets.plot_grid_tools import PlotGridTools
from swan.widgets.view_toolbar import CollapsibleWidget
from swan.resources import icons
try:
    _from_utf_8 = QtCore.QString.fromUtf8
except AttributeError:
def _from_utf_8(s):
return s
try:
_encoding = QtWidgets.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtWidgets.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtWidgets.QApplication.translate(context, text, disambig)
class MainUI(object):
def __init__(self, main_application):
main_application.setObjectName(_from_utf_8("Main"))
main_application.setDockOptions(QtWidgets.QMainWindow.AllowTabbedDocks |
QtWidgets.QMainWindow.AllowNestedDocks |
QtWidgets.QMainWindow.GroupedDragging)
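        # Dock widgets hosting the individual analysis views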
self.plotGridDock = QtWidgets.QDockWidget("Plot Grid")
self.plotGridDock.setObjectName(_from_utf_8("PlotGridDock"))
self.plotGrid = MyPlotGrid(main_application)
self.plotGridDock.setFeatures(QtWidgets.QDockWidget.DockWidgetMovable |
QtWidgets.QDockWidget.DockWidgetFloatable)
self.plotGridDock.setAllowedAreas(QtCore.Qt.AllDockWidgetAreas)
self.plotGridDock.setWidget(self.plotGrid)
self.dock_virtual_unit_view = QtWidgets.QDockWidget("Virtual Unit Mappings")
self.dock_virtual_unit_view.setObjectName(_from_utf_8("virtualUnitsDock"))
self.dock_virtual_unit_view.setFeatures(QtWidgets.QDockWidget.DockWidgetMovable |
QtWidgets.QDockWidget.DockWidgetFloatable)
self.dock_virtual_unit_view.setAllowedAreas(QtCore.Qt.AllDockWidgetAreas)
self.virtual_units_view = VirtualUnitsView()
self.virtual_units_view.setObjectName(_from_utf_8("virtualUnitsView"))
self.dock_virtual_unit_view.setWidget(self.virtual_units_view)
self.dock_mean_waveforms_view = QtWidgets.QDockWidget("Mean Waveforms")
self.dock_mean_waveforms_view.setObjectName(_from_utf_8("meanWaveformView"))
self.dock_mean_waveforms_view.setFeatures(QtWidgets.QDockWidget.DockWidgetMovable |
QtWidgets.QDockWidget.DockWidgetFloatable)
self.dock_mean_waveforms_view.setAllowedAreas(QtCore.Qt.AllDockWidgetAreas)
self.mean_waveforms_view = PgWidget2d()
self.mean_waveforms_view.setObjectName(_from_utf_8("meanWaveformsView"))
self.dock_mean_waveforms_view.setWidget(self.mean_waveforms_view)
self.dock_isi_histograms_view = QtWidgets.QDockWidget("ISI Histograms")
self.dock_isi_histograms_view.setObjectName(_from_utf_8("ISIHView"))
self.dock_isi_histograms_view.setFeatures(QtWidgets.QDockWidget.DockWidgetMovable |
QtWidgets.QDockWidget.DockWidgetFloatable)
self.dock_isi_histograms_view.setAllowedAreas(QtCore.Qt.AllDockWidgetAreas)
self.isi_histograms_view = PgWidgetISI()
self.isi_histograms_view.setObjectName(_from_utf_8("IsihView"))
self.dock_isi_histograms_view.setWidget(self.isi_histograms_view)
self.dock_pca_3d_view = QtWidgets.QDockWidget("Principal Component Analysis")
self.dock_pca_3d_view.setObjectName(_from_utf_8("PCAView"))
self.dock_pca_3d_view.setFeatures(QtWidgets.QDockWidget.DockWidgetMovable |
QtWidgets.QDockWidget.DockWidgetFloatable)
self.dock_pca_3d_view.setAllowedAreas(QtCore.Qt.AllDockWidgetAreas)
self.pca_3d_view = PgWidgetPCA()
self.pca_3d_view.setObjectName(_from_utf_8("PcaView"))
self.dock_pca_3d_view.setWidget(self.pca_3d_view)
self.dock_rate_profiles_view = QtWidgets.QDockWidget("Rate Profiles")
self.dock_rate_profiles_view.setObjectName(_from_utf_8("RateProfiles"))
self.dock_rate_profiles_view.setFeatures(QtWidgets.QDockWidget.DockWidgetMovable |
QtWidgets.QDockWidget.DockWidgetFloatable)
self.dock_rate_profiles_view.setAllowedAreas(QtCore.Qt.AllDockWidgetAreas)
self.rate_profiles_view = PgWidgetRateProfile()
self.rate_profiles_view.setObjectName(_from_utf_8("RateProfileView"))
self.dock_rate_profiles_view.setWidget(self.rate_profiles_view)
self.tools = PlotGridTools()
self.plotGridOptionsLayout = QtWidgets.QGridLayout()
self.plotGridOptionsLayout.setObjectName(_from_utf_8("PlotGridOptionsLayout"))
self.plotGridOptionsLayout.addWidget(self.tools)
self.plotGridOptions = CollapsibleWidget(parent=self.plotGrid, title="Options", animation_duration=400)
self.plotGridOptions.set_content_layout(self.plotGridOptionsLayout)
self.plotGrid.main_grid_layout.addWidget(self.plotGridOptions, 1, 0)
self.plotGrid.main_grid_layout.setRowStretch(0, 10)
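        # Menu bar, status bar and toolbar of the main window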
self.menu_bar = QtWidgets.QMenuBar(main_application)
self.menu_bar.setGeometry(QtCore.QRect(0, 0, 1159, 25))
self.menu_bar.setObjectName(_from_utf_8("menubar"))
self.menu_File = QtWidgets.QMenu(self.menu_bar)
self.menu_File.setObjectName(_from_utf_8("menu_File"))
self.menu_Edit = QtWidgets.QMenu(self.menu_bar)
self.menu_Edit.setObjectName(_from_utf_8("menu_Edit"))
self.menu_Help = QtWidgets.QMenu(self.menu_bar)
self.menu_Help.setObjectName(_from_utf_8("menu_Help"))
self.menu_View = QtWidgets.QMenu(self.menu_bar)
self.menu_View.setObjectName(_from_utf_8("menu_View"))
main_application.setMenuBar(self.menu_bar)
self.statusbar = QtWidgets.QStatusBar(main_application)
self.statusbar.setObjectName(_from_utf_8("statusbar"))
main_application.setStatusBar(self.statusbar)
self.toolbar = QtWidgets.QToolBar(main_application)
self.toolbar.setObjectName(_from_utf_8("toolBar"))
main_application.addToolBar(QtCore.Qt.TopToolBarArea, self.toolbar)
self.action_new_project = QtWidgets.QAction(main_application)
self.action_new_project.setObjectName(_from_utf_8("action_new_project"))
self.action_load_project = QtWidgets.QAction(main_application)
self.action_load_project.setObjectName(_from_utf_8("action_load_project"))
self.action_save_project = QtWidgets.QAction(main_application)
self.action_save_project.setObjectName(_from_utf_8("action_save_project"))
self.action_quit = QtWidgets.QAction(main_application)
self.action_quit.setObjectName(_from_utf_8("action_quit"))
self.action_swap = QtWidgets.QAction(main_application)
self.action_swap.setObjectName(_from_utf_8("action_swap"))
self.action_collapse = QtWidgets.QAction(main_application)
self.action_collapse.setObjectName(_from_utf_8("action_collapse"))
self.action_recalculate_mapping = QtWidgets.QAction(main_application)
self.action_recalculate_mapping.setObjectName(_from_utf_8("action_recalculate_mapping"))
self.action_save_as = QtWidgets.QAction(main_application)
self.action_save_as.setObjectName(_from_utf_8("action_save_as"))
self.action_load_connector_map = QtWidgets.QAction(main_application)
self.action_load_connector_map.setObjectName(_from_utf_8("action_load_connector_map"))
self.action_zoom_in = QtWidgets.QAction(main_application)
self.action_zoom_in.setObjectName(_from_utf_8("action_zoom_in"))
self.action_zoom_out = QtWidgets.QAction(main_application)
self.action_zoom_out.setObjectName(_from_utf_8("action_zoom_out"))
self.action_revert_mapping = QtWidgets.QAction(main_application)
self.action_revert_mapping.setObjectName(_from_utf_8("action_revert_mapping"))
self.action_collapse_overview = QtWidgets.QAction(main_application)
self.action_collapse_overview.setObjectName(_from_utf_8("action_collapse_overview"))
self.action_expand_overview = QtWidgets.QAction(main_application)
self.action_expand_overview.setObjectName(_from_utf_8("action_expand_overview"))
self.action_preferences = QtWidgets.QAction(main_application)
self.action_preferences.setObjectName(_from_utf_8("action_preferences"))
self.action_about = QtWidgets.QAction(main_application)
self.action_about.setObjectName(_from_utf_8("action_about"))
self.action_tutorials = QtWidgets.QAction(main_application)
self.action_tutorials.setObjectName(_from_utf_8("action_tutorials"))
self.action_export_to_csv = QtWidgets.QAction(main_application)
self.action_export_to_csv.setObjectName(_from_utf_8("action_export_to_csv"))
self.action_export_to_odml = QtWidgets.QAction(main_application)
self.action_export_to_odml.setObjectName(_from_utf_8("action_export_to_odml"))
self.action_import_from_csv = QtWidgets.QAction(main_application)
self.action_import_from_csv.setObjectName(_from_utf_8("action_import_from_csv"))
self.action_import_from_od_ml = QtWidgets.QAction(main_application)
self.action_import_from_od_ml.setObjectName(_from_utf_8("action_import_from_od_ml"))
self.action_revert_state = QtWidgets.QAction(main_application)
self.action_revert_state.setObjectName(_from_utf_8("action_revert_state"))
self.action_restore_state = QtWidgets.QAction(main_application)
self.action_restore_state.setObjectName(_from_utf_8("action_restore_state"))
self.action_save_state = QtWidgets.QAction(main_application)
self.action_save_state.setObjectName(_from_utf_8("action_save_state"))
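        # Wire the actions defined above into the menus and the toolbar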
self.menu_File.addAction(self.action_new_project)
self.menu_File.addAction(self.action_load_project)
self.menu_File.addAction(self.action_save_project)
self.menu_File.addAction(self.action_save_as)
self.menu_File.addSeparator()
self.menu_File.addAction(self.action_load_connector_map)
self.menu_File.addAction(self.action_export_to_csv)
self.menu_File.addAction(self.action_export_to_odml)
self.menu_File.addSeparator()
self.menu_File.addAction(self.action_quit)
self.menu_Edit.addAction(self.action_recalculate_mapping)
self.menu_Edit.addAction(self.action_revert_mapping)
self.menu_Edit.addAction(self.action_swap)
self.menu_Edit.addSeparator()
self.menu_Edit.addAction(self.action_zoom_in)
self.menu_Edit.addAction(self.action_zoom_out)
self.menu_Edit.addAction(self.action_expand_overview)
self.menu_Edit.addAction(self.action_collapse_overview)
self.menu_Edit.addSeparator()
self.menu_Edit.addAction(self.action_preferences)
self.menu_Help.addAction(self.action_tutorials)
self.menu_Help.addAction(self.action_about)
self.menu_View.addAction(self.action_save_state)
self.menu_View.addAction(self.action_restore_state)
self.menu_View.addAction(self.action_revert_state)
self.menu_bar.addAction(self.menu_File.menuAction())
self.menu_bar.addAction(self.menu_Edit.menuAction())
self.menu_bar.addAction(self.menu_View.menuAction())
self.menu_bar.addAction(self.menu_Help.menuAction())
self.toolbar.addAction(self.action_new_project)
self.toolbar.addAction(self.action_load_project)
self.toolbar.addAction(self.action_save_project)
self.toolbar.addAction(self.action_save_as)
self.toolbar.addAction(self.action_preferences)
self.toolbar.addSeparator()
self.toolbar.addAction(self.action_revert_mapping)
self.toolbar.addAction(self.action_swap)
self.toolbar.addSeparator()
self.toolbar.addAction(self.action_zoom_in)
self.toolbar.addAction(self.action_zoom_out)
self.toolbar.addAction(self.action_expand_overview)
self.toolbar.addAction(self.action_collapse_overview)
self.load_icons()
self.retranslate_ui(main_application)
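        # Arrange the dock widgets: plot grid and unit/rate/PCA views on the left, waveform and ISI views on the right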
main_application.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.plotGridDock, QtCore.Qt.Vertical)
main_application.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.dock_virtual_unit_view, QtCore.Qt.Vertical)
main_application.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.dock_rate_profiles_view, QtCore.Qt.Vertical)
main_application.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.dock_pca_3d_view, QtCore.Qt.Vertical)
main_application.addDockWidget(QtCore.Qt.RightDockWidgetArea, self.dock_mean_waveforms_view, QtCore.Qt.Vertical)
main_application.addDockWidget(QtCore.Qt.RightDockWidgetArea, self.dock_isi_histograms_view, QtCore.Qt.Vertical)
main_application.splitDockWidget(self.plotGridDock, self.dock_virtual_unit_view, QtCore.Qt.Horizontal)
main_application.splitDockWidget(self.dock_virtual_unit_view, self.dock_rate_profiles_view, QtCore.Qt.Horizontal)
main_application.splitDockWidget(self.dock_rate_profiles_view, self.dock_pca_3d_view, QtCore.Qt.Vertical)
# self.action_quit.triggered.connect(main_application.close)
QtCore.QMetaObject.connectSlotsByName(main_application)
@staticmethod
def set_program_title(main_application, text):
main_application.setWindowTitle(_translate("main_application", text, None))
def retranslate_ui(self, main_application):
main_application.setWindowTitle(_translate("main_application", "SWAN - Sequential waveform analyser", None))
self.menu_File.setTitle(_translate("main_application", "&File", None))
self.menu_Edit.setTitle(_translate("main_application", "&Edit", None))
self.menu_Help.setTitle(_translate("main_application", "&Help", None))
self.menu_View.setTitle(_translate("main_application", "&View", None))
self.toolbar.setWindowTitle(_translate("main_application", "toolBar", None))
self.action_new_project.setText(_translate("main_application", "&New Project...", None))
self.action_new_project.setIconText(_translate("main_application", "New Project...", None))
self.action_new_project.setToolTip(_translate("main_application", "Create a new project", None))
self.action_new_project.setShortcut(_translate("main_application", "Ctrl+N", None))
self.action_load_project.setText(_translate("main_application", "&Load Project...", None))
self.action_load_project.setIconText(_translate("main_application", "Load Project...", None))
self.action_load_project.setToolTip(_translate("main_application", "Load project from file", None))
self.action_load_project.setShortcut(_translate("main_application", "Ctrl+O", None))
self.action_save_project.setText(_translate("main_application", "&Save Project", None))
self.action_save_project.setIconText(_translate("main_application", "Save Project", None))
self.action_save_project.setToolTip(_translate("main_application", "Save project", None))
self.action_save_project.setShortcut(_translate("main_application", "Ctrl+S", None))
self.action_quit.setText(_translate("main_application", "&Quit", None))
self.action_quit.setToolTip(_translate("main_application", "Close this application", None))
self.action_quit.setShortcut(_translate("main_application", "Ctrl+Q", None))
self.action_swap.setText(_translate("main_application", "Swap", None))
self.action_swap.setToolTip(_translate("main_application", "Swap two selected units", None))
self.action_collapse.setText(_translate("main_application", "Collapse", None))
self.action_collapse.setToolTip(_translate("main_application", "Collapse selected unit row(s)", None))
self.action_recalculate_mapping.setText(_translate("main_application", "Recalculate mapping...", None))
self.action_recalculate_mapping.setToolTip(_translate("main_application", "Try to find a mapping automatically",
None))
self.action_save_as.setText(_translate("main_application", "Save project as...", None))
self.action_save_as.setToolTip(_translate("main_application", "Save project to a new file", None))
self.action_load_connector_map.setText(_translate("main_application", "Load connector map...", None))
self.action_zoom_in.setText(_translate("main_application", "Zoom in", None))
self.action_zoom_in.setToolTip(_translate("main_application", "Zoom overview in", None))
self.action_zoom_in.setShortcut(_translate("main_application", "Ctrl++", None))
self.action_zoom_out.setText(_translate("main_application", "Zoom out", None))
self.action_zoom_out.setToolTip(_translate("main_application", "Zoom overview out", None))
self.action_zoom_out.setShortcut(_translate("main_application", "Ctrl+-", None))
self.action_revert_mapping.setText(_translate("main_application", "Revert mapping...", None))
self.action_revert_mapping.setToolTip(_translate("main_application", "Revert current mapping to last saved",
None))
self.action_collapse_overview.setText(_translate("main_application", "Collapse overview", None))
self.action_collapse_overview.setToolTip(_translate("main_application", "Decrease overview\'s y range", None))
self.action_expand_overview.setText(_translate("main_application", "Expand overview", None))
self.action_expand_overview.setToolTip(_translate("main_application", "Increase overview\'s y range", None))
self.action_preferences.setText(_translate("main_application", "Preferences", None))
self.action_preferences.setToolTip(_translate("main_application", "View and change preferences", None))
self.action_about.setText(_translate("main_application", "About", None))
self.action_about.setToolTip(_translate("main_application", "Information about SWAN", None))
self.action_tutorials.setText(_translate("main_application", "Tutorials", None))
self.action_export_to_csv.setText(_translate("main_application", "Export to CSV...", None))
self.action_export_to_odml.setText(_translate("main_application", "Export to odML...", None))
self.action_import_from_csv.setText(_translate("main_application", "Import from csv", None))
self.action_restore_state.setText(_translate("main_application", "Restore GUI state", None))
self.action_revert_state.setText(_translate("main_application", "Revert GUI state", None))
self.action_save_state.setText(_translate("main_application", "Save GUI state", None))
def load_icons(self):
"""
Loads the icons.
"""
try:
prefix = ":" + sep + "icons" + sep
# File
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(prefix + "new.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.action_new_project.setIcon(icon)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(prefix + "open.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.action_load_project.setIcon(icon)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(prefix + "save.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.action_save_project.setIcon(icon)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(prefix + "save_as.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.action_save_as.setIcon(icon)
# Edit
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(prefix + "revert.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.action_revert_mapping.setIcon(icon)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(prefix + "swap.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.action_swap.setIcon(icon)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(prefix + "zoom_in.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.action_zoom_in.setIcon(icon)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(prefix + "zoom_out.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.action_zoom_out.setIcon(icon)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(prefix + "expand.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.action_expand_overview.setIcon(icon)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(prefix + "collapse.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.action_collapse_overview.setIcon(icon)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(prefix + "preferences.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.action_preferences.setIcon(icon)
except Exception as e:
print("Icon Exception: {exception}".format(exception=e))
pass
|
py | 1a30bfa58394f6e7f42ba7213bc2f23010d0481f | # -*- coding: utf-8 -*-
"""
Container for building a scene with fluorescent objects (i.e., the scene plays the role of a background or frame).
@author: ssklykov
"""
# %% Imports
import numpy as np
import matplotlib.pyplot as plt
# from skimage.util import img_as_ubyte
import os
from skimage.io import imsave
from scipy.ndimage import measurements
# %% class definition
class u_scene():
"""Class composing all capabilities of building image (numpy 2D array) with some objects drawn on the scene.
The image commonly is designated as width x height (e.g., 800x600)"""
# default values
width = 100
height = 100
possible_img_types = ['uint8', 'uint16', 'float']
image_type = 'uint8'
scene_image = np.zeros((height, width), dtype=image_type)
maxPixelValue = 255
counter = 1 # counting how many images saved along generation
centers_of_mass = []
# %% Constructor
def __init__(self, width: int, height: int, image_type: str = 'uint8'):
"""
Initialize the blank (dark) scene image with the specified type (800x600 8bit image as an example)
Parameters
----------
width : int
Width of the initialized image (scene)
height : int
Height of the initialized image (scene)
image_type : str, optional
Image type used for pixel value calculations. Possible values are: 'uint8', 'uint16', 'float'.
The default is 'uint8'.
Returns
-------
None.
"""
width = abs(width)
height = abs(height)
if width > 0:
self.width = width
if height > 0:
            self.height = height
if image_type in self.possible_img_types:
self.image_type = image_type
else:
self.image_type = 'uint8'
print("Image type hasn't been recognized, initialized default 8bit gray image")
if (width != 100) or (height != 100) or (image_type != 'uint8'):
# non default values => re-initialization of the class attributes
self.scene_image = np.zeros((height, width), dtype=self.image_type)
self.width = width
self.height = height
if self.image_type == 'uint16':
self.maxPixelValue = 65535
elif self.image_type == 'float':
self.maxPixelValue = 1.0 # According to the specification of scikit-image
# %% Supportive functions
def cast_pixels_sum(self, pixels_sum):
"""
Casting of input result of pixel summing to conform with data type of the used image.
Parameters
----------
pixels_sum : uint8, uint16 or float
Sum of pixels (mask + scene (background)).
Returns
-------
value_returned : uint8, uint16 or float
Returns casted / corrected pixel value.
"""
if (pixels_sum) <= self.maxPixelValue:
# additional conversion for insuring of conformity with data type
if self.image_type == 'uint8':
value_returned = np.uint8(pixels_sum)
elif self.image_type == 'uint16':
value_returned = np.uint16(pixels_sum)
else:
value_returned = float(pixels_sum)
else:
value_returned = self.maxPixelValue
return value_returned
def get_j_finish(self, j_start: int, nCols: int) -> int:
"""
        Calculate the maximum j index for adding a mask, preventing it from going out of bounds.
Parameters
----------
j_start : int
Starting index for filling mask in.
nCols : int
Number of columns in mask that should be added to the scene.
Returns
-------
int
Ending ("final") index j for filling mask into the scene.
"""
if ((j_start + nCols) < self.width): # checking that starting/ending of summing are not out of bounds
j_finish = j_start + nCols
else:
j_finish = self.width
return j_finish
def get_i_finish(self, i_start: int, nRows: int) -> int:
"""
        Calculate the maximum i index for adding a mask, preventing it from going out of bounds.
Parameters
----------
i_start : int
Starting index for filling mask in.
nRows : int
            Number of rows in the mask that should be added to the scene.
Returns
-------
int
Ending ("final") index j for filling mask into the scene.
"""
if ((i_start + nRows) < self.height): # checking that starting/ending of summing are not out of bounds
i_finish = i_start + nRows
else:
i_finish = self.height
return i_finish
# %% Drawing of an object with some intensity mask (profile)
def add_mask(self, i_start: int, j_start: int, mask, debug: bool = False):
"""
Adding the "mask" - representation of the object (basically, less than the scene (background) image).
Contradictory, i_start represents "y" coordinate, j_start - "x", due to array representation of column and row.
This function accepts coordinates of image origin - starting pixel for drawing (like zero pixel).
The coordinates (j_start, i_start) as (x, y) could be negative or exceeding the scene sizes - in such case
whenever it possible, only the part of an object image will be added.
Parameters
----------
i_start : int
Start pixel (y coordinate) for drawing of an image ("mask").
j_start : int
Start pixel (x coordinate) for drawing of an image ("mask").
mask : np.array
2D np.array ("mask") with pixel values which represent the object.
debug: bool, optional
Flag for saving some internal statistical values for checking of possible bugs during calculations.
The default is False.
Returns
-------
None.
The scene collected as internal attribute of this class.
"""
(nRows, nCols) = np.shape(mask) # getting of sizes of mask
# Below is checking that the mask is not empty, it should be 1x1 matrix at least
if (nRows == 0) or (nCols == 0):
raise(IndexError('Provided mask is empty along some of its axis'))
# Below is checking that the i_start and j_start makes sense to apply to the scene image:
# i_start and j_start could be negative, but at least 1 point should be added to a scene
# also, j associates with WIDTH, so with # of columns! i - with rows!
if ((i_start + nRows) < 1) or ((j_start + nCols) < 1):
raise(IndexError('Provided i_start or j_start is not conformed with the mask sizes'))
# Below is checking filling parameters (i_start, j_start) is laying on an scene image
if (i_start >= self.height) or (j_start >= self.width):
raise(IndexError("Starting indices for adding mask is out of scene image bounds"))
# i_start, j_start > 0 both, filling some mask into a scene image - basic check for conformity
if (i_start >= 0) and (j_start >= 0) and (nRows > 0) and (nCols > 0):
# Attempt to speed up the adding mask to a scene: transferring pixel values as chunk with rows
if ((i_start + nRows) < self.height): # checking that fast sum over y axis could be performed
i_finish = i_start + nRows
j_finish = self.get_j_finish(j_start, nCols)
# "Fast summing" - adding the whole rows (all columns) to the image (scene)
for j in range(j_start, j_finish): # summing along j axis
# checking the conformity with image type
if np.max(self.scene_image[i_start:i_finish, j] + mask[:, j-j_start]) <= self.maxPixelValue:
self.scene_image[i_start:i_finish, j] += mask[:, j-j_start] # fast adding mask to a scene
else:
# checking each pixel from a scene and added from a mask pixel to be in range with image type
for i in range(i_start, i_finish):
pixels_sum = self.scene_image[i, j] + mask[i-i_start, j-j_start]
self.scene_image[i, j] = self.cast_pixels_sum(pixels_sum)
# Attempt to speed up the adding mask to a scene: transferring pixel values as a chunk with columns
elif ((j_start + nCols) < self.width): # checking that fast sum over i axis could be performed
j_finish = j_start + nCols
i_finish = self.get_i_finish(i_start, nRows)
# "Fast summing" - along column - adding all rows at once
for i in range(i_start, i_finish): # summing along j axis
# checking the conformity with image type
if np.max(self.scene_image[i, j_start:j_finish] + mask[i-i_start, :]) <= self.maxPixelValue:
self.scene_image[i, j_start:j_finish] += mask[i-i_start, :] # fast adding mask to a scene
else:
# checking each pixel from a scene and added from a mask pixel to be in range with image type
for j in range(j_start, j_finish):
pixels_sum = self.scene_image[i, j] + mask[i-i_start, j-j_start]
self.scene_image[i, j] = self.cast_pixels_sum(pixels_sum)
# filling right upper corner with exceptional case - when mask is out of image bounds
else:
i_finish = self.height
j_finish = self.width
for i in range(i_start, i_finish):
for j in range(j_start, j_finish):
pixels_sum = self.scene_image[i, j] + mask[i-i_start, j-j_start]
self.scene_image[i, j] = self.cast_pixels_sum(pixels_sum)
# Making correction of i_start, j_start if some of them is negative for working with partial mask overlap
if (i_start < 0) or (j_start < 0):
i_mask_start = 0
j_mask_start = 0
if (i_start < 0):
nRows += i_start # it will draw the mask if it partially overlaps with image boundaries
i_mask_start = abs(i_start)
i_start = 0
if (j_start < 0):
nCols += j_start
j_mask_start = abs(j_start)
j_start = 0
i_finish = self.get_i_finish(i_start, nRows)
j_finish = self.get_j_finish(j_start, nCols)
for i in range(i_start, i_finish):
for j in range(j_start, j_finish):
                    pixels_sum = self.scene_image[i, j] + mask[(i - i_start) + i_mask_start, (j - j_start) + j_mask_start]
self.scene_image[i, j] = self.cast_pixels_sum(pixels_sum)
# HINT: below is controlling of simulation - calculation of center of mass of added mask (generated scene)
if debug:
(i_mass_center, j_mass_center) = measurements.center_of_mass(self.scene_image)
self.centers_of_mass.append([i_mass_center, j_mass_center])
# print([i_mass_center, j_mass_center])
    # %% Plotting the summarized image (scene) with all objects
def plot_image(self):
"""
Plotting the self.scene composed with added masks (objects) / noise.
Returns
-------
Plotted the scene (picture) on the separate window using matplotlib library
"""
plt.figure()
# Below - representation according to the documentation:
# plt.cm.gray - for representing gray values, aspect - for filling image values in a window
# origin - for adjusting origin of pixels (0, 0), extent - regulation of axis values
# extent = (-0.5, numcols-0.5, -0.5, numrows-0.5)) - for origin = 'lower' - documents
plt.imshow(self.scene_image, cmap=plt.cm.gray, aspect='auto', origin='lower',
extent=(0, self.width, 0, self.height))
plt.tight_layout()
# %% Clearing the scene
def clear_scene(self):
"""
        Clearing the scene (background) image by re-initializing it to zero values (completely dark).
Returns
-------
None.
"""
self.scene_image = np.zeros((self.height, self.width), dtype=self.image_type)
# %% Saving generated scene image
def save_scene(self, base_extension: str = "jpg"):
"""
Saving the scene (image) with all collected masks (objects) on it.
Parameters
----------
base_extension : str, optional
The base extension for saving images (like jpg, png, tiff, etc). The default is "jpg".
Returns
-------
None.
"""
scriptFolder = os.getcwd()
default_folder = "tests"
path = os.path.join(scriptFolder, default_folder)
# print(path)
if not os.path.isdir(path):
os.mkdir(path)
if os.path.isdir(path):
# print(path)
base_name = str(self.counter) + "." + base_extension
self.counter += 1
path_for_bead = os.path.join(path, base_name)
if base_extension == "jpg" or base_extension == "jpeg":
imsave(path_for_bead, self.scene_image, quality=100)
else:
imsave(path_for_bead, self.scene_image)
# %% Testing class methods / construction
if __name__ == '__main__':
uScene = u_scene(150, 150, 'uint8')
mask = np.ones((20, 20), dtype='uint8')
    mask = mask[:, :]*255
uScene.add_mask(40, 40, mask)
uScene.add_mask(80, 80, mask)
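    # Demonstrate partial-overlap handling: a mask anchored at negative coordinates is clipped to the scene
    uScene.add_mask(-10, -10, mask, debug=True)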
uScene.plot_image()
uScene.save_scene()
|
py | 1a30bfc6feaa706123bd93f8aa309ab181bc795f | # Copyright 2019 BlueCat Networks (USA) Inc. and its affiliates
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# By: BlueCat Networks
# Date: 2021-11-10
# Gateway Version: 18.10.2
# Description: Confirm MAC Address Page
# Various Flask framework items.
from flask import url_for, redirect, render_template, flash, g
from bluecat import route, util
from bluecat.entity import Entity
from bluecat.api_exception import PortalException
import config.default_config as config
from main_app import app
from .confirm_mac_address_form import get_resource_text
from .confirm_mac_address_form import GenericFormTemplate
def get_configuration():
configuration = None
if g.user:
configuration = g.user.get_api().get_configuration(config.default_configuration)
return configuration
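# Look up a MAC address object in the configuration; returns None if the address is not registered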
def get_mac_address(configuration, address):
mac_addr = None
try:
mac_addr = configuration.get_mac_address(address)
print(mac_addr)
except PortalException:
pass
return mac_addr
# The workflow name must be the first part of any endpoints defined in this file.
# If you break this rule, you will trip up on other people's endpoint names and
# chaos will ensue.
@route(app, '/confirm_mac_address/confirm_mac_address_endpoint')
@util.workflow_permission_required('confirm_mac_address_page')
@util.exception_catcher
def confirm_mac_address_confirm_mac_address_page():
form = GenericFormTemplate()
configuration = get_configuration()
return render_template(
'confirm_mac_address_page.html',
form=form,
text=get_resource_text(),
options=g.user.get_options(),
)
@route(app, '/confirm_mac_address/form', methods=['POST'])
@util.workflow_permission_required('confirm_mac_address_page')
@util.exception_catcher
def confirm_mac_address_confirm_mac_address_page_form():
form = GenericFormTemplate()
configuration = get_configuration()
text = get_resource_text()
if form.validate_on_submit():
mac_address = get_mac_address(configuration, form.mac_address.data)
if mac_address is not None:
mac_pool=mac_address.get_property('macPool')
if mac_pool is None:
mac_pool=text['nomacpool']
flash(mac_address.get_address() + text['registered'] , 'succeed')
flash('MAC Pool : ' + mac_pool, 'succeed')
else:
flash(form.mac_address.data + text['noregistered'], 'failed')
# Put form processing code here
g.user.logger.info('SUCCESS')
return redirect(url_for('confirm_mac_addressconfirm_mac_address_confirm_mac_address_page'))
else:
g.user.logger.info('Form data was not valid.')
return render_template(
'confirm_mac_address_page.html',
form=form,
text=text,
options=g.user.get_options(),
)
|
py | 1a30c050f0c817ab25120d1c571880c40f48bd96 | import yaml
with open("/home/vagrant/.kube/config", "r") as stream:
    yml = yaml.safe_load(stream)
print(yml["clusters"][0]["cluster"]["certificate-authority-data"])
# print(yaml.dump(yml))
|
py | 1a30c0a56da2c2d1169f301507df66a8807945ff | import sys
import spider
from spider_ui import Ui_Dialog, QtWidgets, QtGui
class SpiderDialog(QtWidgets.QDialog):
def __init__(self, parent=None):
super().__init__(parent)
self.ui = Ui_Dialog()
self.ui.setupUi(self)
self.spider = spider.RenrenSpider()
self.init_signals()
if self.spider.is_login():
self.ui.loginFrame.hide()
self.ui.mainFrame.show()
def init_signals(self):
self.ui.loginBtn.clicked.connect(self.on_login)
self.ui.startBtn.clicked.connect(self.on_start)
self.ui.browserBtn.clicked.connect(self.on_browse_dir)
def on_login(self):
email = self.ui.emailInput.text()
password = self.ui.passwordInput.text()
remember = self.ui.rememberCkb.isChecked()
icode = self.ui.iCodeInput.text()
try:
self.spider.login(email, password, icode, remember)
except spider.iCodeRequired as e:
self.show_icode()
error = QtWidgets.QErrorMessage()
error.showMessage(str(e))
else:
self.ui.loginFrame.hide()
self.ui.mainFrame.show()
def show_icode(self):
with open('icode.jpg', 'wb') as f:
f.write(self.spider.get_icode_image())
icode_image = QtGui.QImage('icode.jpg')
icode_pixmap = QtGui.QPixmap.fromImage(icode_image)
self.ui.iCodeImg.setPixmap(icode_pixmap)
self.ui.iCodeFrame.show()
def on_start(self):
self.spider.set_params(
user_id=self.ui.userInput.text(),
output_dir=self.ui.outputPathInput.text()
)
self.ui.progressFrame.show()
self.spider.main(self)
        self.ui.label.setText("Backup complete!")
def on_browse_dir(self):
file_dialog = QtWidgets.QFileDialog()
file_dialog.setFileMode(QtWidgets.QFileDialog.Directory)
file_dialog.setOption(QtWidgets.QFileDialog.ShowDirsOnly)
if file_dialog.exec_():
self.ui.outputPathInput.setText(file_dialog.selectedFiles()[0])
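    # Factory returning a lightweight, tqdm-like progress adapter (total/desc/update) that drives the Qt progress bar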
def progressbar(self, total: int, desc: str):
ui = self.ui
class ProgressBar(object):
def __init__(self):
self.current = 0.0
ui.label.setText(desc)
ui.progressBar.reset()
def update(self, number: int = 1):
self.current += number
ui.progressBar.setValue(int(self.current / total * 100))
return ProgressBar()
def main():
app = QtWidgets.QApplication(sys.argv)
dialog = SpiderDialog()
dialog.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
|
py | 1a30c0f2d3d98f4ad9810b94a3b7c29586620c71 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for fusion of fc and activation."""
from __future__ import print_function
import unittest
import numpy as np
import paddle.fluid as fluid
from inference_pass_test import InferencePassTest
from paddle import enable_static
from paddle.fluid.core import PassVersionChecker
enable_static()
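# Each test below builds an fc + activation subgraph and verifies that the oneDNN fc_act_mkldnn_fuse_pass preserves its output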
class FCGeluTanhOneDnnFusePassTest(InferencePassTest):
def setUp(self):
self.set_params()
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(name="data",
shape=[-1, 128, 768],
dtype="float32")
fc_out = fluid.layers.fc(input=data, size=3072, num_flatten_dims=2)
gelu_out = fluid.layers.gelu(fc_out, approximate=False)
self.feeds = {"data": np.random.random((1, 128, 768)).astype("float32")}
self.fetch_list = [gelu_out]
self.enable_mkldnn = True
def set_params(self):
self.pass_name = "fc_act_mkldnn_fuse_pass"
def test_check_output(self):
self.check_output()
class FCGeluErfOneDnnFusePassTest(InferencePassTest):
def setUp(self):
self.set_params()
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(name="data",
shape=[-1, 128, 768],
dtype="float32")
fc_out = fluid.layers.fc(input=data, size=3072, num_flatten_dims=2)
gelu_out = fluid.layers.gelu(fc_out, approximate=True)
self.feeds = {"data": np.random.random((1, 128, 768)).astype("float32")}
self.fetch_list = [gelu_out]
self.enable_mkldnn = True
def set_params(self):
self.pass_name = "fc_act_mkldnn_fuse_pass"
def test_check_output(self):
self.check_output()
self.assertTrue(PassVersionChecker.IsCompatible(self.pass_name))
class FCTanhOneDnnFusePassTest(InferencePassTest):
def setUp(self):
self.set_params()
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(name="data",
shape=[-1, 128, 768],
dtype="float32")
fc_out = fluid.layers.fc(input=data, size=3072, num_flatten_dims=2)
tanh_out = fluid.layers.tanh(fc_out)
self.feeds = {"data": np.random.random((1, 128, 768)).astype("float32")}
self.fetch_list = [tanh_out]
self.enable_mkldnn = True
def set_params(self):
self.pass_name = "fc_act_mkldnn_fuse_pass"
def test_check_output(self):
self.check_output()
self.assertTrue(PassVersionChecker.IsCompatible(self.pass_name))
class FCSigmoidOneDnnFusePassTest(InferencePassTest):
def setUp(self):
self.set_params()
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(name="data",
shape=[-1, 128, 768],
dtype="float32")
fc_out = fluid.layers.fc(input=data, size=3072, num_flatten_dims=2)
sigmoid_out = fluid.layers.sigmoid(fc_out)
self.feeds = {"data": np.random.random((1, 128, 768)).astype("float32")}
self.fetch_list = [sigmoid_out]
self.enable_mkldnn = True
def set_params(self):
self.pass_name = "fc_act_mkldnn_fuse_pass"
def test_check_output(self):
self.check_output()
self.assertTrue(PassVersionChecker.IsCompatible(self.pass_name))
class FCHardSwishOneDnnFusePassTest(InferencePassTest):
def setUp(self):
self.set_params()
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(name="data",
shape=[-1, 128, 768],
dtype="float32")
fc_out = fluid.layers.fc(input=data, size=3072, num_flatten_dims=2)
hardswish_out = fluid.layers.hard_swish(fc_out)
self.feeds = {"data": np.random.random((1, 128, 768)).astype("float32")}
self.fetch_list = [hardswish_out]
self.enable_mkldnn = True
def set_params(self):
self.pass_name = "fc_act_mkldnn_fuse_pass"
def test_check_output(self):
self.check_output()
self.assertTrue(PassVersionChecker.IsCompatible(self.pass_name))
class FCMishOneDnnFusePassTest(InferencePassTest):
def setUp(self):
self.set_params()
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(name="data",
shape=[-1, 128, 768],
dtype="float32")
fc_out = fluid.layers.fc(input=data, size=3072, num_flatten_dims=2)
mish_out = fluid.layers.mish(fc_out)
self.feeds = {"data": np.random.random((1, 128, 768)).astype("float32")}
self.fetch_list = [mish_out]
self.enable_mkldnn = True
def set_params(self):
self.pass_name = "fc_act_mkldnn_fuse_pass"
def test_check_output(self):
self.check_output()
self.assertTrue(PassVersionChecker.IsCompatible(self.pass_name))
if __name__ == "__main__":
unittest.main()
|
py | 1a30c2711a6a007426cc1ca55a5fdbb26572d1d2 | # coding: utf-8
"""
DocuSign Click API
DocuSign Click lets you capture consent to standard agreement terms with a single click: terms and conditions, terms of service, terms of use, privacy policies, and more. The Click API lets you include this customizable clickwrap solution in your DocuSign integrations. # noqa: E501
OpenAPI spec version: v1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ServiceVersion(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'version': 'str',
'version_url': 'str'
}
attribute_map = {
'version': 'version',
'version_url': 'versionUrl'
}
def __init__(self, version=None, version_url=None): # noqa: E501
"""ServiceVersion - a model defined in Swagger""" # noqa: E501
self._version = None
self._version_url = None
self.discriminator = None
if version is not None:
self.version = version
if version_url is not None:
self.version_url = version_url
@property
def version(self):
"""Gets the version of this ServiceVersion. # noqa: E501
# noqa: E501
:return: The version of this ServiceVersion. # noqa: E501
:rtype: str
"""
return self._version
@version.setter
def version(self, version):
"""Sets the version of this ServiceVersion.
# noqa: E501
:param version: The version of this ServiceVersion. # noqa: E501
:type: str
"""
self._version = version
@property
def version_url(self):
"""Gets the version_url of this ServiceVersion. # noqa: E501
# noqa: E501
:return: The version_url of this ServiceVersion. # noqa: E501
:rtype: str
"""
return self._version_url
@version_url.setter
def version_url(self, version_url):
"""Sets the version_url of this ServiceVersion.
# noqa: E501
:param version_url: The version_url of this ServiceVersion. # noqa: E501
:type: str
"""
self._version_url = version_url
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ServiceVersion, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ServiceVersion):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py | 1a30c2ce3f7cd935e1807d6df0a90a33ebfc7ff3 | ###############################################################################
# Copyright Keith Butler(2014) #
# #
# This file MacroDensity.density_tools.py is free software: you can #
# redistribute it and/or modify it under the terms of the GNU General Public #
# License as published by the Free Software Foundation, either version 3 of #
# the License, or (at your option) any later version. #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# You should have received a copy of the GNU General Public License along with#
# this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
from __future__ import print_function, division
from functools import reduce
import math
from itertools import chain
import numpy
import numpy as np
from scipy import interpolate
#------------------------------------------------------------------------------
def gradient_magnitude(gx, gy, gz):
"""Converts the separate gradient magnitudes to a single magnitude
Args:
gx/y/z : fields in x y and z directions 2D array
Returns:
grad_mag : gradient of fields at each point"""
grad_mag = gx
for i in range(gx.shape[0]):
for j in range(gy.shape[1]):
for k in range(gz.shape[2]):
grad_mag[i,j,k] = np.sqrt(gx[i,j,k]**2 +
gy[i,j,k]**2 +
gz[i,j,k]**2)
return grad_mag
#------------------------------------------------------------------------------
def vector_2_abscissa(vector, magnitude, dx, dy, dz):
"""Converts a vector with a magnitude given in units of grid density
(NGX/Y/Z) to AA for plotting
Args:
vector : the vector along which the line is being plotted [(3x1) array]
magnitude : the number of steps that were taken along that vector
[Integer]
dx/y/z: the resolution of the density grid in AA-1 [Real]
Returns:
abscissa : the values for plotting on the abscissa in AA [1D array]
"""
vec_mag = np.linalg.norm([vector[0] * dx, vector[1] * dy, vector[2] * dz])
abscissa = [i * vec_mag for i in range(magnitude)]
return np.asarray(abscissa)
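# Worked example (illustrative values): vector_2_abscissa([1, 0, 0], 3, 0.5, 0.5, 0.5)
# gives vec_mag = 0.5 and returns array([0.0, 0.5, 1.0]) -- the distance in AA of
# each sampled point along the line.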
#------------------------------------------------------------------------------
def number_in_field(gradients, cutoff):
"""Get number of grid elements with a field magnitude greater than cutoff
Args:
gradients: the grid of field gradients (Real(ngx,ngy,ngz))
        cutoff: the value above which to count them (Real)
Returns:
number_of_elements: the number satisfying the condition (Integer)
"""
number_of_elements = 0
for element in np.nditer(gradients):
if element >= cutoff:
number_of_elements += 1
return number_of_elements
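# Note: an equivalent vectorised form (assuming `gradients` is a numpy array) is
# int(np.count_nonzero(gradients >= cutoff)); the explicit loop above is kept for
# clarity.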
#------------------------------------------------------------------------------
def element_vol(vol, nx, ny, nz):
"""Calculates the volume of each of the elements on the grid.
Args:
vol: the cell volume (real)
        nx, ny, nz : the number of grid points in each direction (integer)
Returns:
ele_vol : the volume (real)
"""
number_of_elements = nx * ny * nz
ele_vol = vol / number_of_elements
return ele_vol
#------------------------------------------------------------------------------
def one_2_2d(Array, resolution, vector):
"""Converts the 1d potential array to 2D with angstroms in A[0]
Args:
Array: 1D array
resolution: density of sampling of distance (1/AA)
vector: The vector of the direction of sampling
Returns
New_array: 2D array
"""
length = np.sqrt(vector.dot(vector))
New_array = np.zeros(shape=(len(Array) - 1, 2))
resolution = length / len(Array)
for i in range(len(Array) - 1):
New_array[i,0] = i*resolution
New_array[i,1] = Array[i]
return New_array
#------------------------------------------------------------------------------
def macroscopic_average(potential, periodicity, resolution):
"""Getting the macroscopic average of potential
Args:
    potential : array containing the electrostatic potential/charge density
periodicity : real number; the period over which to average
resolution : the grid resolution in the direction of averaging
Returns:
macro_average : array with the macroscopically averaged values"""
macro_average = np.zeros(shape=(len(potential)))
period_points = int((periodicity/resolution))
# Period points must be even
if period_points % 2 != 0:
period_points = period_points + 1
length = len(potential)
for i in range(length):
start = i - int(period_points / 2)
end = i + int(period_points / 2)
if start < 0:
start = start + length
macro_average[i] = macro_average[i] + sum(potential[0:end]) + sum(potential[start:length])
macro_average[i] = macro_average[i] / period_points
elif end >= length:
end = end - length
macro_average[i] = macro_average[i] + sum(potential[start:length]) + sum(potential[0:end])
macro_average[i] = macro_average[i] / period_points
else:
macro_average[i] = macro_average[i] + sum(potential[start:end]) / period_points
print("Average of the average = ", numpy.average(macro_average))
return macro_average
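# Usage sketch (illustrative numbers): for a planar-averaged potential sampled
# every 0.1 AA along z in a cell with a 4.0 AA repeat unit,
#   macro = macroscopic_average(planar_potential, periodicity=4.0, resolution=0.1)
# averages each point over a 40-point window, smoothing out the atomic-scale
# oscillations of the planar average.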
#------------------------------------------------------------------------------
def cube_potential(origin, travelled, cube, Grid, nx, ny, nz):
"""Populates the sampling cube with the potential required"""
# Recalc the origin as grid point coordinates
n_origin = np.zeros(shape=(3))
n_origin[0] = int(origin[0]*nx)
n_origin[1] = int(origin[1]*ny)
n_origin[2] = int(origin[2]*nz)
potential_cube = np.zeros(shape=(cube[0],cube[1],cube[2]))
for x in range(0,cube[0]):
for y in range(0,cube[1]):
for z in range(0,cube[2]):
# Assign the values of coordinates in the original grid
xv = int(n_origin[0]+travelled[0]+x)
yv = int(n_origin[1]+travelled[1]+y)
zv = int(n_origin[2]+travelled[2]+z)
# Minimum image convention
zv = int(zv - nz*round(zv/nz))
yv = int(yv - ny*round(yv/ny))
xv = int(xv - nx*round(xv/nx))
potential_cube[x,y,z] = Grid[int(xv),int(yv),int(zv)]
return potential_cube.mean(), np.var(potential_cube)
#------------------------------------------------------------------------------
def cuboid_average(Grid, cube, origin, vector, nx, ny, nz, magnitude):
"""Calculates the average in a cube defined by size cube(a,b,c), beginning
at origin and travelling as far as magnitude."""
plotting_average = np.zeros(shape=(magnitude))
i = 0
while i < magnitude:
travelled = np.multiply(i, vector)
plotting_average[i], varience = cube_potential(origin, travelled,
cube, Grid,
nx, ny, nz)
i = i + 1
return plotting_average
#------------------------------------------------------------------------------
def planar_average(Grid, nx, ny, nz, axis='z'):
"""Calculate the average in a given plane for the full length of the
normal; e.g. the full length of z in the xy plane."""
if axis == 'x':
x_plane = np.zeros(shape=(ny, nz))
Average = np.zeros(shape=(nx))
for x_value in range(nx):
x_plane[:,:] = Grid[x_value,:,:]
Average[x_value] = x_plane.mean()
if axis == 'y':
Average = np.zeros(shape=(ny))
y_plane = np.zeros(shape=(nx,nz))
for y_value in range(ny):
y_plane[:,:] = Grid[:,y_value,:]
Average[y_value] = y_plane.mean()
if axis == 'z':
Average = np.zeros(shape=(nz))
z_plane = np.zeros(shape=(nx,ny))
for z_value in range(nz):
z_plane[:,:] = Grid[:,:,z_value]
Average[z_value] = z_plane.mean()
return Average
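# Note: the same planar averages can be computed without explicit loops, e.g.
# Grid.mean(axis=(1, 2)) for axis='x', Grid.mean(axis=(0, 2)) for 'y' and
# Grid.mean(axis=(0, 1)) for 'z' (numpy accepts a tuple of axes).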
#------------------------------------------------------------------------------
def get_volume(a,b,c):
"""Calculate the volume of the cell from lattice vectors
Args:
a/b/c: vectors of the lattice edges
"""
volume = np.dot(a,np.cross(b,c))
return volume
#------------------------------------------------------------------------------
def numbers_2_grid(a,NGX,NGY,NGZ):
"""Takes a point (in fractional coordinates) and converts it to a VASP grid
point based on the NGX/Y/Z values."""
a_grid = np.zeros(shape=(3))
a_grid[0] = round(float(a[0])*NGX)
a_grid[1] = round(float(a[1])*NGY)
a_grid[2] = round(float(a[2])*NGZ)
return a_grid
#------------------------------------------------------------------------------
def matrix_2_abc(Lattice):
"""The the VASP lattice and convert to the a,b,c,alpha,beta,gamma format"""
a = np.sqrt(Lattice[0,0]**2+Lattice[0,1]**2+Lattice[0,2]**2)
b = np.sqrt(Lattice[1,0]**2+Lattice[1,1]**2+Lattice[1,2]**2)
c = np.sqrt(Lattice[2,0]**2+Lattice[2,1]**2+Lattice[2,2]**2)
a_vec = Lattice[0,:]
b_vec = Lattice[1,:]
c_vec = Lattice[2,:]
return a,b,c,a_vec,b_vec,c_vec
#------------------------------------------------------------------------------
def _print_boom(quiet=False):
if not quiet:
print("\n")
print("BBBB OOOO OOOO MMMMM ")
print("BBBB OOOO OOOO MMMMM ")
print("BBBB OOOO OOOO MMMMM ")
print("B B OOOO OOOO MMMMM ")
print("B B O O O O MMMMM ")
print("B B O O O O MMMMM ")
print("B B O O O O MMMMM ")
print("B B O O O O MMMMM ")
print("BBBB O O O O M M M ")
print("BBBB O O O O M M M ")
print("BBBB O O O O M M M ")
print("B B O O O O M M M ")
print("B B O O O O M M M ")
print("B B O O O O M M M ")
print("B B O O O O M M M ")
print("B B OOOO OOOO M M M ")
print("BBBB OOOO OOOO M M M ")
print("BBBB OOOO OOOO M M M ")
print("BBBB OOOO OOOO M M M ")
def read_vasp_density(FILE, use_pandas=None, quiet=False):
"""Generic reading of CHGCAR LOCPOT etc files from VASP
Args:
FILE (str): Path to density file
use_pandas (bool): Use Pandas library for faster file reading. If set
to None, Pandas will be used when available.
Returns:
Potential (array), NGX (int), NGY (int), NGZ (int), lattice (array)
where Potential is a 1-D flattened array of density data with original
dimensions NGX x NGY x NGZ and lattice is the 3x3 unit-cell matrix.
"""
# Get Header information by reading a line at a time
if use_pandas:
from pandas import read_table as pandas_read_table
elif use_pandas is None:
try:
from pandas import read_table as pandas_read_table
use_pandas = True
except ImportError:
use_pandas = False
print("Reading header information...")
with open(FILE, "r") as f:
_ = f.readline()
scale_factor = float(f.readline())
lattice = np.zeros(shape=(3,3))
for row in range(3):
lattice[row] = [float(x) for x in f.readline().split()]
lattice = lattice * scale_factor
num_species = len(f.readline().split())
num_type = [int(x) for x in f.readline().split()]
num_atoms = sum(num_type)
coord_type = f.readline().strip()
coordinates = numpy.zeros(shape=(num_atoms, 3))
for atom_i in range(num_atoms):
coordinates[atom_i] = [float(x) for x in f.readline().split()]
# Skip blank line
_ = f.readline()
NGX, NGY, NGZ = [int(x) for x in f.readline().split()]
if use_pandas:
print("Reading 3D data using Pandas...")
skiprows = 10 + num_atoms
readrows = int(math.ceil(NGX * NGY * NGZ / 5))
dat = pandas_read_table(FILE, delim_whitespace=True,
skiprows=skiprows, header=None,
nrows=readrows)
Potential = dat.iloc[:readrows, :5].values.flatten()
remainder = (NGX * NGY * NGZ) % 5
if remainder > 0:
Potential = Potential[:(-5 + remainder)]
else:
print("Reading 3D data...")
Potential = (f.readline().split()
for i in range(int(math.ceil(NGX * NGY * NGZ / 5))))
Potential = numpy.fromiter(chain.from_iterable(Potential), float)
_print_boom(quiet=quiet)
if not quiet:
print("Average of the potential = ", numpy.average(Potential))
return Potential, NGX, NGY, NGZ, lattice
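# Usage sketch (assumes a standard VASP output file named 'LOCPOT' in the working
# directory):
#   pot, NGX, NGY, NGZ, lattice = read_vasp_density('LOCPOT')
#   grid, _ = density_2_grid(pot, NGX, NGY, NGZ)
# which yields the potential on an (NGX, NGY, NGZ) mesh ready for the averaging
# helpers defined later in this module.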
#------------------------------------------------------------------------------
def _read_partial_density(FILE, use_pandas, num_atoms, NGX, NGY, NGZ, spin=0):
'''
use_pandas (bool): Use Pandas library for faster file reading. If set
to None, Pandas will be used when available.
spin: the set of spin data to read, default 0 for ISPIN=1 calculation
'''
print("PANDAS:", use_pandas)
if use_pandas:
from pandas import read_table as pandas_read_table
elif use_pandas is None:
try:
from pandas import read_table as pandas_read_table
use_pandas = True
except ImportError:
use_pandas = False
with open(FILE, "r") as f:
_ = f.readline()
scale_factor = float(f.readline())
lattice = np.zeros(shape=(3,3))
for row in range(3):
lattice[row] = [float(x) for x in f.readline().split()]
lattice = lattice * scale_factor
num_species = len(f.readline().split())
num_type = [int(x) for x in f.readline().split()]
num_atoms = sum(num_type)
coord_type = f.readline().strip()
coordinates = numpy.zeros(shape=(num_atoms, 3))
for atom_i in range(num_atoms):
coordinates[atom_i] = [float(x) for x in f.readline().split()]
# Skip blank line
_ = f.readline()
NGX, NGY, NGZ = [int(x) for x in f.readline().split()]
if use_pandas:
print("Reading 3D data using Pandas...")
skiprows = 10 + num_atoms + spin * \
(math.ceil(NGX * NGY * NGZ / 10) + 2)
readrows = int(math.ceil(NGX * NGY * NGZ / 10))
dat = pandas_read_table(FILE, delim_whitespace=True,
skiprows=skiprows, header=None,
nrows=readrows)
density = dat.iloc[:readrows, :10].values.flatten()
remainder = (NGX * NGY * NGZ) % 10
if remainder > 0:
density = density[:(-10 + remainder)]
else:
print("Reading 3D data...")
density = (f.readline().split()
for i in range(int(math.ceil(NGX * NGY * NGZ / 10))))
density = numpy.fromiter(chain.from_iterable(density), float)
return density
#------------------------------------------------------------------------------
def read_vasp_parchg(FILE, use_pandas=None, quiet=False, spin=False):
"""Generic reading of CHGCAR LOCPOT etc files from VASP
Args:
FILE (str): Path to parchg file
use_pandas (bool): Use Pandas library for faster file reading. If set
to None, Pandas will be used when available.
spin(bool): is the data spin polarised?
Returns:
density (array), NGX (int), NGY (int), NGZ (int), lattice (array)
where density is a 1-D flattened array of density data with original
dimensions NGX x NGY x NGZ and lattice is the 3x3 unit-cell matrix.
"""
# Get Header information by reading a line at a time
print("Reading header information...")
with open(FILE, "r") as f:
_ = f.readline()
scale_factor = float(f.readline())
lattice = np.zeros(shape=(3,3))
for row in range(3):
lattice[row] = [float(x) for x in f.readline().split()]
lattice = lattice * scale_factor
num_species = len(f.readline().split())
num_type = [int(x) for x in f.readline().split()]
num_atoms = sum(num_type)
coord_type = f.readline().strip()
coordinates = numpy.zeros(shape=(num_atoms, 3))
for atom_i in range(num_atoms):
coordinates[atom_i] = [float(x) for x in f.readline().split()]
# Skip blank line
_ = f.readline()
NGX, NGY, NGZ = [int(x) for x in f.readline().split()]
if not spin:
density = _read_partial_density(FILE, use_pandas, num_atoms, NGX, NGY, NGZ)
else:
densities = []
densities.append(_read_partial_density(FILE, use_pandas, num_atoms, NGX, NGY, NGZ
, spin=0))
densities.append(_read_partial_density(FILE, use_pandas, num_atoms, NGX, NGY, NGZ
, spin=1))
alpha = densities[0] + densities[1]
beta = densities[0] - densities[1]
density = [alpha, beta]
_print_boom(quiet=quiet)
return density, NGX, NGY, NGZ, lattice
def read_vasp_density_classic(FILE):
"""Reimplementation of the legacy 3D data importer
This is still quite a bit slower than the new ``read_vasp_density`` but it
makes less assumptions about where newlines will appear in the file. It
also prints the progress reading through the file; this definitely makes it
slower but might _feel_ faster!
"""
with open(FILE, "r") as f:
lines = f.readlines()
return _read_vasp_density_fromlines(lines)
def _read_vasp_density_fromlines(lines):
"""Generic reading of CHGCAR LOCPOT etc files from VASP"""
i, j, k = 0, 0, 0
NGX, NGY, NGZ = 0, 0, 0
lattice = np.zeros(shape=(3,3))
upper_limit, num_species, scale_factor = 0, 0, 0
num_atoms = 1 # First test needs to fail until headers have been read
Potential, Coordinates = np.zeros(1), np.zeros(1)
for line in lines:
inp = line.split()
if inp == []:
continue
else:
i += 1
if i > (num_atoms + 9) and i < (num_atoms + 10 + upper_limit):
for m, val in enumerate(inp):
Potential[k + m] = val
k = k + 5
if math.fmod(k, 100000) == 0:
print("Reading potential at point", k)
elif i == 2:
scale_factor = float(inp[0])
elif i >= 3 and i < 6:
lattice[i-3,:]=inp[:]
elif i == 6:
num_species = len(inp)
species = inp
elif i == 7:
num_type = inp
num_atoms = sum(int(x) for x in num_type)
elif i == 8:
coord_type = inp
Coordinates = numpy.zeros(shape=(num_atoms,3))
elif i >= 9 and i <= num_atoms + 8:
Coordinates[i-9,0] = float(inp[0])
Coordinates[i-9,1] = float(inp[1])
Coordinates[i-9,2] = float(inp[2])
elif i == num_atoms + 9:
NGX = int(inp[0])
NGY = int(inp[1])
NGZ = int(inp[2])
Potential = numpy.zeros(shape=(NGX * NGY * NGZ))
# Read in the potential data
upper_limit = (int(NGX * NGY * NGZ / 5) +
np.mod(NGX * NGY * NGZ, 5))
_print_boom()
print("Average of the potential = ", numpy.average(Potential))
lattice = lattice * scale_factor
return Potential, NGX, NGY, NGZ, lattice
#------------------------------------------------------------------------------
def density_2_grid(Density, nx, ny, nz, Charge=False, Volume=1):
"""Convert the Potential list to a grid for ease of manipulation
Args:
        Density: Array of the output from a VASP calculation (charge/potential)
nx,y,z : Number of mesh points in x/y/z
Charge : Boolean, is it charge or potential (charge needs to be
normalised by vol)
        Volume : the cell volume, only required for normalising the charge.
Returns:
Potential_grid: the (normalised) quantity on a mesh
total_electrons : the number of electrons in the system
"""
l = 0
Potential_grid = np.zeros(shape=(nx,ny,nz))
total_electrons = 0
is_CHGCAR = True
for k in range(nz):
for j in range(ny):
for i in range(nx):
Potential_grid[i,j,k] = Density[l] / Volume
if Charge == True:
# Convert the charge density to a number of electrons
point_volume = Volume / (nx*ny*nz)
Potential_grid[i,j,k] = Potential_grid[i,j,k]*point_volume
total_electrons = total_electrons + Density[l]
l = l + 1
if Charge == True:
print("Total electrons: ", total_electrons / (nx * ny * nz))
total_electrons = total_electrons / (nx * ny * nz)
return Potential_grid, total_electrons
#------------------------------------------------------------------------------
def density_2_grid_gulp(Density, nx, ny, nz):
"""Convert the Potential list to a grid for ease of manipulation
Args:
        Density: Array of the output from a GULP calculation (charge/potential)
nx,y,z : Number of mesh points in x/y/z
Returns:
Potential_grid: the (normalised) quantity on a mesh
"""
l = 0
Potential_grid = np.zeros(shape=(nx,ny,nz))
total_electrons = 0
is_CHGCAR = True
for k in range(nx):
for j in range(ny):
for i in range(nz):
Potential_grid[k,j,i] = Density[l]
l = l + 1
return Potential_grid
#------------------------------------------------------------------------------
def read_gulp_potential(gulpfile='gulp.out'):
"""Generic reading of GULP output
Args:
gulpfile (str): Path to gulp output file
Returns:
potential (array), NGX (int), NGY (int), NGZ (int), lattice (array)
where density is a 1-D flattened array of density data with original
dimensions NGX x NGY x NGZ and lattice is the 3x3 unit-cell matrix.
"""
potential = []
try:
file_handle=open(gulpfile)
except IOError:
print("File not found or path is incorrect")
lines = file_handle.readlines()
for n, line in enumerate(lines):
if line.rfind('Cartesian lattice vectors') > -1:
lattice = np.zeros(shape=(3, 3))
for r in range(3):
lattice[r] = lines[n + 2 + r].split()
break
for n, line in enumerate(lines):
if line.rfind('Electrostatic potential on a grid') > -1:
NGX = int(lines[n + 3].split()[3])
NGY = int(lines[n + 3].split()[5])
NGZ = int(lines[n + 3].split()[7])
break
for n, line in enumerate(lines):
if line.rfind('Electrostatic potential on a grid') > -1:
for k in reversed(range(9, NGX*NGY*NGZ + 9)):
potential.append(float(lines[n + k].split()[3]))
return np.asarray(potential), NGX, NGY, NGZ, lattice
#------------------------------------------------------------------------------
def GCD(a,b):
""" The Euclidean Algorithm """
a = abs(a)
b = abs(b)
while a:
a, b = (b % a), a
return b
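# Worked example: GCD(8, 24) iterates (a, b) = (8, 24) -> (0, 8), so 8 is returned.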
#------------------------------------------------------------------------------
def GCD_List(list):
""" Finds the GCD of numbers in a list.
Input: List of numbers you want to find the GCD of
E.g. [8, 24, 12]
Returns: GCD of all numbers
"""
return reduce(GCD, list)
#------------------------------------------------------------------------------
def inverse_participation_ratio(density):
""" Calculate the IPR, which is Psi**4 or Rho**2
Input: density, a 1-D flattened grid of the electron density for the state
this is calculated from the PARCHG in VASP
Output: ipr, float
"""
    sq = sum(i**2 for i in density)
    fr = sum(i**4 for i in density)
    # IPR taken here as sum(rho**4) / (sum(rho**2))**2; larger values indicate a
    # more localised state.
    return fr / sq**2
|
py | 1a30c42733997c4dd61bb8f8ece8382751675f09 | import sys
sys.path.append("../../")
def press(btn):
if btn == "SUB":
app.showSubWindow("Sub")
app.hide()
if btn in ["POPUP2", "POPUP"]:
app.infoBox("INFO", "INFO")
if btn == "MAIN":
app.show()
app.hideSubWindow("Sub")
def closer(btn=None):
print("aaa")
from appJar import gui
with gui("Main Window", startWindow="Sub") as app:
#with gui("Main Window") as app:
app.label("title", "Main Window")
app.button("POPUP", press)
with app.subWindow("Sub"):
app.label("sub", "SubWindow")
app.button("POPUP2", press)
app.button("MAIN", press)
app.setStopFunction(closer)
# app.hide()
# app.showSubWindow("Sub")
|
py | 1a30c46c94fb35b6bee3ab3b31e060ffca5f66d9 | from __future__ import print_function, division
import os
import re
import datetime
import sys
from os.path import join, isdir, isfile, dirname, abspath
import pandas as pd
import yaml
import psycopg2 as db
from nilmtk.measurement import measurement_columns
from nilmtk.measurement import LEVEL_NAMES
from nilmtk.datastore import Key
from nilm_metadata import convert_yaml_to_hdf5
from nilmtk.utils import get_module_directory
import shutil
import tempfile
"""
MANUAL:
dataport is a large dataset hosted in a remote SQL database. This
file provides a function to download the dataset and save it to disk
as NILMTK-DF. Since downloading the entire dataset will likely take >
24 hours, this function provides some options to allow you to download
only a subset of the data.
'''''''''''''''' Previous Version '''''''''''''''''''''
For example, to only load house 26 for April 2014:
from nilmtk.dataset_converters.dataport.download_dataport
import download_dataport
download_dataport(
'username',
'password',
'/path/output_filename.h5',
periods_to_load={26: ('2014-04-01', '2014-05-01')}
)
'''''''''''''''' Previous Version '''''''''''''''''''''
'''''''''''''''' New Version '''''''''''''''''''''
from nilmtk.dataset_converters.dataport.download_dataport
import download_dataport,
_dataport_dataframe_to_hdf,
view_database_tables,
view_buildings,
view_data_window
# see all available tables in the dataport database.
view_database_tables(
'username',
'password',
'database_schema' # university or commercial
)
# show the list of all available buildings
view_buildings(
'username',
'password',
'database_schema', # university or commercial
'table_name' # for example 'electricity_egauge_15min', 'electricity_egauge_hours'
)
# view data collection window of selected buildings
view_data_window(
'username',
'password',
'database_schema', # university or commercial
'table_name', # for example 'electricity_egauge_15min','electricity_egauge_hours'
[18,26,43,44] # data collection window of building 18,26,43 and 44 respectively
)
# download the dataset.
For example, loading electricity_egauge_hours from 2018-11-17 to
2019-12-17 of building 26
download_dataport(
'username',
'password',
'/path/output_filename.h5',
'university',
'electricity_egauge_hours',
periods_to_load={ 26: ('2018-11-17', '2019-12-17')})
'''''''''''''''' New Version '''''''''''''''''''''
REQUIREMENTS:
On Ubuntu:
* sudo apt-get install libpq-dev
* sudo pip install psycopg2
TODO:
* intelligently handle queries that fail due to network
* integrate 'grid' (use - gen) and 'gen'
"""
feed_mapping = {
'use': {},
'air1': {'type': 'air conditioner'},
'air2': {'type': 'air conditioner'},
'air3': {'type': 'air conditioner'},
'airwindowunit1': {'type': 'air conditioner'},
'aquarium1': {'type': 'appliance'},
'bathroom1': {'type': 'sockets', 'room': 'bathroom'},
'bathroom2': {'type': 'sockets', 'room': 'bathroom'},
'bedroom1': {'type': 'sockets', 'room': 'bedroom'},
'bedroom2': {'type': 'sockets', 'room': 'bedroom'},
'bedroom3': {'type': 'sockets', 'room': 'bedroom'},
'bedroom4': {'type': 'sockets', 'room': 'bedroom'},
'bedroom5': {'type': 'sockets', 'room': 'bedroom'},
'car1': {'type': 'electric vehicle'},
'clotheswasher1': {'type': 'washing machine'},
'clotheswasher_dryg1': {'type': 'washer dryer'},
'diningroom1': {'type': 'sockets', 'room': 'dining room'},
'diningroom2': {'type': 'sockets', 'room': 'dining room'},
'dishwasher1': {'type': 'dish washer'},
'disposal1': {'type': 'waste disposal unit'},
'drye1': {'type': 'spin dryer'},
'dryg1': {'type': 'spin dryer'},
'freezer1': {'type': 'freezer'},
'furnace1': {'type': 'electric furnace'},
'furnace2': {'type': 'electric furnace'},
'garage1': {'type': 'sockets', 'room': 'dining room'},
'garage2': {'type': 'sockets', 'room': 'dining room'},
'gen': {},
'grid': {},
'heater1': {'type': 'electric space heater'},
'housefan1': {'type': 'electric space heater'},
'icemaker1': {'type': 'appliance'},
'jacuzzi1': {'type': 'electric hot tub heater'},
'kitchen1': {'type': 'sockets', 'room': 'kitchen'},
'kitchen2': {'type': 'sockets', 'room': 'kitchen'},
'kitchenapp1': {'type': 'sockets', 'room': 'kitchen'},
'kitchenapp2': {'type': 'sockets', 'room': 'kitchen'},
'lights_plugs1': {'type': 'light'},
'lights_plugs2': {'type': 'light'},
'lights_plugs3': {'type': 'light'},
'lights_plugs4': {'type': 'light'},
'lights_plugs5': {'type': 'light'},
'lights_plugs6': {'type': 'light'},
'livingroom1': {'type': 'sockets', 'room': 'living room'},
'livingroom2': {'type': 'sockets', 'room': 'living room'},
'microwave1': {'type': 'microwave'},
'office1': {'type': 'sockets', 'room': 'office'},
'outsidelights_plugs1': {'type': 'sockets', 'room': 'outside'},
'outsidelights_plugs2': {'type': 'sockets', 'room': 'outside'},
'oven1': {'type': 'oven'},
'oven2': {'type': 'oven'},
'pool1': {'type': 'electric swimming pool heater'},
'pool2': {'type': 'electric swimming pool heater'},
'poollight1': {'type': 'light'},
'poolpump1': {'type': 'electric swimming pool heater'},
'pump1': {'type': 'appliance'},
'range1': {'type': 'stove'},
'refrigerator1': {'type': 'fridge'},
'refrigerator2': {'type': 'fridge'},
'security1': {'type': 'security alarm'},
'shed1': {'type': 'sockets', 'room': 'shed'},
'sprinkler1': {'type': 'appliance'},
'unknown1': {'type': 'unknown'},
'unknown2': {'type': 'unknown'},
'unknown3': {'type': 'unknown'},
'unknown4': {'type': 'unknown'},
'utilityroom1': {'type': 'sockets', 'room': 'utility room'},
'venthood1': {'type': 'appliance'},
'waterheater1': {'type': 'electric water heating appliance'},
'waterheater2': {'type': 'electric water heating appliance'},
'winecooler1': {'type': 'appliance'},
}
feed_ignore = ['gen', 'grid']
def database_assert(database_table):
assert (
database_table == 'electricity_egauge_15min' or
database_table == 'electricity_egauge_hours' or
database_table == 'electricity_egauge_minutes' or
database_table == 'electricity_egauge_seconds'
), "Table not compatible with NILMTK"
def view_database_tables(
database_username,
database_password,
database_schema
):
database_host = 'dataport.pecanstreet.org'
database_port = '5434'
database_name = 'postgres'
try:
conn = db.connect('host=' + database_host +
' port=' + database_port +
' dbname=' + database_name +
' user=' + database_username +
' password=' + database_password)
except:
print('Could not connect to remote database')
raise
# Loading university schemas
sql_query = ("SELECT table_name" +
" FROM information_schema.views" +
" WHERE table_schema ='" + database_schema + "'" +
" ORDER BY table_name")
database_tables = pd.read_sql(sql_query, conn)['table_name'].tolist()
df = pd.DataFrame({database_schema: database_tables})
print(df)
conn.close()
def view_buildings(
database_username,
database_password,
database_schema,
database_table
):
database_assert(database_table)
database_host = 'dataport.pecanstreet.org'
database_port = '5434'
database_name = 'postgres'
# try to connect to database
try:
conn = db.connect('host=' + database_host +
' port=' + database_port +
' dbname=' + database_name +
' user=' + database_username +
' password=' + database_password)
except:
print('Could not connect to remote database')
raise
# select all buildings for the database_table
sql_query = ('SELECT DISTINCT dataid' +
' FROM university.metadata' +
                 ' WHERE ' + database_table +
' ORDER BY dataid')
buildings_in_table = pd.read_sql(sql_query, conn)['dataid'].tolist()
print(buildings_in_table)
conn.close()
def view_data_window(
database_username,
database_password,
database_schema,
database_table,
building_no=None):
database_assert(database_table)
database_host = 'dataport.pecanstreet.org'
database_port = '5434'
database_name = 'postgres'
# try to connect to database
try:
conn = db.connect('host=' + database_host +
' port=' + database_port +
' dbname=' + database_name +
' user=' + database_username +
' password=' + database_password)
except:
print('Could not connect to remote database')
raise
# select all buildings for the database_table
sql_query = ('SELECT DISTINCT dataid' +
' FROM university.metadata' +
' WHERE' + database_table +
' ORDER BY dataid')
    if not building_no:
print(" Please provide the list of building numbers ")
else:
for each_building in building_no:
sql_query = ('SELECT MIN(egauge_min_time) AS minlocalminute,' +
' MAX(egauge_max_time) AS maxlocalminute' +
' FROM university.metadata' +
' WHERE dataid=' + str(each_building))
timestamps = pd.read_sql(sql_query, conn)
first_timestamp_in_table = timestamps['minlocalminute'][0]
last_timestamp_in_table = timestamps['maxlocalminute'][0]
print(str(each_building),
"\t\t", first_timestamp_in_table,
"\t\t", last_timestamp_in_table)
print("Done loading all the buildings!!")
conn.close()
def download_dataport(database_username,
database_password, hdf_filename,
database_schema='university',
user_selected_table='electricity_egauge_minutes',
periods_to_load=None):
"""
Downloads data from dataport database into an HDF5 file.
Parameters
----------
hdf_filename : str
Output HDF filename. If file exists already then will be deleted.
database_username, database_password, database_schema,user_selected_table, hdf_filename : str
periods_to_load : dict of tuples, optional
Key of dict is the building number (int).
Values are (<start date>, <end date>)
e.g. ("2013-04-01", None) or ("2013-04-01", "2013-08-01")
defaults to all buildings and all date ranges
"""
database_assert(user_selected_table)
# dataport database settings
database_host = 'dataport.pecanstreet.org'
database_port = '5434'
database_name = 'postgres'
# try to connect to database
try:
conn = db.connect('host=' + database_host +
' port=' + database_port +
' dbname=' + database_name +
' user=' + database_username +
' password=' + database_password)
except:
print('Could not connect to remote database')
raise
# map user_selected_table and timestamp column
timestamp_map = {"electricity_egauge_15min": "local_15min",
"electricity_egauge_hours": "localhour",
"electricity_egauge_minutes": "localminute",
"electricity_egauge_seconds": "localminute"}
# set up a new HDF5 datastore (overwrites existing store)
store = pd.HDFStore(hdf_filename, 'w', complevel=9, complib='zlib')
# Create a temporary metadata dir, remove existing building
# yaml files in module dir (if any)
original_metadata_dir = join(get_module_directory(),
'dataset_converters',
'dataport',
'metadata')
tmp_dir = tempfile.mkdtemp()
metadata_dir = join(tmp_dir, 'metadata')
shutil.copytree(original_metadata_dir, metadata_dir)
print("Using temporary dir for metadata:", metadata_dir)
for f in os.listdir(metadata_dir):
if re.search('^building', f):
os.remove(join(metadata_dir, f))
"""
TODO:
The section below can be altered or removed,
since the restructured Dataport
now has only one electricity_egauge_minutes table.
"""
# get tables in database schema
sql_query = ("SELECT table_name" +
" FROM information_schema.views" +
" WHERE table_schema ='" + database_schema + "'" +
" ORDER BY table_name")
database_tables = pd.read_sql(sql_query, conn)['table_name'].tolist()
database_tables = [t for t in database_tables if user_selected_table in t]
# if user has specified buildings
if periods_to_load:
buildings_to_load = list(periods_to_load.keys())
else:
# get buildings present in all tables
sql_query = ''
for table in database_tables:
sql_query = (sql_query + '(SELECT DISTINCT dataid' +
' FROM "' + database_schema + '".' + table +
') UNION ')
sql_query = sql_query[:-7]
sql_query = (sql_query + ' ORDER BY dataid')
buildings_to_load = pd.read_sql(sql_query, conn)['dataid'].tolist()
# for each user specified building or all buildings in database
for building_id in buildings_to_load:
print("Loading building {:d} @ {}"
.format(building_id, datetime.datetime.now()))
sys.stdout.flush()
# create new list of chunks for concatenating later
dataframe_list = []
# for each table of 1 month data
for database_table in database_tables:
print(" Loading table {:s}".format(database_table))
sys.stdout.flush()
# get buildings present in electricity_egauge_minutes table
sql_query = ('SELECT DISTINCT dataid' +
' FROM university.metadata' +
' WHERE egauge_min_time IS NOT NULL' +
' ORDER BY dataid')
buildings_in_table = pd.read_sql(sql_query,
conn)['dataid'].tolist()
if building_id in buildings_in_table:
# get first and last timestamps for this
# house in electricity_egauge_minutes table
sql_query = ('SELECT MIN(egauge_min_time) AS minlocalminute,' +
' MAX(egauge_max_time) AS maxlocalminute' +
' FROM university.metadata' +
' WHERE dataid=' + str(building_id))
range = pd.read_sql(sql_query, conn)
first_timestamp_in_table = range['minlocalminute'][0]
last_timestamp_in_table = range['maxlocalminute'][0]
# get requested start and end and localize them
requested_start = None
requested_end = None
database_timezone = 'US/Central'
if periods_to_load:
if periods_to_load[building_id][0]:
requested_start = pd.Timestamp(periods_to_load[building_id][0])
requested_start = requested_start.tz_localize(database_timezone)
if periods_to_load[building_id][1]:
requested_end = pd.Timestamp(periods_to_load[building_id][1])
requested_end = requested_end.tz_localize(database_timezone)
# check user start is not after end
if requested_start > requested_end:
print('requested end is before requested start')
sys.stdout.flush()
else:
# clip data to smallest range
if requested_start:
start = max(requested_start, first_timestamp_in_table)
else:
start = first_timestamp_in_table
if requested_end:
end = min(requested_end, last_timestamp_in_table)
else:
end = last_timestamp_in_table
# download data in chunks
chunk_start = start
                    chunk_size = datetime.timedelta(10)  # download in 10-day chunks
while chunk_start < end:
chunk_end = chunk_start + chunk_size
if chunk_end > end:
chunk_end = end
# subtract 1 second so end is exclusive
chunk_end = chunk_end - datetime.timedelta(0, 1)
# query power data for all channels
format = '%Y-%m-%d %H:%M:%S'
sql_query = ('SELECT *' +
' FROM "' + database_schema + '".' + user_selected_table +
' WHERE dataid=' + str(building_id) +
'and "' + timestamp_map[user_selected_table] + '" between ' +
"'" + chunk_start.strftime(format) + "'" +
" and " +
"'" + chunk_end.strftime(format) +
"' ORDER BY "+timestamp_map[user_selected_table]
)
chunk_dataframe = pd.read_sql(sql_query, conn)
# nilmtk requires building indices to start at 1
nilmtk_building_id = buildings_to_load.index(building_id) + 1
# convert to nilmtk-df and save to disk
nilmtk_dataframe = _dataport_dataframe_to_hdf(
chunk_dataframe, store,
nilmtk_building_id,
building_id,
timestamp_map[user_selected_table],
metadata_dir
)
# print progress
print(' ' + str(chunk_start) + ' -> ' +
str(chunk_end) + ': ' +
str(len(chunk_dataframe.index)) + ' rows')
sys.stdout.flush()
# append all chunks into list for csv writing
# dataframe_list.append(chunk_dataframe)
# move on to next chunk
chunk_start = chunk_start + chunk_size
# saves all chunks in list to csv
# if len(dataframe_list) > 0:
# dataframe_concat = pd.concat(dataframe_list)
# dataframe_concat.to_csv(output_directory + str(building_id) + '.csv')
store.close()
conn.close()
# write yaml to hdf5
# dataset.yaml and meter_devices.yaml are static, building<x>.yaml are dynamic
convert_yaml_to_hdf5(metadata_dir, hdf_filename)
# remote the temporary dir when finished
shutil.rmtree(tmp_dir)
def _dataport_dataframe_to_hdf(dataport_dataframe,
store,
nilmtk_building_id,
dataport_building_id,
timestamp_name,
metadata_dir):
local_dataframe = dataport_dataframe.copy()
# remove timezone information to avoid append errors
local_dataframe[timestamp_name] = pd.DatetimeIndex([i.replace(tzinfo=None)
for i in local_dataframe[timestamp_name]])
# set timestamp as frame index
local_dataframe = local_dataframe.set_index(timestamp_name)
# set timezone
local_dataframe = local_dataframe.tz_localize('US/Central')
# remove timestamp column from dataframe
feeds_dataframe = local_dataframe.drop('dataid', axis=1)
# Column names for dataframe
column_names = [('power', 'active')]
# convert from kW to W
feeds_dataframe = feeds_dataframe.mul(1000)
# building metadata
building_metadata = {}
building_metadata['instance'] = nilmtk_building_id
building_metadata['original_name'] = int(dataport_building_id) # use python int
building_metadata['elec_meters'] = {}
building_metadata['appliances'] = []
# initialise dict of instances of each appliance type
instance_counter = {}
meter_id = 1
for column in feeds_dataframe.columns:
if feeds_dataframe[column].notnull().sum() > 0 and not column in feed_ignore:
# convert timeseries into dataframe
feed_dataframe = pd.DataFrame(feeds_dataframe[column])
# set column names
feed_dataframe.columns = pd.MultiIndex.from_tuples(column_names)
# Modify the column labels to reflect the power measurements recorded.
feed_dataframe.columns.set_names(LEVEL_NAMES, inplace=True)
key = Key(building=nilmtk_building_id, meter=meter_id)
# store dataframe
store.put(str(key), feed_dataframe, format='table', append=True)
store.flush()
# elec_meter metadata
if column == 'use':
meter_metadata = {'device_model': 'eGauge',
'site_meter': True}
else:
meter_metadata = {'device_model': 'eGauge',
'submeter_of': 0}
building_metadata['elec_meters'][meter_id] = meter_metadata
# appliance metadata
if column != 'use':
# original name and meter id
appliance_metadata = {'original_name': column,
'meters': [meter_id]}
# appliance type and room if available
appliance_metadata.update(feed_mapping[column])
# appliance instance number
if instance_counter.get(appliance_metadata['type']) == None:
instance_counter[appliance_metadata['type']] = 0
instance_counter[appliance_metadata['type']] += 1
appliance_metadata['instance'] = instance_counter[appliance_metadata['type']]
building_metadata['appliances'].append(appliance_metadata)
meter_id += 1
# write building yaml to file
building = 'building{:d}'.format(nilmtk_building_id)
yaml_full_filename = join(metadata_dir, building + '.yaml')
with open(yaml_full_filename, 'w') as outfile:
outfile.write(yaml.dump(building_metadata))
return 0
|
py | 1a30c55d62d8a77272434ab0875bbada042fc988 | from unittest import TestCase
from nba_data.data.box_scores import GameBoxScore
class TestBoxScore(TestCase):
def test_instantiation(self):
test_box_score = GameBoxScore(game_id="bae", player_box_scores=[], team_box_scores=[])
self.assertIsNotNone(test_box_score)
self.assertEqual(test_box_score.game_id, "bae")
self.assertEqual(test_box_score.player_box_scores, [])
self.assertEqual(test_box_score.team_box_scores, [])
|
py | 1a30c560e3551736dc863a833eccb11ca158a08e | #!/usr/bin/env python
import urllib
from decimal import Decimal
from getpass import getpass
import click
from stellar_base import exceptions
from stellar_base.address import Address
from stellar_base.builder import Builder
from stellar_base.keypair import Keypair
from config import configs
from validate import validate
@click.command()
@click.argument('target_address')
@click.argument('amount')
@click.option('--network', default='TESTNET', type=click.Choice(['TESTNET', 'PUBLIC']))
@click.option('--source_secret', prompt=True, hide_input=True)
def payment(target_address: str, amount: str, network, source_secret):
config = configs[network]
src_address = Keypair.from_seed(source_secret).address().decode()
builder = Builder(secret=source_secret, horizon_uri=config['HORIZON_URL'], network=network)
builder.append_payment_op(destination=target_address, asset_code='HOT',
asset_issuer=config['ISSUER_HOT'], amount=amount)
builder.sign()
print("############### TX #################")
print('Payment {} HOT from {} to {}'.format(amount, src_address, target_address))
print('Network: {}'.format(network))
print('Sequence: {}'.format(builder.sequence))
print('Hash: {}'.format(builder.hash()))
print("#########################################")
click.confirm('Correct?', abort=True)
print('Submitting...')
builder.submit()
print('success')
return True
if __name__ == '__main__':
payment()
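# Example invocation (placeholder address and amount; assumes this file is saved
# as payment.py):
#   python payment.py GDESTINATIONADDRESSPLACEHOLDER 12.5 --network TESTNET
# The source secret is then requested via a hidden interactive prompt.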
|
py | 1a30c62d73df388e8abad757f3574701663a0b82 | #!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Convience file system related operations."""
import os
import shutil
import sys
import tempfile
import platform
import time
def AtomicWriteFile(data, filename):
"""Write a file atomically.
NOTE: Not atomic on Windows!
Args:
data: String to write to the file.
filename: Filename to write.
"""
filename = os.path.abspath(filename)
handle, temp_file = tempfile.mkstemp(
prefix='atomic_write', suffix='.tmp',
dir=os.path.dirname(filename))
fh = os.fdopen(handle, 'wb')
fh.write(data)
fh.close()
  # Windows can't move into place atomically, delete first.
if sys.platform in ['win32', 'cygwin']:
try:
os.remove(filename)
except OSError:
pass
os.rename(temp_file, filename)
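# Design note: writing to a temporary file in the destination directory and then
# renaming it over the target means readers observe either the old file or the
# complete new file, never a partial write (modulo the Windows pre-delete above,
# which briefly leaves no file at all).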
def WriteFile(data, filename):
"""Write a file in one step.
Args:
data: String to write to the file.
filename: Filename to write.
"""
fh = open(filename, 'wb')
fh.write(data)
fh.close()
def ReadFile(filename):
"""Read a file in one step.
Args:
filename: Filename to read.
Returns:
String containing complete file.
"""
fh = open(filename, 'rb')
data = fh.read()
fh.close()
return data
class ExecutableNotFound(Exception):
pass
def Which(command, paths=None, require_executable=True):
"""Find the absolute path of a command in the current PATH.
Args:
command: Command name to look for.
paths: Optional paths to search.
Returns:
    Absolute path of the command (first one found).
  Raises:
    ExecutableNotFound if the command cannot be located on any of the paths.
"""
if paths is None:
paths = os.environ.get('PATH', '').split(os.pathsep)
exe_suffixes = ['']
if sys.platform == 'win32':
exe_suffixes += ['.exe']
for p in paths:
np = os.path.abspath(os.path.join(p, command))
for suffix in exe_suffixes:
full_path = np + suffix
if (os.path.isfile(full_path) and
(not require_executable or os.access(full_path, os.X_OK))):
return full_path
raise ExecutableNotFound('Unable to find: ' + command)
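# Usage sketch (assumes 'python' is somewhere on PATH; adjust for your system):
#   python_path = Which('python')
#   # e.g. '/usr/bin/python' on Linux, or a '...python.exe' path on Windows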
def MakeDirectoryIfAbsent(path):
"""Create a directory if it doesn't already exist.
Args:
path: Directory to create.
"""
if not os.path.isdir(path):
os.makedirs(path)
def MakeParentDirectoryIfAbsent(path):
"""Creates a directory for the parent if it doesn't already exist.
Args:
path: Path of child where parent directory should be created for.
"""
MakeDirectoryIfAbsent(os.path.dirname(path))
def RemoveDirectoryIfPresent(path):
"""Remove a directory if it exists.
Args:
path: Directory to remove.
"""
# On Windows, attempts to remove read-only files get Error 5. This
# error handler fixes the permissions and retries the removal.
def onerror_readonly(func, path, exc_info):
import stat
if not os.access(path, os.W_OK):
os.chmod(path, stat.S_IWUSR)
func(path)
if os.path.exists(path):
shutil.rmtree(path, onerror=onerror_readonly)
def CopyTree(src, dst):
"""Recursively copy the items in the src directory to the dst directory.
Unlike shutil.copytree, the destination directory and any subdirectories and
files may exist. Existing directories are left untouched, and existing files
are removed and copied from the source using shutil.copy2. It is also not
symlink-aware.
Args:
src: Source. Must be an existing directory.
dst: Destination directory. If it exists, must be a directory. Otherwise it
will be created, along with parent directories.
"""
if not os.path.isdir(dst):
os.makedirs(dst)
for root, dirs, files in os.walk(src):
relroot = os.path.relpath(root, src)
dstroot = os.path.join(dst, relroot)
for d in dirs:
dstdir = os.path.join(dstroot, d)
if not os.path.isdir(dstdir):
os.mkdir(dstdir)
for f in files:
dstfile = os.path.join(dstroot, f)
if os.path.isfile(dstfile):
os.remove(dstfile)
shutil.copy2(os.path.join(root, f), dstfile)
def MoveAndMergeDirTree(src_dir, dest_dir):
"""Moves everything from a source directory to a destination directory.
This is different from shutil's move implementation in that it only operates
on directories, and if the destination directory exists, it will move the
contents into the directory and merge any existing directories.
Args:
src_dir: Source directory which files should be moved from.
dest_dir: Destination directory where files should be moved and merged to.
"""
if not os.path.isdir(src_dir):
raise OSError('MoveAndMergeDirTree can only operate on directories.')
if not os.path.exists(dest_dir):
# Simply move the directory over if destination doesn't exist.
MakeParentDirectoryIfAbsent(dest_dir)
os.rename(src_dir, dest_dir)
else:
# Merge each item if destination directory exists.
for dir_item in os.listdir(src_dir):
source_item = os.path.join(src_dir, dir_item)
destination_item = os.path.join(dest_dir, dir_item)
if os.path.exists(destination_item):
if os.path.isdir(destination_item) and os.path.isdir(source_item):
# Merge the sub-directories together if they are both directories.
MoveAndMergeDirTree(source_item, destination_item)
elif os.path.isfile(destination_item) and os.path.isfile(source_item):
# Overwrite the file if they are both files.
os.unlink(destination_item)
os.rename(source_item, destination_item)
else:
raise OSError('Cannot move directory tree, mismatching types.'
' Source - %s. Destination - %s' %
(source_item, destination_item))
else:
os.rename(source_item, destination_item)
# Remove the directory once all the contents have been moved
os.rmdir(src_dir)
def Retry(op, *args):
# Windows seems to be prone to having commands that delete files or
# directories fail. We currently do not have a complete understanding why,
# and as a workaround we simply retry the command a few times.
# It appears that file locks are hanging around longer than they should. This
# may be a secondary effect of processes hanging around longer than they
# should. This may be because when we kill a browser sel_ldr does not exit
# immediately, etc.
  # Virus checkers can also accidentally prevent files from being deleted, but
# that shouldn't be a problem on the bots.
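  # NOTE: 'platform' here is assumed to be the project's own helper module that
  # provides IsWindows(); the standard-library 'platform' module has no such
  # attribute, so the import at the top must resolve to the local helper.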
if platform.IsWindows():
count = 0
while True:
try:
op(*args)
break
except Exception:
sys.stdout.write('FAILED: %s %s\n' % (op.__name__, repr(args)))
count += 1
if count < 5:
sys.stdout.write('RETRY: %s %s\n' % (op.__name__, repr(args)))
time.sleep(pow(2, count))
else:
# Don't mask the exception.
raise
else:
op(*args)
def MoveDirCleanly(src, dst):
RemoveDir(dst)
MoveDir(src, dst)
def MoveDir(src, dst):
Retry(shutil.move, src, dst)
def RemoveDir(path):
if os.path.exists(path):
Retry(shutil.rmtree, path)
def RemoveFile(path):
if os.path.exists(path):
Retry(os.unlink, path)
|
py | 1a30c632e40550975854418757821a570b2a722c | __author__ = "Thomas Spycher, Philipp Spinnler"
__copyright__ = "Copyright 2013, Zerodine GmbH (zerodine.com) "
__credits__ = ["Thomas Spycher", "Philipp Spinnler"]
__license__ = "Apache-2.0"
__maintainer__ = "Thomas Spycher"
__email__ = "[email protected]"
__status__ = "Development"
from indexcontroller import IndexController
from lookupcontroller import LookupController
from addcontroller import AddController
from reconcontroller import ReconController |
py | 1a30c6b22bfdf091c8fb98c1228153fa497b8362 | class Solution:
def myAtoi(self, string: str) -> int:
string = string.strip()
ans = 0
sign = "+"
index = 0
if not string:
return 0
if string[0] != "+" and string[0] != "-" and string[0].isdigit() is False:
return 0
if string[0] == "-":
sign = "-"
index += 1
elif string[0] == "+":
index += 1
while index < len(string):
            if not string[index].isdigit():
ans = ans if sign == "+" else - ans
if ans < -2 ** 31:
return -2 ** 31
elif ans > 2 ** 31 - 1:
return 2 ** 31 - 1
else:
return ans
ans = 10 * ans + int(string[index])
index += 1
ans = ans if sign == "+" else -ans
if ans < -2 ** 31:
return -2 ** 31
elif ans > 2 ** 31 - 1:
return 2 ** 31 - 1
else:
return ans
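# Example calls (illustrative):
#   Solution().myAtoi("42")              -> 42
#   Solution().myAtoi("   -42")          -> -42
#   Solution().myAtoi("4193 with words") -> 4193
#   Solution().myAtoi("words and 987")   -> 0
#   Solution().myAtoi("-91283472332")    -> -2147483648 (clamped to 32-bit range)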
|
py | 1a30c6bd92450c13df17b6d8d04efe99dd7ce850 | """Command-line tool to find out where a particular chip or board resides.
The ``spalloc-where-is`` command allows you to query boards by coordinate, by
physical location, by chip or by job. In response to a query, a standard set of
information is displayed as shown in the example below::
    $ spalloc-where-is --job-chip 24 14 3
Machine: my-machine
Physical Location: Cabinet 2, Frame 4, Board 7
Board Coordinate: (3, 4, 0)
Machine Chip Coordinates: (38, 51)
Coordinates within board: (2, 3)
Job using board: 24
Coordinates within job: (14, 3)
In this example we ask, 'where is chip (14, 3) in job 24'? We discover that:
* The chip is the machine named 'my-machine' on the board in cabinet 2, frame
4, board 7.
* This board's logical board coordinates are (3, 4, 0). These logical
coordinates may be used to specifically request this board from Spalloc in
the future.
* If 'my-machine' were booted as a single large machine, the chip we queried
would be chip (38, 51). This may be useful for cross-referencing with
diagrams produced by SpiNNer_.
* The chip in question is chip (2, 3) on its board. This may be useful when
  reporting faulty chips or replacing boards.
* The job currently running on the board has ID 24. Obviously in this example
we already knew this but this may be useful when querying by board.
* Finally, we're told that the queried chip has the coordinates (14, 3) in the
machine allocated to job 24. Again, this information may be more useful when
querying by board.
.. _SpiNNer: https://github.com/SpiNNakerManchester/SpiNNer
To query by logical board coordinate::
spalloc-where-is --board MACHINE X Y Z
To query by physical board location::
spalloc-where-is --physical MACHINE CABINET FRAME BOARD
To query by chip coordinate (as if the machine were booted as one large
machine)::
spalloc-where-is --chip MACHINE X Y
To query by chip coordinate of chips allocated to a job::
spalloc-where-is --job-chip JOB_ID X Y
"""
import sys
import argparse
from collections import OrderedDict
from spalloc import config
from spalloc import __version__, ProtocolClient, ProtocolTimeoutError
from spalloc.term import render_definitions
# The acceptable range of server version numbers
VERSION_RANGE_START = (0, 3, 0)
VERSION_RANGE_STOP = (2, 0, 0)
def main(argv=None):
cfg = config.read_config()
parser = argparse.ArgumentParser(
description="Find out the location (physical or logical) "
"of a chip or board.")
parser.add_argument("--version", "-V", action="version",
version=__version__)
control_args = parser.add_mutually_exclusive_group(required=True)
control_args.add_argument("--board", "-b", "--logical", "-l", nargs=4,
metavar=("MACHINE", "X", "Y", "Z"),
help="specify the logical board coordinate")
control_args.add_argument("--physical", "-p", nargs=4,
metavar=("MACHINE", "CABINET", "FRAME", "BOARD"),
help="specify a board's physical location")
control_args.add_argument("--chip", "-c", nargs=3,
metavar=("MACHINE", "X", "Y"),
help="specify a board by chip coordinates (as "
"if the whole machine is being used)")
control_args.add_argument("--job-chip", "-j", nargs=3,
metavar=("JOB_ID", "X", "Y"),
help="specify the chip coordinates of a chip "
"within a job's boards")
server_args = parser.add_argument_group("spalloc server arguments")
server_args.add_argument("--hostname", "-H", default=cfg["hostname"],
help="hostname or IP of the spalloc server "
"(default: %(default)s)")
server_args.add_argument("--port", "-P", default=cfg["port"],
type=int,
help="port number of the spalloc server "
"(default: %(default)s)")
server_args.add_argument("--timeout", default=cfg["timeout"],
type=float, metavar="SECONDS",
help="seconds to wait for a response "
"from the server (default: %(default)s)")
args = parser.parse_args(argv)
# Fail if server not specified
if args.hostname is None:
parser.error("--hostname of spalloc server must be specified")
client = ProtocolClient(args.hostname, args.port)
try:
# Connect to server and ensure compatible version
client.connect()
version = tuple(
map(int, client.version(timeout=args.timeout).split(".")))
if not (VERSION_RANGE_START <= version < VERSION_RANGE_STOP):
sys.stderr.write("Incompatible server version ({}).\n".format(
".".join(map(str, version))))
return 2
# Work out what the user asked for
try:
show_board_chip = False
if args.board:
machine, x, y, z = args.board
where_is_kwargs = {
"machine": machine,
"x": int(x),
"y": int(y),
"z": int(z),
}
elif args.physical:
machine, c, f, b = args.physical
where_is_kwargs = {
"machine": machine,
"cabinet": int(c),
"frame": int(f),
"board": int(b),
}
elif args.chip:
machine, x, y = args.chip
where_is_kwargs = {
"machine": machine,
"chip_x": int(x),
"chip_y": int(y),
}
show_board_chip = True
elif args.job_chip:
job_id, x, y = args.job_chip
where_is_kwargs = {
"job_id": int(job_id),
"chip_x": int(x),
"chip_y": int(y),
}
show_board_chip = True
except ValueError as e:
parser.error("Error: {}".format(e))
# Ask the server
location = client.where_is(**where_is_kwargs)
if location is None:
sys.stderr.write("No boards at the specified location.\n")
return 4
else:
out = OrderedDict()
out["Machine"] = location["machine"]
out["Physical location"] = "Cabinet {}, Frame {}, Board {}".format(
*location["physical"])
out["Board coordinate"] = tuple(location["logical"])
out["Machine chip coordinates"] = tuple(location["chip"])
if show_board_chip:
out["Coordinates within board"] = tuple(location["board_chip"])
out["Job using board"] = location["job_id"]
if location["job_id"]:
out["Coordinates within job"] = tuple(location["job_chip"])
print(render_definitions(out))
return 0
except (IOError, OSError, ProtocolTimeoutError) as e:
sys.stderr.write("Error communicating with server: {}\n".format(e))
return 1
finally:
client.close()
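# Illustrative sketch, not part of the original tool: the same lookup can be
# done programmatically with ProtocolClient. The hostname/port arguments are
# placeholders, and no server version check is performed here.
def _example_where_is(hostname, port):
    client = ProtocolClient(hostname, port)
    try:
        client.connect()
        # Query by logical board coordinate, mirroring the docstring example.
        return client.where_is(machine="my-machine", x=3, y=4, z=0)
    finally:
        client.close()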
if __name__ == "__main__": # pragma: no cover
sys.exit(main())
|
py | 1a30c7911b5f3552849125ccf446ef0d8bd4b9b1 | # [1081] 不同字符的最小子序列
# https://leetcode-cn.com/problems/smallest-subsequence-of-distinct-characters/description/
# * algorithms
# * Medium (53.88%)
# * Total Accepted: 6.7K
# * Total Submissions: 12.5K
# * Testcase Example: '"bcabc"'
# Return the smallest subsequence of text, in lexicographical order, that
# contains all of the distinct characters of text exactly once.
#
# Example 1:
# Input: "cdadabcc"
# Output: "adbc"
# Example 2:
# Input: "abcd"
# Output: "abcd"
# Example 3:
# Input: "ecbacba"
# Output: "eacb"
# Example 4:
# Input: "leetcode"
# Output: "letcod"
#
# Constraints:
# 1 <= text.length <= 1000
# text consists of lowercase English letters
#
# Note: this question is the same as 316 https://leetcode-cn.com/problems/remove-duplicate-letters/
import collections
class Solution(object):
def smallestSubsequence(self, text):
seen = set()
stack = []
        # Track how many occurrences of each letter remain (equivalently, one could record each character's rightmost index for this check)
remain_counter = collections.Counter(text)
for c in text:
            # Each letter may appear only once; characters already in the stack are skipped, which keeps the stack lexicographically increasing
if c not in seen:
                # The stack top is larger than c and occurs again later, so it is safe to pop it
while stack and stack[-1] > c and remain_counter[stack[-1]] > 0:
seen.discard(stack.pop())
stack.append(c)
seen.add(c)
remain_counter[c] -= 1
return "".join(stack)
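if __name__ == "__main__":
    # Illustrative sanity checks using the examples from the problem statement
    # above; not part of the original submission.
    s = Solution()
    assert s.smallestSubsequence("cdadabcc") == "adbc"
    assert s.smallestSubsequence("abcd") == "abcd"
    assert s.smallestSubsequence("ecbacba") == "eacb"
    assert s.smallestSubsequence("leetcode") == "letcod"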
|
py | 1a30c85355b930c93afd047e14ad7652370579ee | # coding: utf-8
#
# Copyright 2021 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for jobs.transforms.config_validation."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.domain import platform_parameter_domain as parameter_domain
from core.platform import models
from jobs import job_test_utils
from jobs.transforms import config_validation
from jobs.types import base_validation_errors
import apache_beam as beam
(base_models, config_models) = models.Registry.import_models(
[models.NAMES.base_model, models.NAMES.config])
class ValidateConfigPropertySnapshotMetadataModelTests(
job_test_utils.PipelinedTestBase):
def test_validate_change_domain_implemented(self):
invalid_commit_cmd_model = (
config_models.ConfigPropertySnapshotMetadataModel(
id='model_id-1',
created_on=self.YEAR_AGO,
last_updated=self.NOW,
committer_id='committer_id',
commit_type='create',
commit_cmds=[{
'cmd': base_models.VersionedModel.CMD_DELETE_COMMIT}])
)
output = (
self.pipeline
| beam.Create([invalid_commit_cmd_model])
| beam.ParDo(
config_validation.ValidateConfigPropertySnapshotMetadataModel())
)
self.assert_pcoll_equal(output, [])
def test_config_property_change_object_with_missing_cmd(self):
invalid_commit_cmd_model = (
config_models.ConfigPropertySnapshotMetadataModel(
id='model_id-1',
created_on=self.YEAR_AGO,
last_updated=self.NOW,
committer_id='committer_id',
commit_type='create',
commit_cmds=[{'invalid': 'data'}])
)
output = (
self.pipeline
| beam.Create([invalid_commit_cmd_model])
| beam.ParDo(
config_validation.ValidateConfigPropertySnapshotMetadataModel())
)
self.assert_pcoll_equal(output, [
base_validation_errors.CommitCmdsValidateError(
invalid_commit_cmd_model,
{'invalid': 'data'},
'Missing cmd key in change dict')
])
def test_config_property_change_object_with_invalid_cmd(self):
invalid_commit_cmd_model = (
config_models.ConfigPropertySnapshotMetadataModel(
id='model_id-1',
created_on=self.YEAR_AGO,
last_updated=self.NOW,
committer_id='committer_id',
commit_type='create',
commit_cmds=[{'cmd': 'invalid'}])
)
output = (
self.pipeline
| beam.Create([invalid_commit_cmd_model])
| beam.ParDo(
config_validation.ValidateConfigPropertySnapshotMetadataModel())
)
self.assert_pcoll_equal(output, [
base_validation_errors.CommitCmdsValidateError(
invalid_commit_cmd_model,
{'cmd': 'invalid'},
'Command invalid is not allowed')
])
def test_config_property_change_object_with_missing_attribute_in_cmd(self):
invalid_commit_cmd_model = (
config_models.ConfigPropertySnapshotMetadataModel(
id='model_id-1',
created_on=self.YEAR_AGO,
last_updated=self.NOW,
committer_id='committer_id',
commit_type='create',
commit_cmds=[{'cmd': 'change_property_value'}])
)
output = (
self.pipeline
| beam.Create([invalid_commit_cmd_model])
| beam.ParDo(
config_validation.ValidateConfigPropertySnapshotMetadataModel())
)
self.assert_pcoll_equal(output, [
base_validation_errors.CommitCmdsValidateError(
invalid_commit_cmd_model,
{'cmd': 'change_property_value'},
'The following required attributes are missing: '
'new_value')
])
def test_config_property_change_object_with_extra_attribute_in_cmd(self):
commit_dict = {
'cmd': 'change_property_value',
'new_value': 'new_value',
'invalid': 'invalid'
}
invalid_commit_cmd_model = (
config_models.ConfigPropertySnapshotMetadataModel(
id='model_id-1',
created_on=self.YEAR_AGO,
last_updated=self.NOW,
committer_id='committer_id',
commit_type='create',
commit_cmds=[commit_dict])
)
output = (
self.pipeline
| beam.Create([invalid_commit_cmd_model])
| beam.ParDo(
config_validation.ValidateConfigPropertySnapshotMetadataModel())
)
self.assert_pcoll_equal(output, [
base_validation_errors.CommitCmdsValidateError(
invalid_commit_cmd_model,
commit_dict,
'The following extra attributes are present: invalid')
])
class ValidatePlatformParameterSnapshotMetadataModelTests(
job_test_utils.PipelinedTestBase):
CMD_EDIT_RULES = parameter_domain.PlatformParameterChange.CMD_EDIT_RULES
def test_validate_change_domain_implemented(self):
invalid_commit_cmd_model = (
config_models.PlatformParameterSnapshotMetadataModel(
id='model_id-1',
created_on=self.YEAR_AGO,
last_updated=self.NOW,
committer_id='committer_id',
commit_type='create',
commit_cmds=[{
'cmd': base_models.VersionedModel.CMD_DELETE_COMMIT}])
)
output = (
self.pipeline
| beam.Create([invalid_commit_cmd_model])
| beam.ParDo(
config_validation
.ValidatePlatformParameterSnapshotMetadataModel())
)
self.assert_pcoll_equal(output, [])
def test_param_change_object_with_missing_cmd_raises_exception(self):
invalid_commit_cmd_model = (
config_models.PlatformParameterSnapshotMetadataModel(
id='model_id-1',
created_on=self.YEAR_AGO,
last_updated=self.NOW,
committer_id='committer_id',
commit_type='create',
commit_cmds=[{'invalid': 'data'}])
)
output = (
self.pipeline
| beam.Create([invalid_commit_cmd_model])
| beam.ParDo(
config_validation
.ValidatePlatformParameterSnapshotMetadataModel())
)
self.assert_pcoll_equal(output, [
base_validation_errors.CommitCmdsValidateError(
invalid_commit_cmd_model,
{'invalid': 'data'},
'Missing cmd key in change dict')
])
def test_param_change_object_with_invalid_cmd_raises_exception(self):
invalid_commit_cmd_model = (
config_models.PlatformParameterSnapshotMetadataModel(
id='model_id-1',
created_on=self.YEAR_AGO,
last_updated=self.NOW,
committer_id='committer_id',
commit_type='create',
commit_cmds=[{'cmd': 'invalid'}])
)
output = (
self.pipeline
| beam.Create([invalid_commit_cmd_model])
| beam.ParDo(
config_validation
.ValidatePlatformParameterSnapshotMetadataModel())
)
self.assert_pcoll_equal(output, [
base_validation_errors.CommitCmdsValidateError(
invalid_commit_cmd_model,
{'cmd': 'invalid'},
'Command invalid is not allowed')
])
def test_param_change_object_missing_attribute_in_cmd_raises_exception(
self):
invalid_commit_cmd_model = (
config_models.PlatformParameterSnapshotMetadataModel(
id='model_id-1',
created_on=self.YEAR_AGO,
last_updated=self.NOW,
committer_id='committer_id',
commit_type='create',
commit_cmds=[{'cmd': self.CMD_EDIT_RULES}])
)
output = (
self.pipeline
| beam.Create([invalid_commit_cmd_model])
| beam.ParDo(
config_validation
.ValidatePlatformParameterSnapshotMetadataModel())
)
self.assert_pcoll_equal(output, [
base_validation_errors.CommitCmdsValidateError(
invalid_commit_cmd_model,
{'cmd': self.CMD_EDIT_RULES},
'The following required attributes are missing: new_rules')
])
def test_param_change_object_with_extra_attribute_in_cmd_raises_exception(
self):
commit_dict = {
'cmd': self.CMD_EDIT_RULES,
'new_rules': [],
'invalid': 'invalid'
}
invalid_commit_cmd_model = (
config_models.PlatformParameterSnapshotMetadataModel(
id='model_id-1',
created_on=self.YEAR_AGO,
last_updated=self.NOW,
committer_id='committer_id',
commit_type='create',
commit_cmds=[commit_dict])
)
output = (
self.pipeline
| beam.Create([invalid_commit_cmd_model])
| beam.ParDo(
config_validation
.ValidatePlatformParameterSnapshotMetadataModel())
)
self.assert_pcoll_equal(output, [
base_validation_errors.CommitCmdsValidateError(
invalid_commit_cmd_model,
commit_dict,
'The following extra attributes are present: invalid')
])
|
py | 1a30c87670ba473f0b83f3308ae6b632f3c91bc2 | #!/usr/bin/python
# Copyright (c) 2017, 2020 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_database_autonomous_database_facts
short_description: Fetches details about one or multiple AutonomousDatabase resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple AutonomousDatabase resources in Oracle Cloud Infrastructure
- Gets a list of Autonomous Databases based on the query parameters specified.
- If I(autonomous_database_id) is specified, the details of a single AutonomousDatabase will be returned.
version_added: "2.9"
author: Oracle (@oracle)
options:
autonomous_database_id:
description:
- The database L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm).
- Required to get a specific autonomous_database.
type: str
aliases: ["id"]
compartment_id:
description:
- The compartment L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm).
- Required to list multiple autonomous_databases.
type: str
autonomous_container_database_id:
description:
- The Autonomous Container Database L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm).
type: str
sort_by:
description:
- The field to sort by. You can provide one sort order (`sortOrder`). Default order for TIMECREATED is descending. Default order for DISPLAYNAME
is ascending. The DISPLAYNAME sort order is case sensitive.
- "**Note:** If you do not include the availability domain filter, the resources are grouped by availability domain, then sorted."
type: str
choices:
- "TIMECREATED"
- "DISPLAYNAME"
sort_order:
description:
- The sort order to use, either ascending (`ASC`) or descending (`DESC`).
type: str
choices:
- "ASC"
- "DESC"
infrastructure_type:
description:
- A filter to return only resources that match the given Infrastructure Type.
type: str
choices:
- "CLOUD"
- "CLOUD_AT_CUSTOMER"
lifecycle_state:
description:
- A filter to return only resources that match the given lifecycle state exactly.
type: str
choices:
- "PROVISIONING"
- "AVAILABLE"
- "STOPPING"
- "STOPPED"
- "STARTING"
- "TERMINATING"
- "TERMINATED"
- "UNAVAILABLE"
- "RESTORE_IN_PROGRESS"
- "RESTORE_FAILED"
- "BACKUP_IN_PROGRESS"
- "SCALE_IN_PROGRESS"
- "AVAILABLE_NEEDS_ATTENTION"
- "UPDATING"
- "MAINTENANCE_IN_PROGRESS"
- "RESTARTING"
- "RECREATING"
- "ROLE_CHANGE_IN_PROGRESS"
- "UPGRADING"
db_workload:
description:
- A filter to return only autonomous database resources that match the specified workload type.
type: str
choices:
- "OLTP"
- "DW"
- "AJD"
db_version:
description:
- A filter to return only autonomous database resources that match the specified dbVersion.
type: str
is_free_tier:
description:
- Filter on the value of the resource's 'isFreeTier' property. A value of `true` returns only Always Free resources.
A value of `false` excludes Always Free resources from the returned results. Omitting this parameter returns both Always Free and paid resources.
type: bool
display_name:
description:
- A filter to return only resources that match the entire display name given. The match is not case sensitive.
type: str
aliases: ["name"]
is_refreshable_clone:
description:
- Filter on the value of the resource's 'isRefreshableClone' property. A value of `true` returns only refreshable clones.
A value of `false` excludes refreshable clones from the returned results. Omitting this parameter returns both refreshable clones and databases
that are not refreshable clones.
type: bool
is_data_guard_enabled:
description:
- A filter to return only resources that have Data Guard enabled.
type: bool
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: List autonomous_databases
oci_database_autonomous_database_facts:
compartment_id: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
- name: Get a specific autonomous_database
oci_database_autonomous_database_facts:
autonomous_database_id: ocid1.autonomousdatabase.oc1..xxxxxxEXAMPLExxxxxx
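# Illustrative only, not part of the generated examples: the list operation can
# be narrowed with the documented filter options; the values are placeholders.
- name: List AVAILABLE OLTP autonomous_databases
  oci_database_autonomous_database_facts:
    compartment_id: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
    db_workload: OLTP
    lifecycle_state: AVAILABLE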
"""
RETURN = """
autonomous_databases:
description:
- List of AutonomousDatabase resources
returned: on success
type: complex
contains:
id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the Autonomous Database.
returned: on success
type: string
sample: ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the compartment.
returned: on success
type: string
sample: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
lifecycle_state:
description:
- The current state of the Autonomous Database.
returned: on success
type: string
sample: PROVISIONING
lifecycle_details:
description:
- Information about the current lifecycle state.
returned: on success
type: string
sample: lifecycle_details_example
db_name:
description:
- The database name.
returned: on success
type: string
sample: db_name_example
is_free_tier:
description:
- Indicates if this is an Always Free resource. The default value is false. Note that Always Free Autonomous Databases have 1 CPU and 20GB of
memory. For Always Free databases, memory and CPU cannot be scaled.
returned: on success
type: bool
sample: true
system_tags:
description:
- System tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
returned: on success
type: dict
sample: {}
time_reclamation_of_free_autonomous_database:
description:
- The date and time the Always Free database will be stopped because of inactivity. If this time is reached without any database activity, the
database will automatically be put into the STOPPED state.
returned: on success
type: string
sample: 2013-10-20T19:20:30+01:00
time_deletion_of_free_autonomous_database:
description:
- The date and time the Always Free database will be automatically deleted because of inactivity. If the database is in the STOPPED state and
without activity until this time, it will be deleted.
returned: on success
type: string
sample: 2013-10-20T19:20:30+01:00
backup_config:
description:
- ""
returned: on success
type: complex
contains:
manual_backup_bucket_name:
description:
- Name of L(Object Storage,https://docs.cloud.oracle.com/Content/Object/Concepts/objectstorageoverview.htm) bucket to use for storing
manual backups.
returned: on success
type: string
sample: manual_backup_bucket_name_example
manual_backup_type:
description:
- The manual backup destination type.
returned: on success
type: string
sample: NONE
cpu_core_count:
description:
- The number of OCPU cores to be made available to the database.
returned: on success
type: int
sample: 56
data_storage_size_in_tbs:
description:
- The quantity of data in the database, in terabytes.
returned: on success
type: int
sample: 56
infrastructure_type:
description:
- The infrastructure type this resource belongs to.
returned: on success
type: string
sample: CLOUD
is_dedicated:
description:
- True if the database uses L(dedicated Exadata infrastructure,https://docs.cloud.oracle.com/Content/Database/Concepts/adbddoverview.htm).
returned: on success
type: bool
sample: true
autonomous_container_database_id:
description:
- The Autonomous Container Database L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm).
returned: on success
type: string
sample: ocid1.autonomouscontainerdatabase.oc1..xxxxxxEXAMPLExxxxxx
time_created:
description:
- The date and time the Autonomous Database was created.
returned: on success
type: string
sample: 2013-10-20T19:20:30+01:00
display_name:
description:
- The user-friendly name for the Autonomous Database. The name does not have to be unique.
returned: on success
type: string
sample: display_name_example
service_console_url:
description:
- The URL of the Service Console for the Autonomous Database.
returned: on success
type: string
sample: service_console_url_example
connection_strings:
description:
- The connection string used to connect to the Autonomous Database. The username for the Service Console is ADMIN. Use the password you entered
when creating the Autonomous Database for the password value.
returned: on success
type: complex
contains:
high:
description:
- The High database service provides the highest level of resources to each SQL statement resulting in the highest performance, but
supports the fewest number of concurrent SQL statements.
returned: on success
type: string
sample: high_example
medium:
description:
- The Medium database service provides a lower level of resources to each SQL statement potentially resulting a lower level of
performance, but supports more concurrent SQL statements.
returned: on success
type: string
sample: medium_example
low:
description:
- The Low database service provides the least level of resources to each SQL statement, but supports the most number of concurrent SQL
statements.
returned: on success
type: string
sample: low_example
dedicated:
description:
- The database service provides the least level of resources to each SQL statement, but supports the most number of concurrent SQL
statements.
returned: on success
type: string
sample: dedicated_example
all_connection_strings:
description:
- Returns all connection strings that can be used to connect to the Autonomous Database.
For more information, please see L(Predefined Database Service Names for Autonomous Transaction
Processing,https://docs.oracle.com/en/cloud/paas/atp-cloud/atpug/connect-predefined.html#GUID-9747539B-FD46-44F1-8FF8-F5AC650F15BE)
returned: on success
type: dict
sample: {}
connection_urls:
description:
- ""
returned: on success
type: complex
contains:
sql_dev_web_url:
description:
- Oracle SQL Developer Web URL.
returned: on success
type: string
sample: sql_dev_web_url_example
apex_url:
description:
- Oracle Application Express (APEX) URL.
returned: on success
type: string
sample: apex_url_example
machine_learning_user_management_url:
description:
- Oracle Machine Learning user management URL.
returned: on success
type: string
sample: machine_learning_user_management_url_example
license_model:
description:
- The Oracle license model that applies to the Oracle Autonomous Database. Bring your own license (BYOL) allows you to apply your current on-
premises Oracle software licenses to equivalent, highly automated Oracle PaaS and IaaS services in the cloud.
License Included allows you to subscribe to new Oracle Database software licenses and the Database service.
Note that when provisioning an Autonomous Database on L(dedicated Exadata
infrastructure,https://docs.cloud.oracle.com/Content/Database/Concepts/adbddoverview.htm), this attribute must be null because the attribute
is already set at the
Autonomous Exadata Infrastructure level. When using L(shared Exadata
infrastructure,https://docs.cloud.oracle.com/Content/Database/Concepts/adboverview.htm#AEI), if a value is not specified, the system will
supply the value of `BRING_YOUR_OWN_LICENSE`.
returned: on success
type: string
sample: LICENSE_INCLUDED
used_data_storage_size_in_tbs:
description:
- The amount of storage that has been used, in terabytes.
returned: on success
type: int
sample: 56
freeform_tags:
description:
- Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Department\\": \\"Finance\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
defined_tags:
description:
- Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
subnet_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the subnet the resource is associated with.
- "**Subnet Restrictions:**
- For bare metal DB systems and for single node virtual machine DB systems, do not use a subnet that overlaps with 192.168.16.16/28.
- For Exadata and virtual machine 2-node RAC systems, do not use a subnet that overlaps with 192.168.128.0/20.
- For Autonomous Database, setting this will disable public secure access to the database."
- These subnets are used by the Oracle Clusterware private interconnect on the database instance.
Specifying an overlapping subnet will cause the private interconnect to malfunction.
This restriction applies to both the client subnet and the backup subnet.
returned: on success
type: string
sample: ocid1.subnet.oc1..xxxxxxEXAMPLExxxxxx
nsg_ids:
description:
- "A list of the L(OCIDs,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the network security groups (NSGs) that this
resource belongs to. Setting this to an empty array after the list is created removes the resource from all NSGs. For more information about
NSGs, see L(Security Rules,https://docs.cloud.oracle.com/Content/Network/Concepts/securityrules.htm).
**NsgIds restrictions:**
- Autonomous Databases with private access require at least 1 Network Security Group (NSG). The nsgIds array cannot be empty."
returned: on success
type: list
sample: []
private_endpoint:
description:
- The private endpoint for the resource.
returned: on success
type: string
sample: private_endpoint_example
private_endpoint_label:
description:
- The private endpoint label for the resource. Setting this to an empty string, after the private endpoint database gets created, will change
the same private endpoint database to the public endpoint database.
returned: on success
type: string
sample: private_endpoint_label_example
private_endpoint_ip:
description:
- The private endpoint Ip address for the resource.
returned: on success
type: string
sample: private_endpoint_ip_example
db_version:
description:
- A valid Oracle Database version for Autonomous Database.
returned: on success
type: string
sample: db_version_example
is_preview:
description:
- Indicates if the Autonomous Database version is a preview version.
returned: on success
type: bool
sample: true
db_workload:
description:
- "The Autonomous Database workload type. The following values are valid:"
- "- OLTP - indicates an Autonomous Transaction Processing database
- DW - indicates an Autonomous Data Warehouse database
- AJD - indicates an Autonomous JSON Database"
returned: on success
type: string
sample: OLTP
is_access_control_enabled:
description:
- Indicates if the database-level access control is enabled.
If disabled, database access is defined by the network security rules.
If enabled, database access is restricted to the IP addresses defined by the rules specified with the `whitelistedIps` property. While
specifying `whitelistedIps` rules is optional,
if database-level access control is enabled and no rules are specified, the database will become inaccessible. The rules can be added later
using the `UpdateAutonomousDatabase` API operation or edit option in console.
When creating a database clone, the desired access control setting should be specified. By default, database-level access control will be
disabled for the clone.
- This property is applicable only to Autonomous Databases on the Exadata Cloud@Customer platform.
returned: on success
type: bool
sample: true
whitelisted_ips:
description:
- The client IP access control list (ACL). This feature is available for autonomous databases on L(shared Exadata
infrastructure,https://docs.cloud.oracle.com/Content/Database/Concepts/adboverview.htm#AEI) and on Exadata Cloud@Customer.
Only clients connecting from an IP address included in the ACL may access the Autonomous Database instance.
- "For shared Exadata infrastructure, this is an array of CIDR (Classless Inter-Domain Routing) notations for a subnet or VCN OCID.
Use a semicolon (;) as a deliminator between the VCN-specific subnets or IPs.
Example: `[\\"1.1.1.1\\",\\"1.1.1.0/24\\",\\"ocid1.vcn.oc1.sea.<unique_id>\\",\\"ocid1.vcn.oc1.sea.<unique_id1>;1.1.1.1\\",\\"ocid1.vcn.oc1.se
a.<unique_id2>;1.1.0.0/16\\"]`
For Exadata Cloud@Customer, this is an array of IP addresses or CIDR (Classless Inter-Domain Routing) notations.
Example: `[\\"1.1.1.1\\",\\"1.1.1.0/24\\",\\"1.1.2.25\\"]`"
- For an update operation, if you want to delete all the IPs in the ACL, use an array with a single empty string entry.
returned: on success
type: list
sample: []
is_auto_scaling_enabled:
description:
- Indicates if auto scaling is enabled for the Autonomous Database CPU core count.
returned: on success
type: bool
sample: true
data_safe_status:
description:
- Status of the Data Safe registration for this Autonomous Database.
returned: on success
type: string
sample: REGISTERING
operations_insights_status:
description:
- Status of Operations Insights for this Autonomous Database.
returned: on success
type: string
sample: ENABLING
time_maintenance_begin:
description:
- The date and time when maintenance will begin.
returned: on success
type: string
sample: 2013-10-20T19:20:30+01:00
time_maintenance_end:
description:
- The date and time when maintenance will end.
returned: on success
type: string
sample: 2013-10-20T19:20:30+01:00
is_refreshable_clone:
description:
- Indicates whether the Autonomous Database is a refreshable clone.
returned: on success
type: bool
sample: true
time_of_last_refresh:
description:
- The date and time when last refresh happened.
returned: on success
type: string
sample: 2013-10-20T19:20:30+01:00
time_of_last_refresh_point:
description:
- The refresh point timestamp (UTC). The refresh point is the time to which the database was most recently refreshed. Data created after the
refresh point is not included in the refresh.
returned: on success
type: string
sample: 2013-10-20T19:20:30+01:00
time_of_next_refresh:
description:
- The date and time of next refresh.
returned: on success
type: string
sample: 2013-10-20T19:20:30+01:00
open_mode:
description:
- The `DATABASE OPEN` mode. You can open the database in `READ_ONLY` or `READ_WRITE` mode.
returned: on success
type: string
sample: READ_ONLY
refreshable_status:
description:
- The refresh status of the clone. REFRESHING indicates that the clone is currently being refreshed with data from the source Autonomous
Database.
returned: on success
type: string
sample: REFRESHING
refreshable_mode:
description:
- The refresh mode of the clone. AUTOMATIC indicates that the clone is automatically being refreshed with data from the source Autonomous
Database.
returned: on success
type: string
sample: AUTOMATIC
source_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the source Autonomous Database that was cloned to create
the current Autonomous Database.
returned: on success
type: string
sample: ocid1.source.oc1..xxxxxxEXAMPLExxxxxx
permission_level:
description:
- The Autonomous Database permission level. Restricted mode allows access only to admin users.
returned: on success
type: string
sample: RESTRICTED
time_of_last_switchover:
description:
- The timestamp of the last switchover operation for the Autonomous Database.
returned: on success
type: string
sample: 2013-10-20T19:20:30+01:00
time_of_last_failover:
description:
- The timestamp of the last failover operation.
returned: on success
type: string
sample: 2013-10-20T19:20:30+01:00
is_data_guard_enabled:
description:
- Indicates whether the Autonomous Database has Data Guard enabled.
returned: on success
type: bool
sample: true
failed_data_recovery_in_seconds:
description:
- Indicates the number of seconds of data loss for a Data Guard failover.
returned: on success
type: int
sample: 56
standby_db:
description:
- ""
returned: on success
type: complex
contains:
lag_time_in_seconds:
description:
- The amount of time, in seconds, that the data of the standby database lags the data of the primary database. Can be used to determine
the potential data loss in the event of a failover.
returned: on success
type: int
sample: 56
lifecycle_state:
description:
- The current state of the Autonomous Database.
returned: on success
type: string
sample: PROVISIONING
lifecycle_details:
description:
- Additional information about the current lifecycle state.
returned: on success
type: string
sample: lifecycle_details_example
available_upgrade_versions:
description:
- List of Oracle Database versions available for a database upgrade. If there are no version upgrades available, this list is empty.
returned: on success
type: list
sample: []
key_store_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the key store.
returned: on success
type: string
sample: ocid1.keystore.oc1..xxxxxxEXAMPLExxxxxx
key_store_wallet_name:
description:
- The wallet name for Oracle Key Vault.
returned: on success
type: string
sample: key_store_wallet_name_example
sample: [{
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"lifecycle_state": "PROVISIONING",
"lifecycle_details": "lifecycle_details_example",
"db_name": "db_name_example",
"is_free_tier": true,
"system_tags": {},
"time_reclamation_of_free_autonomous_database": "2013-10-20T19:20:30+01:00",
"time_deletion_of_free_autonomous_database": "2013-10-20T19:20:30+01:00",
"backup_config": {
"manual_backup_bucket_name": "manual_backup_bucket_name_example",
"manual_backup_type": "NONE"
},
"cpu_core_count": 56,
"data_storage_size_in_tbs": 56,
"infrastructure_type": "CLOUD",
"is_dedicated": true,
"autonomous_container_database_id": "ocid1.autonomouscontainerdatabase.oc1..xxxxxxEXAMPLExxxxxx",
"time_created": "2013-10-20T19:20:30+01:00",
"display_name": "display_name_example",
"service_console_url": "service_console_url_example",
"connection_strings": {
"high": "high_example",
"medium": "medium_example",
"low": "low_example",
"dedicated": "dedicated_example",
"all_connection_strings": {}
},
"connection_urls": {
"sql_dev_web_url": "sql_dev_web_url_example",
"apex_url": "apex_url_example",
"machine_learning_user_management_url": "machine_learning_user_management_url_example"
},
"license_model": "LICENSE_INCLUDED",
"used_data_storage_size_in_tbs": 56,
"freeform_tags": {'Department': 'Finance'},
"defined_tags": {'Operations': {'CostCenter': 'US'}},
"subnet_id": "ocid1.subnet.oc1..xxxxxxEXAMPLExxxxxx",
"nsg_ids": [],
"private_endpoint": "private_endpoint_example",
"private_endpoint_label": "private_endpoint_label_example",
"private_endpoint_ip": "private_endpoint_ip_example",
"db_version": "db_version_example",
"is_preview": true,
"db_workload": "OLTP",
"is_access_control_enabled": true,
"whitelisted_ips": [],
"is_auto_scaling_enabled": true,
"data_safe_status": "REGISTERING",
"operations_insights_status": "ENABLING",
"time_maintenance_begin": "2013-10-20T19:20:30+01:00",
"time_maintenance_end": "2013-10-20T19:20:30+01:00",
"is_refreshable_clone": true,
"time_of_last_refresh": "2013-10-20T19:20:30+01:00",
"time_of_last_refresh_point": "2013-10-20T19:20:30+01:00",
"time_of_next_refresh": "2013-10-20T19:20:30+01:00",
"open_mode": "READ_ONLY",
"refreshable_status": "REFRESHING",
"refreshable_mode": "AUTOMATIC",
"source_id": "ocid1.source.oc1..xxxxxxEXAMPLExxxxxx",
"permission_level": "RESTRICTED",
"time_of_last_switchover": "2013-10-20T19:20:30+01:00",
"time_of_last_failover": "2013-10-20T19:20:30+01:00",
"is_data_guard_enabled": true,
"failed_data_recovery_in_seconds": 56,
"standby_db": {
"lag_time_in_seconds": 56,
"lifecycle_state": "PROVISIONING",
"lifecycle_details": "lifecycle_details_example"
},
"available_upgrade_versions": [],
"key_store_id": "ocid1.keystore.oc1..xxxxxxEXAMPLExxxxxx",
"key_store_wallet_name": "key_store_wallet_name_example"
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.database import DatabaseClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class AutonomousDatabaseFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: get, list"""
def get_required_params_for_get(self):
return [
"autonomous_database_id",
]
def get_required_params_for_list(self):
return [
"compartment_id",
]
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_autonomous_database,
autonomous_database_id=self.module.params.get("autonomous_database_id"),
)
def list_resources(self):
optional_list_method_params = [
"autonomous_container_database_id",
"sort_by",
"sort_order",
"infrastructure_type",
"lifecycle_state",
"db_workload",
"db_version",
"is_free_tier",
"display_name",
"is_refreshable_clone",
"is_data_guard_enabled",
]
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
)
return oci_common_utils.list_all_resources(
self.client.list_autonomous_databases,
compartment_id=self.module.params.get("compartment_id"),
**optional_kwargs
)
AutonomousDatabaseFactsHelperCustom = get_custom_class(
"AutonomousDatabaseFactsHelperCustom"
)
class ResourceFactsHelper(
AutonomousDatabaseFactsHelperCustom, AutonomousDatabaseFactsHelperGen
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(
dict(
autonomous_database_id=dict(aliases=["id"], type="str"),
compartment_id=dict(type="str"),
autonomous_container_database_id=dict(type="str"),
sort_by=dict(type="str", choices=["TIMECREATED", "DISPLAYNAME"]),
sort_order=dict(type="str", choices=["ASC", "DESC"]),
infrastructure_type=dict(
type="str", choices=["CLOUD", "CLOUD_AT_CUSTOMER"]
),
lifecycle_state=dict(
type="str",
choices=[
"PROVISIONING",
"AVAILABLE",
"STOPPING",
"STOPPED",
"STARTING",
"TERMINATING",
"TERMINATED",
"UNAVAILABLE",
"RESTORE_IN_PROGRESS",
"RESTORE_FAILED",
"BACKUP_IN_PROGRESS",
"SCALE_IN_PROGRESS",
"AVAILABLE_NEEDS_ATTENTION",
"UPDATING",
"MAINTENANCE_IN_PROGRESS",
"RESTARTING",
"RECREATING",
"ROLE_CHANGE_IN_PROGRESS",
"UPGRADING",
],
),
db_workload=dict(type="str", choices=["OLTP", "DW", "AJD"]),
db_version=dict(type="str"),
is_free_tier=dict(type="bool"),
display_name=dict(aliases=["name"], type="str"),
is_refreshable_clone=dict(type="bool"),
is_data_guard_enabled=dict(type="bool"),
)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="autonomous_database",
service_client_class=DatabaseClient,
namespace="database",
)
result = []
if resource_facts_helper.is_get():
result = [resource_facts_helper.get()]
elif resource_facts_helper.is_list():
result = resource_facts_helper.list()
else:
resource_facts_helper.fail()
module.exit_json(autonomous_databases=result)
if __name__ == "__main__":
main()
|
py | 1a30c8c07446a29b3131db3c5e0363a440ade735 | import json
import os
from unittest import TestCase, mock
from climacell.api import Client, Measurement, Response, Error
from climacell.fields import (
FIELD_TEMP, FIELD_DEW_POINT, FIELD_HUMIDITY,
FIELD_WIND_SPEED, FIELD_WIND_GUST, FIELD_WIND_DIRECTION,
FIELD_SUNRISE, FIELD_SUNSET,
)
from climacell.utils import join_fields
ERROR_FILE = os.path.dirname(__file__) + '/data/error_example.json'
HOURLY_FILE = os.path.dirname(__file__) + '/data/hourly_example.json'
DAILY_FILE = os.path.dirname(__file__) + '/data/daily_example.json'
NOWCAST_FILE = os.path.dirname(__file__) + '/data/nowcast_example.json'
class MockResponse:
def __init__(self, data, status_code):
self.data = data
self.status_code = status_code
def json(self):
return self.data
def mock_requests_get(*args, **kwargs):
base_url = 'https://api.climacell.co/v3'
if args[0] is None:
return MockResponse(None, 404)
elif args[0] == base_url + '/weather/forecast/hourly':
file = HOURLY_FILE
elif args[0] == base_url + '/weather/nowcast':
file = NOWCAST_FILE
elif args[0] == base_url + '/weather/forecast/daily':
file = DAILY_FILE
with open(file) as json_file:
data = json.load(json_file)
return MockResponse(data, 200)
class TestMeasurement(TestCase):
def test_measurement__str__(self):
m = Measurement('temp', 13.04, 'C', '2021-01-14T21:00:00.000Z')
self.assertEqual('temp: 13.04 C at 2021-01-14 21:00:00+00:00', str(m))
m = Measurement('temp', 13.04, None, '2021-01-14T21:00:00.000Z')
self.assertEqual('temp: 13.04 at 2021-01-14 21:00:00+00:00', str(m))
class TestResponse(TestCase):
def test_get_measurements_error(self):
with open(ERROR_FILE) as f:
data = json.load(f)
mock_response = MockResponse(data, 400)
response = Response(mock_response, [FIELD_TEMP, FIELD_DEW_POINT, FIELD_HUMIDITY])
self.assertTrue(response.has_error)
error = response.get_measurements()
self.assertTrue(isinstance(error, Error))
class TestError(TestCase):
def test_error(self):
with open(ERROR_FILE) as f:
data = json.load(f)
error = Error(data)
self.assertEqual(400, error.status_code)
self.assertEqual('Message body content not allowed.', error.message)
self.assertEqual('BadRequest', error.code)
def test_error_str(self):
with open(ERROR_FILE) as f:
data = json.load(f)
error = Error(data)
expected_str = 'BadRequest (400): Message body content not allowed.'
self.assertEqual(expected_str, str(error))
class TestClient(TestCase):
@mock.patch('climacell.api.requests.get', side_effect=mock_requests_get)
def test_hourly(self, mock_get):
client = Client('apikey')
lat = 52.446023244274045
lon = 4.819207798979252
fields = [FIELD_TEMP, FIELD_DEW_POINT, FIELD_HUMIDITY]
response = client.hourly(lat=lat, lon=lon, fields=fields)
measurements = response.get_measurements()
expected_params = {
'lat': 52.446023244274045,
'lon': 4.819207798979252,
'start_time': 'now',
'unit_system': 'si',
'fields': join_fields([FIELD_TEMP, FIELD_DEW_POINT, FIELD_HUMIDITY]),
}
mock_get.assert_called_with(
'https://api.climacell.co/v3/weather/forecast/hourly',
params=expected_params,
headers={'apikey': 'apikey'}
)
self.assertEqual(6, len(measurements))
def test_hourly_invalid_start_time(self):
client = Client('apikey')
lat = 52.446023244274045
lon = 4.819207798979252
fields = [FIELD_TEMP, FIELD_DEW_POINT, FIELD_HUMIDITY]
start_time = 'yesterday'
self.assertRaises(
ValueError, client.hourly, lat, lon, fields, start_time
)
def test_hourly_invalid_end_time(self):
client = Client('apikey')
lat = 52.446023244274045
lon = 4.819207798979252
fields = [FIELD_TEMP, FIELD_DEW_POINT, FIELD_HUMIDITY]
start_time = 'now'
end_time = 'tomorrow'
self.assertRaises(
ValueError, client.hourly, lat, lon, fields, start_time, end_time
)
@mock.patch('climacell.api.requests.get', side_effect=mock_requests_get)
def test_hourly_valid_end_time(self, mock_get):
client = Client('apikey')
lat = 52.446023244274045
lon = 4.819207798979252
fields = [FIELD_TEMP, FIELD_DEW_POINT, FIELD_HUMIDITY]
start_time = 'now'
end_time = '2021-01-14T21:00:00.000Z'
response = client.hourly(lat, lon, fields, start_time, end_time)
self.assertFalse(response.has_error)
expected_params = {
'lat': 52.446023244274045,
'lon': 4.819207798979252,
'start_time': 'now',
'unit_system': 'si',
'fields': join_fields([FIELD_TEMP, FIELD_DEW_POINT, FIELD_HUMIDITY]),
'end_time': '2021-01-14T21:00:00.000Z',
}
mock_get.assert_called_with(
'https://api.climacell.co/v3/weather/forecast/hourly',
params=expected_params,
headers={'apikey': 'apikey'}
)
@mock.patch('climacell.api.requests.get', side_effect=mock_requests_get)
def test_nowcast(self, mock_get):
client = Client('apikey')
lat = 52.446023244274045
lon = 4.819207798979252
timestep = 30
fields = [
FIELD_TEMP,
FIELD_DEW_POINT,
FIELD_HUMIDITY,
FIELD_WIND_SPEED,
FIELD_WIND_GUST,
FIELD_WIND_DIRECTION,
FIELD_SUNRISE,
FIELD_SUNSET,
]
response = client.nowcast(lat=lat, lon=lon, fields=fields, timestep=timestep)
measurements = response.get_measurements()
expected_params = {
'lat': 52.446023244274045,
'lon': 4.819207798979252,
'timestep': 30,
'start_time': 'now',
'unit_system': 'si',
'fields': join_fields(fields),
}
mock_get.assert_called_with(
'https://api.climacell.co/v3/weather/nowcast',
params=expected_params,
headers={'apikey': 'apikey'}
)
# 13 timesteps, 8 measurements per timestep
self.assertEqual(13 * 8, len(measurements))
@mock.patch('climacell.api.requests.get', side_effect=mock_requests_get)
def test_nowcast_valid_end_time(self, mock_get):
client = Client('apikey')
lat = 52.446023244274045
lon = 4.819207798979252
timestep = 30
fields = [FIELD_TEMP]
start_time = 'now'
end_time = '2021-01-14T21:00:00.000Z'
response = client.nowcast(
lat=lat, lon=lon, fields=fields, timestep=timestep,
start_time=start_time, end_time=end_time
)
self.assertFalse(response.has_error)
expected_params = {
'lat': 52.446023244274045,
'lon': 4.819207798979252,
'timestep': 30,
'start_time': 'now',
'unit_system': 'si',
'fields': join_fields(fields),
'end_time': end_time
}
mock_get.assert_called_with(
'https://api.climacell.co/v3/weather/nowcast',
params=expected_params,
headers={'apikey': 'apikey'}
)
def test_nowcast_invalid_start_time(self):
client = Client('apikey')
lat = 52.446023244274045
lon = 4.819207798979252
timestep = 30
fields = [FIELD_TEMP]
start_time = 'yesterday'
self.assertRaises(
ValueError, client.nowcast, lat, lon, fields, timestep, start_time
)
def test_nowcast_invalid_end_time(self):
client = Client('apikey')
lat = 52.446023244274045
lon = 4.819207798979252
timestep = 30
fields = [FIELD_TEMP]
start_time = 'now'
end_time = 'tomorrow'
self.assertRaises(
ValueError, client.nowcast, lat, lon, fields,
timestep, start_time, end_time
)
@mock.patch('climacell.api.requests.get', side_effect=mock_requests_get)
def test_daily(self, mock_get):
client = Client('apikey')
lat = 52.446023244274045
lon = 4.819207798979252
fields = [FIELD_TEMP, FIELD_DEW_POINT, FIELD_HUMIDITY]
response = client.daily(lat=lat, lon=lon, fields=fields)
measurements = response.get_measurements()
expected_params = {
'lat': 52.446023244274045,
'lon': 4.819207798979252,
'start_time': 'now',
'unit_system': 'si',
'fields': join_fields([FIELD_TEMP, FIELD_DEW_POINT, FIELD_HUMIDITY]),
}
mock_get.assert_called_with(
'https://api.climacell.co/v3/weather/forecast/daily',
params=expected_params,
headers={'apikey': 'apikey'}
)
self.assertEqual(6, len(measurements))
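# Illustrative sketch, not part of the original tests: how the Client under
# test is typically driven, based on the calls exercised above. The API key
# and coordinates are placeholders.
def _example_hourly_forecast(api_key, lat=52.0, lon=4.8):
    client = Client(api_key)
    response = client.hourly(lat=lat, lon=lon, fields=[FIELD_TEMP])
    # get_measurements() returns an Error instance if the request failed,
    # otherwise a list of Measurement objects.
    return response.get_measurements()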
|
py | 1a30c9835e32ccd041b586efe95296a87922e426 | import re
from django.db.models import query, Min, F
from django.views.generic import DetailView, ListView
from django.views.generic.edit import UpdateView
from django.urls import reverse
from django.shortcuts import redirect
from django.utils.http import is_safe_url
from django.http import QueryDict
from django.conf import settings
from .views import get_font
from .models import Notes, Source, Target
from .filters import NotesFilter, SourceFilter
class SourceListView(ListView):
model = Source
context_object_name = 'source'
template_name = 'lexicon/list_source.html'
paginate_by = 100
def get_queryset(self):
qs = super().get_queryset() # .values('id', 'token', 'lemma', 'morph', 'strongs_no_prefix', 'book', 'chapter', 'verse', 'notes')
filtered = SourceFilter(self.request.GET, queryset=qs)
return filtered.qs.distinct()[:100]
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
context['filter'] = SourceFilter(self.request.GET)
query_dict = QueryDict(mutable=True)
for key,val in self.request.GET.items():
# removing page here, might have to add pagination to this view in its own right
if val and key != 'page':
query_dict[key] = val
context['query_dict'] = query_dict
context['base_page'] = reverse('navigate_source') + '?' + query_dict.urlencode()
return context
class NavigateSource(SourceListView):
paginate_by = 1
template_name = 'lexicon/navigate_source.html'
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
qs = self.get_queryset()
# (<django.core.paginator.Paginator object at 0x7f1a605f99a0>, <Page 2 of 4>, <QuerySet [ix79: figs-euphemism]>, True)
paginator = self.paginate_queryset(qs, self.get_paginate_by(qs))
try:
# gets the first element of the queryset of the selected page
occurrence = paginator[2].first()
except:
# a fallback to show something, it will display the wrong text though (but the right note)
occurrence = qs.first()
context['occurrence'] = occurrence
# context['source'] = Source.objects.filter(book=occurrence['book'], chapter=occurrence['chapter'], verse=occurrence['verse'])
context['source'] = Source.objects.filter(book=occurrence.book, chapter=occurrence.chapter, verse=occurrence.verse)
context['target'] = Target.objects.filter(book=occurrence.book, chapter=occurrence.chapter, verse=occurrence.verse)
book_nr = int(occurrence.book.split('-')[0])
if book_nr > 40:
font = 'gk'
else:
font = 'hb'
context['font'] = font
# make sure you pass the GET parameters along
query_dict = QueryDict(mutable=True)
for key,val in self.request.GET.items():
if val and key != 'page':
query_dict[key] = val
context['query_dict'] = query_dict
context['url'] = reverse('navigate_source') + '?' + query_dict.urlencode()
context['base_page'] = reverse('list_source') + '?' + query_dict.urlencode()
        # prepare some navigation
page = paginator[1]
if page.has_previous():
context['previous_page'] = page.previous_page_number()
if page.has_next():
context['next_page'] = page.next_page_number()
return context
class NotesListView(ListView):
model = Notes
context_object_name = 'notes'
template_name = 'lexicon/list_notes.html'
paginate_by = 100
def get_queryset(self):
qs = super().get_queryset().annotate(min_source=Min('source__id')).distinct().order_by(F('min_source').asc(nulls_last=True))
filtered = NotesFilter(self.request.GET, queryset=qs)
return filtered.qs.distinct()
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
context['filter'] = NotesFilter(self.request.GET)
query_dict = QueryDict(mutable=True)
for key,val in self.request.GET.items():
# removing page here, might have to add pagination to this view in its own right
if val and key != 'page':
query_dict[key] = val
context['base_page'] = reverse('navigate_notes') + '?' + query_dict.urlencode()
return context
class NavigateNotes(NotesListView):
paginate_by = 1
template_name = 'lexicon/navigate_notes.html'
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
'''
Here's the magic:
Although this is a LIST View it really is used to only display a SINGLE object
because it is paginated by 1.
This is done so the entire queryset can be filtered by the user and the user can then
go through each item in said queryset.
This means that we need to add the actual information based on the paginated queryset here
and not just the basic queryset.
'''
qs = self.get_queryset()
# (<django.core.paginator.Paginator object at 0x7f1a605f99a0>, <Page 2 of 4>, <QuerySet [ix79: figs-euphemism]>, True)
paginator = self.paginate_queryset(qs, self.get_paginate_by(qs))
try:
# gets the first element of the queryset of the selected page
note = paginator[2].first()
except:
# a fallback to show something, it will display the wrong text though (but the right note)
note = qs.first()
context['source'] = Source.objects.filter(book=note.book, chapter=note.chapter, verse=note.verse)
book_nr = int(note.book.split('-')[0])
if book_nr > 40:
font = 'gk'
else:
font = 'hb'
context['font'] = font
context['source'].first().strongs_no_prefix
context['target'] = Target.objects.filter(book=note.book, chapter=note.chapter, verse=note.verse)
# make sure you pass the GET parameters along
query_dict = QueryDict(mutable=True)
for key,val in self.request.GET.items():
if val and key != 'page':
query_dict[key] = val
context['url'] = reverse('navigate_notes') + '?' + query_dict.urlencode()
context['base_page'] = reverse('list_notes') + '?' + query_dict.urlencode()
        # prepare some navigation
page = paginator[1]
if page.has_previous():
context['previous_page'] = page.previous_page_number()
if page.has_next():
context['next_page'] = page.next_page_number()
return context
class NotesDetailView(DetailView):
model = Notes
pk_url_kwarg = 'index'
context_object_name = 'note'
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
note = self.object
context['source'] = Source.objects.filter(book=note.book, chapter=note.chapter, verse=note.verse)
book_nr = int(note.book.split('-')[0])
if book_nr > 40:
font = 'gk'
else:
font = 'hb'
context['font'] = font
context['source'].first().strongs_no_prefix
context['target'] = Target.objects.filter(book=note.book, chapter=note.chapter, verse=note.verse)
context['previous_note'] = Notes.objects.filter(index__lt=note.index).order_by('-index').first()
context['next_note'] = Notes.objects.filter(index__gt=note.index).order_by('index').first()
return context
class NotesUpdateView(UpdateView):
model = Notes
pk_url_kwarg = 'index'
context_object_name = 'note'
fields = 'supportreference annotation sourceword sourcewordoccurrence'.split()
def get_success_url(self):
next_url = self.request.GET.get('next', None)
# do not accept any url
if is_safe_url(next_url, allowed_hosts=settings.ALLOWED_HOSTS):
return next_url |
py | 1a30ca4d0ebc2232b34fda07933d3d20d520defa | numbers = list()
while True:
num = int(input('Insert a number: '))
numbers.append(num)
cont = str(input('Do you want to continue? [y/n]: ')).lower().strip()[0]
while cont not in 'yn':
cont = str(input('Do you want to continue? [y/n]: ')).lower().strip()[0]
if cont == 'n':
break
print(f'You inserted a total of {len(numbers)} numbers.')
print(f'The numbers, in descending order, are: {sorted(numbers, reverse=True)}.')
if 5 in numbers:
print(f'The number 5 appear {numbers.count(5)} times.')
else:
    print('The number 5 doesn\'t appear in the list.')
|
py | 1a30cad8a194e64f268edd1e1c6065f329126e9e | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import collections
from typing import Any, Iterable, cast, DefaultDict, TYPE_CHECKING, FrozenSet
from numpy import sqrt
from cirq import devices, ops, circuits, value
from cirq.devices.grid_qubit import GridQubit
from cirq.ops import raw_types
from cirq.value import Duration
from cirq.neutral_atoms import convert_to_neutral_atom_gates
if TYPE_CHECKING:
import cirq
def _subgate_if_parallel_gate(gate: 'cirq.Gate') -> 'cirq.Gate':
"""Returns gate.sub_gate if gate is a ParallelGate, else returns gate"""
return gate.sub_gate if isinstance(gate, ops.ParallelGate) else gate
def neutral_atom_gateset(max_parallel_z=None, max_parallel_xy=None):
return ops.Gateset(
ops.AnyIntegerPowerGateFamily(ops.CNotPowGate),
ops.AnyIntegerPowerGateFamily(ops.CCNotPowGate),
ops.AnyIntegerPowerGateFamily(ops.CZPowGate),
ops.AnyIntegerPowerGateFamily(ops.CCZPowGate),
ops.ParallelGateFamily(ops.ZPowGate, max_parallel_allowed=max_parallel_z),
ops.ParallelGateFamily(ops.XPowGate, max_parallel_allowed=max_parallel_xy),
ops.ParallelGateFamily(ops.YPowGate, max_parallel_allowed=max_parallel_xy),
ops.ParallelGateFamily(ops.PhasedXPowGate, max_parallel_allowed=max_parallel_xy),
ops.MeasurementGate,
ops.IdentityGate,
unroll_circuit_op=False,
accept_global_phase_op=False,
)
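# Illustrative sketch, not part of the original module: membership checks
# against a gateset built with the helper above. The parallelism limits are
# arbitrary example values.
def _example_gateset_membership() -> bool:
    gateset = neutral_atom_gateset(max_parallel_z=4, max_parallel_xy=4)
    # CZ with an integer exponent is native; a fractional power is rejected,
    # matching the "integer exponents" rule enforced by validate_gate below.
    return ops.CZ in gateset and ops.CZ ** 0.5 not in gateset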
@value.value_equality
class NeutralAtomDevice(devices.Device):
"""A device with qubits placed on a grid."""
def __init__(
self,
measurement_duration: 'cirq.DURATION_LIKE',
gate_duration: 'cirq.DURATION_LIKE',
control_radius: float,
max_parallel_z: int,
max_parallel_xy: int,
max_parallel_c: int,
qubits: Iterable[GridQubit],
) -> None:
"""Initializes the description of the AQuA device.
Args:
measurement_duration: the maximum duration of a measurement.
gate_duration: the maximum duration of a gate
control_radius: the maximum distance between qubits for a controlled
gate. Distance is measured in units of the indices passed into
the GridQubit constructor.
max_parallel_z: The maximum number of qubits that can be acted on
in parallel by a Z gate
max_parallel_xy: The maximum number of qubits that can be acted on
in parallel by a local XY gate
max_parallel_c: the maximum number of qubits that can be acted on in
parallel by a controlled gate. Must be less than or equal to the
lesser of max_parallel_z and max_parallel_xy
qubits: Qubits on the device, identified by their x, y location.
Must be of type GridQubit
Raises:
ValueError: if the wrong qubit type is provided or if invalid
parallel parameters are provided
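        Example:
            An illustrative sketch; the durations, radius and qubit layout below
            are arbitrary assumptions, not defaults of this class::
                cirq.NeutralAtomDevice(
                    measurement_duration=cirq.Duration(nanos=100),
                    gate_duration=cirq.Duration(nanos=50),
                    control_radius=1.5,
                    max_parallel_z=3,
                    max_parallel_xy=3,
                    max_parallel_c=3,
                    qubits=cirq.GridQubit.square(3),
                )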
"""
self._measurement_duration = Duration(measurement_duration)
self._gate_duration = Duration(gate_duration)
self._control_radius = control_radius
self._max_parallel_z = max_parallel_z
self._max_parallel_xy = max_parallel_xy
if max_parallel_c > min(max_parallel_z, max_parallel_xy):
raise ValueError(
"max_parallel_c must be less than or equal to the"
"min of max_parallel_z and max_parallel_xy"
)
self._max_parallel_c = max_parallel_c
self.xy_gateset_all_allowed = ops.Gateset(
ops.ParallelGateFamily(ops.XPowGate),
ops.ParallelGateFamily(ops.YPowGate),
ops.ParallelGateFamily(ops.PhasedXPowGate),
unroll_circuit_op=False,
accept_global_phase_op=False,
)
self.controlled_gateset = ops.Gateset(
ops.AnyIntegerPowerGateFamily(ops.CNotPowGate),
ops.AnyIntegerPowerGateFamily(ops.CCNotPowGate),
ops.AnyIntegerPowerGateFamily(ops.CZPowGate),
ops.AnyIntegerPowerGateFamily(ops.CCZPowGate),
unroll_circuit_op=False,
accept_global_phase_op=False,
)
self.gateset = neutral_atom_gateset(max_parallel_z, max_parallel_xy)
for q in qubits:
if not isinstance(q, GridQubit):
raise ValueError(f'Unsupported qubit type: {q!r}')
self.qubits = frozenset(qubits)
def qubit_set(self) -> FrozenSet['cirq.GridQubit']:
return self.qubits
def qubit_list(self):
return [qubit for qubit in self.qubits]
def decompose_operation(self, operation: ops.Operation) -> ops.OP_TREE:
return convert_to_neutral_atom_gates.ConvertToNeutralAtomGates().convert(operation)
def duration_of(self, operation: ops.Operation):
"""Provides the duration of the given operation on this device.
Args:
operation: the operation to get the duration of
Returns:
The duration of the given operation on this device
Raises:
ValueError: If the operation provided doesn't correspond to a native
gate
"""
self.validate_operation(operation)
if isinstance(operation, (ops.GateOperation, ops.ParallelGateOperation)):
if isinstance(operation.gate, ops.MeasurementGate):
return self._measurement_duration
return self._gate_duration
def validate_gate(self, gate: ops.Gate):
"""Raises an error if the provided gate isn't part of the native gate set.
Args:
gate: the gate to validate
Raises:
ValueError: If the given gate is not part of the native gate set.
"""
if gate not in self.gateset:
if isinstance(gate, (ops.CNotPowGate, ops.CZPowGate, ops.CCXPowGate, ops.CCZPowGate)):
raise ValueError('controlled gates must have integer exponents')
raise ValueError(f'Unsupported gate: {gate!r}')
def validate_operation(self, operation: ops.Operation):
"""Raises an error if the given operation is invalid on this device.
Args:
operation: the operation to validate
Raises:
ValueError: If the operation is not valid
"""
if not isinstance(operation, (ops.GateOperation, ops.ParallelGateOperation)):
raise ValueError(f'Unsupported operation: {operation!r}')
# All qubits the operation acts on must be on the device
for q in operation.qubits:
if q not in self.qubits:
raise ValueError(f'Qubit not on device: {q!r}')
if operation not in self.gateset and not (
operation in self.xy_gateset_all_allowed and len(operation.qubits) == len(self.qubits)
):
raise ValueError(f'Unsupported operation: {operation!r}')
if operation in self.controlled_gateset:
if len(operation.qubits) > self._max_parallel_c:
raise ValueError(
'Too many qubits acted on in parallel by a controlled gate operation'
)
for p in operation.qubits:
for q in operation.qubits:
if self.distance(p, q) > self._control_radius:
raise ValueError(f"Qubits {p!r}, {q!r} are too far away")
def validate_moment(self, moment: ops.Moment):
"""Raises an error if the given moment is invalid on this device.
Args:
moment: The moment to validate
Raises:
ValueError: If the given moment is invalid
"""
super().validate_moment(moment)
CATEGORIES = {
'Z': (ops.ZPowGate,),
'XY': (
ops.XPowGate,
ops.YPowGate,
ops.PhasedXPowGate,
),
'controlled': (
ops.CNotPowGate,
ops.CZPowGate,
ops.CCXPowGate,
ops.CCZPowGate,
),
'measure': (ops.MeasurementGate,),
}
categorized_ops: DefaultDict = collections.defaultdict(list)
for op in moment.operations:
assert isinstance(op, (ops.GateOperation, ops.ParallelGateOperation))
for k, v in CATEGORIES.items():
assert isinstance(v, tuple)
gate = _subgate_if_parallel_gate(op.gate)
if isinstance(gate, v):
categorized_ops[k].append(op)
for k in ['Z', 'XY', 'controlled']:
if len(set(_subgate_if_parallel_gate(op.gate) for op in categorized_ops[k])) > 1:
raise ValueError(f"Non-identical simultaneous {k} gates")
num_parallel_xy = sum([len(op.qubits) for op in categorized_ops['XY']])
num_parallel_z = sum([len(op.qubits) for op in categorized_ops['Z']])
has_measurement = len(categorized_ops['measure']) > 0
controlled_qubits_lists = [op.qubits for op in categorized_ops['controlled']]
if sum([len(l) for l in controlled_qubits_lists]) > self._max_parallel_c:
raise ValueError("Too many qubits acted on by controlled gates")
if controlled_qubits_lists and (num_parallel_xy or num_parallel_z):
raise ValueError(
"Can't perform non-controlled operations at same time as controlled operations"
)
if self._are_qubit_lists_too_close(*controlled_qubits_lists):
raise ValueError("Interacting controlled operations")
if num_parallel_z > self._max_parallel_z:
raise ValueError("Too many simultaneous Z gates")
if num_parallel_xy > self._max_parallel_xy and num_parallel_xy != len(self.qubits):
raise ValueError("Bad number of simultaneous XY gates")
if has_measurement:
if controlled_qubits_lists or num_parallel_z or num_parallel_xy:
raise ValueError("Measurements can't be simultaneous with other operations")
def _are_qubit_lists_too_close(self, *qubit_lists: Iterable[raw_types.Qid]) -> bool:
if len(qubit_lists) < 2:
return False
if len(qubit_lists) == 2:
a, b = qubit_lists
return any(self.distance(p, q) <= self._control_radius for p in a for q in b)
return any(
self._are_qubit_lists_too_close(a, b) for a, b in itertools.combinations(qubit_lists, 2)
)
def can_add_operation_into_moment(self, operation: ops.Operation, moment: ops.Moment) -> bool:
"""Determines if it's possible to add an operation into a moment.
An operation can be added if the moment with the operation added is valid.
Args:
operation: The operation being added.
moment: The moment being transformed.
Returns:
Whether or not the moment will validate after adding the operation.
Raises:
ValueError: If either of the given moment or operation is invalid
"""
if not super().can_add_operation_into_moment(operation, moment):
return False
try:
self.validate_moment(moment.with_operation(operation))
except:
return False
return True
def validate_circuit(self, circuit: circuits.AbstractCircuit):
"""Raises an error if the given circuit is invalid on this device.
A circuit is invalid if any of its moments are invalid or if there is a
non-empty moment after a moment with a measurement.
Args:
circuit: The circuit to validate
Raises:
ValueError: If the given circuit can't be run on this device
"""
super().validate_circuit(circuit)
# Measurements must be in the last non-empty moment
has_measurement_occurred = False
for moment in circuit:
if has_measurement_occurred:
if len(moment.operations) > 0:
raise ValueError("Non-empty moment after measurement")
for operation in moment.operations:
if isinstance(operation.gate, ops.MeasurementGate):
has_measurement_occurred = True
def _value_equality_values_(self) -> Any:
return (
self._measurement_duration,
self._gate_duration,
self._max_parallel_z,
self._max_parallel_xy,
self._max_parallel_c,
self._control_radius,
self.qubits,
)
def __repr__(self) -> str:
return (
'cirq.NeutralAtomDevice('
f'measurement_duration={self._measurement_duration!r}, '
f'gate_duration={self._gate_duration!r}, '
f'max_parallel_z={self._max_parallel_z!r}, '
f'max_parallel_xy={self._max_parallel_xy!r}, '
f'max_parallel_c={self._max_parallel_c!r}, '
f'control_radius={self._control_radius!r}, '
f'qubits={sorted(self.qubits)!r})'
)
def neighbors_of(self, qubit: 'cirq.GridQubit') -> Iterable['cirq.GridQubit']:
"""Returns the qubits that the given qubit can interact with."""
possibles = [
GridQubit(qubit.row + 1, qubit.col),
GridQubit(qubit.row - 1, qubit.col),
GridQubit(qubit.row, qubit.col + 1),
GridQubit(qubit.row, qubit.col - 1),
]
return [e for e in possibles if e in self.qubits]
def distance(self, p: 'cirq.Qid', q: 'cirq.Qid') -> float:
p = cast(GridQubit, p)
q = cast(GridQubit, q)
return sqrt((p.row - q.row) ** 2 + (p.col - q.col) ** 2)
def __str__(self) -> str:
diagram = circuits.TextDiagramDrawer()
for q in self.qubits:
diagram.write(q.col, q.row, str(q))
for q2 in self.neighbors_of(q):
diagram.grid_line(q.col, q.row, q2.col, q2.row)
return diagram.render(horizontal_spacing=3, vertical_spacing=2, use_unicode_characters=True)
|
py | 1a30cb28cfe924fdc6b5e5a99d42e03591c0a0d2 | """Tests related to creating ingest definition"""
import json
import os
import unittest
from rf.models import Scene
from rf.ingest.landsat8_ingest import get_landsat8_layer
class Landsat8LayerTestCase(unittest.TestCase):
"""Test that we can create a layer from Landsat 8 scenes"""
def setUp(self):
cwd = os.path.abspath(os.path.dirname(__file__))
scene_path = os.path.join(cwd, 'data', 'scene.json')
with open(scene_path) as fh:
self.scene = Scene.from_dict(json.load(fh))
def test_create_layer(self):
"""Minimal test to verify that a layer can be created"""
layer = get_landsat8_layer(self.scene)
num_sources = len(layer.sources)
self.assertEqual(
num_sources, 11, 'Found {} sources, expected 11'.format(num_sources)
)
|
py | 1a30cc3695724c9c29237602f5781066bcb092bd | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import numpy as np
import tensorflow as tf
NUM_CLASSES = 10
EMBEDDING_DIM = 7
def model_fn(features, labels, mode, params):
# build model
global_step = tf.train.get_global_step()
embedding_table = tf.get_variable('embedding_table', shape=(NUM_CLASSES, EMBEDDING_DIM), dtype=tf.float32)
embeddings = tf.nn.embedding_lookup(embedding_table, features)
# lstm model
batch_size = params['train_batch_size']
sequence_length = params['sequence_length']
cell = tf.nn.rnn_cell.BasicLSTMCell(EMBEDDING_DIM)
outputs, final_state = tf.nn.dynamic_rnn(cell, embeddings, dtype=tf.float32)
# flatten the batch and sequence dimensions
flattened = tf.reshape(outputs, (-1, EMBEDDING_DIM))
flattened_logits = tf.layers.dense(flattened, NUM_CLASSES)
logits = tf.reshape(flattened_logits, (-1, sequence_length, NUM_CLASSES))
predictions = tf.multinomial(flattened_logits, num_samples=1)
loss = None
train_op = None
if mode == tf.estimator.ModeKeys.TRAIN:
# define loss
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))
# define train_op
optimizer = tf.train.RMSPropOptimizer(learning_rate=0.05)
# wrapper to make the optimizer work with TPUs
if params['use_tpu']:
optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
train_op = optimizer.minimize(loss, global_step=global_step)
if params['use_tpu']:
# TPU version of EstimatorSpec
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op)
else:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op)
def train_input_fn(params={}):
# make some fake data of labels
data_length = 100
x = np.random.randint(0, NUM_CLASSES, data_length)
y = np.random.randint(0, NUM_CLASSES, data_length)
x_tensor = tf.constant(x, dtype=tf.int32)
y_tensor = tf.constant(y, dtype=tf.int32)
dataset = tf.data.Dataset.from_tensors((x_tensor, y_tensor))
dataset = dataset.repeat()
# TPUs need to know the full shape of tensors
# so we use a fixed sequence length
sequence_length = params.get('sequence_length', 5)
def get_sequences(x_tensor, y_tensor):
index = tf.random_uniform([1], minval=0, maxval=data_length-sequence_length, dtype=tf.int32)[0]
x_sequence = x_tensor[index:index+sequence_length]
y_sequence = y_tensor[index:index+sequence_length]
return (x_sequence, y_sequence)
dataset = dataset.map(get_sequences)
# TPUEstimator passes params when calling input_fn
batch_size = params.get('train_batch_size', 16)
dataset = dataset.batch(batch_size, drop_remainder=True)
# TPUs need to know all dimensions when the graph is built
# Datasets know the batch size only when the graph is run
def set_shapes(features, labels):
features_shape = features.get_shape().merge_with([batch_size, sequence_length])
labels_shape = labels.get_shape().merge_with([batch_size, sequence_length])
features.set_shape(features_shape)
labels.set_shape(labels_shape)
return features, labels
dataset = dataset.map(set_shapes)
dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE)
return dataset
def main(args):
# pass the args as params so the model_fn can use
# the TPU specific args
params = vars(args)
if args.use_tpu:
# additional configs required for using TPUs
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(args.tpu)
tpu_config = tf.contrib.tpu.TPUConfig(
num_shards=8, # using Cloud TPU v2-8
iterations_per_loop=args.save_checkpoints_steps)
# use the TPU version of RunConfig
config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
model_dir=args.model_dir,
tpu_config=tpu_config,
save_checkpoints_steps=args.save_checkpoints_steps,
save_summary_steps=100)
# TPUEstimator
estimator = tf.contrib.tpu.TPUEstimator(
model_fn=model_fn,
config=config,
params=params,
train_batch_size=args.train_batch_size,
eval_batch_size=32,
export_to_tpu=False)
else:
config = tf.estimator.RunConfig(model_dir=args.model_dir)
estimator = tf.estimator.Estimator(
model_fn,
config=config,
params=params)
estimator.train(train_input_fn, max_steps=args.max_steps)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--model-dir',
type=str,
default='/tmp/tpu-template',
help='Location to write checkpoints and summaries to. Must be a GCS URI when using Cloud TPU.')
parser.add_argument(
'--max-steps',
type=int,
default=1000,
help='The total number of steps to train the model.')
parser.add_argument(
'--sequence-length',
type=int,
default=5,
help='The sequence length for an LSTM model.')
parser.add_argument(
'--train-batch-size',
type=int,
default=16,
help='The training batch size. The training batch is divided evenly across the TPU cores.')
parser.add_argument(
'--save-checkpoints-steps',
type=int,
default=100,
help='The number of training steps before saving each checkpoint.')
parser.add_argument(
'--use-tpu',
action='store_true',
help='Whether to use TPU.')
parser.add_argument(
'--tpu',
default=None,
help='The name or GRPC URL of the TPU node. Leave it as `None` when training on AI Platform.')
args, _ = parser.parse_known_args()
main(args)
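# Example invocation (illustrative; the script filename and GCS bucket are
# assumptions, not values defined in this file):
#   python trainer.py --model-dir /tmp/tpu-template --max-steps 1000
#   python trainer.py --model-dir gs://my-bucket/tpu-template --use-tpu --tpu my-tpu-node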
|
py | 1a30cdb47628e61535589b874104f41f740099a1 | '''
TorMySQL: presents a Tornado and asyncio Future-based API and greenlet for non-blocking access to MySQL.
The MIT License (MIT)
Copyright (c) 2014, 2015 TorMySQL contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from .client import Client
from .cursor import Cursor, DictCursor, SSCursor, SSDictCursor
from .pool import ConnectionPool
from .cursor import CursorNotReadAllDataError, CursorNotIterError
from .pool import ConnectionPoolClosedError, ConnectionPoolUsedError, ConnectionNotFoundError, ConnectionNotUsedError, ConnectionUsedError, WaitConnectionTimeoutError
from .log import set_log
from . import helpers
version = "0.4.3"
version_info = (0, 4, 3)
def connect(*args, **kwargs):
client = Client(*args, **kwargs)
from .platform import current_ioloop
current_ioloop()
return client.connect()
Connection = connect |
py | 1a30cedd51c7910003261ce113a8040019783092 | """
Guidelines for enum classes:
1. Write member names out in full, with no abbreviation, i.e., 'Watt' instead of 'W'.
2. Attributes should follow the International System of Units (SI) [https://en.wikipedia.org/wiki/International_System_of_Units], i.e., for power the attribute is 'W'.
3. Do not use multipliers such as 'Kilowatt'.
3.1 Exceptions to this rule are: 'Kilometer', 'Kilogram'.
3.2 In case of an exception, the simple form should be avoided altogether, e.g., if 'Kilometer' is a Unit, then 'Meter' should not be used.
"""
import enum
@enum.unique
class DisplayNames(str, enum.Enum):
ElectricityOutput = "ElectricityOutput"
ElectricityInput = "ElectricityInput"
@enum.unique
class LoadTypes(str, enum.Enum):
Any = "Any"
Electricity = "Electricity"
Irradiance = "Irradiance"
Speed = "Speed"
Heating = "Heating"
Cooling = "Cooling"
Volume = "Volume"
Temperature = "Temperature"
Time = "Time"
# Substance
Gas = "Gas"
Hydrogen = "Hydrogen"
Oxygen = "Oxygen"
Water = "Water"
WarmWater = "WarmWater"
Price = "Price"
@enum.unique
class Units(str, enum.Enum):
# Unphysical
Any = "-"
Percent = "%"
# Power
Watt = "W"
kW = "kW"
kWh_per_timestep = "kWh per timestep"
# Power per area
Wm2 = "W per square meter"
Whm2 = "Wh per square meter"
# Speed
MeterPerSecond = "m/s"
# Energy
Wh = "Wh"
kWh = "kWh"
# Volume
Liter = "L"
# Volume per time
l_per_timestep = "Liter per timestep"
# Mass
kg = "kg"
# Mass flow
kg_per_sec = "kg/s"
# Degrees
Celsius = "°C"
Kelvin = 'K'
# Degrees
Degrees = "Degrees"
# Time
Seconds = "s"
# Cost
c_per_kWh = "Cents per kWh"
|
py | 1a30cfa53f348743fd4bf86be5cbcb11e737e4d5 | import argparse
import collections
import json
import os
import numpy as np
import torch
import yaml
__all__ = [
"load_config",
"save_config",
"flatten_dict",
"sanitize_dict",
"update_namespace",
"extract",
"s2b",
"g",
]
# Load config file
def load_yaml(f_path):
with open(f_path, "r") as stream:
return yaml.safe_load(stream)
def load_json(f_path):
with open(f_path, "r") as f:
return json.load(f)
def load_config(path, flatten=True):
_, ext = os.path.splitext(path)
assert ext in [
".json",
".yaml",
".yml",
], f"Only support yaml and json config, but '{ext}' given."
    if ext == ".json":
cfg = load_json(path)
else:
cfg = load_yaml(path)
if cfg is None:
cfg = dict()
if flatten:
cfg = flatten_dict(cfg)
return cfg
# Dump config file
def save_json(obj, f_path):
with open(f_path, "w") as f:
json.dump(obj, f, ensure_ascii=False, indent=4)
def save_yaml(obj, f_path):
with open(f_path, "w") as f:
yaml.dump(obj, f)
def save_config(obj, path, ext=None):
_, fext = os.path.splitext(path)
if fext.startswith("."):
fext = fext[1:]
if fext != "":
assert (
            ext is None or fext == ext
), f"Extension conflict between '{path}' and '{ext}'."
ext = fext
if ext in ["yaml", "yml"]:
save_yaml(obj, path)
else:
save_json(obj, path)
# Utils
def flatten_dict(d, keep_parent=False, sep="_", parent_key=""):
"""Flatten dict to only one nest
Args:
d (dict): dictionary to flatten
keep_parent (bool, optional): If True, keep parent's key name, and keys should all be str. Defaults to False.
sep (str, optional): Effective only keep_parent=True, separator between keys. Defaults to "_".
parent_key (str, optional): For recursive call. Defaults to "".
Returns:
dict: flattened dict
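    Example (illustrative):
        >>> flatten_dict({"a": {"b": 1}, "c": 2}, keep_parent=True)
        {'a_b': 1, 'c': 2}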
"""
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key and keep_parent else k
if isinstance(v, collections.abc.MutableMapping):
items.extend(
flatten_dict(v, keep_parent, parent_key=new_key, sep=sep).items()
)
else:
items.append((new_key, v))
items_key = [i[0] for i in items]
assert len(items_key) == len(set(items_key))
return dict(items)
def sanitize_dict(params, to_str=True, none_fill="N/A"):
"""Convert all items into tensorboard supported values or str
Args:
params (dict): dict to sanitize
to_str (bool, optional): If True, turn all items to string. Defaults to True.
Returns:
dict: sanitized dict
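    Example (illustrative):
        >>> sanitize_dict({"lr": np.float64(0.1), "note": None}, to_str=False)
        {'lr': 0.1, 'note': 'N/A'}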
"""
items = []
for k in params.keys():
# numpy to float
if isinstance(params[k], (np.bool_, np.integer, np.floating)):
items.append([k, params[k].item()])
elif isinstance(params[k], np.ndarray):
items.append([k, str(params[k].tolist())])
# torch to float
elif isinstance(params[k], torch.Tensor):
items.append([k, str(params[k].tolist())])
# None to str
elif params[k] is None:
items.append([k, none_fill])
# Others to str
elif type(params[k]) not in [bool, int, float, str, torch.Tensor]:
items.append([k, str(params[k])])
else:
items.append([k, params[k]])
# All to str
if to_str:
items[-1][-1] = str(items[-1][-1])
return dict(items)
def update_namespace(args, dictionary, overwrite=True, rest=False):
"""update Namespace with given dictionary
Args:
args (Namespace): Namespace to be updated
dictionary (dict): dictionary
overwrite (bool, optional): If True, All Namespace value will overwritten by dictionary value. Otherwise, only Namespace with None will be overwritten. Defaults to True.
        rest: Effective only if overwrite=False. If True, add keys that are in dictionary but not in args into args. Otherwise raise an error.
Returns:
Namespace
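    Example (illustrative):
        args = argparse.Namespace(lr=None, epochs=10)
        args = update_namespace(args, {"lr": 0.1}, overwrite=False)
        # args.lr is now 0.1 and args.epochs stays 10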
"""
dict_args = vars(args)
if overwrite:
dict_args.update(dictionary)
else:
for k, v in dict_args.items():
if v is not None:
pass
elif k in dictionary:
dict_args[k] = dictionary[k]
for k, v in dictionary.items():
if k not in dict_args:
if rest:
dict_args[k] = v
else:
raise KeyError(f"no key {k}")
args = argparse.Namespace(**dict_args)
return args
def extract(s, delimit="-", num=0):
"""Extract the num_th word from string s
Args:
s (str): string to be parsed
delimit (str, optional): delimiter. Defaults to "-".
        num (int, optional): index of the word to extract. Defaults to 0.
Returns:
(str, List[str])
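    Example (illustrative):
        >>> extract("resnet-50-v2", num=0)
        ('resnet', '50-v2')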
"""
s_list = s.split(delimit)
first = s_list[num]
s_list.pop(num)
s_rest = delimit.join(s_list)
return first, s_rest
# argparse type
def s2b(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
# template generator for params.py
def g(template, name_list, placeholder="{}"):
items = []
for name in name_list:
t = []
t.append(template[0].replace(placeholder, name))
t.append(template[1].replace(placeholder, name))
t.extend(template[2:])
items.append(t)
return items
|
py | 1a30cfa669f7ecab7cef4bb662d4d5ff96d72048 | # -*- coding:utf-8 -*-
import unittest
class TestZip(unittest.TestCase):
TESTDATA = [
("aabbb" , "a2b3"),
("aaaa", "a4"),
("abc", "abc"),
("abcdd","abcdd")
]
def setUp(self):
self.judge = Zipper()
def testsame(self):
for src, exp in self.TESTDATA:
self.assertEqual(self.judge.zipString(src),exp)
class Zipper:
def zipString(self, iniString):
# write code here
record = []
prevchar = None
prevlen = 0
for letter in iniString:
if letter == prevchar:
prevlen += 1
else:
if prevlen > 0:
record.append({prevchar : prevlen})
prevlen = 1
prevchar = letter
if prevlen > 0:
record.append({prevchar : prevlen})
newstring = ''
for item in record:
            for key, value in item.items():
newstring += "{}{}".format(key,value)
return newstring if len(newstring) < len(iniString) else iniString
if __name__ == '__main__':
unittest.main()
|
py | 1a30d1395283ec9bc60dcceaa1b5963b9176b60a | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import sys
sys.path.append("..")
from op_test import OpTest, skip_check_grad_ci
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
from paddle.fluid.framework import convert_np_dtype_to_dtype_
paddle.enable_static()
class TestAny8DOp(OpTest):
def setUp(self):
self.set_npu()
self.op_type = "reduce_any"
self.place = paddle.NPUPlace(0)
self.inputs = {
'X': np.random.randint(0, 2,
(2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
}
self.attrs = {'dim': (3, 5, 4)}
self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}
def set_npu(self):
self.__class__.use_npu = True
def test_check_output(self):
self.check_output_with_place(self.place)
class TestAnyOpWithDim(OpTest):
def setUp(self):
self.set_npu()
self.op_type = "reduce_any"
self.place = paddle.NPUPlace(0)
self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
self.attrs = {'dim': [1]}
self.outputs = {'Out': self.inputs['X'].any(axis=1)}
def set_npu(self):
self.__class__.use_npu = True
def test_check_output(self):
self.check_output_with_place(self.place)
class TestAny8DOpWithDim(OpTest):
def setUp(self):
self.set_npu()
self.op_type = "reduce_any"
self.place = paddle.NPUPlace(0)
self.inputs = {
'X': np.random.randint(0, 2,
(2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
}
self.attrs = {'dim': (3, 6)}
self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}
def set_npu(self):
self.__class__.use_npu = True
def test_check_output(self):
self.check_output_with_place(self.place)
class TestAnyOpWithKeepDim(OpTest):
def setUp(self):
self.set_npu()
self.op_type = "reduce_any"
self.place = paddle.NPUPlace(0)
self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
self.attrs = {'dim': (1), 'keep_dim': True}
self.outputs = {
'Out': np.expand_dims(
self.inputs['X'].any(axis=self.attrs['dim']), axis=1)
}
def set_npu(self):
self.__class__.use_npu = True
def test_check_output(self):
self.check_output_with_place(self.place)
class TestAny8DOpWithKeepDim(OpTest):
def setUp(self):
self.set_npu()
self.op_type = "reduce_any"
self.place = paddle.NPUPlace(0)
self.inputs = {
'X': np.random.randint(0, 2,
(2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
}
self.attrs = {'dim': (1), 'keep_dim': True}
self.outputs = {
'Out': np.expand_dims(
self.inputs['X'].any(axis=self.attrs['dim']), axis=1)
}
def set_npu(self):
self.__class__.use_npu = True
def test_check_output(self):
self.check_output_with_place(self.place)
if __name__ == '__main__':
unittest.main()
|
py | 1a30d2ed1b74ceaffb3883db6e04fdf92eac0e39 | # -*- coding: utf-8 -*-
# Copyright (c) 2018, Awab Abdoun and Mohammed Elamged and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestWorkstation(unittest.TestCase):
pass
|
py | 1a30d485a01b4350dc9549cb1de8c8e27d31ae76 | # Copyright (c) 2009-2019 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
# Maintainer: jglaser / All Developers are free to add commands for new features
R""" Potentials between special pairs of particles
Special pairs are used to implement interactions between designated pairs of particles.
They act much like bonds, except that the interaction potential is typically a pair potential,
such as LJ.
By themselves, special pairs that have been specified in an initial configuration do nothing. Only when you
specify a force (i.e. special_pair.lj) are forces actually calculated between the
listed particles.
"""
from hoomd import _hoomd
from hoomd.md import _md
from hoomd.md import force;
from hoomd.md import bond;
import hoomd;
import math;
import sys;
class coeff:
R""" Define special_pair coefficients.
The coefficients for all special pair potentials are specified using this class. Coefficients are
specified per pair type.
There are two ways to set the coefficients for a particular special_pair potential.
The first way is to save the special_pair potential in a variable and call :py:meth:`set()` directly.
See below for an example of this.
The second method is to build the coeff class first and then assign it to the
special_pair potential. There are some advantages to this method in that you could specify a
complicated set of special_pair potential coefficients in a separate python file and import
it into your job script.
Example::
my_coeffs = hoomd.md.special_pair.coeff();
special_pair_force.pair_coeff.set('pairtype1', epsilon=1, sigma=1)
special_pair_force.pair_coeff.set('backbone', epsilon=1.2, sigma=1)
"""
## \internal
# \brief Initializes the class
# \details
# The main task to be performed during initialization is just to init some variables
# \param self Python required class instance variable
def __init__(self):
self.values = {};
self.default_coeff = {}
## \var values
# \internal
# \brief Contains the vector of set values in a dictionary
## \var default_coeff
# \internal
# \brief default_coeff['coeff'] lists the default value for \a coeff, if it is set
## \internal
# \brief Sets a default value for a given coefficient
# \details
# \param name Name of the coefficient to for which to set the default
# \param value Default value to set
#
# Some coefficients have reasonable default values and the user should not be burdened with typing them in
    # all the time. set_default_coeff() sets those default values.
def set_default_coeff(self, name, value):
self.default_coeff[name] = value;
def set(self, type, **coeffs):
R""" Sets parameters for special_pair types.
Args:
type (str): Type of special_pair (or a list of type names)
coeffs: Named coefficients (see below for examples)
Calling :py:meth:`set()` results in one or more parameters being set for a special_pair type. Types are identified
by name, and parameters are also added by name. Which parameters you need to specify depends on the special_pair
potential you are setting these coefficients for, see the corresponding documentation.
All possible special_pair types as defined in the simulation box must be specified before executing run().
You will receive an error if you fail to do so. It is not an error, however, to specify coefficients for
special_pair types that do not exist in the simulation. This can be useful in defining a potential field for many
different types of special_pairs even when some simulations only include a subset.
Examples::
            my_special_pair_force.pair_coeff.set('pair1', epsilon=1, sigma=1)
my_special_pair_force.pair_coeff.set('pair2', epsilon=0.5, sigma=0.7)
my_special_pair_force.pair_coeff.set(['special_pairA','special_pairB'], epsilon=0, sigma=1)
Note:
Single parameters can be updated. If both ``k`` and ``r0`` have already been set for a particle type,
then executing ``coeff.set('polymer', r0=1.0)`` will update the value of ``r0`` and leave the other
parameters as they were previously set.
"""
hoomd.util.print_status_line();
# listify the input
type = hoomd.util.listify(type)
for typei in type:
self.set_single(typei, coeffs);
## \internal
# \brief Sets a single parameter
def set_single(self, type, coeffs):
type = str(type);
# create the type identifier if it hasn't been created yet
if (not type in self.values):
self.values[type] = {};
# update each of the values provided
if len(coeffs) == 0:
hoomd.context.msg.error("No coefficients specified\n");
for name, val in coeffs.items():
self.values[type][name] = val;
# set the default values
for name, val in self.default_coeff.items():
# don't override a coeff if it is already set
if not name in self.values[type]:
self.values[type][name] = val;
## \internal
# \brief Verifies that all values are set
# \details
# \param self Python required self variable
# \param required_coeffs list of required variables
#
# This can only be run after the system has been initialized
def verify(self, required_coeffs):
# first, check that the system has been initialized
if not hoomd.init.is_initialized():
hoomd.context.msg.error("Cannot verify special_pair coefficients before initialization\n");
raise RuntimeError('Error verifying force coefficients');
# get a list of types from the particle data
ntypes = hoomd.context.current.system_definition.getPairData().getNTypes();
type_list = [];
for i in range(0,ntypes):
type_list.append(hoomd.context.current.system_definition.getPairData().getNameByType(i));
valid = True;
# loop over all possible types and verify that all required variables are set
for i in range(0,ntypes):
type = type_list[i];
if type not in self.values.keys():
hoomd.context.msg.error("Pair type " +str(type) + " not found in pair coeff\n");
valid = False;
continue;
# verify that all required values are set by counting the matches
count = 0;
for coeff_name in self.values[type].keys():
if not coeff_name in required_coeffs:
hoomd.context.msg.notice(2, "Notice: Possible typo? Force coeff " + str(coeff_name) + " is specified for type " + str(type) + \
", but is not used by the special pair force\n");
else:
count += 1;
if count != len(required_coeffs):
hoomd.context.msg.error("Special pair type " + str(type) + " is missing required coefficients\n");
valid = False;
return valid;
## \internal
# \brief Gets the value of a single %special_pair %force coefficient
# \detail
# \param type Name of special_pair type
# \param coeff_name Coefficient to get
def get(self, type, coeff_name):
if type not in self.values.keys():
hoomd.context.msg.error("Bug detected in force.coeff. Please report\n");
raise RuntimeError("Error setting special_pair coeff");
return self.values[type][coeff_name];
## \internal
# \brief Return metadata
def get_metadata(self):
return self.values
## \internal
# \brief Base class for special pair potentials
#
# A special pair in hoomd.* reflects a PotentialSpecialPair in c++. It is responsible
# for all high-level management that happens behind the scenes for hoomd
# writers. 1) The instance of the c++ bond force itself is tracked and added to the
# System 2) methods are provided for disabling the force from being added to the
# net force on each particle
class _special_pair(force._force):
## \internal
# \brief Constructs the bond potential
#
# \param name name of the bond potential instance
#
# Initializes the cpp_force to None.
# If specified, assigns a name to the instance
# Assigns a name to the force in force_name;
def __init__(self, name=None):
# initialize the base class
force._force.__init__(self, name);
self.cpp_force = None;
# setup the coefficient vector (use bond coefficients for that)
self.pair_coeff = coeff();
self.enabled = True;
def update_coeffs(self):
coeff_list = self.required_coeffs;
# check that the force coefficients are valid
if not self.pair_coeff.verify(coeff_list):
hoomd.context.msg.error("Not all force coefficients are set\n");
raise RuntimeError("Error updating force coefficients");
# set all the params
ntypes = hoomd.context.current.system_definition.getPairData().getNTypes();
type_list = [];
for i in range(0,ntypes):
type_list.append(hoomd.context.current.system_definition.getPairData().getNameByType(i));
for i in range(0,ntypes):
# build a dict of the coeffs to pass to proces_coeff
coeff_dict = {};
for name in coeff_list:
coeff_dict[name] = self.pair_coeff.get(type_list[i], name);
param = self.process_coeff(coeff_dict);
self.cpp_force.setParams(i, param);
## \internal
# \brief Get metadata
def get_metadata(self):
data = force._force.get_metadata(self)
# make sure coefficients are up-to-date
self.update_coeffs()
data['pair_coeff'] = self.pair_coeff
return data
class lj(_special_pair):
R""" LJ special pair potential.
Args:
name (str): Name of the special_pair instance.
:py:class:`lj` specifies a Lennard-Jones potential energy between the two particles in each defined pair.
This is useful for implementing e.g. special 1-4 interactions in all-atom force fields.
The pair potential uses the standard LJ definition.
.. math::
:nowrap:
\begin{eqnarray*}
V_{\mathrm{LJ}}(r) = & 4 \varepsilon \left[ \left( \frac{\sigma}{r} \right)^{12} -
\alpha \left( \frac{\sigma}{r} \right)^{6} \right] & r < r_{\mathrm{cut}} \\
= & 0 & r \ge r_{\mathrm{cut}} \\
\end{eqnarray*}
where :math:`\vec{r}` is the vector pointing from one particle to the other in the bond.
Coefficients:
- :math:`\varepsilon` - *epsilon* (in energy units)
- :math:`\sigma` - *sigma* (in distance units)
- :math:`\alpha` - *alpha* (unitless) - *optional*: defaults to 1.0
- :math:`r_{\mathrm{cut}}` - *r_cut* (in distance units)
Example::
lj = special_pair.lj(name="my_pair")
lj.pair_coeff.set('pairtype_1', epsilon=5.4, sigma=0.47, r_cut=1.1)
Note:
The energy of special pair interactions is reported in a log quantity **special_pair_lj_energy**, which
is separate from those of other non-bonded interactions. Therefore, the total energy of nonbonded interactions
is obtained by adding that of standard and special interactions.
.. versionadded:: 2.1
"""
def __init__(self,name=None):
hoomd.util.print_status_line();
# initialize the base class
_special_pair.__init__(self);
# check that some bonds are defined
if hoomd.context.current.system_definition.getPairData().getNGlobal() == 0:
hoomd.context.msg.error("No pairs are defined.\n");
raise RuntimeError("Error creating special pair forces");
# create the c++ mirror class
if not hoomd.context.exec_conf.isCUDAEnabled():
self.cpp_force = _md.PotentialSpecialPairLJ(hoomd.context.current.system_definition,self.name);
else:
self.cpp_force = _md.PotentialSpecialPairLJGPU(hoomd.context.current.system_definition,self.name);
hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);
# setup the coefficient options
self.required_coeffs = ['epsilon','sigma','alpha','r_cut'];
self.pair_coeff.set_default_coeff('alpha', 1.0);
def process_coeff(self, coeff):
r_cut = coeff['r_cut'];
epsilon = coeff['epsilon'];
sigma = coeff['sigma'];
alpha = coeff['alpha'];
lj1 = 4.0 * epsilon * math.pow(sigma, 12.0);
lj2 = alpha * 4.0 * epsilon * math.pow(sigma, 6.0);
r_cut_squared = r_cut * r_cut
return _hoomd.make_scalar3(lj1, lj2, r_cut_squared);
class coulomb(_special_pair):
R""" Coulomb special pair potential.
Args:
name (str): Name of the special_pair instance.
:py:class:`coulomb` specifies a Coulomb potential energy between the two particles in each defined pair.
    This is useful for implementing e.g. special 1-4 interactions in all-atom force fields. It uses a standard Coulomb interaction with a scaling parameter, which allows it to implement scaled 1-4 interactions as in OPLS, where both the 1-4 LJ and Coulomb interactions are scaled by 0.5.
.. math::
:nowrap:
\begin{eqnarray*}
V_{\mathrm{Coulomb}}(r) = & \alpha \cdot \left[ \frac{q_{a}q_{b}}{r} \right] & r < r_{\mathrm{cut}} \\
= & 0 & r \ge r_{\mathrm{cut}} \\
\end{eqnarray*}
where :math:`\vec{r}` is the vector pointing from one particle to the other in the bond.
Coefficients:
- :math:`\alpha` - Coulomb scaling factor (defaults to 1.0)
- :math:`q_{a}` - charge of particle a (in hoomd charge units)
- :math:`q_{b}` - charge of particle b (in hoomd charge units)
- :math:`r_{\mathrm{cut}}` - *r_cut* (in distance units)
Example::
coul = special_pair.coulomb(name="myOPLS_style")
coul.pair_coeff.set('pairtype_1', alpha=0.5, r_cut=1.1)
Note:
The energy of special pair interactions is reported in a log quantity **special_pair_coul_energy**, which
is separate from those of other non-bonded interactions. Therefore, the total energy of non-bonded interactions
is obtained by adding that of standard and special interactions.
.. versionadded:: 2.2
.. versionchanged:: 2.2
"""
def __init__(self, name=None):
hoomd.util.print_status_line();
# initialize the base class
_special_pair.__init__(self);
# check that some bonds are defined
if hoomd.context.current.system_definition.getPairData().getNGlobal() == 0:
hoomd.context.msg.error("No pairs are defined.\n");
raise RuntimeError("Error creating special pair forces");
# create the c++ mirror class
if not hoomd.context.exec_conf.isCUDAEnabled():
self.cpp_force = _md.PotentialSpecialPairCoulomb(hoomd.context.current.system_definition,self.name);
else:
self.cpp_force = _md.PotentialSpecialPairCoulombGPU(hoomd.context.current.system_definition,self.name);
hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);
# setup the coefficient options
self.required_coeffs = ['alpha', 'r_cut'];
self.pair_coeff.set_default_coeff('alpha', 1.0);
def process_coeff(self, coeff):
r_cut = coeff['r_cut'];
alpha = coeff['alpha'];
r_cut_squared = r_cut * r_cut;
return _hoomd.make_scalar2(alpha, r_cut_squared);
|
py | 1a30d52820578b9a5b4b5ce25638b5f0564f77fa | #!/usr/bin/python3
"""
fasttextRun.py: run fasttext via python interface
usage: fasttextRun.py -f file [-n N]
note: default number of N is 10 (10-fold cross validation)
20180105 erikt(at)xs4all.nl
"""
import fasttext
import os
import random
import splitFile
import sys
COMMAND = sys.argv.pop(0)
DIM = 300
LARGENUMBER = 100000
MINCOUNT = 5
random.seed()
TMPFILENAME = "fasttextRun."+str(os.getpid())+"."+str(random.randint(0,LARGENUMBER))
def makeTrainFile(inFileName,i,n):
outFileName = TMPFILENAME+".train"
outFile = open(outFileName,"w")
for j in range(0,n):
if j != i:
inFile = open(inFileName+"."+str(j),"r")
for line in inFile: outFile.write(line)
inFile.close()
outFile.close()
return(outFileName)
def fasttextRun(inFileName,i,n):
trainFileName = makeTrainFile(inFileName,i,n)
modelFileName = TMPFILENAME+".model"
testFileName = inFileName+"."+str(i)
classifier = fasttext.supervised(trainFileName,modelFileName,dim=DIM,min_count=MINCOUNT)
# ,pretrained_vectors="/home/erikt/software/fastText/wiki.nl.vec")
result = classifier.test(testFileName)
os.unlink(trainFileName)
os.unlink(modelFileName+".bin")
return(result.precision)
def main(argv):
inFileName, n = splitFile.processOpts(list(argv))
data = splitFile.readData(inFileName)
splitFile.writeData(inFileName,data,n)
accuracyTotal = 0.0
for i in range(0,n):
accuracy = fasttextRun(inFileName,i,n)
accuracyTotal += accuracy
print("Fold: {0:0d}; Accuracy: {1:0.3f}".format(i,accuracy))
print("Average accuracy {0:0.3f}".format(accuracyTotal/float(n)))
return(0)
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
py | 1a30d5333d9aa11674a21e729d83eab98d8c4682 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Remove useless index
Revision ID: 7750037b351a
Revises: f449e5bff5a5
Create Date: 2016-12-17 21:10:27.781900
"""
from alembic import op
revision = "7750037b351a"
down_revision = "f449e5bff5a5"
def upgrade():
op.drop_index("release_files_name_idx", table_name="release_files")
def downgrade():
op.create_index("release_files_name_idx", "release_files", ["name"], unique=False)
|
py | 1a30d74ada71fef6bf218e5e7da43d24872e31fd | # coding: utf8
from .tsv_utils import complementary_list, find_label, baseline_df, chi2
from clinicaaddl.tools.deep_learning.iotools import return_logger
from scipy.stats import ttest_ind
import shutil
import pandas as pd
from os import path
import numpy as np
import os
import logging
sex_dict = {'M': 0, 'F': 1}
def create_split(diagnosis, diagnosis_df, n_test,
pval_threshold_ttest=0.80, t_val_chi2_threshold=0.0642,
ignore_demographics=False, logger=None):
"""
Split data at the subject-level in training and test set with equivalent age and sex distributions
:param diagnosis: (str) diagnosis on which the split is done
:param diagnosis_df: DataFrame with columns including ['participant_id', 'session_id', 'diagnosis']
:param n_test: (float)
If >= 1 number of subjects to put in the test set.
If < 1 proportion of subjects to put in the test set.
:param pval_threshold_ttest: (float) threshold for the t-test on age
:param t_val_chi2_threshold: (float) threshold for the chi2 test on sex
:param ignore_demographics: (bool): If True the diagnoses are split without taking into account the demographics
distributions (age, sex).
:param logger: Logger object from logging library
:return:
train_df (DataFrame) subjects in the train set
test_df (DataFrame) subjects in the test set
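    Example (an illustrative sketch; assumes diagnosis_df has the columns listed above):
        train_df, test_df = create_split('AD', diagnosis_df, n_test=0.2)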
"""
if logger is None:
logger = logging
logger.basicConfig(level=logging.DEBUG)
diagnosis_baseline_df = baseline_df(diagnosis_df)
if n_test >= 1:
n_test = int(n_test)
else:
n_test = int(n_test * len(diagnosis_baseline_df))
if not ignore_demographics:
try:
sex_label = find_label(diagnosis_baseline_df.columns.values, "sex")
age_label = find_label(diagnosis_baseline_df.columns.values, "age")
except ValueError:
raise ValueError("This dataset do not have age or sex values. "
"Please add the flag --ignore_demographics to split "
"without trying to balance age or sex distributions.")
sex = list(diagnosis_baseline_df[sex_label].values)
age = list(diagnosis_baseline_df[age_label].values)
idx = np.arange(len(diagnosis_baseline_df))
flag_selection = True
n_try = 0
while flag_selection:
idx_test = np.random.choice(idx, size=n_test, replace=False)
idx_test.sort()
idx_train = complementary_list(idx, idx_test)
# Find similarity of distribution for the age variable
if len(set(age)) != 1:
age_test = [float(age[idx]) for idx in idx_test]
age_train = [float(age[idx]) for idx in idx_train]
t_age, p_age = ttest_ind(age_test, age_train)
else:
p_age = 1
            # Find a similar distribution for the sex variable
if len(set(sex)) != 1:
sex_test = [sex_dict[sex[idx]] for idx in idx_test]
sex_train = [sex_dict[sex[idx]] for idx in idx_train]
T_sex = chi2(sex_test, sex_train)
else:
T_sex = 0
logger.debug("p=%.2f, T=%.4f" % (p_age, T_sex))
if T_sex < t_val_chi2_threshold and p_age > pval_threshold_ttest:
flag_selection = False
test_df = diagnosis_baseline_df.loc[idx_test]
train_df = diagnosis_baseline_df.loc[idx_train]
n_try += 1
logger.info("Split for diagnosis %s was found after %i trials" % (diagnosis, n_try))
else:
idx = np.arange(len(diagnosis_baseline_df))
idx_test = np.random.choice(idx, size=n_test, replace=False)
idx_test.sort()
idx_train = complementary_list(idx, idx_test)
test_df = diagnosis_baseline_df.loc[idx_test]
train_df = diagnosis_baseline_df.loc[idx_train]
return train_df, test_df
def split_diagnoses(formatted_data_path,
n_test=100, subset_name="test", MCI_sub_categories=True,
t_val_threshold=0.0642, p_val_threshold=0.80,
ignore_demographics=False, verbose=0):
"""
Performs a single split for each label independently on the subject level.
The train folder will contain two lists per diagnosis (baseline and longitudinal),
whereas the test folder will only include the list of baseline sessions.
The age and sex distributions between the two sets must be non-significant (according to T-test and chi-square).
Args:
formatted_data_path (str): Path to the folder containing data extracted by clinicaaddl tsvtool getlabels.
n_test (float):
If > 1, number of subjects to put in set with name 'subset_name'.
If < 1, proportion of subjects to put in set with name 'subset_name'.
If 0, no training set is created and the whole dataset is considered as one set with name 'subset_name'.
subset_name (str): Name of the subset that is complementary to train.
MCI_sub_categories (bool): If True, manages MCI sub-categories to avoid data leakage.
t_val_threshold (float): The threshold used for the chi2 test on sex distributions.
p_val_threshold (float): The threshold used for the T-test on age distributions.
ignore_demographics (bool): If True the diagnoses are split without taking into account the demographics
distributions (age, sex).
verbose (int): level of verbosity.
Returns:
writes three files per <label>.tsv file present in formatted_data_path:
- formatted_data_path/train/<label>.tsv
- formatted_data_path/train/<label>_baseline.tsv
- formatted_data_path/<subset_name>/<label>_baseline.tsv
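    Example (an illustrative sketch; the path is an assumption):
        split_diagnoses("data/labels_lists", n_test=100, subset_name="test")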
"""
logger = return_logger(verbose, "split")
# Read files
results_path = formatted_data_path
train_path = path.join(results_path, 'train')
if path.exists(train_path):
shutil.rmtree(train_path)
if n_test > 0:
os.makedirs(train_path)
test_path = path.join(results_path, subset_name)
if path.exists(test_path):
shutil.rmtree(test_path)
os.makedirs(test_path)
diagnosis_df_paths = os.listdir(results_path)
diagnosis_df_paths = [x for x in diagnosis_df_paths if x.endswith('.tsv')]
diagnosis_df_paths = [x for x in diagnosis_df_paths if not x.endswith('_baseline.tsv')]
MCI_special_treatment = False
if 'MCI.tsv' in diagnosis_df_paths and n_test > 0:
if MCI_sub_categories:
diagnosis_df_paths.remove('MCI.tsv')
MCI_special_treatment = True
elif 'sMCI.tsv' in diagnosis_df_paths or 'pMCI.tsv' in diagnosis_df_paths:
logger.warning("MCI special treatment was deactivated though MCI subgroups were found."
"Be aware that it may cause data leakage in transfer learning tasks.")
    # The baseline sessions must be extracted first; otherwise all sessions would be mixed together
for diagnosis_df_path in diagnosis_df_paths:
diagnosis_df = pd.read_csv(path.join(results_path, diagnosis_df_path),
sep='\t')
interest_columns = diagnosis_df.columns.values
diagnosis = diagnosis_df_path.split('.')[0]
logger.info("Running split for diagnosis %s" % diagnosis)
if n_test > 0:
train_df, test_df = create_split(diagnosis, diagnosis_df, n_test=n_test,
t_val_chi2_threshold=t_val_threshold,
pval_threshold_ttest=p_val_threshold,
ignore_demographics=ignore_demographics,
logger=logger)
# Save baseline splits
train_df = train_df[interest_columns]
train_df.to_csv(path.join(train_path, str(diagnosis) + '_baseline.tsv'), sep='\t', index=False)
test_df = test_df[interest_columns]
test_df.to_csv(path.join(test_path, str(diagnosis) + '_baseline.tsv'), sep='\t', index=False)
# Retrieve all sessions for the training set
complete_train_df = pd.DataFrame()
for idx in train_df.index.values:
subject = train_df.loc[idx, 'participant_id']
subject_df = diagnosis_df[diagnosis_df.participant_id == subject]
complete_train_df = pd.concat([complete_train_df, subject_df])
complete_train_df.to_csv(path.join(train_path, str(diagnosis) + '.tsv'), sep='\t', index=False)
else:
diagnosis_baseline_df = baseline_df(diagnosis_df)
test_df = diagnosis_baseline_df[interest_columns]
test_df.to_csv(path.join(test_path, str(diagnosis) + '_baseline.tsv'), sep='\t', index=False)
if MCI_special_treatment:
# Extraction of MCI subjects without intersection with the sMCI / pMCI train
diagnosis_df = pd.read_csv(path.join(results_path, 'MCI.tsv'), sep='\t')
MCI_df = diagnosis_df.set_index(['participant_id', 'session_id'])
baseline_MCI_df = baseline_df(MCI_df, set_index=False)
supplementary_diagnoses = []
if n_test > 1:
n_test = int(n_test)
else:
n_test = int(n_test * len(baseline_MCI_df))
logger.debug('Before subjects removal for MCI special treatment')
sub_df = diagnosis_df.reset_index().groupby('participant_id')['session_id'].nunique()
logger.debug('%i subjects, %i scans' % (len(sub_df), len(diagnosis_df)))
if 'sMCI.tsv' in diagnosis_df_paths:
sMCI_baseline_train_df = pd.read_csv(path.join(train_path, 'sMCI_baseline.tsv'), sep='\t')
sMCI_baseline_test_df = pd.read_csv(path.join(test_path, 'sMCI_baseline.tsv'), sep='\t')
sMCI_baseline_df = pd.concat([sMCI_baseline_train_df, sMCI_baseline_test_df])
sMCI_baseline_df.reset_index(drop=True, inplace=True)
for idx in sMCI_baseline_df.index.values:
subject = sMCI_baseline_df.loc[idx, 'participant_id']
MCI_df.drop(subject, inplace=True)
supplementary_diagnoses.append('sMCI')
logger.debug('Removed %i subjects based on sMCI label' % len(sMCI_baseline_df))
sub_df = MCI_df.reset_index().groupby('participant_id')['session_id'].nunique()
logger.debug('%i subjects, %i scans' % (len(sub_df), len(MCI_df)))
if 'pMCI.tsv' in diagnosis_df_paths:
pMCI_baseline_train_df = pd.read_csv(path.join(train_path, 'pMCI_baseline.tsv'), sep='\t')
pMCI_baseline_test_df = pd.read_csv(path.join(test_path, 'pMCI_baseline.tsv'), sep='\t')
pMCI_baseline_df = pd.concat([pMCI_baseline_train_df, pMCI_baseline_test_df])
pMCI_baseline_df.reset_index(drop=True, inplace=True)
for idx in pMCI_baseline_df.index.values:
subject = pMCI_baseline_df.loc[idx, 'participant_id']
MCI_df.drop(subject, inplace=True)
supplementary_diagnoses.append('pMCI')
logger.debug('Removed %i subjects based on pMCI label' % len(pMCI_baseline_df))
sub_df = MCI_df.reset_index().groupby('participant_id')['session_id'].nunique()
logger.debug('%i subjects, %i scans' % (len(sub_df), len(MCI_df)))
if len(supplementary_diagnoses) == 0:
            raise ValueError('The MCI_sub_categories flag is not needed as there are no intersections with '
'MCI subcategories.')
# Construction of supplementary train
supplementary_train_df = pd.DataFrame()
for diagnosis in supplementary_diagnoses:
sup_baseline_train_df = pd.read_csv(path.join(train_path, diagnosis + '_baseline.tsv'), sep='\t')
supplementary_train_df = pd.concat([supplementary_train_df, sup_baseline_train_df])
sub_df = supplementary_train_df.reset_index().groupby('participant_id')['session_id'].nunique()
logger.debug('supplementary_train_df %i subjects, %i scans' % (len(sub_df), len(supplementary_train_df)))
supplementary_train_df.reset_index(drop=True, inplace=True)
# MCI selection
MCI_df.reset_index(inplace=True)
diagnosis_baseline_df = baseline_df(MCI_df)
if not ignore_demographics:
sex_label = find_label(diagnosis_baseline_df.columns.values, "sex")
age_label = find_label(diagnosis_baseline_df.columns.values, "age")
sex = list(diagnosis_baseline_df[sex_label].values)
age = list(diagnosis_baseline_df[age_label].values)
sup_train_sex = list(supplementary_train_df[sex_label].values)
sup_train_age = list(supplementary_train_df[age_label].values)
sup_train_sex = [sex_dict[x] for x in sup_train_sex]
sup_train_age = [float(x) for x in sup_train_age]
idx = np.arange(len(diagnosis_baseline_df))
flag_selection = True
n_try = 0
while flag_selection:
idx_test = np.random.choice(idx, size=n_test, replace=False)
idx_test.sort()
idx_train = complementary_list(idx, idx_test)
# Find similarity of distribution for the age variable
if len(set(age)) != 1:
age_test = [float(age[idx]) for idx in idx_test]
age_train = [float(age[idx]) for idx in idx_train]
t_age, p_age = ttest_ind(age_test, age_train)
else:
p_age = 1
# Find similarity of distribution for the sex variable
if len(set(sex)) != 1:
sex_test = [sex_dict[sex[idx]] for idx in idx_test]
sex_train = [sex_dict[sex[idx]] for idx in idx_train]
T_sex = chi2(sex_test, sex_train)
else:
T_sex = 0
logger.debug("p=%.2f, T=%.4f" % (p_age, T_sex))
if T_sex < t_val_threshold and p_age > p_val_threshold:
flag_selection = False
MCI_baseline_test_df = diagnosis_baseline_df.loc[idx_test]
train_df = diagnosis_baseline_df.loc[idx_train]
MCI_baseline_train_df = pd.concat([train_df, supplementary_train_df])
logger.debug('Supplementary train df %i' % len(supplementary_train_df))
MCI_baseline_train_df.reset_index(drop=True, inplace=True)
n_try += 1
logger.info('Split for diagnosis MCI was found after %i trials' % n_try)
else:
idx = np.arange(len(diagnosis_baseline_df))
idx_test = np.random.choice(idx, size=n_test, replace=False)
idx_test.sort()
idx_train = complementary_list(idx, idx_test)
MCI_baseline_test_df = diagnosis_baseline_df.loc[idx_test]
train_df = diagnosis_baseline_df.loc[idx_train]
MCI_baseline_train_df = pd.concat([train_df, supplementary_train_df])
MCI_baseline_train_df.reset_index(drop=True, inplace=True)
# Write selection of MCI
MCI_baseline_train_df = MCI_baseline_train_df[interest_columns]
MCI_baseline_train_df.to_csv(path.join(train_path, 'MCI_baseline.tsv'), sep='\t', index=False)
MCI_baseline_test_df = MCI_baseline_test_df[interest_columns]
MCI_baseline_test_df.to_csv(path.join(test_path, 'MCI_baseline.tsv'), sep='\t', index=False)
# Retrieve all sessions for the training set
MCI_complete_train_df = pd.DataFrame()
for idx in MCI_baseline_train_df.index.values:
subject = MCI_baseline_train_df.loc[idx, 'participant_id']
subject_df = diagnosis_df[diagnosis_df.participant_id == subject]
MCI_complete_train_df = pd.concat([MCI_complete_train_df, subject_df])
MCI_complete_train_df.to_csv(path.join(train_path, 'MCI.tsv'), sep='\t', index=False)
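# Minimal, self-contained sketch of the balanced-selection loop used above: test indices
# are redrawn until the age distributions of train and test look alike (t-test p-value
# above a threshold) and the sex proportions look alike (a chi-square-style statistic
# below a threshold). The helper is illustrative only -- the default thresholds, the
# simple proportion-based sex statistic and the 0/1 sex encoding are assumptions, not
# the exact implementation used by this script.
def _demo_balanced_split(ages, sexes, n_test, p_age_min=0.80, t_sex_max=0.0642, max_tries=1000):
    import numpy as np
    from scipy.stats import ttest_ind
    idx = np.arange(len(ages))
    for _ in range(max_tries):
        idx_test = np.sort(np.random.choice(idx, size=n_test, replace=False))
        idx_train = np.setdiff1d(idx, idx_test)
        # Age similarity: a high p-value means the two samples are hard to distinguish
        _, p_age = ttest_ind([float(ages[i]) for i in idx_test],
                             [float(ages[i]) for i in idx_train])
        # Sex similarity: squared difference of the proportions, normalised by the variance
        p_test = np.mean([sexes[i] for i in idx_test])
        p_train = np.mean([sexes[i] for i in idx_train])
        t_sex = (p_test - p_train) ** 2 / max(p_train * (1 - p_train), 1e-8)
        if p_age > p_age_min and t_sex < t_sex_max:
            return idx_train, idx_test
    raise RuntimeError('No balanced split found within max_tries draws')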
|
py | 1a30d8d4aef7461996b7aa5e1b8ebbc7e1a661b6 | # -*- encoding: utf-8 -*-
# pylint: disable=E0203,E1101,C0111
"""
@file
@brief Runtime operator.
"""
import numpy
from ._op import OpRun
class ConstantOfShape(OpRun):
atts = {'value': numpy.array([0], dtype=numpy.float32)}
def __init__(self, onnx_node, desc=None, **options):
OpRun.__init__(self, onnx_node, desc=desc,
expected_attributes=ConstantOfShape.atts,
**options)
self.cst = (self.value[0]
if isinstance(self.value, numpy.ndarray)
else self.value)
if not isinstance(self.cst, (float, numpy.float32, numpy.float64)):
raise TypeError("cst must be a real not {}".format(type(self.cst)))
def _run(self, data): # pylint: disable=W0221
res = numpy.full(tuple(data), self.cst)
return (res, )
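# Reference sketch of what the operator computes, written as a plain function:
# ConstantOfShape takes a 1-D integer shape tensor and returns an array of that shape
# filled with the node's 'value' attribute (0.0 by default). The helper name below is
# illustrative, not part of the runtime API.
def _constant_of_shape_reference(shape, cst=0.0):
    # Equivalent of ConstantOfShape._run for a given constant value.
    return numpy.full(tuple(shape), cst)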
|
py | 1a30db0846b18c8770feddae9c5305158d14ef18 | #!/usr/bin/env python3
import argparse
import json
import os
from patrace import (
InputFile,
OutputFile,
Call,
CreateInt32Value,
)
class Arg:
def __init__(self, type, name, value):
self.type = type
self.name = name
self.value = value
def get(self):
arg = self.type(self.value)
if self.name:
arg.mName = self.name
return arg
class Function:
def __init__(self, name, args):
self.name = name
self.args = args
def write(self, output, tid):
call = Call(self.name)
call.thread_id = tid
for arg in self.args[1:]:
call.args.push_back(arg.get())
call.return_value = self.args[0].get()
output.WriteCall(call)
class Remapper:
def __init__(self):
self.num_calls_remapped = 0
def run(self, input, output):
        # Modify header, if we are remapping the default tid
header = json.loads(input.jsonHeader)
default_tid = header['defaultTid']
output.jsonHeader = json.dumps(header)
print('Searching for relevant calls...')
call_lists = {
'eglMakeCurrent': [],
'eglCreateContext': [],
'eglDestroyContext': [],
}
context_calls = []
highest_thread_id = -1
for call in input.Calls():
highest_thread_id = max(call.thread_id, highest_thread_id)
# call_list = call_lists.get(call.name, None)
if call.name in list(call_lists.keys()):
context_calls.append({
'name': call.name,
'tid': call.thread_id,
'params': call.GetArgumentsDict().copy(),
'retval': call.GetReturnValue(),
'number': call.number,
})
# if call_list is not None:
# call_list.append({
# 'call_name': call.name,
# 'tid': call.thread_id,
# 'params': call.GetArgumentsDict(),
# 'retval': call.GetReturnValue(),
# 'number': call.number,
# })
num_threads = highest_thread_id + 1
print("Renumbering context ids...")
        # Sometimes, contexts can get the same pointer values.
        # Hence, the context pointers will not be unique. Therefore,
        # we create a unique, sequential id.
context_sequential_id = 1
# Maps original context id with sequential context id.
contexts_idmap = {0: 0}
for call in context_calls:
if call['name'] == 'eglCreateContext':
contexts_idmap[call['retval']] = context_sequential_id
call['retval'] = context_sequential_id
context_sequential_id += 1
elif call['name'] == 'eglDestroyContext':
old_id = call['params']['ctx']
seq_id = contexts_idmap[old_id]
del contexts_idmap[old_id]
call['params']['ctx'] = seq_id
elif call['name'] == 'eglMakeCurrent':
# Change ctx parameter to our new sequential id
call['params']['ctx'] = contexts_idmap[call['params']['ctx']]
print("Finding relevant context and surfaces...")
make_current_args = [
(call['params']['draw'], call['params']['ctx'])
for call in context_calls
if (
call['name'] == 'eglMakeCurrent'
# Excluding the following test made things work for GunJack
# call['tid'] in [default_tid, 0]
)
]
import pprint
pprint.pprint(make_current_args)
surfaces = []
contexts = []
for draw, ctx in make_current_args:
if draw:
surfaces.append(draw)
if ctx:
contexts.append(ctx)
# Find all relevant shared contexts
shared_contexts = []
for context in contexts:
for context_call in context_calls:
if context_call['name'] != 'eglCreateContext':
continue
if context_call['retval'] == context:
shared_contexts.append(context_call['params']['share_context'])
for share_context in shared_contexts:
contexts.append(share_context)
contexts = set(contexts)
surfaces = set(surfaces)
print("Surfaces {}".format(surfaces))
print("Contexts: {}".format(contexts))
class Thread:
def __init__(self):
self.current_ctx_seq = 0
self.current_ctx_old = 0
self.remap = 0
threads = [Thread() for i in range(num_threads)]
# Used to indicate if inside a relevant "eglMakeCurrent-block"
print("Remap calls...")
contextid_to_use = None
contexts_idmap = {0: 0}
context_sequential_id = 1
active_thread = -1
for call in input.Calls():
current_thread = call.thread_id
thread_switch = False
if active_thread != current_thread:
thread_switch = True
active_thread = current_thread
if call.name == 'eglCreateContext':
oldid = call.GetReturnValue()
contexts_idmap[oldid] = context_sequential_id
if context_sequential_id in contexts:
contextid_to_use = oldid
print("We will map all calls of the context:", contextid_to_use)
self.remap(call, default_tid)
context_sequential_id += 1
elif call.name == 'eglDestroyContext':
ad = call.GetArgumentsDict()
oldid = ad['ctx']
# seqid = contexts_idmap[oldid]
del contexts_idmap[oldid]
elif (
call.name.startswith('eglCreateWindowSurface') or
call.name == 'eglCreatePbufferSurface'
):
if call.GetReturnValue() in surfaces:
self.remap(call, default_tid)
elif call.name == 'eglDestroySurface':
ad = call.GetArgumentsDict()
if ad['surface'] in surfaces:
self.remap(call, default_tid)
elif call.name == 'eglMakeCurrent':
t = threads[call.thread_id]
ad = call.GetArgumentsDict()
t.current_dpy = ad['dpy']
t.current_draw = ad['draw']
t.current_read = ad['read']
t.current_ctx_old = ad['ctx']
t.current_ctx_seq = contexts_idmap[ad['ctx']]
if t.current_ctx_seq in contexts:
# call.SetArgument(3, contextid_to_use)
t.remap = True
if ad['ctx'] == 0:
t.remap = False
if threads[call.thread_id].remap:
# If a context is already active on the default thread
# We need to inject an eglMakeCurrent the first time
if thread_switch and call.name != 'eglMakeCurrent':
t = threads[call.thread_id]
Function(
'eglMakeCurrent', [
Arg(CreateInt32Value, '', 1),
Arg(CreateInt32Value, 'dpy', t.current_dpy),
Arg(CreateInt32Value, 'draw', t.current_draw),
Arg(CreateInt32Value, 'read', t.current_read),
Arg(CreateInt32Value, 'ctx', t.current_ctx_old),
]
).write(output, default_tid)
self.remap(call, default_tid)
output.WriteCall(call)
def remap(self, call, newtid):
call.thread_id = newtid
self.num_calls_remapped += 1
def remap(oldfile, newfile):
remapper = Remapper()
if not os.path.exists(oldfile):
print("File does not exists: {}".format(oldfile))
return
with InputFile(oldfile) as input:
with OutputFile(newfile) as output:
remapper.run(input, output)
return remapper.num_calls_remapped
def main():
parser = argparse.ArgumentParser(description='Automatically remap thread ids in a .pat trace. This should be used if an eglContext is used by more threads than the default thread.')
parser.add_argument('oldfile', help='Path to the .pat trace file')
parser.add_argument('newfile', help='New .pat file to create')
args = parser.parse_args()
num = remap(args.oldfile, args.newfile)
print("Number of calls remapped {num}".format(num=num))
if __name__ == '__main__':
main()
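# Example invocation (file names are placeholders):
#   python <this_script>.py captured.pat remapped.pat
# Calls belonging to the selected EGL contexts/surfaces are rewritten onto the trace's
# default thread id, and the number of remapped calls is printed at the end.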
|
py | 1a30de5d5fd395c60bbbcfe468f5f8bb2ea0c880 | #!/usr/bin/env python3
# md_lj_ll_module.py
#------------------------------------------------------------------------------------------------#
# This software was written in 2016/17 #
# by Michael P. Allen <[email protected]>/<[email protected]> #
# and Dominic J. Tildesley <[email protected]> ("the authors"), #
# to accompany the book "Computer Simulation of Liquids", second edition, 2017 ("the text"), #
# published by Oxford University Press ("the publishers"). #
# #
# LICENCE #
# Creative Commons CC0 Public Domain Dedication. #
# To the extent possible under law, the authors have dedicated all copyright and related #
# and neighboring rights to this software to the PUBLIC domain worldwide. #
# This software is distributed without any warranty. #
# You should have received a copy of the CC0 Public Domain Dedication along with this software. #
# If not, see <http://creativecommons.org/publicdomain/zero/1.0/>. #
# #
# DISCLAIMER #
# The authors and publishers make no warranties about the software, and disclaim liability #
# for all uses of the software, to the fullest extent permitted by applicable law. #
# The authors and publishers do not recommend use of this software for any purpose. #
# It is made freely available, solely to clarify points made in the text. When using or citing #
# the software, you should not imply endorsement by the authors or publishers. #
#------------------------------------------------------------------------------------------------#
"""Force routine for MD simulation, LJ atoms, using neighbour lists."""
fast = True # Change this to replace NumPy force evaluation with slower Python
class PotentialType:
"""A composite variable for interactions."""
def __init__(self, cut, pot, vir, lap, ovr):
self.cut = cut # the potential energy cut (but not shifted) at r_cut
self.pot = pot # the potential energy cut-and-shifted at r_cut
self.vir = vir # the virial
self.lap = lap # the Laplacian
self.ovr = ovr # a flag indicating overlap (i.e. pot too high to use)
def __add__(self, other):
cut = self.cut + other.cut
pot = self.pot + other.pot
vir = self.vir + other.vir
lap = self.lap + other.lap
ovr = self.ovr or other.ovr
return PotentialType(cut,pot,vir,lap,ovr)
def introduction():
"""Prints out introductory statements at start of run."""
print('Lennard-Jones potential')
print('Cut-and-shifted version for dynamics')
print('Cut (but not shifted) version also calculated')
print('Diameter, sigma = 1')
print('Well depth, epsilon = 1')
if fast:
print('Fast NumPy force routine')
else:
print('Slow Python force routine')
print('Uses neighbour lists')
def conclusion():
"""Prints out concluding statements at end of run."""
print('Program ends')
def force ( box, r_cut, r ):
"""Takes in box, cutoff range, and coordinate array, and calculates forces and potentials etc."""
import numpy as np
from itertools import product
import math
# It is assumed that positions are in units where box = 1
# Forces are calculated in units where sigma = 1 and epsilon = 1
# Uses neighbour lists
n = r.shape[0]
# Set up vectors to half the cells in neighbourhood of 3x3x3 cells in cubic lattice
# The cells are chosen so that if (d0,d1,d2) appears, then (-d0,-d1,-d2) does not.
d = np.array ( [ [ 0, 0, 0], [ 1, 0, 0], [ 1, 1, 0], [-1, 1, 0],
[ 0, 1, 0], [ 0, 0, 1], [-1, 0, 1], [ 1, 0, 1], [-1,-1, 1],
[ 0,-1, 1], [ 1,-1, 1], [-1, 1, 1], [ 0, 1, 1], [ 1, 1, 1] ] )
r = r - np.rint(r) # Ensure all atoms in periodic box
sr2_ovr = 1.77 # Overlap threshold (pot > 100)
r_cut_box = r_cut / box
r_cut_box_sq = r_cut_box ** 2
box_sq = box ** 2
# Calculate potential at cutoff
sr2 = 1.0 / r_cut**2 # in sigma=1 units
sr6 = sr2 ** 3
sr12 = sr6 **2
pot_cut = sr12 - sr6 # Without numerical factor 4
# Initialize
f = np.zeros_like(r)
total = PotentialType ( cut=0.0, pot=0.0, vir=0.0, lap=0.0, ovr=False )
# Calculate cell index triplets
sc = math.floor(box/r_cut) # Number of cells along box edge
c = np.floor((r+0.5)*sc).astype(np.int_) # N*3 array of cell indices for all atoms
assert np.all(c>=0) and np.all(c<sc), 'Index error' # Simplistic "guard" against roundoff
if fast:
# Build list of arrays, each array holding positions of atoms in a cell
# At the same time, define a matching set of force arrays in each cell
# i and j number the atoms in each cell; we do not refer explicitly to indices in r
rc, fc = [], [] # Initially empty lists of positions and forces
for ci in product(range(sc),repeat=3): # Triple loop over cells
mask = np.all(c==ci,axis=1) # Mask identifies atoms in this cell
rc.append(r[mask,:]) # Copy atom coordinates into array, add to list
fc.append(np.zeros_like(rc[-1])) # Zero corresponding forces, add to list
for ci1, rci in enumerate(rc): # Loop over i-cells, getting all atoms in each i-cell as an array
ci = np.unravel_index(ci1,(sc,sc,sc)) # Get i-cell triple-indices
if rci.size==0: # Handle empty cell
continue
for dj in d: # Loop over neighbouring j-cells
cj = ci + dj # Compute neighbour j-cell triple-indices
cj1 = np.ravel_multi_index(cj,(sc,sc,sc),mode='wrap') # Convert j-cell to single-index
rcj = rc[cj1] # Get atoms in j-cell as an array
if rcj.size==0: # Handle empty cell
continue
rij = rci[:,np.newaxis,:]-rcj[np.newaxis,:,:] # Separation vectors for all i and j
rij = rij - np.rint(rij) # PBCs in box=1 units
rij_sq = np.sum(rij**2,axis=2) # Squared separations
in_range = rij_sq < r_cut_box_sq # Set flags for within cutoff
if ci1==cj1:
np.fill_diagonal(in_range,False) # Eliminate i==j when i-cell==j-cell
np.fill_diagonal(rij_sq,1.0) # Avoid divide-by-zero below
rij_sq = rij_sq * box_sq # Now in sigma=1 units
rij = rij * box # Now in sigma=1 units
sr2 = np.where ( in_range, 1.0/rij_sq, 0.0 ) # (sigma/rij)**2, only if in range
ovr = sr2 > sr2_ovr # Overlap if too close
sr6 = sr2 ** 3
sr12 = sr6 ** 2
cut = sr12 - sr6 # LJ potential (cut but not shifted)
vir = cut + sr12 # LJ virial
pot = np.where ( in_range, cut-pot_cut, 0.0 ) # LJ potential (cut-and-shifted)
lap = ( 22.0*sr12 - 5.0*sr6 ) * sr2 # LJ Laplacian
fij = vir * sr2 # LJ scalar part of forces
fij = rij * fij[:,:,np.newaxis] # LJ pair forces
if ci1==cj1: # Correct for double-counting ij and ji when i-cell==j-cell
fij = fij / 2
total = total + PotentialType ( cut=np.sum(cut)/2, pot=np.sum(pot)/2,
vir=np.sum(vir)/2, lap=np.sum(lap)/2, ovr=np.any(ovr) )
else:
total = total + PotentialType ( cut=np.sum(cut), pot=np.sum(pot),
vir=np.sum(vir), lap=np.sum(lap), ovr=np.any(ovr) )
fc[ci1][:,:] = fc[ci1][:,:] + np.sum(fij,axis=1) # Aggregate force on atoms in i-cell
fc[cj1][:,:] = fc[cj1][:,:] - np.sum(fij,axis=0) # Aggregate force on atoms in j-cell
# Copy forces from list of cell arrays to main force array
for ci in product(range(sc),repeat=3): # Triple loop over cells
mask = np.all(c==ci,axis=1) # Mask identifies atoms in this cell
ci1 = np.ravel_multi_index(ci,(sc,sc,sc),mode='wrap') # Single-index
f[mask,:] = fc[ci1] # Copy atom forces from correct cell
else:
# Build list of arrays, each array holding indices of atoms in a cell
# ki and kj are atom indices in the r array; i and j number the atoms in each cell
k_array = np.arange(n) # Atom indices 0..N-1
kc = [] # Initially empty list of indices
for ci in product(range(sc),repeat=3): # Triple loop over cells
mask = np.all(c==ci,axis=1) # Mask identifies atoms in this cell
kc.append(k_array[mask]) # Copy atom indices into array, add to list
for ci1, kci in enumerate(kc): # Loop over i-cells, getting atom indices as an array
ci = np.unravel_index(ci1,(sc,sc,sc)) # Get i-cell triple-indices
for dj in d: # Loop over neighbouring j-cells
cj = ci + dj # Compute neighbour j-cell triple-indices
cj1 = np.ravel_multi_index(cj,(sc,sc,sc),mode='wrap') # Convert to single-index
kcj = kc[cj1] # Get indices of atoms in j-cell as an array
for i, ki in enumerate(kci): # Loop over individual atoms in i-cell
j0 = i+1 if cj1==ci1 else 0 # Only look upwards if i-cell==j-cell
if j0 >= kcj.size: # Handles (redundantly) empty j-cell and the case
continue # where j-cell==i-cell and i is last atom
for kj in kcj[j0:]: # Loop over individual atoms in j-cell
rij = r[ki,:]-r[kj,:] # Separation vector
rij = rij - np.rint(rij) # Periodic boundary conditions in box=1 units
rij_sq = np.sum(rij**2) # Squared separation
if rij_sq < r_cut_box_sq: # Check within cutoff
rij_sq = rij_sq * box_sq # Now in sigma=1 units
rij = rij * box # Now in sigma=1 units
sr2 = 1.0 / rij_sq # (sigma/rij)**2
ovr = sr2 > sr2_ovr # Overlap if too close
sr6 = sr2 ** 3
sr12 = sr6 ** 2
cut = sr12 - sr6 # LJ potential (cut but not shifted)
vir = cut + sr12 # LJ virial
pot = cut - pot_cut # LJ potential (cut-and-shifted)
lap = ( 22.0*sr12 - 5.0*sr6 ) * sr2 # LJ Laplacian
fij = rij * vir * sr2 # LJ forces
total = total + PotentialType ( cut=cut, pot=pot, vir=vir, lap=lap, ovr=ovr )
f[ki,:] = f[ki,:] + fij
f[kj,:] = f[kj,:] - fij
# Multiply results by numerical factors
f = f * 24.0 # 24*epsilon
total.cut = total.cut * 4.0 # 4*epsilon
total.pot = total.pot * 4.0 # 4*epsilon
total.vir = total.vir * 24.0 / 3.0 # 24*epsilon and divide virial by 3
total.lap = total.lap * 24.0 * 2.0 # 24*epsilon and factor 2 for ij and ji
return total, f
def hessian ( box, r_cut, r, f ):
"""Calculates Hessian function (for 1/N correction to config temp)."""
import numpy as np
from itertools import product
import math
# This routine is only needed in a constant-energy ensemble
# It is assumed that positions are in units where box = 1
# but the result is given in units where sigma = 1 and epsilon = 1
# It is assumed that forces have already been calculated in array f
# Uses neighbour lists
n = r.shape[0]
assert np.all ( r.shape==f.shape ), 'Dimension mismatch in hessian'
# Set up vectors to half the cells in neighbourhood of 3x3x3 cells in cubic lattice
# The cells are chosen so that if (d1,d2,d3) appears, then (-d1,-d2,-d3) does not.
d = np.array ( [ [ 0, 0, 0], [ 1, 0, 0], [ 1, 1, 0], [-1, 1, 0],
[ 0, 1, 0], [ 0, 0, 1], [-1, 0, 1], [ 1, 0, 1], [-1,-1, 1],
[ 0,-1, 1], [ 1,-1, 1], [-1, 1, 1], [ 0, 1, 1], [ 1, 1, 1] ] )
r = r - np.rint(r) # Ensure all atoms in periodic box
r_cut_box = r_cut / box
r_cut_box_sq = r_cut_box ** 2
box_sq = box ** 2
hes = 0.0
# Calculate cell index triplets
sc = math.floor(box/r_cut) # Number of cells along box edge
c = np.floor((r+0.5)*sc).astype(np.int_) # N*3 array of cell indices for all atoms
assert np.all(c>=0) and np.all(c<sc), 'Index error' # Simplistic "guard" against roundoff
if fast:
# Build list of arrays, each array holding positions of atoms in a cell
# At the same time, build a matching set of force arrays in each cell
# i and j number the atoms in each cell; we do not refer explicitly to indices in r
rc, fc = [], [] # Initially empty lists of positions and forces
for ci in product(range(sc),repeat=3): # Triple loop over cells
mask = np.all(c==ci,axis=1) # Mask identifies atoms in this cell
rc.append(r[mask,:]) # Copy atom coordinates into array, add to list
fc.append(f[mask,:]) # Copy corresponding forces, add to list
for ci1, rci in enumerate(rc): # Loop over i-cells, getting all atoms in each i-cell as an array
ci = np.unravel_index(ci1,(sc,sc,sc)) # Get i-cell triple-indices
fci = fc[ci1] # Get i-cell atom forces
if rci.size==0: # Handle empty cell
continue
for dj in d: # Loop over neighbouring j-cells
cj = ci + dj # Compute neighbour j-cell triple-indices
cj1 = np.ravel_multi_index(cj,(sc,sc,sc),mode='wrap') # Convert j-cell to single-index
rcj = rc[cj1] # Get atoms in j-cell as an array
fcj = fc[cj1] # Get j-cell atom forces
if rcj.size==0: # Handle empty cell
continue
rij = rci[:,np.newaxis,:]-rcj[np.newaxis,:,:] # Separation vectors for all i and j
rij = rij - np.rint(rij) # PBCs in box=1 units
rij_sq = np.sum(rij**2,axis=2) # Squared separations
in_range = rij_sq < r_cut_box_sq # Set flags for within cutoff
if ci1==cj1:
np.fill_diagonal(in_range,False) # Eliminate i=j when i-cell is j-cell
np.fill_diagonal(rij_sq,1.0) # Avoid divide-by-zero below
rij_sq = rij_sq * box_sq # Now in sigma=1 units
rij = rij * box # Now in sigma=1 units
fij = fci[:,np.newaxis,:]-fcj[np.newaxis,:,:] # Differences in forces for all i and j
ff = np.sum(fij*fij,axis=2)
rf = np.sum(rij*fij,axis=2)
sr2 = np.where ( in_range, 1.0 / rij_sq, 0.0 ) # Only where in range
sr6 = sr2 ** 3
sr8 = sr6 * sr2
sr10 = sr8 * sr2
v1 = 24.0 * ( 1.0 - 2.0 * sr6 ) * sr8
v2 = 96.0 * ( 7.0 * sr6 - 2.0 ) * sr10
if ci1==cj1: # Correct for double-counting ij and ji
hes = hes + np.sum(v1 * ff)/2 + np.sum(v2 * rf**2)/2
else:
hes = hes + np.sum(v1 * ff) + np.sum(v2 * rf**2)
else:
# Build list of arrays, each array holding indices of atoms in a cell
# ki and kj are atom indices in the r array; i and j number the atoms in each cell
k_array = np.arange(n) # Atom indices 0..N-1
kc = [] # Initially empty list of indices covering each cell
for ci in product(range(sc),repeat=3): # Triple loop over cells
mask = np.all(c==ci,axis=1) # Mask identifies atoms in this cell
kc.append(k_array[mask]) # Copy atom indices into array, add to list
for ci1, kci in enumerate(kc): # Loop over i-cells, getting atom indices as an array
ci = np.unravel_index(ci1,(sc,sc,sc)) # Get i-cell triple-indices
for dj in d: # Loop over neighbouring j-cells
cj = ci + dj # Compute neighbour j-cell triple-indices
cj1 = np.ravel_multi_index(cj,(sc,sc,sc),mode='wrap') # Convert to single-index
kcj = kc[cj1] # Get indices of atoms in j-cell as an array
for i, ki in enumerate(kci): # Loop over individual atoms in i-cell
j0 = i+1 if cj1==ci1 else 0 # Only look upwards if i-cell==j-cell
if j0 >= kcj.size: # Handles (redundantly) empty j-cell and the case
continue # where j-cell==i-cell and i is last atom
for kj in kcj[j0:]: # Loop over individual atoms in j-cell
rij = r[ki,:]-r[kj,:] # Separation vector
rij = rij - np.rint(rij) # Periodic boundary conditions in box=1 units
rij_sq = np.sum(rij**2) # Squared separation
if rij_sq < r_cut_box_sq:
rij_sq = rij_sq * box_sq # Now in sigma=1 units
rij = rij * box # Now in sigma=1 units
fij = f[ki,:] - f[kj,:] # Difference in forces
ff = np.dot(fij,fij)
rf = np.dot(rij,fij)
sr2 = 1.0 / rij_sq
sr6 = sr2 ** 3
sr8 = sr6 * sr2
sr10 = sr8 * sr2
v1 = 24.0 * ( 1.0 - 2.0 * sr6 ) * sr8
v2 = 96.0 * ( 7.0 * sr6 - 2.0 ) * sr10
hes = hes + v1 * ff + v2 * rf**2
return hes
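# Minimal usage sketch (illustrative parameter values; never executed on import).
# Positions are supplied in box=1 units, as assumed by force() and hessian() above,
# while box and r_cut are in sigma=1 units; box/r_cut >= 3 keeps the cell decomposition valid.
def _demo_force_call():
    import numpy as np
    n, box, r_cut = 64, 9.0, 2.5              # number of atoms, box length, cutoff
    r = np.random.rand(n, 3) - 0.5            # random positions in box=1 units
    total, f = force(box, r_cut, r)           # cut-and-shifted LJ potential, virial, forces
    return total.pot, total.vir, f.shape      # e.g. (pot, vir, (64, 3))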
|
py | 1a30de7e64cb793b53cfa5d7f193eb6e863803db | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
basestring # Python 3
except NameError:
basestring = str # Python 2
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import NotSupported
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.errors import InvalidNonce
from ccxt.base.decimal_to_precision import TICK_SIZE
class gemini(Exchange):
def describe(self):
return self.deep_extend(super(gemini, self).describe(), {
'id': 'gemini',
'name': 'Gemini',
'countries': ['US'],
            # 600 requests a minute = 10 requests per second => 1000ms / 10 = 100ms between requests (private endpoints)
            # 120 requests a minute = 2 requests per second => (1000ms / rateLimit) / 2 = 5 (public endpoints)
'rateLimit': 100,
'version': 'v1',
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelOrder': True,
'createDepositAddress': True,
'createMarketOrder': None,
'createOrder': True,
'createReduceOnlyOrder': False,
'fetchBalance': True,
'fetchBidsAsks': None,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchClosedOrders': None,
'fetchDepositAddress': None, # TODO
'fetchDepositAddressesByNetwork': True,
'fetchDeposits': None,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchIsolatedPositions': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': None,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTrades': True,
'fetchTransactions': True,
'fetchWithdrawals': None,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'withdraw': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27816857-ce7be644-6096-11e7-82d6-3c257263229c.jpg',
'api': {
'public': 'https://api.gemini.com',
'private': 'https://api.gemini.com',
'web': 'https://docs.gemini.com',
},
'www': 'https://gemini.com/',
'doc': [
'https://docs.gemini.com/rest-api',
'https://docs.sandbox.gemini.com',
],
'test': {
'public': 'https://api.sandbox.gemini.com',
'private': 'https://api.sandbox.gemini.com',
                    # use the true doc instead of the sandbox doc
# since they differ in parsing
# https://github.com/ccxt/ccxt/issues/7874
# https://github.com/ccxt/ccxt/issues/7894
'web': 'https://docs.gemini.com',
},
'fees': [
'https://gemini.com/api-fee-schedule',
'https://gemini.com/trading-fees',
'https://gemini.com/transfer-fees',
],
},
'api': {
'web': {
'get': [
'rest-api',
],
},
'public': {
'get': {
'v1/symbols': 5,
'v1/symbols/details/{symbol}': 5,
'v1/pubticker/{symbol}': 5,
'v2/ticker/{symbol}': 5,
'v2/candles/{symbol}/{timeframe}': 5,
'v1/trades/{symbol}': 5,
'v1/auction/{symbol}': 5,
'v1/auction/{symbol}/history': 5,
'v1/pricefeed': 5,
'v1/book/{symbol}': 5,
'v1/earn/rates': 5,
},
},
'private': {
'post': {
'v1/order/new': 1,
'v1/order/cancel': 1,
'v1/wrap/{symbol}': 1,
'v1/order/cancel/session': 1,
'v1/order/cancel/all': 1,
'v1/order/status': 1,
'v1/orders': 1,
'v1/mytrades': 1,
'v1/notionalvolume': 1,
'v1/tradevolume': 1,
'v1/clearing/new': 1,
'v1/clearing/status': 1,
'v1/clearing/cancel': 1,
'v1/clearing/confirm': 1,
'v1/balances': 1,
'v1/notionalbalances/{currency}': 1,
'v1/transfers': 1,
'v1/addresses/{network}': 1,
'v1/deposit/{network}/newAddress': 1,
'v1/deposit/{currency}/newAddress': 1,
'v1/withdraw/{currency}': 1,
'v1/account/transfer/{currency}': 1,
'v1/payments/addbank': 1,
'v1/payments/methods': 1,
'v1/payments/sen/withdraw': 1,
'v1/balances/earn': 1,
'v1/earn/interest': 1,
'v1/approvedAddresses/{network}/request': 1,
'v1/approvedAddresses/account/{network}': 1,
'v1/approvedAddresses/{network}/remove': 1,
'v1/account': 1,
'v1/account/create': 1,
'v1/account/list': 1,
'v1/heartbeat': 1,
},
},
},
'precisionMode': TICK_SIZE,
'fees': {
'trading': {
'taker': 0.0035,
'maker': 0.001,
},
},
'httpExceptions': {
'400': BadRequest, # Auction not open or paused, ineligible timing, market not open, or the request was malformed, in the case of a private API request, missing or malformed Gemini private API authentication headers
                '403': PermissionDenied, # The API key is missing the role necessary to access this private API endpoint
'404': OrderNotFound, # Unknown API entry point or Order not found
'406': InsufficientFunds, # Insufficient Funds
'429': RateLimitExceeded, # Rate Limiting was applied
'500': ExchangeError, # The server encountered an error
'502': ExchangeNotAvailable, # Technical issues are preventing the request from being satisfied
'503': OnMaintenance, # The exchange is down for maintenance
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1hr',
'6h': '6hr',
'1d': '1day',
},
'exceptions': {
'exact': {
                    'AuctionNotOpen': BadRequest, # Failed to place an auction-only order because there is no current auction open for this symbol
'ClientOrderIdTooLong': BadRequest, # The Client Order ID must be under 100 characters
'ClientOrderIdMustBeString': BadRequest, # The Client Order ID must be a string
'ConflictingOptions': BadRequest, # New orders using a combination of order execution options are not supported
'EndpointMismatch': BadRequest, # The request was submitted to an endpoint different than the one in the payload
'EndpointNotFound': BadRequest, # No endpoint was specified
                    'IneligibleTiming': BadRequest, # Failed to place an auction order for the current auction on this symbol because the timing is not eligible; new orders may only be placed before the auction begins.
'InsufficientFunds': InsufficientFunds, # The order was rejected because of insufficient funds
'InvalidJson': BadRequest, # The JSON provided is invalid
'InvalidNonce': InvalidNonce, # The nonce was not greater than the previously used nonce, or was not present
'InvalidOrderType': InvalidOrder, # An unknown order type was provided
'InvalidPrice': InvalidOrder, # For new orders, the price was invalid
'InvalidQuantity': InvalidOrder, # A negative or otherwise invalid quantity was specified
'InvalidSide': InvalidOrder, # For new orders, and invalid side was specified
'InvalidSignature': AuthenticationError, # The signature did not match the expected signature
'InvalidSymbol': BadRequest, # An invalid symbol was specified
'InvalidTimestampInPayload': BadRequest, # The JSON payload contained a timestamp parameter with an unsupported value.
'Maintenance': OnMaintenance, # The system is down for maintenance
'MarketNotOpen': InvalidOrder, # The order was rejected because the market is not accepting new orders
'MissingApikeyHeader': AuthenticationError, # The X-GEMINI-APIKEY header was missing
'MissingOrderField': InvalidOrder, # A required order_id field was not specified
                    'MissingRole': AuthenticationError, # The API key used to access this endpoint does not have the required role assigned to it
'MissingPayloadHeader': AuthenticationError, # The X-GEMINI-PAYLOAD header was missing
'MissingSignatureHeader': AuthenticationError, # The X-GEMINI-SIGNATURE header was missing
'NoSSL': AuthenticationError, # You must use HTTPS to access the API
'OptionsMustBeArray': BadRequest, # The options parameter must be an array.
'OrderNotFound': OrderNotFound, # The order specified was not found
'RateLimit': RateLimitExceeded, # Requests were made too frequently. See Rate Limits below.
'System': ExchangeError, # We are experiencing technical issues
'UnsupportedOption': BadRequest, # This order execution option is not supported.
},
'broad': {
'The Gemini Exchange is currently undergoing maintenance.': OnMaintenance, # The Gemini Exchange is currently undergoing maintenance. Please check https://status.gemini.com/ for more information.
'We are investigating technical issues with the Gemini Exchange.': ExchangeNotAvailable, # We are investigating technical issues with the Gemini Exchange. Please check https://status.gemini.com/ for more information.
},
},
'options': {
'fetchMarketsMethod': 'fetch_markets_from_web',
'fetchTickerMethod': 'fetchTickerV1', # fetchTickerV1, fetchTickerV2, fetchTickerV1AndV2
'networkIds': {
'bitcoin': 'BTC',
'ethereum': 'ERC20',
'bitcoincash': 'BCH',
'litecoin': 'LTC',
'zcash': 'ZEC',
'filecoin': 'FIL',
'dogecoin': 'DOGE',
'tezos': 'XTZ',
},
'networks': {
'BTC': 'bitcoin',
'ERC20': 'ethereum',
'BCH': 'bitcoincash',
'LTC': 'litecoin',
'ZEC': 'zcash',
'FIL': 'filecoin',
'DOGE': 'dogecoin',
'XTZ': 'tezos',
},
},
})
def fetch_markets(self, params={}):
method = self.safe_value(self.options, 'fetchMarketsMethod', 'fetch_markets_from_api')
return getattr(self, method)(params)
def fetch_markets_from_web(self, params={}):
response = self.webGetRestApi(params)
sections = response.split('<h1 id="symbols-and-minimums">Symbols and minimums</h1>')
numSections = len(sections)
error = self.id + ' the ' + self.name + ' API doc HTML markup has changed, breaking the parser of order limits and precision info for ' + self.name + ' markets.'
if numSections != 2:
raise NotSupported(error)
tables = sections[1].split('tbody>')
numTables = len(tables)
if numTables < 2:
raise NotSupported(error)
rows = tables[1].split("\n<tr>\n") # eslint-disable-line quotes
numRows = len(rows)
if numRows < 2:
raise NotSupported(error)
result = []
# skip the first element(empty string)
for i in range(1, numRows):
row = rows[i]
cells = row.split("</td>\n") # eslint-disable-line quotes
numCells = len(cells)
if numCells < 5:
raise NotSupported(error)
# [
# '<td>btcusd', # currency
# '<td>0.00001 BTC(1e-5)', # min order size
# '<td>0.00000001 BTC(1e-8)', # tick size
# '<td>0.01 USD', # quote currency price increment
# '</tr>'
# ]
marketId = cells[0].replace('<td>', '')
# base = self.safe_currency_code(baseId)
minAmountString = cells[1].replace('<td>', '')
minAmountParts = minAmountString.split(' ')
minAmount = self.safe_number(minAmountParts, 0)
amountPrecisionString = cells[2].replace('<td>', '')
amountPrecisionParts = amountPrecisionString.split(' ')
idLength = len(marketId) - 0
startingIndex = idLength - 3
quoteId = marketId[startingIndex:idLength]
quote = self.safe_currency_code(quoteId)
pricePrecisionString = cells[3].replace('<td>', '')
pricePrecisionParts = pricePrecisionString.split(' ')
baseId = marketId.replace(quoteId, '')
base = self.safe_currency_code(baseId)
result.append({
'id': marketId,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': None,
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': self.safe_number(amountPrecisionParts, 0),
'price': self.safe_number(pricePrecisionParts, 0),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': minAmount,
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
},
'info': row,
})
return result
def fetch_markets_from_api(self, params={}):
response = self.publicGetV1Symbols(params)
result = []
for i in range(0, len(response)):
marketId = response[i]
market = marketId
idLength = len(marketId) - 0
baseId = marketId[0:idLength - 3]
quoteId = marketId[idLength - 3:idLength]
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
result.append({
'id': marketId,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': None,
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'price': None,
'amount': None,
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': None,
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
},
'info': market,
})
return result
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
request = {
'symbol': self.market_id(symbol),
}
if limit is not None:
request['limit_bids'] = limit
request['limit_asks'] = limit
response = self.publicGetV1BookSymbol(self.extend(request, params))
return self.parse_order_book(response, symbol, None, 'bids', 'asks', 'price', 'amount')
def fetch_ticker_v1(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.publicGetV1PubtickerSymbol(self.extend(request, params))
#
# {
# "bid":"9117.95",
# "ask":"9117.96",
# "volume":{
# "BTC":"1615.46861748",
# "USD":"14727307.57545006088",
# "timestamp":1594982700000
# },
# "last":"9115.23"
# }
#
return self.parse_ticker(response, market)
def fetch_ticker_v2(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.publicGetV2TickerSymbol(self.extend(request, params))
#
# {
# "symbol":"BTCUSD",
# "open":"9080.58",
# "high":"9184.53",
# "low":"9063.56",
# "close":"9116.08",
# # Hourly prices descending for past 24 hours
# "changes":["9117.33","9105.69","9106.23","9120.35","9098.57","9114.53","9113.55","9128.01","9113.63","9133.49","9133.49","9137.75","9126.73","9103.91","9119.33","9123.04","9124.44","9117.57","9114.22","9102.33","9076.67","9074.72","9074.97","9092.05"],
# "bid":"9115.86",
# "ask":"9115.87"
# }
#
return self.parse_ticker(response, market)
def fetch_ticker_v1_and_v2(self, symbol, params={}):
tickerA = self.fetch_ticker_v1(symbol, params)
tickerB = self.fetch_ticker_v2(symbol, params)
return self.deep_extend(tickerA, {
'open': tickerB['open'],
'high': tickerB['high'],
'low': tickerB['low'],
'change': tickerB['change'],
'percentage': tickerB['percentage'],
'average': tickerB['average'],
'info': tickerB['info'],
})
def fetch_ticker(self, symbol, params={}):
method = self.safe_value(self.options, 'fetchTickerMethod', 'fetchTickerV1')
return getattr(self, method)(symbol, params)
def parse_ticker(self, ticker, market=None):
#
# fetchTickers
#
# {
# "pair": "BATUSD",
# "price": "0.20687",
# "percentChange24h": "0.0146"
# }
#
# fetchTickerV1
#
# {
# "bid":"9117.95",
# "ask":"9117.96",
# "volume":{
# "BTC":"1615.46861748",
# "USD":"14727307.57545006088",
# "timestamp":1594982700000
# },
# "last":"9115.23"
# }
#
# fetchTickerV2
#
# {
# "symbol":"BTCUSD",
# "open":"9080.58",
# "high":"9184.53",
# "low":"9063.56",
# "close":"9116.08",
# # Hourly prices descending for past 24 hours
# "changes":["9117.33","9105.69","9106.23","9120.35","9098.57","9114.53","9113.55","9128.01","9113.63","9133.49","9133.49","9137.75","9126.73","9103.91","9119.33","9123.04","9124.44","9117.57","9114.22","9102.33","9076.67","9074.72","9074.97","9092.05"],
# "bid":"9115.86",
# "ask":"9115.87"
# }
#
volume = self.safe_value(ticker, 'volume', {})
timestamp = self.safe_integer(volume, 'timestamp')
symbol = None
marketId = self.safe_string_lower(ticker, 'pair')
baseId = None
quoteId = None
base = None
quote = None
if marketId is not None:
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
else:
idLength = len(marketId) - 0
if idLength == 7:
baseId = marketId[0:4]
quoteId = marketId[4:7]
else:
baseId = marketId[0:3]
quoteId = marketId[3:6]
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
if (symbol is None) and (market is not None):
symbol = market['symbol']
baseId = market['baseId'].upper()
quoteId = market['quoteId'].upper()
base = market['base']
quote = market['quote']
price = self.safe_string(ticker, 'price')
last = self.safe_string_2(ticker, 'last', 'close', price)
percentage = self.safe_string(ticker, 'percentChange24h')
open = self.safe_string(ticker, 'open')
baseVolume = self.safe_string(volume, baseId)
quoteVolume = self.safe_string(volume, quoteId)
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': None,
'ask': self.safe_string(ticker, 'ask'),
'askVolume': None,
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None, # previous day close
'change': None,
'percentage': percentage,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market, False)
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
response = self.publicGetV1Pricefeed(params)
#
# [
# {
# "pair": "BATUSD",
# "price": "0.20687",
# "percentChange24h": "0.0146"
# },
# {
# "pair": "LINKETH",
# "price": "0.018",
# "percentChange24h": "0.0000"
# },
# ]
#
return self.parse_tickers(response, symbols)
def parse_trade(self, trade, market=None):
#
# public fetchTrades
#
# {
# "timestamp":1601617445,
# "timestampms":1601617445144,
# "tid":14122489752,
# "price":"0.46476",
# "amount":"28.407209",
# "exchange":"gemini",
# "type":"buy"
# }
#
# private fetchTrades
#
# {
# "price":"3900.00",
# "amount":"0.00996",
# "timestamp":1638891173,
# "timestampms":1638891173518,
# "type":"Sell",
# "aggressor":false,
# "fee_currency":"EUR",
# "fee_amount":"0.00",
# "tid":73621746145,
# "order_id":"73621746059",
# "exchange":"gemini",
# "is_auction_fill":false,
# "is_clearing_fill":false,
# "symbol":"ETHEUR",
# "client_order_id":"1638891171610"
# }
#
timestamp = self.safe_integer(trade, 'timestampms')
id = self.safe_string(trade, 'tid')
orderId = self.safe_string(trade, 'order_id')
feeCurrencyId = self.safe_string(trade, 'fee_currency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': self.safe_string(trade, 'fee_amount'),
'currency': feeCurrencyCode,
}
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
side = self.safe_string_lower(trade, 'type')
symbol = self.safe_symbol(None, market)
return self.safe_trade({
'id': id,
'order': orderId,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': None,
'side': side,
'takerOrMaker': None,
'price': priceString,
'cost': None,
'amount': amountString,
'fee': fee,
}, market)
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.publicGetV1TradesSymbol(self.extend(request, params))
#
# [
# {
# "timestamp":1601617445,
# "timestampms":1601617445144,
# "tid":14122489752,
# "price":"0.46476",
# "amount":"28.407209",
# "exchange":"gemini",
# "type":"buy"
# },
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_balance(self, response):
result = {'info': response}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['total'] = self.safe_string(balance, 'amount')
result[code] = account
return self.safe_balance(result)
def fetch_balance(self, params={}):
self.load_markets()
response = self.privatePostV1Balances(params)
return self.parse_balance(response)
def parse_order(self, order, market=None):
timestamp = self.safe_integer(order, 'timestampms')
amount = self.safe_string(order, 'original_amount')
remaining = self.safe_string(order, 'remaining_amount')
filled = self.safe_string(order, 'executed_amount')
status = 'closed'
if order['is_live']:
status = 'open'
if order['is_cancelled']:
status = 'canceled'
price = self.safe_string(order, 'price')
average = self.safe_string(order, 'avg_execution_price')
type = self.safe_string(order, 'type')
if type == 'exchange limit':
type = 'limit'
elif type == 'market buy' or type == 'market sell':
type = 'market'
else:
type = order['type']
fee = None
marketId = self.safe_string(order, 'symbol')
symbol = self.safe_symbol(marketId, market)
id = self.safe_string(order, 'order_id')
side = self.safe_string_lower(order, 'side')
clientOrderId = self.safe_string(order, 'client_order_id')
return self.safe_order({
'id': id,
'clientOrderId': clientOrderId,
'info': order,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': type,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': None,
'average': average,
'cost': None,
'amount': amount,
'filled': filled,
'remaining': remaining,
'fee': fee,
'trades': None,
}, market)
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
request = {
'order_id': id,
}
response = self.privatePostV1OrderStatus(self.extend(request, params))
return self.parse_order(response)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
response = self.privatePostV1Orders(params)
market = None
if symbol is not None:
market = self.market(symbol) # throws on non-existent symbol
return self.parse_orders(response, market, since, limit)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
if type == 'market':
raise ExchangeError(self.id + ' allows limit orders only')
nonce = self.nonce()
amountString = self.amount_to_precision(symbol, amount)
priceString = self.price_to_precision(symbol, price)
request = {
'client_order_id': str(nonce),
'symbol': self.market_id(symbol),
'amount': amountString,
'price': priceString,
'side': side,
'type': 'exchange limit', # gemini allows limit orders only
}
response = self.privatePostV1OrderNew(self.extend(request, params))
return {
'info': response,
'id': response['order_id'],
}
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
request = {
'order_id': id,
}
return self.privatePostV1OrderCancel(self.extend(request, params))
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['limit_trades'] = limit
if since is not None:
request['timestamp'] = int(since / 1000)
response = self.privatePostV1Mytrades(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def withdraw(self, code, amount, address, tag=None, params={}):
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
'amount': amount,
'address': address,
}
response = self.privatePostV1WithdrawCurrency(self.extend(request, params))
return {
'info': response,
'id': self.safe_string(response, 'txHash'),
}
def nonce(self):
return self.milliseconds()
def fetch_transactions(self, code=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
if limit is not None:
request['limit_transfers'] = limit
if since is not None:
request['timestamp'] = since
response = self.privatePostV1Transfers(self.extend(request, params))
return self.parse_transactions(response)
def parse_transaction(self, transaction, currency=None):
timestamp = self.safe_integer(transaction, 'timestampms')
currencyId = self.safe_string(transaction, 'currency')
code = self.safe_currency_code(currencyId, currency)
address = self.safe_string(transaction, 'destination')
type = self.safe_string_lower(transaction, 'type')
status = 'pending'
# When deposits show as Advanced or Complete they are available for trading.
if transaction['status']:
status = 'ok'
fee = None
feeAmount = self.safe_number(transaction, 'feeAmount')
if feeAmount is not None:
fee = {
'cost': feeAmount,
'currency': code,
}
return {
'info': transaction,
'id': self.safe_string(transaction, 'eid'),
'txid': self.safe_string(transaction, 'txHash'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'network': None,
'address': address,
'addressTo': None,
'addressFrom': None,
'tag': None, # or is it defined?
'tagTo': None,
'tagFrom': None,
'type': type, # direction of the transaction,('deposit' | 'withdraw')
'amount': self.safe_number(transaction, 'amount'),
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
def parse_deposit_address(self, depositAddress, currency=None):
#
# {
# address: "0xed6494Fe7c1E56d1bd6136e89268C51E32d9708B",
# timestamp: "1636813923098",
        # addressVersion: "eV1"
# }
#
address = self.safe_string(depositAddress, 'address')
return {
'currency': currency,
'network': None,
'address': address,
'tag': None,
'info': depositAddress,
}
def fetch_deposit_addresses_by_network(self, code, params={}):
self.load_markets()
network = self.safe_string(params, 'network')
if network is None:
            raise ArgumentsRequired(self.id + ' fetchDepositAddressesByNetwork() requires a network parameter')
params = self.omit(params, 'network')
networks = self.safe_value(self.options, 'networks', {})
networkId = self.safe_string(networks, network, network)
networkIds = self.safe_value(self.options, 'networkIds', {})
networkCode = self.safe_string(networkIds, networkId, network)
request = {
'network': networkId,
}
response = self.privatePostV1AddressesNetwork(self.extend(request, params))
results = self.parse_deposit_addresses(response, [code], False, {'network': networkCode, 'currency': code})
return self.group_by(results, 'network')
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if api == 'private':
self.check_required_credentials()
apiKey = self.apiKey
if apiKey.find('account') < 0:
                raise AuthenticationError(self.id + ' sign() requires an account key; master keys are not supported')
nonce = self.nonce()
request = self.extend({
'request': url,
'nonce': nonce,
}, query)
payload = self.json(request)
payload = self.string_to_base64(payload)
signature = self.hmac(payload, self.encode(self.secret), hashlib.sha384)
headers = {
'Content-Type': 'text/plain',
'X-GEMINI-APIKEY': self.apiKey,
'X-GEMINI-PAYLOAD': self.decode(payload),
'X-GEMINI-SIGNATURE': signature,
}
else:
if query:
url += '?' + self.urlencode(query)
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
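    # Standalone sketch of the signing scheme implemented above, assuming only the standard
    # library: the payload is the base64-encoded JSON request (including the nonce) and the
    # signature is its hex HMAC-SHA384 under the API secret.
    #
    #     import base64, hashlib, hmac, json
    #     payload = base64.b64encode(json.dumps({'request': '/v1/balances', 'nonce': 1}).encode())
    #     signature = hmac.new(b'api-secret', payload, hashlib.sha384).hexdigest()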
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
if isinstance(body, basestring):
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], body, feedback)
return # fallback to default error handler
#
# {
# "result": "error",
# "reason": "BadNonce",
# "message": "Out-of-sequence nonce <1234> precedes previously used nonce <2345>"
# }
#
result = self.safe_string(response, 'result')
if result == 'error':
reason = self.safe_string(response, 'reason')
message = self.safe_string(response, 'message')
feedback = self.id + ' ' + message
self.throw_exactly_matched_exception(self.exceptions['exact'], reason, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
raise ExchangeError(feedback) # unknown message
def create_deposit_address(self, code, params={}):
self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
}
response = self.privatePostV1DepositCurrencyNewAddress(self.extend(request, params))
address = self.safe_string(response, 'address')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': None,
'info': response,
}
def fetch_ohlcv(self, symbol, timeframe='5m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'timeframe': self.timeframes[timeframe],
'symbol': market['id'],
}
response = self.publicGetV2CandlesSymbolTimeframe(self.extend(request, params))
#
# [
# [1591515000000,0.02509,0.02509,0.02509,0.02509,0],
# [1591514700000,0.02503,0.02509,0.02503,0.02509,44.6405],
# [1591514400000,0.02503,0.02503,0.02503,0.02503,0],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
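# Usage sketch (never invoked automatically; the calls below hit the live REST API and the
# credentials are placeholders -- private endpoints require an account-scoped key):
def _demo_gemini_usage():
    exchange = gemini({'apiKey': 'account-xxxxxxxx', 'secret': 'xxxxxxxx'})
    markets = exchange.fetch_markets()            # public endpoint
    ticker = exchange.fetch_ticker('BTC/USD')     # public endpoint
    balance = exchange.fetch_balance()            # private endpoint, needs valid credentials
    return markets, ticker, balance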
|
py | 1a30de7f68d314dd049d90fb37b192f967c53371 | import sympy as sym
# Computing with Dirichlet conditions: -u''=2 and sines
x, L = sym.symbols('x L')
e_Galerkin = x*(L-x) - 8*L**2*sym.pi**(-3)*sym.sin(sym.pi*x/L)
e_colloc = x*(L-x) - 2*L**2*sym.pi**(-2)*sym.sin(sym.pi*x/L)
# Verify max error for x=L/2
dedx_Galerkin = sym.diff(e_Galerkin, x)
print((dedx_Galerkin.subs(x, L/2)))
dedx_colloc = sym.diff(e_colloc, x)
print((dedx_colloc.subs(x, L/2)))
# Compute max error: x=L/2, evaluate numerical, and simplify
print(('Max error Galerkin/least.sq.:', \
sym.simplify(e_Galerkin.subs(x, L/2).evalf(n=3))))
print(('Max error colloc.:', \
sym.simplify(e_colloc.subs(x, L/2).evalf(n=3))))
import sys
#sys.exit(0)
# Computing with Neumann and Dirichlet conditions: -u''=2
x, C, D = sym.symbols('x C D')
i, j = sym.symbols('i j', integer=True)
integrand = (i+1)*(j+1)*(1-x)**(i+j)
A_ij = sym.integrate(integrand, (x, 0, 1))
A_ij = sym.simplify(A_ij)
print(A_ij)
psi_i = (1-x)**(i+1)
integrand = 2*psi_i - D*(i+1)*(1-x)**i
b_i = sym.integrate(integrand, (x, 0, 1)) - C*psi_i.subs(x, 0)
b_i = sym.factor(sym.simplify(b_i))
print(b_i)
print((sym.expand(2 - (2+i)*(D+C))))
# Solving model2 problem with f(x) and fe1D.py
from u_xx_f_sympy import model2, x, C, D, L
m = 2
u = model2(x**m, L, C, D)
print(u)
#u_exact = lambda x: D + C*(x-L) + (1./6)*(L**3 - x**3)
u_exact = sym.lambdify([x, C, D, L], u)
import numpy as np
from fe1D import finite_element1D_naive, mesh_uniform
# Override C, D and L with numeric values
C = 5
D = 2
L = 4
d = 1
vertices, cells, dof_map = mesh_uniform(
N_e=2, d=d, Omega=[0,L], symbolic=False)
vertices[1] = 3
essbc = {}
essbc[dof_map[-1][-1]] = D
c, A, b, timing = finite_element1D_naive(
vertices, cells, dof_map,
essbc,
ilhs=lambda e, phi, r, s, X, x, h:
phi[1][r](X, h)*phi[1][s](X, h),
irhs=lambda e, phi, r, X, x, h:
x**m*phi[0][r](X),
blhs=lambda e, phi, r, s, X, x, h: 0,
brhs=lambda e, phi, r, X, x, h:
-C*phi[0][r](-1) if e == 0 else 0,
intrule='GaussLegendre',
verbose=False,
)
# Visualize
from fe1D import u_glob
x, u, nodes = u_glob(c, cells, vertices, dof_map)
u_e = u_exact(x, C, D, L)
print((u_exact(nodes, C, D, L) - c)) # difference at the nodes
import matplotlib.pyplot as plt
plt.plot(x, u, 'b-', x, u_e, 'r--')
plt.legend(['finite elements, d=%d' %d, 'exact'], loc='upper left')
plt.savefig('tmp.png'); plt.savefig('tmp.pdf')
plt.show()
|
py | 1a30def6c9ec654c39777ee271e1182c32e9da01 | # -*- coding: utf-8 -*-
"""
This module exports functions to initialize the Flask application.
"""
import random
from typing import Callable, Dict
import flask
import flask_babel
import orchard.errors
import orchard.extensions
import orchard.system_status
def create_app(config: str = 'Development') -> flask.Flask:
"""
Create and initialize the Flask application.
:param config: The name of the configuration class, valid values are ``Development``
(default), ``Production``, and ``Testing``.
:return: The initialized Flask application.
"""
configuration_values = {'Development', 'Production', 'Testing'}
if config in configuration_values:
config = 'orchard.configuration.{config}'.format(config = config)
else: # pragma: no cover.
config = 'orchard.configuration.Development'
name = __name__.split('.')[0]
app = flask.Flask(name, instance_relative_config = True)
app.config.from_object(config)
app.config.from_object('instance.Configuration')
# Always use English as default language during testing.
if app.testing: # pragma: no branch.
app.config['BABEL_DEFAULT_LOCALE'] = 'en'
_configure_blueprints(app)
_configure_context_processor(app)
_configure_extensions(app)
_configure_logging(app)
_configure_request_handlers(app)
return app
def _configure_blueprints(app: flask.Flask):
"""
Register the blueprints.
:param app: The application instance.
"""
app.register_blueprint(orchard.errors.blueprint)
app.register_blueprint(orchard.system_status.blueprint)
def _configure_context_processor(app: flask.Flask):
"""
Set up the global context processors.
:param app: The application instance.
"""
@app.context_processor
def inject_jinja2() -> Dict[str, Callable]:
"""
Inject more functions into the scope of Jinja2 templates.
        :return: A dictionary of additional functions made available to Jinja2 templates.
"""
jinja2_functions = {
'hasattr': hasattr,
'random_int': random.randint
}
return jinja2_functions
def _configure_extensions(app: flask.Flask):
"""
Register the extensions with the app and configure them as needed.
:param app: The application instance.
"""
orchard.extensions.babel.init_app(app)
orchard.extensions.cache.init_app(app)
def _configure_logging(app: flask.Flask): # pragma: no cover.
"""
Set up a file and a mail logger, unless the app is being debugged or tested.
:param app: The application instance.
"""
if app.debug or app.testing:
return
# noinspection PyUnresolvedReferences
import logging
import logging.handlers
import os
# Set up the file logger.
log_path = app.config['LOG_PATH']
if not os.path.isdir(log_path):
os.makedirs(log_path)
log_file = os.path.join(log_path, '{file_name}.log'.format(file_name = app.name))
log_format = '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'
file_handler = logging.handlers.RotatingFileHandler(log_file, 'a', 1 * 1024 * 1024, 10)
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(logging.Formatter(log_format))
app.logger.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
app.logger.info('{name} Startup'.format(name = app.config['PROJECT_NAME']))
# Set up the mail logger.
if app.config.get('MAIL_SERVER', '') == '':
return
credentials = None
if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:
credentials = (app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD'])
server = (app.config['MAIL_SERVER'], app.config['MAIL_PORT'])
sender = app.config['MAIL_FROM']
receivers = app.config['ADMINS']
subject = '{name} Failure'.format(name = app.config['PROJECT_NAME'])
secure = None
if app.config['MAIL_SSL']:
secure = ()
mail_handler = logging.handlers.SMTPHandler(server, sender, receivers, subject, credentials,
secure)
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
def _configure_request_handlers(app: flask.Flask):
"""
Set up the global before and after request handlers.
:param app: The application instance.
"""
@app.before_request
def before_request():
"""
Set up a few things before handling the actual request.
"""
flask.g.locale = flask_babel.get_locale()
flask.g.project_name = app.config['PROJECT_NAME']
# Set a default title.
flask.g.title = app.config['PROJECT_NAME']
@app.after_request
def after_request(response: flask.Response) -> flask.Response:
"""
Modify the response after the request has been handled.
:return: The modified response.
"""
# http://www.gnuterrypratchett.com/
response.headers.add("X-Clacks-Overhead", "GNU Terry Pratchett")
return response
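# Minimal usage sketch (illustrative, assuming the module is run directly and that the
# ``instance.Configuration`` object loaded by create_app() is importable): create_app()
# wires up blueprints, context processors, extensions, logging and request handlers in one call.
if __name__ == '__main__':  # pragma: no cover.
    application = create_app('Development')
    application.run(host='127.0.0.1', port=5000)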
|
py | 1a30df187b06ad1ada7e013edba49b48570bd05e | import functools
import re
from typing import Any, Dict, Optional, Tuple, Union
from urllib.parse import urlsplit
from django.apps import apps
from django.contrib.auth.models import AnonymousUser
from django.http import HttpRequest, JsonResponse
from django.utils import timezone
from rest_framework import authentication
from rest_framework.exceptions import AuthenticationFailed
from rest_framework.request import Request
class PersonalAPIKeyAuthentication(authentication.BaseAuthentication):
"""A way of authenticating with personal API keys.
Only the first key candidate found in the request is tried, and the order is:
1. Request Authorization header of type Bearer.
2. Request body.
3. Request query string.
"""
keyword = "Bearer"
@classmethod
def find_key_with_source(
cls,
request: Union[HttpRequest, Request],
request_data: Optional[Dict[str, Any]] = None,
extra_data: Optional[Dict[str, Any]] = None,
) -> Optional[Tuple[str, str]]:
"""Try to find personal API key in request and return it along with where it was found."""
if "HTTP_AUTHORIZATION" in request.META:
authorization_match = re.match(fr"^{cls.keyword}\s+(\S.+)$", request.META["HTTP_AUTHORIZATION"])
if authorization_match:
return authorization_match.group(1).strip(), "Authorization header"
data = request.data if request_data is None and isinstance(request, Request) else request_data
if data and "personal_api_key" in data:
return data["personal_api_key"], "body"
if "personal_api_key" in request.GET:
return request.GET["personal_api_key"], "query string"
if extra_data and "personal_api_key" in extra_data:
# compatibility with /capture endpoint
return extra_data["personal_api_key"], "query string data"
return None
@classmethod
def find_key(
cls,
request: Union[HttpRequest, Request],
request_data: Optional[Dict[str, Any]] = None,
extra_data: Optional[Dict[str, Any]] = None,
) -> Optional[str]:
"""Try to find personal API key in request and return it."""
key_with_source = cls.find_key_with_source(request, request_data, extra_data)
return key_with_source[0] if key_with_source is not None else None
@classmethod
def authenticate(cls, request: Union[HttpRequest, Request]) -> Optional[Tuple[Any, None]]:
personal_api_key_with_source = cls.find_key_with_source(request)
if not personal_api_key_with_source:
return None
personal_api_key, source = personal_api_key_with_source
PersonalAPIKey = apps.get_model(app_label="posthog", model_name="PersonalAPIKey")
try:
personal_api_key_object = (
PersonalAPIKey.objects.select_related("user").filter(user__is_active=True).get(value=personal_api_key)
)
except PersonalAPIKey.DoesNotExist:
raise AuthenticationFailed(detail=f"Personal API key found in request {source} is invalid.")
personal_api_key_object.last_used_at = timezone.now()
personal_api_key_object.save()
assert personal_api_key_object.user is not None
return personal_api_key_object.user, None
@classmethod
def authenticate_header(cls, request) -> str:
return cls.keyword
class TemporaryTokenAuthentication(authentication.BaseAuthentication):
def authenticate(self, request: Request):
# if the Origin is different, the only authentication method should be temporary_token
# This happens when someone is trying to create actions from the editor on their own website
if (
request.headers.get("Origin")
and urlsplit(request.headers["Origin"]).netloc not in urlsplit(request.build_absolute_uri("/")).netloc
):
if not request.GET.get("temporary_token"):
raise AuthenticationFailed(
detail="No temporary_token set. "
+ "That means you're either trying to access this API from a different site, "
+ "or it means your proxy isn't sending the correct headers. "
+ "See https://posthog.com/docs/deployment/running-behind-proxy for more information."
)
if request.GET.get("temporary_token"):
User = apps.get_model(app_label="posthog", model_name="User")
user = User.objects.filter(temporary_token=request.GET.get("temporary_token"))
if not user.exists():
raise AuthenticationFailed(detail="User doesn't exist")
return (user.first(), None)
return None
class PublicTokenAuthentication(authentication.BaseAuthentication):
def authenticate(self, request: Request):
if request.GET.get("share_token") and request.parser_context and request.parser_context.get("kwargs"):
Dashboard = apps.get_model(app_label="posthog", model_name="Dashboard")
dashboard = Dashboard.objects.filter(
share_token=request.GET.get("share_token"), pk=request.parser_context["kwargs"].get("pk"),
).first()
if dashboard is None:
raise AuthenticationFailed(detail="Dashboard doesn't exist")
if dashboard.team.organization.for_internal_metrics:
return None
return (AnonymousUser(), None)
return None
def authenticate_secondarily(endpoint):
"""
DEPRECATED: Used for supporting legacy endpoints not on DRF.
Authentication for function views.
"""
@functools.wraps(endpoint)
def wrapper(request: HttpRequest):
if not request.user.is_authenticated:
try:
auth_result = PersonalAPIKeyAuthentication.authenticate(request)
if isinstance(auth_result, tuple) and auth_result[0].__class__.__name__ == "User":
request.user = auth_result[0]
else:
raise AuthenticationFailed("Authentication credentials were not provided.")
except AuthenticationFailed as e:
return JsonResponse({"detail": e.detail}, status=401)
return endpoint(request)
return wrapper
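# Illustrative self-check (an assumption added for clarity): the Authorization-header
# lookup documented above relies on a "Bearer <key>" regex of this shape.
if __name__ == "__main__":
    _match = re.match(r"^Bearer\s+(\S.+)$", "Bearer phx_example_key_123")
    assert _match is not None
    assert _match.group(1).strip() == "phx_example_key_123"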
|
py | 1a30e0a3525f29b83a01f77bce16e1f125b7a3b9 | import attr
import logging
import os
from datetime import datetime
from feedparser import parse as parse_feed
from typing import List, Optional
from telegram_rss.config import FeedConfig
from telegram_rss.utils import save_as, get_default_directory, load_dict
from . import Entry, Channel, Feed
class FeedUpdater:
def __init__(self, feed_config: FeedConfig, ext: str = ".json"):
self.feed_config = feed_config
self._feed: Optional[Feed] = None
self._local_feed: Optional[Feed] = None
self.local_file = os.path.join(
get_default_directory(),
"data",
f"{self.feed_config.name}" + ext,
)
self.logger = logging.getLogger(feed_config.name)
def __call__(self, save: bool = True) -> List[Entry]:
return self.get_new_entries(save=save)
def get_new_entries(self, save: bool = True) -> List[Entry]:
entries: List[Entry] = list()
if not self.feed or self.feed == self.local_feed:
self.logger.info("No new feeds found")
return entries
for feed in self.feed:
if feed in entries:
continue
if feed not in self.local_feed:
entries.append(feed)
if not entries:
self.logger.debug("All feeds aleady in local_feeds")
return entries
if self.feed_config.only_today:
self.logger.debug("Filter feeds published only today")
now = datetime.now()
            # Keep undated entries and entries published today; filtering into a new
            # list avoids deleting from the list while iterating over its indices.
            entries = [
                entry for entry in entries
                if not entry.time or entry.time.date() == now.date()
            ]
self.logger.info(f"Found new {len(entries)} feeds")
if entries and save:
self.save_feed(self.feed)
self.logger.debug(f"Saved {len(entries)} as {self.local_file}")
return entries
@property
def feed(self) -> Feed:
if self._feed:
return self._feed
if self.feed_config.save_bandwith:
raw_feed = parse_feed(
self.feed_config.source,
etag=self.feed_config.etag,
modified=self.feed_config.modified,
)
else:
raw_feed = parse_feed(self.feed_config.source)
if raw_feed.status == 304:
return Feed()
self.feed_config.etag = raw_feed.etag
self.feed_config.modified = raw_feed.modified
self._feed = Feed.from_feedparser(raw_feed)
return self._feed
@property
def local_feed(self) -> Feed:
if self._local_feed:
return self._local_feed
if not os.path.isfile(self.local_file):
return Feed()
feed_data = load_dict(self.local_file)
self._local_feed = Feed(**feed_data)
return self._local_feed
def save_feed(self, feed: Feed):
feed_data = attr.asdict(feed, recurse=True)
save_as(feed_data, self.local_file)
@property
def channel(self) -> Optional[Channel]:
return self.feed.channel or self.local_feed.channel
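# Usage sketch (illustrative; assumes FeedConfig can be constructed with ``name`` and
# ``source`` keyword arguments, which is not shown in this module):
# config = FeedConfig(name="example", source="https://example.com/rss")
# updater = FeedUpdater(config)
# for entry in updater.get_new_entries(save=False):
#     print(entry)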
|
py | 1a30e27f6752935ad2570aabd89899bbd08e2c0c | #Imports library
import socket
#Creates instance of 'Socket'
s = socket.socket()
hostname = 'tutorialspi' #Server IP/Hostname
port = 8000 #Server Port
s.connect((hostname,port)) #Connects to server
while True:
x = raw_input("Enter message: ") #Gets the message to be sent
s.send(x.encode()) #Encodes and sends message (x)
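# Companion server sketch (illustrative only; the original script is the client side):
# import socket
# server = socket.socket()
# server.bind(('', 8000))        # listen on the same port the client connects to
# server.listen(1)
# conn, addr = server.accept()   # accept a single client
# while True:
#     data = conn.recv(1024)
#     if not data:
#         break
#     print(data.decode())       # print whatever the client sent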
|
py | 1a30e28713306fca74d538ca5d38151d109a7df6 | from cs50 import get_string
import re
def letters_counter(t, a):
c = 0
for i in t:
if i in a or i in [j.upper() for j in a]:
c += 1
return c
def words_counter(t):
match = re.split(" ", t)
return len(match)
def sentences_counter(t):
match = re.split("[.!?]", t)
return len(match) - 1
def calculate(lc, wc, sc):
l = (lc / wc) * 100
s = (sc / wc) * 100
index = 0.0588 * l - 0.296 * s - 15.8
return round(index)
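# Worked example (illustrative figures): for a text with 65 letters, 14 words and
# 4 sentences, L = 65 / 14 * 100 ≈ 464.29 and S = 4 / 14 * 100 ≈ 28.57, so
# index = 0.0588 * 464.29 - 0.296 * 28.57 - 15.8 ≈ 3.0, which prints "Grade 3".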
alphabet = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
text = get_string("Text: ")
letter_count = letters_counter(text, alphabet)
word_count = words_counter(text)
sentence_count = sentences_counter(text)
calc = calculate(letter_count, word_count, sentence_count)
if calc < 1:
print("Before Grade 1")
elif calc >= 16:
print("Grade 16+")
else:
print(f"Grade {calc}") |
py | 1a30e2c0feb5cde5e43ac6cc54319171ee726324 | import pytest
from .common import JSON, Cookies, Headers, Query, Resp, get_paths
from .test_plugin_falcon import api as falcon_api
from .test_plugin_flask import api as flask_api
from .test_plugin_flask_blueprint import api as flask_bp_api
from .test_plugin_flask_view import api as flask_view_api
from .test_plugin_starlette import api as starlette_api
@pytest.mark.parametrize(
"api", [flask_api, flask_bp_api, flask_view_api, falcon_api, starlette_api]
)
def test_plugin_spec(api):
models = {
m.__name__: m.schema(ref_template="#/components/schemas/{model}")
for m in (Query, JSON, Resp, Cookies, Headers)
}
for name, schema in models.items():
assert api.spec["components"]["schemas"][name] == schema
assert api.spec["tags"] == [{"name": tag} for tag in ("test", "health", "api")]
assert get_paths(api.spec) == [
"/api/user/{name}",
"/api/user_annotated/{name}",
"/ping",
]
ping = api.spec["paths"]["/ping"]["get"]
assert ping["tags"] == ["test", "health"]
assert ping["parameters"][0]["in"] == "header"
assert ping["summary"] == "summary"
assert ping["description"] == "description"
assert ping["operationId"] == "get_/ping"
user = api.spec["paths"]["/api/user/{name}"]["post"]
assert user["tags"] == ["api", "test"]
assert (
user["requestBody"]["content"]["application/json"]["schema"]["$ref"]
== "#/components/schemas/JSON"
)
assert len(user["responses"]) == 3
params = user["parameters"]
for param in params:
if param["in"] == "path":
assert param["name"] == "name"
elif param["in"] == "query":
assert param["name"] == "order"
|
py | 1a30e2c66974a7d59914df556af92bb1590f9655 | from sqlalchemy.orm import Session
from fastapi import Depends, Security
from starlette.requests import Request
from app.core.jwt import validate_token, reusable_oauth2
from app.api.utils.db import get_db
def get_current_user(request: Request, token: str = Security(reusable_oauth2)):
print(token)
return request.state.user
def get_validated_current_user(db: Session = Depends(get_db), token: str = Security(reusable_oauth2)): # yapf: disable
return validate_token(db, token)
|
py | 1a30e3f815910632b27d740eb900bc201b72cffd | from os.path import dirname, basename, isfile, join
import glob
modules = glob.glob(join(dirname(__file__), '*.py'))
__all__ = [basename(f)[:-3] for f in modules if isfile(f) and not f.endswith('__init__.py')] |
py | 1a30e4e56a5cb708fa316d595117561f9149bfae | import os
import argparse
import re
import utils
def aws_refresh_report(manifest, fname):
"""
    Generate an aws refresh report by looking into the log file
    generated while the aws refresh script was running. The output is a report
    containing the number of files copied and the total amount copied, in GiB.
Args:
manifest(tsv): GDC manifest (active or legacy)
fname(str): the log file of running aws refresh script
"""
try:
with open(fname) as f:
content = f.readlines()
except IOError as e:
print(e)
os._exit(1)
lines = [x.strip() for x in content]
total_copying_files = 0
total_data = 0
awscli_copied_objects = set()
awscli_copied_data = 0
streaming_copied_objects = set()
streaming_copied_data = 0
for line in lines:
pattern = "Total files need to be replicated: (.*)$"
m = re.search(pattern, line)
if m:
total_copying_files = max(total_copying_files, int(m.group(1)))
pattern = ".*aws s3 mv s3://.*/(.{36})/.*"
m = re.search(pattern, line)
if m:
awscli_copied_objects.add(m.group(1))
pattern = ".*aws s3 cp s3://gdcbackup/(.{36})/.*"
m = re.search(pattern, line)
if m:
awscli_copied_objects.add(m.group(1))
pattern = "successfully stream file (.{36})/.*"
m = re.search(pattern, line)
if m:
streaming_copied_objects.add(m.group(1))
files, headers = utils.get_fileinfo_list_from_csv_manifest(manifest)
file_dict = {}
for fi in files:
file_dict[fi["id"]] = fi
manifest_copied_files = 0
for uuid in awscli_copied_objects:
if uuid in file_dict:
            manifest_copied_files += 1
            awscli_copied_data += file_dict[uuid]["size"]*1.0/1024/1024/1024
    for uuid in streaming_copied_objects:
        if uuid in file_dict:
            manifest_copied_files += 1
            streaming_copied_data += file_dict[uuid]["size"]*1.0/1024/1024/1024
report = """
Number of files need to be copied {}. Total {} (GiB)
Number of files were copied successfully via aws cli {}. Total {}(GiB)
Number of files were copied successfully via gdc api {}. Total {}(GiB)
""".format(
total_copying_files,
total_data,
len(awscli_copied_objects),
awscli_copied_data,
len(streaming_copied_objects),
streaming_copied_data
)
print(report)
copied_files = []
for uuid in awscli_copied_objects.union(streaming_copied_objects):
if uuid in file_dict:
copied_files.append(file_dict[uuid])
print("Saving list of copied files")
utils.write_csv(manifest.split("/")[-1][:-4] + "_aws_copied.tsv", copied_files, fieldnames=headers)
return report
def aws_refresh_validate(fname):
"""
Validate the aws data refresh by looking into the log after validation
script finished.
"""
try:
with open(fname) as f:
content = f.readlines()
except IOError as e:
print(e)
print("Please run the dcf validation job first")
os._exit(1)
lines = [x.strip() for x in content]
for line in lines:
if "TOTAL AWS COPY FAILURE CASES" in line:
print(line)
return False
return True
def parse_arguments():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(title="action", dest="action")
aws_refresh_cmd = subparsers.add_parser("aws_refresh_report")
aws_refresh_cmd.add_argument("--manifest", required=True)
aws_refresh_cmd.add_argument("--log_file", required=True)
aws_validate_cmd = subparsers.add_parser("aws_refresh_validate")
aws_validate_cmd.add_argument("--manifest", required=True)
aws_validate_cmd.add_argument("--log_file", required=True)
return parser.parse_args()
def main():
args = parse_arguments()
fname = args.log_file
manifest = args.manifest
if args.action == "aws_refresh_report":
aws_refresh_report(manifest, fname)
elif args.action == "aws_refresh_validate":
if aws_refresh_validate(fname):
print("All the files in the manifest have been copied to aws dcf buckets")
else:
print("The manifest validation fails")
if __name__ == "__main__":
main()
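# Example invocations (script and file names are illustrative):
#   python aws_refresh_report.py aws_refresh_report --manifest gdc_manifest.tsv --log_file refresh.log
#   python aws_refresh_report.py aws_refresh_validate --manifest gdc_manifest.tsv --log_file validate.log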
|
py | 1a30e56323ca7c988e51a1356d3ea28a9c3f27a9 | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class MicrosoftgraphcalendarGroup(Model):
"""MicrosoftgraphcalendarGroup.
:param id:
:type id: str
:param name:
:type name: str
:param class_id:
:type class_id: str
:param change_key:
:type change_key: str
:param calendars:
:type calendars: list[~users.models.Microsoftgraphcalendar]
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'class_id': {'key': 'classId', 'type': 'str'},
'change_key': {'key': 'changeKey', 'type': 'str'},
'calendars': {'key': 'calendars', 'type': '[Microsoftgraphcalendar]'},
}
def __init__(self, id=None, name=None, class_id=None, change_key=None, calendars=None):
super(MicrosoftgraphcalendarGroup, self).__init__()
self.id = id
self.name = name
self.class_id = class_id
self.change_key = change_key
self.calendars = calendars
|
py | 1a30e57001c8cc5aa33737bd6d5005b00ce4a8d5 | from collections import OrderedDict
from collections.abc import Iterable
from cached_property import cached_property
import numpy as np
import sympy
from devito.finite_differences.finite_difference import (generic_derivative,
first_derivative,
cross_derivative)
from devito.finite_differences.differentiable import Differentiable
from devito.finite_differences.tools import direct, transpose
from devito.tools import as_mapper, as_tuple, filter_ordered, frozendict
from devito.types.array import Array
from devito.types.dimension import StencilDimension
from devito.types.utils import DimensionTuple
__all__ = ['Derivative', 'Weights']
class Derivative(sympy.Derivative, Differentiable):
"""
An unevaluated Derivative, which carries metadata (Dimensions,
derivative order, etc) describing how the derivative will be expanded
upon evaluation.
Parameters
----------
expr : expr-like
Expression for which the Derivative is produced.
dims : Dimension or tuple of Dimension
        Dimensions w.r.t. which to differentiate.
fd_order : int or tuple of int, optional
Coefficient discretization order. Note: this impacts the width of
the resulting stencil. Defaults to 1.
deriv_order: int or tuple of int, optional
Derivative order. Defaults to 1.
side : Side or tuple of Side, optional
Side of the finite difference location, centered (at x), left (at x - 1)
or right (at x +1). Defaults to ``centered``.
transpose : Transpose, optional
Forward (matvec=direct) or transpose (matvec=transpose) mode of the
finite difference. Defaults to ``direct``.
subs : dict, optional
Substitutions to apply to the finite-difference expression after evaluation.
x0 : dict, optional
Origin (where the finite-difference is evaluated at) for the finite-difference
scheme, e.g. {x: x, y: y + h_y/2}.
Examples
--------
Creation
>>> from devito import Function, Derivative, Grid
>>> grid = Grid((10, 10))
>>> x, y = grid.dimensions
>>> u = Function(name="u", grid=grid, space_order=2)
>>> Derivative(u, x)
Derivative(u(x, y), x)
This can also be obtained via the differential shortcut
>>> u.dx
Derivative(u(x, y), x)
You can also specify the order as a keyword argument
>>> Derivative(u, x, deriv_order=2)
Derivative(u(x, y), (x, 2))
Or as a tuple
>>> Derivative(u, (x, 2))
Derivative(u(x, y), (x, 2))
Once again, this can be obtained via shortcut notation
>>> u.dx2
Derivative(u(x, y), (x, 2))
Derivative object are also callable to change default setup:
>>> u.dx2(x0=x + x.spacing)
Derivative(u(x, y), (x, 2))
will create the second derivative at x=x + x.spacing. Accepted arguments for dynamic
evaluation are `x0`, `fd_order` and `side`.
"""
_state = ('expr', 'dims', 'side', 'fd_order', 'transpose', '_ppsubs', 'x0')
_fd_priority = 3
def __new__(cls, expr, *dims, **kwargs):
if type(expr) == sympy.Derivative:
raise ValueError("Cannot nest sympy.Derivative with devito.Derivative")
if not isinstance(expr, Differentiable):
raise ValueError("`expr` must be a Differentiable object")
new_dims, orders, fd_o, var_count = cls._process_kwargs(expr, *dims, **kwargs)
# Construct the actual Derivative object
obj = Differentiable.__new__(cls, expr, *var_count)
obj._dims = tuple(OrderedDict.fromkeys(new_dims))
skip = kwargs.get('preprocessed', False) or obj.ndims == 1
obj._fd_order = fd_o if skip else DimensionTuple(*fd_o, getters=obj._dims)
obj._deriv_order = orders if skip else DimensionTuple(*orders, getters=obj._dims)
obj._side = kwargs.get("side")
obj._transpose = kwargs.get("transpose", direct)
obj._ppsubs = as_tuple(frozendict(i) for i in kwargs.get("subs", []))
obj._x0 = frozendict(kwargs.get('x0', {}))
return obj
@classmethod
def _process_kwargs(cls, expr, *dims, **kwargs):
"""
Process arguments for the construction of a Derivative
"""
        # Skip costly processing if constructing from preprocessed
if kwargs.get('preprocessed', False):
fd_orders = kwargs.get('fd_order')
deriv_orders = kwargs.get('deriv_order')
if len(dims) == 1:
dims = tuple([dims[0]]*deriv_orders)
variable_count = [sympy.Tuple(s, dims.count(s))
for s in filter_ordered(dims)]
return dims, deriv_orders, fd_orders, variable_count
# Check `dims`. It can be a single Dimension, an iterable of Dimensions, or even
# an iterable of 2-tuple (Dimension, deriv_order)
if len(dims) == 0:
raise ValueError("Expected Dimension w.r.t. which to differentiate")
elif len(dims) == 1:
if isinstance(dims[0], Iterable):
# Iterable of Dimensions
if len(dims[0]) != 2:
raise ValueError("Expected `(dim, deriv_order)`, got %s" % dims[0])
orders = kwargs.get('deriv_order', dims[0][1])
if dims[0][1] != orders:
raise ValueError("Two different values of `deriv_order`")
new_dims = tuple([dims[0][0]]*dims[0][1])
else:
# Single Dimension
orders = kwargs.get('deriv_order', 1)
if isinstance(orders, Iterable):
orders = orders[0]
new_dims = tuple([dims[0]]*orders)
else:
# Iterable of 2-tuple, e.g. ((x, 2), (y, 3))
new_dims = []
orders = []
d_ord = kwargs.get('deriv_order', tuple([1]*len(dims)))
for d, o in zip(dims, d_ord):
if isinstance(d, Iterable):
new_dims.extend([d[0] for _ in range(d[1])])
orders.append(d[1])
else:
new_dims.extend([d for _ in range(o)])
orders.append(o)
new_dims = as_tuple(new_dims)
orders = as_tuple(orders)
# Finite difference orders depending on input dimension (.dt or .dx)
fd_orders = kwargs.get('fd_order', tuple([expr.time_order if
getattr(d, 'is_Time', False) else
expr.space_order for d in dims]))
if len(dims) == 1 and isinstance(fd_orders, Iterable):
fd_orders = fd_orders[0]
# SymPy expects the list of variable w.r.t. which we differentiate to be a list
# of 2-tuple `(s, count)` where s is the entity to diff wrt and count is the order
# of the derivative
variable_count = [sympy.Tuple(s, new_dims.count(s))
for s in filter_ordered(new_dims)]
return new_dims, orders, fd_orders, variable_count
def __call__(self, x0=None, fd_order=None, side=None):
if self.ndims == 1:
_fd_order = fd_order or self._fd_order
_side = side or self._side
new_x0 = {self.dims[0]: x0} if x0 is not None else self.x0
return self._new_from_self(fd_order=_fd_order, side=_side, x0=new_x0)
if side is not None:
raise TypeError("Side only supported for first order single"
"Dimension derivative such as `.dxl` or .dx(side=left)")
# Cross derivative
_x0 = dict(self._x0)
_fd_order = dict(self.fd_order._getters)
try:
_fd_order.update(**(fd_order or {}))
_fd_order = tuple(_fd_order.values())
_fd_order = DimensionTuple(*_fd_order, getters=self.dims)
_x0.update(x0)
except AttributeError:
raise TypeError("Multi-dimensional Derivative, input expected as a dict")
return self._new_from_self(fd_order=_fd_order, x0=_x0)
def _new_from_self(self, **kwargs):
expr = kwargs.pop('expr', self.expr)
_kwargs = {'deriv_order': self.deriv_order, 'fd_order': self.fd_order,
'side': self.side, 'transpose': self.transpose, 'subs': self._ppsubs,
'x0': self.x0, 'preprocessed': True}
_kwargs.update(**kwargs)
return Derivative(expr, *self.dims, **_kwargs)
@property
def func(self):
return lambda *a, **kw: self._new_from_self(expr=a[0], **kw)
def subs(self, *args, **kwargs):
"""
Bypass sympy.Subs as Devito has its own lazy evaluation mechanism.
"""
try:
rules = dict(*args)
except TypeError:
rules = dict((args,))
kwargs.pop('simultaneous', None)
return self.xreplace(rules, **kwargs)
def _xreplace(self, subs):
"""
This is a helper method used internally by SymPy. We exploit it to postpone
substitutions until evaluation.
"""
subs = self._ppsubs + (subs,) # Postponed substitutions
return self._new_from_self(subs=subs), True
@cached_property
def _metadata(self):
state = list(self._state)
state.remove('expr')
ret = [getattr(self, i) for i in state]
ret.append(self.expr.staggered or (None,))
return tuple(ret)
@property
def dims(self):
return self._dims
@property
def ndims(self):
return len(self._dims)
@property
def x0(self):
return self._x0
@property
def fd_order(self):
return self._fd_order
@property
def deriv_order(self):
return self._deriv_order
@property
def side(self):
return self._side
@property
def transpose(self):
return self._transpose
@property
def is_TimeDependent(self):
return self.expr.is_TimeDependent
@property
def T(self):
"""Transpose of the Derivative.
FD derivatives can be represented as matrices and have adjoint/transpose.
This is really useful for more advanced FD definitions. For example
the conventional Laplacian is `.dxl.T * .dxl`
"""
if self._transpose == direct:
adjoint = transpose
else:
adjoint = direct
return self._new_from_self(transpose=adjoint)
def _eval_at(self, func):
"""
Evaluates the derivative at the location of `func`. It is necessary for staggered
setup where one could have Eq(u(x + h_x/2), v(x).dx)) in which case v(x).dx
has to be computed at x=x + h_x/2.
"""
# If an x0 already exists do not overwrite it
x0 = self.x0 or dict(func.indices_ref._getters)
if self.expr.is_Add:
# If `expr` has both staggered and non-staggered terms such as
# `(u(x + h_x/2) + v(x)).dx` then we exploit linearity of FD to split
# it into `u(x + h_x/2).dx` and `v(x).dx`, since they require
# different FD indices
mapper = as_mapper(self.expr._args_diff, lambda i: i.staggered)
args = [self.expr.func(*v) for v in mapper.values()]
args.extend([a for a in self.expr.args if a not in self.expr._args_diff])
args = [self._new_from_self(expr=a, x0=x0) for a in args]
return self.expr.func(*args)
elif self.expr.is_Mul:
            # For Mul, we treat the basic case `u(x + h_x/2) * v(x)`, which is what appears
            # in most equations with div(a * u) for example. The expression is re-centered
# at the highest priority index (see _gather_for_diff) to compute the
# derivative at x0.
return self._new_from_self(x0=x0, expr=self.expr._gather_for_diff)
else:
            # For every other case, with more functions or more complex arithmetic,
            # there is no actual way to decide what to do, so it is safest to use
# the expression as is.
return self._new_from_self(x0=x0)
@property
def evaluate(self):
# Evaluate finite-difference.
# NOTE: `evaluate` and `_eval_fd` split for potential future different
# types of discretizations
return self._eval_fd(self.expr)
@property
def _eval_deriv(self):
return self._eval_fd(self.expr)
def _eval_fd(self, expr):
"""
Evaluate the finite-difference approximation of the Derivative.
Evaluation is carried out via the following three steps:
- 1: Evaluate derivatives within the expression. For example given
`f.dx * g`, `f.dx` will be evaluated first.
- 2: Evaluate the finite difference for the (new) expression.
        This in turn is a two-step procedure, for Functions that may
        need to be evaluated at a different point due to e.g. a
        shifted derivative.
- 3: Apply substitutions.
"""
# Step 1: Evaluate derivatives within expression
try:
expr = expr._eval_deriv
except AttributeError:
pass
# Step 2: Evaluate FD of the new expression
if self.side is not None and self.deriv_order == 1:
res = first_derivative(expr, self.dims[0], self.fd_order,
side=self.side, matvec=self.transpose,
x0=self.x0)
elif len(self.dims) > 1:
res = cross_derivative(expr, self.dims, self.fd_order, self.deriv_order,
matvec=self.transpose, x0=self.x0)
else:
res = generic_derivative(expr, *self.dims, self.fd_order, self.deriv_order,
matvec=self.transpose, x0=self.x0)
# Step 3: Apply substitutions
for e in self._ppsubs:
res = res.xreplace(e)
return res
class Weights(Array):
"""
The weights (or coefficients) of a finite-difference expansion.
"""
def __init_finalize__(self, *args, **kwargs):
dimensions = as_tuple(kwargs.get('dimensions'))
weights = kwargs.get('initvalue')
assert len(dimensions) == 1
d = dimensions[0]
assert isinstance(d, StencilDimension) and d.symbolic_size == len(weights)
assert isinstance(weights, (list, tuple, np.ndarray))
kwargs['scope'] = 'static'
super().__init_finalize__(*args, **kwargs)
@property
def dimension(self):
return self.dimensions[0]
weights = Array.initvalue
|
py | 1a30e5b4cc1ec8cf662272ff61dfa7f1fa47b9da | #!/usr/bin/env python
# encoding: utf-8
"""
untitled.py
Created by Olivier Huin on 2010-02-20.
Copyright (c) 2010 Flarebyte.com Limited. All rights reserved.
"""
import sys
import os
# Entries: (shortid, uuid, key, label, tags). A list is used because the tag
# lists inside each tuple are unhashable and therefore cannot be stored in a set.
activitykinds=[
('shortid', 'uuid', 'visiting', 'visiting', ['visiting']),
('shortid', 'uuid', 'booking', 'booking', ['booking']),
('shortid', 'uuid', 'learning', 'learning', ['learning']),
('shortid', 'uuid', 'eating', 'eating', ['eating']),
('shortid', 'uuid', 'drinking', 'drinking', ['drinking']),
('shortid', 'uuid', 'volunteering', 'volunteering', ['volunteering']),
('shortid', 'uuid', 'fundraising', 'fundraising', ['fundraising']),
]
|
py | 1a30e65961d5c26c1690ebb3f1d085d6c8241ea4 | #!/usr/bin/env python3
# Write a program that simulates random BAC coverage over a genome
# Command line arguments include
# Genome size (e.g. 1000)
# X coverage (e.g. 5)
# Use assert() to check parameter bounds
# Report min, max, and histogram of coverage
# Note that your output may vary due to random function
import sys
import random
assert(len(sys.argv) == 3)
bins = int(sys.argv[1])
x = float(sys.argv[2])
assert(bins > 0)
assert(x > 0)
bacs = int(bins * x)
genome = [0] * bins
#1st array
for i in range(bacs):
r = random.randint(0, bins -1)
genome[r] += 1
genome.sort()
min = genome[0]
max = genome[-1]
#2nd array
hist = [0] * (max + 1)
for v in genome:
hist[v] += 1
#output
print(f'Size: {bins}')
print(f'X: {x}')
print(f'BACs: {bacs}')
print(f'Min: {genome[0]}')
print(f'Max: {genome[-1]}')
print(f'Counts:')
for i in range(len(hist)):
print(i, hist[i])
"""
Size: 1000
X: 5.0
BACs: 5000
Min: 0
Max: 13
Counts:
0 5
1 39
2 88
3 144
4 175
5 150
6 151
7 116
8 59
9 40
10 20
11 5
12 6
13 2
"""
|
py | 1a30e709ba83c0d2e9d1a7afd432a1c711a977bb | import logging
from typing import Any, Dict, List, TypedDict
from utility import Utility
log: logging.Logger = logging.getLogger(__name__)
class CamoIDs(TypedDict):
"""Structure of loot/camo_ids.csv"""
id: int
ref: str
rarity: int
price: int
salvage: int
license: int
premium: int # bool
class CamoTable(TypedDict):
"""Structure of mp/camotable.csv"""
index: int
ref: str
botValid: int # bool
category: str
unlockType: str
unlockString: str
hideInUI: int # bool
name: str
image: str
availableOffline: int # bool
platformExclusiveType: str
class Camos:
"""Camo XAssets."""
def Compile(self: Any) -> None:
"""Compile the Camo XAssets."""
camos: List[Dict[str, Any]] = []
camos = Camos.IDs(self, camos)
camos = Camos.Table(self, camos)
Utility.WriteFile(self, f"{self.eXAssets}/camos.json", camos)
log.info(f"Compiled {len(camos):,} Camos")
def IDs(self: Any, camos: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""Compile the loot/camo_ids.csv XAsset."""
ids: List[Dict[str, Any]] = Utility.ReadCSV(
self, f"{self.iXAssets}/loot/camo_ids.csv", CamoIDs
)
if ids is None:
return camos
for entry in ids:
camos.append(
{
"id": entry.get("id"),
"altId": entry.get("ref"),
"name": None,
"category": None,
"type": self.ModernWarfare.GetLootType(entry.get("id")),
"rarity": self.ModernWarfare.GetLootRarity(entry.get("rarity")),
"season": self.ModernWarfare.GetLootSeason(entry.get("license")),
"exclusive": None,
"available": self.ModernWarfare.GetTitleAvailability(
entry.get("id")
),
"hidden": None,
"image": None,
}
)
return camos
def Table(self: Any, camos: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""Compile the mp/camotable.csv XAsset."""
table: List[Dict[str, Any]] = Utility.ReadCSV(
self, f"{self.iXAssets}/mp/camotable.csv", CamoTable
)
if table is None:
return camos
for camo in camos:
for entry in table:
if camo.get("altId") != entry.get("ref"):
continue
camo["name"] = self.localize.get(entry.get("name"))
camo["category"] = self.ModernWarfare.GetCamoCategory(
entry.get("category")
)
camo["exclusive"] = self.ModernWarfare.GetPlatformExclusivity(
entry.get("platformExclusiveType")
)
camo["hidden"] = bool(entry.get("hidden"))
camo["image"] = entry.get("image")
return camos
|
py | 1a30e7652169a437c848f9b1ca1c98927301347f | class Pagelet(object):
def __init__(self, parent_request, target_element_id, route_view, params, method: str = 'GET', depends_on: str= None):
self.parent_request = parent_request
self.target = target_element_id
self.route_view = route_view
self.params = params
self.method = method
self.depends_on = depends_on
def render(self):
return self.route_view(self.parent_request, **self.params)
|
py | 1a30e871a167b1e77ffaf8a14121b8009989b2a7 | import numpy as np
import pandas as pd
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
from sklearn.preprocessing import MultiLabelBinarizer
from tensorflow.keras.preprocessing.sequence import skipgrams
from keras.utils import np_utils
from keras.preprocessing.sequence import make_sampling_table
import scipy.io as sio
import os
def train(cleaned_tweets, tweets, hashtags, sentiment, source_idx, target_idx):
# Obtain skipgram embedding only
# Create feature representation: TFIDF-Variants and skipgram embedding with 1000 dimension and negative sampling
# Output will be saved to disk
# get_glove_embedding_matrix(cleaned_tweets)
# get_skipgram_gensim_embedding_matrix(cleaned_tweets)
    # Sentence Skipgram is the base feature representation of the dataset
X = get_skipgram_sentence_embedding_matrix(cleaned_tweets)
# Create bytes file for the visualization
X.dtype=np.float32
X.tofile("data/skipgram_tensors.bytes")
create_domain_adaptation_dataset(X, tweets, source_idx, target_idx, sentiment)
def get_skipgram_sentence_embedding_matrix(text, dim=200, batch_size=256, window_size=5, epochs=1):
print("get_skipgram_sentence_embedding_matrix")
if os.path.isfile("data/sentqs_skipgram_sentence_embedding.npz"):
loaded_embedding = np.load("data/sentqs_skipgram_sentence_embedding.npz")
loaded_embedding = loaded_embedding["embedding"]
print('Loaded Skipgram embedding.')
return loaded_embedding
else:
text = [''.join(x) for x in text]
t = Tokenizer()
t.fit_on_texts(text)
corpus = t.texts_to_sequences(text)
# print(corpus)
V = len(t.word_index)
step_size = len(corpus) // batch_size
model = Sequential()
model.add(Dense(units=dim, input_dim=V, activation="softmax"))
model.add(Dense(units=V, input_dim=dim, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
model.summary()
model.fit(generate_data(corpus, window_size, V), epochs=epochs, steps_per_epoch=step_size)
# model.save("data/sentqs_full_skigram_arc.h5")
mlb = MultiLabelBinarizer()
enc = mlb.fit_transform(corpus)
emb = enc @ model.get_weights()[0]
np.savez_compressed("data/sentqs_skipgram_sentence_embedding", embedding=emb)
return emb
def create_domain_adaptation_dataset(X, tweets, source_idx, target_idx, sentiment):
Xs = X[source_idx]
Xt = X[target_idx]
Ys = sentiment[source_idx]
Yt = sentiment[target_idx]
data = [Xs, Ys, Xt, Yt]
np.savez('data/sentqs_dataset.npz', *data)
sio.savemat('data/sentqs_dataset.mat', {'Xs': Xs, 'Xt': Xt, 'Ys': Ys, 'Yt': Yt})
source_tweets = [tweets[i] for i in source_idx]
target_tweets = [tweets[i] for i in target_idx]
pd.DataFrame(source_tweets).to_csv("data/sentqs_source_tweets.csv")
pd.DataFrame(target_tweets).to_csv("data/sentqs_target_tweets.csv")
return Xs, Ys, Xt, Yt
def generate_data(corpus, window_size, V):
for words in corpus:
couples, labels = skipgrams(words, V, window_size, negative_samples=1, shuffle=True,
sampling_table=make_sampling_table(V, sampling_factor=1e-05))
if couples:
X, y = zip(*couples)
X = np_utils.to_categorical(X, V)
y = np_utils.to_categorical(y, V)
yield X, y
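# Usage sketch (illustrative; argument names follow train() above):
# emb = get_skipgram_sentence_embedding_matrix(cleaned_tweets)   # shape (n_tweets, 200)
# Xs, Ys, Xt, Yt = create_domain_adaptation_dataset(emb, tweets, source_idx, target_idx, sentiment)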
|
py | 1a30e8d20a58f8425ab9facc0fa55f1e3ed9bfef | from commndata.models import TimeLinedTable
from django.db import models
from django.utils.translation import gettext_lazy as _
from enum import Enum
class SalaryTable(TimeLinedTable):
class SALARY_TABLE(models.IntegerChoices):
GS1 = (1010, '行(一)')
GS2 = (1020, '行(二)')
SGS = (1110, '専門行政')
ZM = (1210, '税務')
KA1 = (1310, '公安(一)')
KA2 = (1320, '公安(二)')
KJ1 = (1410, '海(一)')
KJ2 = (1420, '海(二)')
KI1 = (1510, '教(一)')
KI2 = (1520, '教(二)')
KK = (1610, '研究')
IR1 = (1710, '医(一)')
IR2 = (1720, '医(二)')
IR3 = (1730, '医(三)')
FS = (1810, '福祉')
        NK1 = (1910, '任研(一)') # fixed-term researchers (i)
        NK2 = (1920, '任研(二)')
        TNK = (1930, '特任研') # specially-appointed fixed-term researchers
        SS = (2010, '専門スタッフ')
        ST = (2110, '指定職') # designated positions
class STAFF_TYPE(models.IntegerChoices):
TY = (1, '定員')
SNY = (2, '再任用')
salary_table = models.IntegerField(verbose_name=_('salary table'), blank=False,
                choices=SALARY_TABLE.choices, default=SALARY_TABLE.GS1) # salary table
    salary_level = models.IntegerField(verbose_name=_('salary level')) # grade
    salary_no = models.IntegerField(verbose_name=_('salary no')) # salary step
    salary_monthly = models.IntegerField(verbose_name=_('salary monthly')) # monthly salary amount
    salary_adjustment = models.IntegerField(verbose_name=_('salary adjustment')) # salary adjustment amount
@property
    def sny_salary_no(self):
        """
        Salary step for reappointed staff
"""
return 999
class Meta:
permissions = [
('import_salary_table', 'Can import salary_table'),
('export_salary_table', 'Can export salary_table'),
]
verbose_name = _('salary table')
verbose_name_plural = _('salary table')
constraints = [
models.UniqueConstraint(name='salary_table_unique', fields = ['start_date', 'salary_table', 'salary_level', 'salary_adjustment']),
]
ordering = ['-start_date', 'salary_table', 'salary_level', 'salary_no']
def __str__(self):
return self.salary_table
class SalaryTableExcel(TimeLinedTable):
salary_table = models.IntegerField(verbose_name=_('salary table'), blank=False,
choices=SalaryTable.SALARY_TABLE.choices, default=SalaryTable.SALARY_TABLE.GS1) # 俸給表
sheet_name = models.CharField(max_length=10, verbose_name=_('シート名'))
rows = models.IntegerField(verbose_name=_('級'), default=1)
cols = models.IntegerField(verbose_name=_('号俸'), default=1)
sny_flg = models.BooleanField(verbose_name=_('再任用有無'), default=True)
start_cell = models.CharField(max_length=10, verbose_name=_('データ開始セル'))
class Meta:
db_table = 'salary_table_excel'
verbose_name = _('俸給表取込エクセル設定')
verbose_name_plural = _('俸給表取込エクセル設定')
constraints = [
models.UniqueConstraint(name='salary_table_excel_unique', fields = ['start_date', 'salary_table',]),
]
ordering = ['-start_date', 'salary_table', ]
|
py | 1a30e926fcd000f43102fb752e44a5336141a0cd | # from electrum_vtc.i18n import _
#
# fullname = _('Two Factor Authentication')
# description = ''.join([
# _("This plugin adds two-factor authentication to your wallet."), '<br/>',
# _("For more information, visit"),
# " <a href=\"https://api.trustedcoin.com/#/electrum-help\">https://api.trustedcoin.com/#/electrum-help</a>"
# ])
# requires_wallet_type = ['2fa']
# registers_wallet_type = '2fa'
# available_for = ['qt', 'cmdline', 'kivy']
|
py | 1a30ea2e7047e2719bac3d3d2a071f5de7e58fc5 | """
fitpack --- curve and surface fitting with splines
fitpack is based on a collection of Fortran routines DIERCKX
by P. Dierckx (see http://www.netlib.org/dierckx/) transformed
to double routines by Pearu Peterson.
"""
# Created by Pearu Peterson, June,August 2003
from __future__ import division, print_function, absolute_import
__all__ = [
'UnivariateSpline',
'InterpolatedUnivariateSpline',
'LSQUnivariateSpline',
'BivariateSpline',
'LSQBivariateSpline',
'SmoothBivariateSpline',
'LSQSphereBivariateSpline',
'SmoothSphereBivariateSpline',
'RectBivariateSpline',
'RectSphereBivariateSpline']
import warnings
from numpy import zeros, concatenate, alltrue, ravel, all, diff, array, ones
import numpy as np
from . import fitpack
from . import dfitpack
################ Univariate spline ####################
_curfit_messages = {1:"""
The required storage space exceeds the available storage space, as
specified by the parameter nest: nest too small. If nest is already
large (say nest > m/2), it may also indicate that s is too small.
The approximation returned is the weighted least-squares spline
according to the knots t[0],t[1],...,t[n-1]. (n=nest) the parameter fp
gives the corresponding weighted sum of squared residuals (fp>s).
""",
2:"""
A theoretically impossible result was found during the iteration
process for finding a smoothing spline with fp = s: s too small.
There is an approximation returned but the corresponding weighted sum
of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""",
3:"""
The maximal number of iterations maxit (set to 20 by the program)
allowed for finding a smoothing spline with fp=s has been reached: s
too small.
There is an approximation returned but the corresponding weighted sum
of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""",
10:"""
Error on entry, no approximation returned. The following conditions
must hold:
xb<=x[0]<x[1]<...<x[m-1]<=xe, w[i]>0, i=0..m-1
if iopt=-1:
xb<t[k+1]<t[k+2]<...<t[n-k-2]<xe"""
}
# UnivariateSpline, ext parameter can be an int or a string
_extrap_modes = {0: 0, 'extrapolate': 0,
1: 1, 'zeros': 1,
2: 2, 'raise': 2,
3: 3, 'const': 3}
class UnivariateSpline(object):
"""
One-dimensional smoothing spline fit to a given set of data points.
Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data. `s`
specifies the number of knots by specifying a smoothing condition.
Parameters
----------
x : (N,) array_like
1-D array of independent input data. Must be increasing.
y : (N,) array_like
1-D array of dependent input data, of the same length as `x`.
w : (N,) array_like, optional
Weights for spline fitting. Must be positive. If None (default),
weights are all equal.
bbox : (2,) array_like, optional
2-sequence specifying the boundary of the approximation interval. If
None (default), ``bbox=[x[0], x[-1]]``.
k : int, optional
Degree of the smoothing spline. Must be <= 5.
Default is k=3, a cubic spline.
s : float or None, optional
Positive smoothing factor used to choose the number of knots. Number
of knots will be increased until the smoothing condition is satisfied::
sum((w[i] * (y[i]-spl(x[i])))**2, axis=0) <= s
If None (default), ``s = len(w)`` which should be a good value if
``1/w[i]`` is an estimate of the standard deviation of ``y[i]``.
If 0, spline will interpolate through all data points.
ext : int or str, optional
Controls the extrapolation mode for elements
not in the interval defined by the knot sequence.
* if ext=0 or 'extrapolate', return the extrapolated value.
* if ext=1 or 'zeros', return 0
* if ext=2 or 'raise', raise a ValueError
* if ext=3 of 'const', return the boundary value.
The default value is 0.
check_finite : bool, optional
Whether to check that the input arrays contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination or non-sensical results) if the inputs
do contain infinities or NaNs.
Default is False.
See Also
--------
InterpolatedUnivariateSpline : Subclass with smoothing forced to 0
LSQUnivariateSpline : Subclass in which knots are user-selected instead of
being set by smoothing condition
splrep : An older, non object-oriented wrapping of FITPACK
splev, sproot, splint, spalde
BivariateSpline : A similar class for two-dimensional spline interpolation
Notes
-----
The number of data points must be larger than the spline degree `k`.
**NaN handling**: If the input arrays contain ``nan`` values, the result
is not useful, since the underlying spline fitting routines cannot deal
with ``nan`` . A workaround is to use zero weights for not-a-number
data points:
>>> from scipy.interpolate import UnivariateSpline
>>> x, y = np.array([1, 2, 3, 4]), np.array([1, np.nan, 3, 4])
>>> w = np.isnan(y)
>>> y[w] = 0.
>>> spl = UnivariateSpline(x, y, w=~w)
Notice the need to replace a ``nan`` by a numerical value (precise value
does not matter as long as the corresponding weight is zero.)
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(-3, 3, 50)
>>> y = np.exp(-x**2) + 0.1 * np.random.randn(50)
>>> plt.plot(x, y, 'ro', ms=5)
Use the default value for the smoothing parameter:
>>> spl = UnivariateSpline(x, y)
>>> xs = np.linspace(-3, 3, 1000)
>>> plt.plot(xs, spl(xs), 'g', lw=3)
Manually change the amount of smoothing:
>>> spl.set_smoothing_factor(0.5)
>>> plt.plot(xs, spl(xs), 'b', lw=3)
>>> plt.show()
"""
def __init__(self, x, y, w=None, bbox=[None]*2, k=3, s=None,
ext=0, check_finite=False):
if check_finite:
if not np.isfinite(x).all() or not np.isfinite(y).all():
raise ValueError("x and y array must not contain NaNs or infs.")
# _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
try:
self.ext = _extrap_modes[ext]
except KeyError:
raise ValueError("Unknown extrapolation mode %s." % ext)
data = dfitpack.fpcurf0(x,y,k,w=w,
xb=bbox[0],xe=bbox[1],s=s)
if data[-1] == 1:
# nest too small, setting to maximum bound
data = self._reset_nest(data)
self._data = data
self._reset_class()
@classmethod
def _from_tck(cls, tck, ext=0):
"""Construct a spline object from given tck"""
self = cls.__new__(cls)
t, c, k = tck
self._eval_args = tck
#_data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
self._data = (None,None,None,None,None,k,None,len(t),t,
c,None,None,None,None)
self.ext = ext
return self
def _reset_class(self):
data = self._data
n,t,c,k,ier = data[7],data[8],data[9],data[5],data[-1]
self._eval_args = t[:n],c[:n],k
if ier == 0:
# the spline returned has a residual sum of squares fp
# such that abs(fp-s)/s <= tol with tol a relative
# tolerance set to 0.001 by the program
pass
elif ier == -1:
# the spline returned is an interpolating spline
self._set_class(InterpolatedUnivariateSpline)
elif ier == -2:
# the spline returned is the weighted least-squares
# polynomial of degree k. In this extreme case fp gives
# the upper bound fp0 for the smoothing factor s.
self._set_class(LSQUnivariateSpline)
else:
# error
if ier == 1:
self._set_class(LSQUnivariateSpline)
message = _curfit_messages.get(ier,'ier=%s' % (ier))
warnings.warn(message)
def _set_class(self, cls):
self._spline_class = cls
if self.__class__ in (UnivariateSpline, InterpolatedUnivariateSpline,
LSQUnivariateSpline):
self.__class__ = cls
else:
# It's an unknown subclass -- don't change class. cf. #731
pass
def _reset_nest(self, data, nest=None):
n = data[10]
if nest is None:
k,m = data[5],len(data[0])
nest = m+k+1 # this is the maximum bound for nest
else:
if not n <= nest:
raise ValueError("`nest` can only be increased")
t, c, fpint, nrdata = [np.resize(data[j], nest) for j in [8,9,11,12]]
args = data[:8] + (t,c,n,fpint,nrdata,data[13])
data = dfitpack.fpcurf1(*args)
return data
def set_smoothing_factor(self, s):
""" Continue spline computation with the given smoothing
factor s and with the knots found at the last call.
This routine modifies the spline in place.
"""
data = self._data
if data[6] == -1:
            warnings.warn('smoothing factor unchanged for '
'LSQ spline with fixed knots')
return
args = data[:6] + (s,) + data[7:]
data = dfitpack.fpcurf1(*args)
if data[-1] == 1:
# nest too small, setting to maximum bound
data = self._reset_nest(data)
self._data = data
self._reset_class()
def __call__(self, x, nu=0, ext=None):
"""
Evaluate spline (or its nu-th derivative) at positions x.
Parameters
----------
x : array_like
A 1-D array of points at which to return the value of the smoothed
spline or its derivatives. Note: x can be unordered but the
evaluation is more efficient if x is (partially) ordered.
nu : int
The order of derivative of the spline to compute.
ext : int
Controls the value returned for elements of ``x`` not in the
interval defined by the knot sequence.
* if ext=0 or 'extrapolate', return the extrapolated value.
* if ext=1 or 'zeros', return 0
* if ext=2 or 'raise', raise a ValueError
* if ext=3 or 'const', return the boundary value.
The default value is 0, passed from the initialization of
UnivariateSpline.
"""
x = np.asarray(x)
# empty input yields empty output
if x.size == 0:
return array([])
# if nu is None:
# return dfitpack.splev(*(self._eval_args+(x,)))
# return dfitpack.splder(nu=nu,*(self._eval_args+(x,)))
if ext is None:
ext = self.ext
else:
try:
ext = _extrap_modes[ext]
except KeyError:
raise ValueError("Unknown extrapolation mode %s." % ext)
return fitpack.splev(x, self._eval_args, der=nu, ext=ext)
def get_knots(self):
""" Return positions of interior knots of the spline.
Internally, the knot vector contains ``2*k`` additional boundary knots.
"""
data = self._data
k,n = data[5],data[7]
return data[8][k:n-k]
def get_coeffs(self):
"""Return spline coefficients."""
data = self._data
k,n = data[5],data[7]
return data[9][:n-k-1]
def get_residual(self):
"""Return weighted sum of squared residuals of the spline approximation.
This is equivalent to::
sum((w[i] * (y[i]-spl(x[i])))**2, axis=0)
"""
return self._data[10]
def integral(self, a, b):
""" Return definite integral of the spline between two given points.
Parameters
----------
a : float
Lower limit of integration.
b : float
Upper limit of integration.
Returns
-------
integral : float
The value of the definite integral of the spline between limits.
Examples
--------
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(0, 3, 11)
>>> y = x**2
>>> spl = UnivariateSpline(x, y)
>>> spl.integral(0, 3)
9.0
which agrees with :math:`\int x^2 dx = x^3 / 3` between the limits
of 0 and 3.
A caveat is that this routine assumes the spline to be zero outside of
the data limits:
>>> spl.integral(-1, 4)
9.0
>>> spl.integral(-1, 0)
0.0
"""
return dfitpack.splint(*(self._eval_args+(a,b)))
def derivatives(self, x):
""" Return all derivatives of the spline at the point x.
Parameters
----------
x : float
The point to evaluate the derivatives at.
Returns
-------
der : ndarray, shape(k+1,)
Derivatives of the orders 0 to k.
Examples
--------
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(0, 3, 11)
>>> y = x**2
>>> spl = UnivariateSpline(x, y)
>>> spl.derivatives(1.5)
array([2.25, 3.0, 2.0, 0])
"""
d,ier = dfitpack.spalde(*(self._eval_args+(x,)))
if not ier == 0:
raise ValueError("Error code returned by spalde: %s" % ier)
return d
def roots(self):
""" Return the zeros of the spline.
Restriction: only cubic splines are supported by fitpack.
"""
k = self._data[5]
if k == 3:
z,m,ier = dfitpack.sproot(*self._eval_args[:2])
if not ier == 0:
raise ValueError("Error code returned by spalde: %s" % ier)
return z[:m]
raise NotImplementedError('finding roots unsupported for '
'non-cubic splines')
def derivative(self, n=1):
"""
Construct a new spline representing the derivative of this spline.
Parameters
----------
n : int, optional
Order of derivative to evaluate. Default: 1
Returns
-------
spline : UnivariateSpline
Spline of order k2=k-n representing the derivative of this
spline.
See Also
--------
splder, antiderivative
Notes
-----
.. versionadded:: 0.13.0
Examples
--------
This can be used for finding maxima of a curve:
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(0, 10, 70)
>>> y = np.sin(x)
>>> spl = UnivariateSpline(x, y, k=4, s=0)
Now, differentiate the spline and find the zeros of the
derivative. (NB: `sproot` only works for order 3 splines, so we
fit an order 4 spline):
>>> spl.derivative().roots() / np.pi
array([ 0.50000001, 1.5 , 2.49999998])
This agrees well with roots :math:`\pi/2 + n\pi` of `cos(x) = sin'(x)`.
"""
tck = fitpack.splder(self._eval_args, n)
return UnivariateSpline._from_tck(tck, self.ext)
def antiderivative(self, n=1):
"""
Construct a new spline representing the antiderivative of this spline.
Parameters
----------
n : int, optional
Order of antiderivative to evaluate. Default: 1
Returns
-------
spline : UnivariateSpline
Spline of order k2=k+n representing the antiderivative of this
spline.
Notes
-----
.. versionadded:: 0.13.0
See Also
--------
splantider, derivative
Examples
--------
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(0, np.pi/2, 70)
>>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
>>> spl = UnivariateSpline(x, y, s=0)
The derivative is the inverse operation of the antiderivative,
although some floating point error accumulates:
>>> spl(1.7), spl.antiderivative().derivative()(1.7)
(array(2.1565429877197317), array(2.1565429877201865))
Antiderivative can be used to evaluate definite integrals:
>>> ispl = spl.antiderivative()
>>> ispl(np.pi/2) - ispl(0)
2.2572053588768486
This is indeed an approximation to the complete elliptic integral
:math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`:
>>> from scipy.special import ellipk
>>> ellipk(0.8)
2.2572053268208538
"""
tck = fitpack.splantider(self._eval_args, n)
return UnivariateSpline._from_tck(tck, self.ext)
class InterpolatedUnivariateSpline(UnivariateSpline):
"""
One-dimensional interpolating spline for a given set of data points.
Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data. Spline
function passes through all provided points. Equivalent to
`UnivariateSpline` with s=0.
Parameters
----------
x : (N,) array_like
Input dimension of data points -- must be increasing
y : (N,) array_like
input dimension of data points
w : (N,) array_like, optional
Weights for spline fitting. Must be positive. If None (default),
weights are all equal.
bbox : (2,) array_like, optional
2-sequence specifying the boundary of the approximation interval. If
None (default), ``bbox=[x[0], x[-1]]``.
k : int, optional
Degree of the smoothing spline. Must be 1 <= `k` <= 5.
ext : int or str, optional
Controls the extrapolation mode for elements
not in the interval defined by the knot sequence.
* if ext=0 or 'extrapolate', return the extrapolated value.
* if ext=1 or 'zeros', return 0
* if ext=2 or 'raise', raise a ValueError
* if ext=3 of 'const', return the boundary value.
The default value is 0.
check_finite : bool, optional
Whether to check that the input arrays contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination or non-sensical results) if the inputs
do contain infinities or NaNs.
Default is False.
See Also
--------
UnivariateSpline : Superclass -- allows knots to be selected by a
smoothing condition
LSQUnivariateSpline : spline for which knots are user-selected
splrep : An older, non object-oriented wrapping of FITPACK
splev, sproot, splint, spalde
BivariateSpline : A similar class for two-dimensional spline interpolation
Notes
-----
The number of data points must be larger than the spline degree `k`.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import InterpolatedUnivariateSpline
>>> x = np.linspace(-3, 3, 50)
>>> y = np.exp(-x**2) + 0.1 * np.random.randn(50)
>>> spl = InterpolatedUnivariateSpline(x, y)
>>> plt.plot(x, y, 'ro', ms=5)
>>> xs = np.linspace(-3, 3, 1000)
>>> plt.plot(xs, spl(xs), 'g', lw=3, alpha=0.7)
>>> plt.show()
Notice that the ``spl(x)`` interpolates `y`:
>>> spl.get_residual()
0.0
"""
def __init__(self, x, y, w=None, bbox=[None]*2, k=3,
ext=0, check_finite=False):
        if check_finite:
            w_finite = np.isfinite(w).all() if w is not None else True
            if (not np.isfinite(x).all() or not np.isfinite(y).all() or
                    not w_finite):
                raise ValueError("Input must not contain NaNs or infs.")
# _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
self._data = dfitpack.fpcurf0(x,y,k,w=w,
xb=bbox[0],xe=bbox[1],s=0)
self._reset_class()
try:
self.ext = _extrap_modes[ext]
except KeyError:
raise ValueError("Unknown extrapolation mode %s." % ext)
_fpchec_error_string = """The input parameters have been rejected by fpchec. \
This means that at least one of the following conditions is violated:
1) k+1 <= n-k-1 <= m
2) t(1) <= t(2) <= ... <= t(k+1)
t(n-k) <= t(n-k+1) <= ... <= t(n)
3) t(k+1) < t(k+2) < ... < t(n-k)
4) t(k+1) <= x(i) <= t(n-k)
5) The conditions specified by Schoenberg and Whitney must hold
for at least one subset of data points, i.e., there must be a
subset of data points y(j) such that
t(j) < y(j) < t(j+k+1), j=1,2,...,n-k-1
"""
class LSQUnivariateSpline(UnivariateSpline):
"""
One-dimensional spline with explicit internal knots.
Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data. `t`
specifies the internal knots of the spline
Parameters
----------
x : (N,) array_like
Input dimension of data points -- must be increasing
y : (N,) array_like
Input dimension of data points
t : (M,) array_like
interior knots of the spline. Must be in ascending order and::
bbox[0] < t[0] < ... < t[-1] < bbox[-1]
w : (N,) array_like, optional
weights for spline fitting. Must be positive. If None (default),
weights are all equal.
bbox : (2,) array_like, optional
2-sequence specifying the boundary of the approximation interval. If
None (default), ``bbox = [x[0], x[-1]]``.
k : int, optional
Degree of the smoothing spline. Must be 1 <= `k` <= 5.
Default is k=3, a cubic spline.
ext : int or str, optional
Controls the extrapolation mode for elements
not in the interval defined by the knot sequence.
* if ext=0 or 'extrapolate', return the extrapolated value.
* if ext=1 or 'zeros', return 0
* if ext=2 or 'raise', raise a ValueError
* if ext=3 or 'const', return the boundary value.
The default value is 0.
check_finite : bool, optional
Whether to check that the input arrays contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination or non-sensical results) if the inputs
do contain infinities or NaNs.
Default is False.
Raises
------
ValueError
If the interior knots do not satisfy the Schoenberg-Whitney conditions
See Also
--------
UnivariateSpline : Superclass -- knots are specified by setting a
smoothing condition
InterpolatedUnivariateSpline : spline passing through all points
splrep : An older, non object-oriented wrapping of FITPACK
splev, sproot, splint, spalde
BivariateSpline : A similar class for two-dimensional spline interpolation
Notes
-----
The number of data points must be larger than the spline degree `k`.
Knots `t` must satisfy the Schoenberg-Whitney conditions,
i.e., there must be a subset of data points ``x[j]`` such that
``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.
Examples
--------
>>> from scipy.interpolate import LSQUnivariateSpline, UnivariateSpline
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3, 50)
>>> y = np.exp(-x**2) + 0.1 * np.random.randn(50)
Fit a smoothing spline with pre-defined internal knots:
>>> t = [-1, 0, 1]
>>> spl = LSQUnivariateSpline(x, y, t)
>>> xs = np.linspace(-3, 3, 1000)
>>> plt.plot(x, y, 'ro', ms=5)
>>> plt.plot(xs, spl(xs), 'g-', lw=3)
>>> plt.show()
Check the knot vector:
>>> spl.get_knots()
array([-3., -1., 0., 1., 3.])
Constructing an LSQ spline using the knots from another spline:
>>> x = np.arange(10)
>>> s = UnivariateSpline(x, x, s=0)
>>> s.get_knots()
array([ 0., 2., 3., 4., 5., 6., 7., 9.])
>>> knt = s.get_knots()
>>> s1 = LSQUnivariateSpline(x, x, knt[1:-1]) # Chop 1st and last knot
>>> s1.get_knots()
array([ 0., 2., 3., 4., 5., 6., 7., 9.])
"""
def __init__(self, x, y, t, w=None, bbox=[None]*2, k=3,
ext=0, check_finite=False):
if check_finite:
if (not np.isfinite(x).all() or not np.isfinite(y).all() or
not np.isfinite(w).all() or not np.isfinite(t).all()):
raise ValueError("Input(s) must not contain NaNs or infs.")
# _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
xb = bbox[0]
xe = bbox[1]
if xb is None:
xb = x[0]
if xe is None:
xe = x[-1]
t = concatenate(([xb]*(k+1), t, [xe]*(k+1)))
n = len(t)
if not alltrue(t[k+1:n-k]-t[k:n-k-1] > 0, axis=0):
raise ValueError('Interior knots t must satisfy '
'Schoenberg-Whitney conditions')
if not dfitpack.fpchec(x, t, k) == 0:
raise ValueError(_fpchec_error_string)
data = dfitpack.fpcurfm1(x, y, k, t, w=w, xb=xb, xe=xe)
self._data = data[:-3] + (None, None, data[-1])
self._reset_class()
try:
self.ext = _extrap_modes[ext]
except KeyError:
raise ValueError("Unknown extrapolation mode %s." % ext)
################ Bivariate spline ####################
class _BivariateSplineBase(object):
""" Base class for Bivariate spline s(x,y) interpolation on the rectangle
[xb,xe] x [yb, ye] calculated from a given set of data points
(x,y,z).
See Also
--------
bisplrep, bisplev : an older wrapping of FITPACK
BivariateSpline :
implementation of bivariate spline interpolation on a plane grid
SphereBivariateSpline :
implementation of bivariate spline interpolation on a spherical grid
"""
def get_residual(self):
""" Return weighted sum of squared residuals of the spline
approximation: sum ((w[i]*(z[i]-s(x[i],y[i])))**2,axis=0)
"""
return self.fp
def get_knots(self):
""" Return a tuple (tx,ty) where tx,ty contain knots positions
of the spline with respect to x-, y-variable, respectively.
The position of interior and additional knots are given as
t[k+1:-k-1] and t[:k+1]=b, t[-k-1:]=e, respectively.
"""
return self.tck[:2]
def get_coeffs(self):
""" Return spline coefficients."""
return self.tck[2]
def __call__(self, x, y, mth=None, dx=0, dy=0, grid=True):
"""
Evaluate the spline or its derivatives at given positions.
Parameters
----------
x, y : array_like
Input coordinates.
If `grid` is False, evaluate the spline at points ``(x[i],
y[i]), i=0, ..., len(x)-1``. Standard Numpy broadcasting
is obeyed.
If `grid` is True: evaluate spline at the grid points
defined by the coordinate arrays x, y. The arrays must be
sorted in increasing order.
dx : int
Order of x-derivative
.. versionadded:: 0.14.0
dy : int
Order of y-derivative
.. versionadded:: 0.14.0
grid : bool
Whether to evaluate the results on a grid spanned by the
input arrays, or at points specified by the input arrays.
.. versionadded:: 0.14.0
mth : str
Deprecated argument. Has no effect.
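Examples
--------
A minimal sketch of grid versus pointwise evaluation, using the
`RectBivariateSpline` subclass defined below; the data are purely
illustrative:
>>> from scipy.interpolate import RectBivariateSpline
>>> x = y = np.linspace(0., 4., 5)
>>> spl = RectBivariateSpline(x, y, np.outer(x, y))
>>> spl([1., 2.], [1., 3.]).shape        # grid=True: values on a 2x2 grid
(2, 2)
>>> spl([1., 2.], [1., 3.], grid=False).shape   # pointwise evaluation
(2,)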
"""
x = np.asarray(x)
y = np.asarray(y)
if mth is not None:
warnings.warn("The `mth` argument is deprecated and will be removed",
FutureWarning)
tx, ty, c = self.tck[:3]
kx, ky = self.degrees
if grid:
if x.size == 0 or y.size == 0:
return np.zeros((x.size, y.size), dtype=self.tck[2].dtype)
if dx or dy:
z,ier = dfitpack.parder(tx,ty,c,kx,ky,dx,dy,x,y)
if not ier == 0:
raise ValueError("Error code returned by parder: %s" % ier)
else:
z,ier = dfitpack.bispev(tx,ty,c,kx,ky,x,y)
if not ier == 0:
raise ValueError("Error code returned by bispev: %s" % ier)
else:
# standard Numpy broadcasting
if x.shape != y.shape:
x, y = np.broadcast_arrays(x, y)
shape = x.shape
x = x.ravel()
y = y.ravel()
if x.size == 0 or y.size == 0:
return np.zeros(shape, dtype=self.tck[2].dtype)
if dx or dy:
z,ier = dfitpack.pardeu(tx,ty,c,kx,ky,dx,dy,x,y)
if not ier == 0:
raise ValueError("Error code returned by pardeu: %s" % ier)
else:
z,ier = dfitpack.bispeu(tx,ty,c,kx,ky,x,y)
if not ier == 0:
raise ValueError("Error code returned by bispeu: %s" % ier)
z = z.reshape(shape)
return z
_surfit_messages = {1:"""
The required storage space exceeds the available storage space: nxest
or nyest too small, or s too small.
The weighted least-squares spline corresponds to the current set of
knots.""",
2:"""
A theoretically impossible result was found during the iteration
process for finding a smoothing spline with fp = s: s too small or
badly chosen eps.
Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""",
3:"""
the maximal number of iterations maxit (set to 20 by the program)
allowed for finding a smoothing spline with fp=s has been reached:
s too small.
Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""",
4:"""
No more knots can be added because the number of b-spline coefficients
(nx-kx-1)*(ny-ky-1) already exceeds the number of data points m:
either s or m too small.
The weighted least-squares spline corresponds to the current set of
knots.""",
5:"""
No more knots can be added because the additional knot would (quasi)
coincide with an old one: s too small or too large a weight to an
inaccurate data point.
The weighted least-squares spline corresponds to the current set of
knots.""",
10:"""
Error on entry, no approximation returned. The following conditions
must hold:
xb<=x[i]<=xe, yb<=y[i]<=ye, w[i]>0, i=0..m-1
If iopt==-1, then
xb<tx[kx+1]<tx[kx+2]<...<tx[nx-kx-2]<xe
yb<ty[ky+1]<ty[ky+2]<...<ty[ny-ky-2]<ye""",
-3:"""
The coefficients of the spline returned have been computed as the
minimal norm least-squares solution of a (numerically) rank deficient
system (deficiency=%i). If deficiency is large, the results may be
inaccurate. Deficiency may strongly depend on the value of eps."""
}
class BivariateSpline(_BivariateSplineBase):
"""
Base class for bivariate splines.
This describes a spline ``s(x, y)`` of degrees ``kx`` and ``ky`` on
the rectangle ``[xb, xe] * [yb, ye]`` calculated from a given set
of data points ``(x, y, z)``.
This class is meant to be subclassed, not instantiated directly.
To construct these splines, call either `SmoothBivariateSpline` or
`LSQBivariateSpline`.
See Also
--------
UnivariateSpline : a similar class for univariate spline interpolation
SmoothBivariateSpline :
to create a BivariateSpline through the given points
LSQBivariateSpline :
to create a BivariateSpline using weighted least-squares fitting
SphereBivariateSpline :
bivariate spline interpolation in spherical coordinates
bisplrep : older wrapping of FITPACK
bisplev : older wrapping of FITPACK
"""
@classmethod
def _from_tck(cls, tck):
"""Construct a spline object from given tck and degree"""
self = cls.__new__(cls)
if len(tck) != 5:
raise ValueError("tck should be a 5 element tuple of tx, ty, c, kx, ky")
self.tck = tck[:3]
self.degrees = tck[3:]
return self
def ev(self, xi, yi, dx=0, dy=0):
"""
Evaluate the spline at points
Returns the interpolated value at ``(xi[i], yi[i]),
i=0,...,len(xi)-1``.
Parameters
----------
xi, yi : array_like
Input coordinates. Standard Numpy broadcasting is obeyed.
dx : int, optional
Order of x-derivative
.. versionadded:: 0.14.0
dy : int, optional
Order of y-derivative
.. versionadded:: 0.14.0
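Examples
--------
A minimal sketch of pointwise evaluation and derivatives, using the
`RectBivariateSpline` subclass with illustrative data:
>>> from scipy.interpolate import RectBivariateSpline
>>> x = y = np.linspace(0., 4., 9)
>>> spl = RectBivariateSpline(x, y, np.outer(x, y))
>>> vals = spl.ev([1., 2.], [1.5, 2.5])         # values at two scattered points
>>> dvals = spl.ev([1., 2.], [1.5, 2.5], dx=1)  # first x-derivative at those points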
"""
return self.__call__(xi, yi, dx=dx, dy=dy, grid=False)
def integral(self, xa, xb, ya, yb):
"""
Evaluate the integral of the spline over area [xa,xb] x [ya,yb].
Parameters
----------
xa, xb : float
The end-points of the x integration interval.
ya, yb : float
The end-points of the y integration interval.
Returns
-------
integ : float
The value of the resulting integral.
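Examples
--------
A minimal sketch, assuming a `RectBivariateSpline` fitted to data that
are identically 1, so the integral over the unit square is close to 1:
>>> from scipy.interpolate import RectBivariateSpline
>>> x = y = np.linspace(0., 1., 7)
>>> spl = RectBivariateSpline(x, y, np.ones((7, 7)))
>>> integ = spl.integral(0., 1., 0., 1.)   # approximately 1.0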
"""
tx,ty,c = self.tck[:3]
kx,ky = self.degrees
return dfitpack.dblint(tx,ty,c,kx,ky,xa,xb,ya,yb)
class SmoothBivariateSpline(BivariateSpline):
"""
Smooth bivariate spline approximation.
Parameters
----------
x, y, z : array_like
1-D sequences of data points (order is not important).
w : array_like, optional
Positive 1-D sequence of weights, of same length as `x`, `y` and `z`.
bbox : array_like, optional
Sequence of length 4 specifying the boundary of the rectangular
approximation domain. By default,
``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
kx, ky : ints, optional
Degrees of the bivariate spline. Default is 3.
s : float, optional
Positive smoothing factor defined for estimation condition:
``sum((w[i]*(z[i]-s(x[i], y[i])))**2, axis=0) <= s``
Default ``s=len(w)`` which should be a good value if ``1/w[i]`` is an
estimate of the standard deviation of ``z[i]``.
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value between 0 and 1,
the default is 1e-16.
See Also
--------
bisplrep : an older wrapping of FITPACK
bisplev : an older wrapping of FITPACK
UnivariateSpline : a similar class for univariate spline interpolation
LSQBivariateSpline : to create a BivariateSpline using weighted least-squares fitting
Notes
-----
The length of `x`, `y` and `z` should be at least ``(kx+1) * (ky+1)``.
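Examples
--------
A minimal usage sketch for scattered data; the sample data and the
default smoothing factor are illustrative only:
>>> from scipy.interpolate import SmoothBivariateSpline
>>> x = np.random.uniform(-2., 2., 100)
>>> y = np.random.uniform(-2., 2., 100)
>>> z = np.exp(-(x**2 + y**2))
>>> spl = SmoothBivariateSpline(x, y, z)
>>> znew = spl(np.linspace(-1.5, 1.5, 20), np.linspace(-1.5, 1.5, 20))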
"""
def __init__(self, x, y, z, w=None, bbox=[None] * 4, kx=3, ky=3, s=None,
eps=None):
xb,xe,yb,ye = bbox
nx,tx,ny,ty,c,fp,wrk1,ier = dfitpack.surfit_smth(x,y,z,w,
xb,xe,yb,ye,
kx,ky,s=s,
eps=eps,lwrk2=1)
if ier > 10:  # lwrk2 was too small, re-run
nx,tx,ny,ty,c,fp,wrk1,ier = dfitpack.surfit_smth(x,y,z,w,
xb,xe,yb,ye,
kx,ky,s=s,
eps=eps,lwrk2=ier)
if ier in [0,-1,-2]: # normal return
pass
else:
message = _surfit_messages.get(ier,'ier=%s' % (ier))
warnings.warn(message)
self.fp = fp
self.tck = tx[:nx],ty[:ny],c[:(nx-kx-1)*(ny-ky-1)]
self.degrees = kx,ky
class LSQBivariateSpline(BivariateSpline):
"""
Weighted least-squares bivariate spline approximation.
Parameters
----------
x, y, z : array_like
1-D sequences of data points (order is not important).
tx, ty : array_like
Strictly ordered 1-D sequences of knots coordinates.
w : array_like, optional
Positive 1-D array of weights, of the same length as `x`, `y` and `z`.
bbox : (4,) array_like, optional
Sequence of length 4 specifying the boundary of the rectangular
approximation domain. By default,
``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
kx, ky : ints, optional
Degrees of the bivariate spline. Default is 3.
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value between 0 and 1,
the default is 1e-16.
See Also
--------
bisplrep : an older wrapping of FITPACK
bisplev : an older wrapping of FITPACK
UnivariateSpline : a similar class for univariate spline interpolation
SmoothBivariateSpline : create a smoothing BivariateSpline
Notes
-----
The length of `x`, `y` and `z` should be at least ``(kx+1) * (ky+1)``.
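Examples
--------
A minimal usage sketch; the interior knot locations below are
illustrative and must satisfy the Schoenberg-Whitney conditions for the
data at hand:
>>> from scipy.interpolate import LSQBivariateSpline
>>> xg, yg = np.meshgrid(np.linspace(0., 3., 12), np.linspace(0., 3., 12))
>>> x, y = xg.ravel(), yg.ravel()
>>> z = np.exp(-(x + y))
>>> spl = LSQBivariateSpline(x, y, z, [1., 2.], [1., 2.])
>>> res = spl.get_residual()   # weighted sum of squared residuals of the fit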
"""
def __init__(self, x, y, z, tx, ty, w=None, bbox=[None]*4, kx=3, ky=3,
eps=None):
nx = 2*kx+2+len(tx)
ny = 2*ky+2+len(ty)
tx1 = zeros((nx,),float)
ty1 = zeros((ny,),float)
tx1[kx+1:nx-kx-1] = tx
ty1[ky+1:ny-ky-1] = ty
xb,xe,yb,ye = bbox
tx1,ty1,c,fp,ier = dfitpack.surfit_lsq(x,y,z,tx1,ty1,w,
xb,xe,yb,ye,
kx,ky,eps,lwrk2=1)
if ier > 10:
tx1,ty1,c,fp,ier = dfitpack.surfit_lsq(x,y,z,tx1,ty1,w,
xb,xe,yb,ye,
kx,ky,eps,lwrk2=ier)
if ier in [0,-1,-2]: # normal return
pass
else:
if ier < -2:
deficiency = (nx-kx-1)*(ny-ky-1)+ier
message = _surfit_messages.get(-3) % (deficiency)
else:
message = _surfit_messages.get(ier, 'ier=%s' % (ier))
warnings.warn(message)
self.fp = fp
self.tck = tx1, ty1, c
self.degrees = kx, ky
class RectBivariateSpline(BivariateSpline):
"""
Bivariate spline approximation over a rectangular mesh.
Can be used for both smoothing and interpolating data.
Parameters
----------
x,y : array_like
1-D arrays of coordinates in strictly ascending order.
z : array_like
2-D array of data with shape (x.size,y.size).
bbox : array_like, optional
Sequence of length 4 specifying the boundary of the rectangular
approximation domain. By default,
``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
kx, ky : ints, optional
Degrees of the bivariate spline. Default is 3.
s : float, optional
Positive smoothing factor defined for estimation condition:
``sum((w[i]*(z[i]-s(x[i], y[i])))**2, axis=0) <= s``
Default is ``s=0``, which is for interpolation.
See Also
--------
SmoothBivariateSpline : a smoothing bivariate spline for scattered data
bisplrep : an older wrapping of FITPACK
bisplev : an older wrapping of FITPACK
UnivariateSpline : a similar class for univariate spline interpolation
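Examples
--------
A minimal usage sketch for interpolating gridded data; the grid and the
test function are illustrative:
>>> from scipy.interpolate import RectBivariateSpline
>>> x = np.arange(0., 5., 0.5)
>>> y = np.arange(0., 5., 0.5)
>>> z = np.sin(x)[:, np.newaxis] * np.cos(y)[np.newaxis, :]
>>> spl = RectBivariateSpline(x, y, z)
>>> znew = spl(np.linspace(0., 4.5, 40), np.linspace(0., 4.5, 40))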
"""
def __init__(self, x, y, z, bbox=[None] * 4, kx=3, ky=3, s=0):
x, y = ravel(x), ravel(y)
if not all(diff(x) > 0.0):
raise TypeError('x must be strictly increasing')
if not all(diff(y) > 0.0):
raise TypeError('y must be strictly increasing')
if not ((x.min() == x[0]) and (x.max() == x[-1])):
raise TypeError('x must be strictly ascending')
if not ((y.min() == y[0]) and (y.max() == y[-1])):
raise TypeError('y must be strictly ascending')
if not x.size == z.shape[0]:
raise TypeError('x dimension of z must have same number of '
'elements as x')
if not y.size == z.shape[1]:
raise TypeError('y dimension of z must have same number of '
'elements as y')
z = ravel(z)
xb, xe, yb, ye = bbox
nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(x, y, z, xb, xe, yb,
ye, kx, ky, s)
if ier not in [0, -1, -2]:
msg = _surfit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(msg)
self.fp = fp
self.tck = tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)]
self.degrees = kx, ky
_spherefit_messages = _surfit_messages.copy()
_spherefit_messages[10] = """
ERROR. On entry, the input data are controlled on validity. The following
restrictions must be satisfied:
-1<=iopt<=1, m>=2, ntest>=8 ,npest >=8, 0<eps<1,
0<=teta(i)<=pi, 0<=phi(i)<=2*pi, w(i)>0, i=1,...,m
lwrk1 >= 185+52*v+10*u+14*u*v+8*(u-1)*v**2+8*m
kwrk >= m+(ntest-7)*(npest-7)
if iopt=-1: 8<=nt<=ntest , 9<=np<=npest
0<tt(5)<tt(6)<...<tt(nt-4)<pi
0<tp(5)<tp(6)<...<tp(np-4)<2*pi
if iopt>=0: s>=0
if one of these conditions is found to be violated,control
is immediately repassed to the calling program. in that
case there is no approximation returned."""
_spherefit_messages[-3] = """
WARNING. The coefficients of the spline returned have been computed as the
minimal norm least-squares solution of a (numerically) rank
deficient system (deficiency=%i, rank=%i). Especially if the rank
deficiency, which is computed by 6+(nt-8)*(np-7)+ier, is large,
the results may be inaccurate. They could also seriously depend on
the value of eps."""
class SphereBivariateSpline(_BivariateSplineBase):
"""
Bivariate spline s(x,y) of degrees 3 on a sphere, calculated from a
given set of data points (theta,phi,r).
.. versionadded:: 0.11.0
See Also
--------
bisplrep, bisplev : an older wrapping of FITPACK
UnivariateSpline : a similar class for univariate spline interpolation
SmoothSphereBivariateSpline :
to create a SphereBivariateSpline through the given points
LSQSphereBivariateSpline :
to create a SphereBivariateSpline using weighted least-squares fitting
"""
def __call__(self, theta, phi, dtheta=0, dphi=0, grid=True):
"""
Evaluate the spline or its derivatives at given positions.
Parameters
----------
theta, phi : array_like
Input coordinates.
If `grid` is False, evaluate the spline at points
``(theta[i], phi[i]), i=0, ..., len(x)-1``. Standard
Numpy broadcasting is obeyed.
If `grid` is True: evaluate spline at the grid points
defined by the coordinate arrays theta, phi. The arrays
must be sorted in increasing order.
dtheta : int, optional
Order of theta-derivative
.. versionadded:: 0.14.0
dphi : int
Order of phi-derivative
.. versionadded:: 0.14.0
grid : bool
Whether to evaluate the results on a grid spanned by the
input arrays, or at points specified by the input arrays.
.. versionadded:: 0.14.0
"""
theta = np.asarray(theta)
phi = np.asarray(phi)
if theta.size > 0 and (theta.min() < 0. or theta.max() > np.pi):
raise ValueError("requested theta out of bounds.")
if phi.size > 0 and (phi.min() < 0. or phi.max() > 2. * np.pi):
raise ValueError("requested phi out of bounds.")
return _BivariateSplineBase.__call__(self, theta, phi,
dx=dtheta, dy=dphi, grid=grid)
def ev(self, theta, phi, dtheta=0, dphi=0):
"""
Evaluate the spline at points
Returns the interpolated value at ``(theta[i], phi[i]),
i=0,...,len(theta)-1``.
Parameters
----------
theta, phi : array_like
Input coordinates. Standard Numpy broadcasting is obeyed.
dtheta : int, optional
Order of theta-derivative
.. versionadded:: 0.14.0
dphi : int, optional
Order of phi-derivative
.. versionadded:: 0.14.0
"""
return self.__call__(theta, phi, dtheta=dtheta, dphi=dphi, grid=False)
class SmoothSphereBivariateSpline(SphereBivariateSpline):
"""
Smooth bivariate spline approximation in spherical coordinates.
.. versionadded:: 0.11.0
Parameters
----------
theta, phi, r : array_like
1-D sequences of data points (order is not important). Coordinates
must be given in radians. Theta must lie within the interval (0, pi),
and phi must lie within the interval (0, 2pi).
w : array_like, optional
Positive 1-D sequence of weights.
s : float, optional
Positive smoothing factor defined for estimation condition:
``sum((w(i)*(r(i) - s(theta(i), phi(i))))**2, axis=0) <= s``
Default ``s=len(w)`` which should be a good value if 1/w[i] is an
estimate of the standard deviation of r[i].
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value between 0 and 1,
the default is 1e-16.
Notes
-----
For more information, see the FITPACK_ site about this function.
.. _FITPACK: http://www.netlib.org/dierckx/sphere.f
Examples
--------
Suppose we have global data on a coarse grid (the input data does not
have to be on a grid):
>>> theta = np.linspace(0., np.pi, 7)
>>> phi = np.linspace(0., 2*np.pi, 9)
>>> data = np.empty((theta.shape[0], phi.shape[0]))
>>> data[:,0], data[0,:], data[-1,:] = 0., 0., 0.
>>> data[1:-1,1], data[1:-1,-1] = 1., 1.
>>> data[1,1:-1], data[-2,1:-1] = 1., 1.
>>> data[2:-2,2], data[2:-2,-2] = 2., 2.
>>> data[2,2:-2], data[-3,2:-2] = 2., 2.
>>> data[3,3:-2] = 3.
>>> data = np.roll(data, 4, 1)
We need to set up the interpolator object
>>> lats, lons = np.meshgrid(theta, phi)
>>> from scipy.interpolate import SmoothSphereBivariateSpline
>>> lut = SmoothSphereBivariateSpline(lats.ravel(), lons.ravel(),
... data.T.ravel(), s=3.5)
As a first test, we'll see what the algorithm returns when run on the
input coordinates
>>> data_orig = lut(theta, phi)
Finally we interpolate the data to a finer grid
>>> fine_lats = np.linspace(0., np.pi, 70)
>>> fine_lons = np.linspace(0., 2 * np.pi, 90)
>>> data_smth = lut(fine_lats, fine_lons)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(131)
>>> ax1.imshow(data, interpolation='nearest')
>>> ax2 = fig.add_subplot(132)
>>> ax2.imshow(data_orig, interpolation='nearest')
>>> ax3 = fig.add_subplot(133)
>>> ax3.imshow(data_smth, interpolation='nearest')
>>> plt.show()
"""
def __init__(self, theta, phi, r, w=None, s=0., eps=1E-16):
if isinstance(w, float):
w = ones(len(theta)) * w
nt_, tt_, np_, tp_, c, fp, ier = dfitpack.spherfit_smth(theta, phi,
r, w=w, s=s,
eps=eps)
if ier not in [0, -1, -2]:
message = _spherefit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(message)
self.fp = fp
self.tck = tt_[:nt_], tp_[:np_], c[:(nt_ - 4) * (np_ - 4)]
self.degrees = (3, 3)
class LSQSphereBivariateSpline(SphereBivariateSpline):
"""
Weighted least-squares bivariate spline approximation in spherical
coordinates.
.. versionadded:: 0.11.0
Parameters
----------
theta, phi, r : array_like
1-D sequences of data points (order is not important). Coordinates
must be given in radians. Theta must lie within the interval (0, pi),
and phi must lie within the interval (0, 2pi).
tt, tp : array_like
Strictly ordered 1-D sequences of knots coordinates.
Coordinates must satisfy ``0 < tt[i] < pi``, ``0 < tp[i] < 2*pi``.
w : array_like, optional
Positive 1-D sequence of weights, of the same length as `theta`, `phi`
and `r`.
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value between 0 and 1,
the default is 1e-16.
Notes
-----
For more information, see the FITPACK_ site about this function.
.. _FITPACK: http://www.netlib.org/dierckx/sphere.f
Examples
--------
Suppose we have global data on a coarse grid (the input data does not
have to be on a grid):
>>> theta = np.linspace(0., np.pi, 7)
>>> phi = np.linspace(0., 2*np.pi, 9)
>>> data = np.empty((theta.shape[0], phi.shape[0]))
>>> data[:,0], data[0,:], data[-1,:] = 0., 0., 0.
>>> data[1:-1,1], data[1:-1,-1] = 1., 1.
>>> data[1,1:-1], data[-2,1:-1] = 1., 1.
>>> data[2:-2,2], data[2:-2,-2] = 2., 2.
>>> data[2,2:-2], data[-3,2:-2] = 2., 2.
>>> data[3,3:-2] = 3.
>>> data = np.roll(data, 4, 1)
We need to set up the interpolator object. Here, we must also specify the
coordinates of the knots to use.
>>> lats, lons = np.meshgrid(theta, phi)
>>> knotst, knotsp = theta.copy(), phi.copy()
>>> knotst[0] += .0001
>>> knotst[-1] -= .0001
>>> knotsp[0] += .0001
>>> knotsp[-1] -= .0001
>>> from scipy.interpolate import LSQSphereBivariateSpline
>>> lut = LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
... data.T.ravel(), knotst, knotsp)
As a first test, we'll see what the algorithm returns when run on the
input coordinates
>>> data_orig = lut(theta, phi)
Finally we interpolate the data to a finer grid
>>> fine_lats = np.linspace(0., np.pi, 70)
>>> fine_lons = np.linspace(0., 2*np.pi, 90)
>>> data_lsq = lut(fine_lats, fine_lons)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(131)
>>> ax1.imshow(data, interpolation='nearest')
>>> ax2 = fig.add_subplot(132)
>>> ax2.imshow(data_orig, interpolation='nearest')
>>> ax3 = fig.add_subplot(133)
>>> ax3.imshow(data_lsq, interpolation='nearest')
>>> plt.show()
"""
def __init__(self, theta, phi, r, tt, tp, w=None, eps=1E-16):
if isinstance(w, float):
w = ones(len(theta)) * w
nt_, np_ = 8 + len(tt), 8 + len(tp)
tt_, tp_ = zeros((nt_,), float), zeros((np_,), float)
tt_[4:-4], tp_[4:-4] = tt, tp
tt_[-4:], tp_[-4:] = np.pi, 2. * np.pi
tt_, tp_, c, fp, ier = dfitpack.spherfit_lsq(theta, phi, r, tt_, tp_,
w=w, eps=eps)
if ier < -2:
deficiency = 6 + (nt_ - 8) * (np_ - 7) + ier
message = _spherefit_messages.get(-3) % (deficiency, -ier)
warnings.warn(message)
elif ier not in [0, -1, -2]:
message = _spherefit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(message)
self.fp = fp
self.tck = tt_, tp_, c
self.degrees = (3, 3)
_spfit_messages = _surfit_messages.copy()
_spfit_messages[10] = """
ERROR: on entry, the input data are controlled on validity
the following restrictions must be satisfied.
-1<=iopt(1)<=1, 0<=iopt(2)<=1, 0<=iopt(3)<=1,
-1<=ider(1)<=1, 0<=ider(2)<=1, ider(2)=0 if iopt(2)=0.
-1<=ider(3)<=1, 0<=ider(4)<=1, ider(4)=0 if iopt(3)=0.
mu >= mumin (see above), mv >= 4, nuest >=8, nvest >= 8,
kwrk>=5+mu+mv+nuest+nvest,
lwrk >= 12+nuest*(mv+nvest+3)+nvest*24+4*mu+8*mv+max(nuest,mv+nvest)
0< u(i-1)<u(i)< pi,i=2,..,mu,
-pi<=v(1)< pi, v(1)<v(i-1)<v(i)<v(1)+2*pi, i=3,...,mv
if iopt(1)=-1: 8<=nu<=min(nuest,mu+6+iopt(2)+iopt(3))
0<tu(5)<tu(6)<...<tu(nu-4)< pi
8<=nv<=min(nvest,mv+7)
v(1)<tv(5)<tv(6)<...<tv(nv-4)<v(1)+2*pi
the schoenberg-whitney conditions, i.e. there must be
subset of grid co-ordinates uu(p) and vv(q) such that
tu(p) < uu(p) < tu(p+4) ,p=1,...,nu-4
(iopt(2)=1 and iopt(3)=1 also count for a uu-value
tv(q) < vv(q) < tv(q+4) ,q=1,...,nv-4
(vv(q) is either a value v(j) or v(j)+2*pi)
if iopt(1)>=0: s>=0
if s=0: nuest>=mu+6+iopt(2)+iopt(3), nvest>=mv+7
if one of these conditions is found to be violated,control is
immediately repassed to the calling program. in that case there is no
approximation returned."""
class RectSphereBivariateSpline(SphereBivariateSpline):
"""
Bivariate spline approximation over a rectangular mesh on a sphere.
Can be used for smoothing data.
.. versionadded:: 0.11.0
Parameters
----------
u : array_like
1-D array of latitude coordinates in strictly ascending order.
Coordinates must be given in radians and lie within the interval
(0, pi).
v : array_like
1-D array of longitude coordinates in strictly ascending order.
Coordinates must be given in radians, and must lie within (0, 2pi).
r : array_like
2-D array of data with shape ``(u.size, v.size)``.
s : float, optional
Positive smoothing factor defined for estimation condition
(``s=0`` is for interpolation).
pole_continuity : bool or (bool, bool), optional
Order of continuity at the poles ``u=0`` (``pole_continuity[0]``) and
``u=pi`` (``pole_continuity[1]``). The order of continuity at the pole
will be 1 or 0 when this is True or False, respectively.
Defaults to False.
pole_values : float or (float, float), optional
Data values at the poles ``u=0`` and ``u=pi``. Either the whole
parameter or each individual element can be None. Defaults to None.
pole_exact : bool or (bool, bool), optional
Data value exactness at the poles ``u=0`` and ``u=pi``. If True, the
value is considered to be the right function value, and it will be
fitted exactly. If False, the value will be considered to be a data
value just like the other data values. Defaults to False.
pole_flat : bool or (bool, bool), optional
For the poles at ``u=0`` and ``u=pi``, specify whether or not the
approximation has vanishing derivatives. Defaults to False.
See Also
--------
RectBivariateSpline : bivariate spline approximation over a rectangular
mesh
Notes
-----
Currently, only the smoothing spline approximation (``iopt[0] = 0`` and
``iopt[0] = 1`` in the FITPACK routine) is supported. The exact
least-squares spline approximation is not implemented yet.
When actually performing the interpolation, the requested `v` values must
lie within the same length 2pi interval that the original `v` values were
chosen from.
For more information, see the FITPACK_ site about this function.
.. _FITPACK: http://www.netlib.org/dierckx/spgrid.f
Examples
--------
Suppose we have global data on a coarse grid
>>> lats = np.linspace(10, 170, 9) * np.pi / 180.
>>> lons = np.linspace(0, 350, 18) * np.pi / 180.
>>> data = np.dot(np.atleast_2d(90. - np.linspace(-80., 80., 18)).T,
... np.atleast_2d(180. - np.abs(np.linspace(0., 350., 9)))).T
We want to interpolate it to a global one-degree grid
>>> new_lats = np.linspace(1, 180, 180) * np.pi / 180
>>> new_lons = np.linspace(1, 360, 360) * np.pi / 180
>>> new_lats, new_lons = np.meshgrid(new_lats, new_lons)
We need to set up the interpolator object
>>> from scipy.interpolate import RectSphereBivariateSpline
>>> lut = RectSphereBivariateSpline(lats, lons, data)
Finally we interpolate the data. The `RectSphereBivariateSpline` object
only takes 1-D arrays as input, therefore we need to do some reshaping.
>>> data_interp = lut.ev(new_lats.ravel(),
... new_lons.ravel()).reshape((360, 180)).T
Looking at the original and the interpolated data, one can see that the
interpolant reproduces the original data very well:
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(211)
>>> ax1.imshow(data, interpolation='nearest')
>>> ax2 = fig.add_subplot(212)
>>> ax2.imshow(data_interp, interpolation='nearest')
>>> plt.show()
Choosing the optimal value of ``s`` can be a delicate task. Recommended
values for ``s`` depend on the accuracy of the data values. If the user
has an idea of the statistical errors on the data, she can also find a
proper estimate for ``s``. By assuming that, if she specifies the
right ``s``, the interpolator will use a spline ``f(u,v)`` which exactly
reproduces the function underlying the data, she can evaluate
``sum((r(i,j)-s(u(i),v(j)))**2)`` to find a good estimate for this ``s``.
For example, if she knows that the statistical errors on her
``r(i,j)``-values are not greater than 0.1, she may expect that a good
``s`` should have a value not larger than ``u.size * v.size * (0.1)**2``.
If nothing is known about the statistical error in ``r(i,j)``, ``s`` must
be determined by trial and error. The best is then to start with a very
large value of ``s`` (to determine the least-squares polynomial and the
corresponding upper bound ``fp0`` for ``s``) and then to progressively
decrease the value of ``s`` (say by a factor 10 in the beginning, i.e.
``s = fp0 / 10, fp0 / 100, ...`` and more carefully as the approximation
shows more detail) to obtain closer fits.
The interpolation results for different values of ``s`` give some insight
into this process:
>>> fig2 = plt.figure()
>>> s = [3e9, 2e9, 1e9, 1e8]
>>> for ii in range(len(s)):
... lut = RectSphereBivariateSpline(lats, lons, data, s=s[ii])
... data_interp = lut.ev(new_lats.ravel(),
... new_lons.ravel()).reshape((360, 180)).T
... ax = fig2.add_subplot(2, 2, ii+1)
... ax.imshow(data_interp, interpolation='nearest')
... ax.set_title("s = %g" % s[ii])
>>> plt.show()
"""
def __init__(self, u, v, r, s=0., pole_continuity=False, pole_values=None,
pole_exact=False, pole_flat=False):
iopt = np.array([0, 0, 0], dtype=int)
ider = np.array([-1, 0, -1, 0], dtype=int)
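# iopt[1:] will hold the requested order of continuity at the poles u=0 and
# u=pi; ider packs, for each pole, whether a data value was supplied (and
# whether it is exact) and whether the derivatives should vanish there, as
# set by the assignments below.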
if pole_values is None:
pole_values = (None, None)
elif isinstance(pole_values, (float, np.float32, np.float64)):
pole_values = (pole_values, pole_values)
if isinstance(pole_continuity, bool):
pole_continuity = (pole_continuity, pole_continuity)
if isinstance(pole_exact, bool):
pole_exact = (pole_exact, pole_exact)
if isinstance(pole_flat, bool):
pole_flat = (pole_flat, pole_flat)
r0, r1 = pole_values
iopt[1:] = pole_continuity
if r0 is None:
ider[0] = -1
else:
ider[0] = pole_exact[0]
if r1 is None:
ider[2] = -1
else:
ider[2] = pole_exact[1]
ider[1], ider[3] = pole_flat
u, v = np.ravel(u), np.ravel(v)
if not np.all(np.diff(u) > 0.0):
raise TypeError('u must be strictly increasing')
if not np.all(np.diff(v) > 0.0):
raise TypeError('v must be strictly increasing')
if not u.size == r.shape[0]:
raise TypeError('u dimension of r must have same number of '
'elements as u')
if not v.size == r.shape[1]:
raise TypeError('v dimension of r must have same number of '
'elements as v')
if pole_continuity[1] is False and pole_flat[1] is True:
raise TypeError('if pole_continuity is False, so must be '
'pole_flat')
if pole_continuity[0] is False and pole_flat[0] is True:
raise TypeError('if pole_continuity is False, so must be '
'pole_flat')
r = np.ravel(r)
nu, tu, nv, tv, c, fp, ier = dfitpack.regrid_smth_spher(iopt, ider,
u.copy(), v.copy(), r.copy(), r0, r1, s)
if ier not in [0, -1, -2]:
msg = _spfit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(msg)
self.fp = fp
self.tck = tu[:nu], tv[:nv], c[:(nu - 4) * (nv-4)]
self.degrees = (3, 3)
|
py | 1a30ea46239fd94a37b77ac11726303b6c87fcbb | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Auto Model class. """
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from .modeling_bert import BertModel, BertForMaskedLM, BertForSequenceClassification, BertForQuestionAnswering, BERT_PRETRAINED_MODEL_ARCHIVE_MAP
from .modeling_openai import OpenAIGPTModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP
from .modeling_gpt2 import GPT2Model, GPT2LMHeadModel, GPT2_PRETRAINED_MODEL_ARCHIVE_MAP
from .modeling_ctrl import CTRLModel, CTRLLMHeadModel, CTRL_PRETRAINED_MODEL_ARCHIVE_MAP
from .modeling_transfo_xl import TransfoXLModel, TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP
from .modeling_xlnet import XLNetModel, XLNetLMHeadModel, XLNetForSequenceClassification, XLNetForQuestionAnswering, XLNET_PRETRAINED_MODEL_ARCHIVE_MAP
from .modeling_xlm import XLMModel, XLMWithLMHeadModel, XLMForSequenceClassification, XLMForQuestionAnswering, XLM_PRETRAINED_MODEL_ARCHIVE_MAP
from .modeling_roberta import RobertaModel, RobertaForMaskedLM, RobertaForSequenceClassification, ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
from .modeling_distilbert import DistilBertModel, DistilBertForQuestionAnswering, DistilBertForMaskedLM, DistilBertForSequenceClassification, DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP
from .modeling_camembert import CamembertModel, CamembertForMaskedLM, CamembertForSequenceClassification, CamembertForMultipleChoice, CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP
from .modeling_albert import AlbertModel, AlbertForMaskedLM, AlbertForSequenceClassification, AlbertForQuestionAnswering, ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP
from .modeling_t5 import T5Model, T5WithLMHeadModel, T5_PRETRAINED_MODEL_ARCHIVE_MAP
from .modeling_utils import PreTrainedModel, SequenceSummary
from .file_utils import add_start_docstrings
logger = logging.getLogger(__name__)
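# Merge the per-model pretrained archive maps into a single lookup table
# mapping every known shortcut name to its download URL.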
ALL_PRETRAINED_MODEL_ARCHIVE_MAP = dict((key, value)
for pretrained_map in [
BERT_PRETRAINED_MODEL_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP,
GPT2_PRETRAINED_MODEL_ARCHIVE_MAP,
CTRL_PRETRAINED_MODEL_ARCHIVE_MAP,
XLNET_PRETRAINED_MODEL_ARCHIVE_MAP,
XLM_PRETRAINED_MODEL_ARCHIVE_MAP,
ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
T5_PRETRAINED_MODEL_ARCHIVE_MAP,
]
for key, value in pretrained_map.items())
class AutoModel(object):
r"""
:class:`~transformers.AutoModel` is a generic model class
that will be instantiated as one of the base model classes of the library
when created with the `AutoModel.from_pretrained(pretrained_model_name_or_path)`
class method.
The `from_pretrained()` method takes care of returning the correct model class instance
using pattern matching on the `pretrained_model_name_or_path` string.
The base model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `t5`: T5Model (T5 model)
- contains `distilbert`: DistilBertModel (DistilBERT model)
- contains `albert`: AlbertModel (ALBERT model)
- contains `camembert`: CamembertModel (CamemBERT model)
- contains `roberta`: RobertaModel (RoBERTa model)
- contains `bert`: BertModel (Bert model)
- contains `openai-gpt`: OpenAIGPTModel (OpenAI GPT model)
- contains `gpt2`: GPT2Model (OpenAI GPT-2 model)
- contains `transfo-xl`: TransfoXLModel (Transformer-XL model)
- contains `xlnet`: XLNetModel (XLNet model)
- contains `xlm`: XLMModel (XLM model)
- contains `ctrl`: CTRLModel (Salesforce CTRL model)
This class cannot be instantiated using `__init__()` (throws an error).
"""
def __init__(self):
raise EnvironmentError("AutoModel is designed to be instantiated "
"using the `AutoModel.from_pretrained(pretrained_model_name_or_path)` method.")
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r""" Instantiates one of the base model classes of the library
from a pre-trained model configuration.
The model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `t5`: T5Model (T5 model)
- contains `distilbert`: DistilBertModel (DistilBERT model)
- contains `albert`: AlbertModel (ALBERT model)
- contains `camembert`: CamembertModel (CamemBERT model)
- contains `roberta`: RobertaModel (RoBERTa model)
- contains `bert`: BertModel (Bert model)
- contains `openai-gpt`: OpenAIGPTModel (OpenAI GPT model)
- contains `gpt2`: GPT2Model (OpenAI GPT-2 model)
- contains `transfo-xl`: TransfoXLModel (Transformer-XL model)
- contains `xlnet`: XLNetModel (XLNet model)
- contains `xlm`: XLMModel (XLM model)
- contains `ctrl`: CTRLModel (Salesforce CTRL model)
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)
To train the model, you should first set it back in training mode with `model.train()`
Params:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
model_args: (`optional`) Sequence of positional arguments:
All remaining positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
- the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.
- the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
an optional state dictionary for the model to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
Force to (re-)download the model weights and configuration files and override the cached versions if they exist.
resume_download: (`optional`) boolean, default False:
Do not delete incompletely received file. Attempt to resume the download if such a file exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments:
Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.
Examples::
model = AutoModel.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = AutoModel.from_pretrained('./test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
model = AutoModel.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
model = AutoModel.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
if 't5' in pretrained_model_name_or_path:
return T5Model.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif 'distilbert' in pretrained_model_name_or_path:
return DistilBertModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif 'albert' in pretrained_model_name_or_path:
return AlbertModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif 'camembert' in pretrained_model_name_or_path:
return CamembertModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif 'roberta' in pretrained_model_name_or_path:
return RobertaModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif 'bert' in pretrained_model_name_or_path:
return BertModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif 'openai-gpt' in pretrained_model_name_or_path:
return OpenAIGPTModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif 'gpt2' in pretrained_model_name_or_path:
return GPT2Model.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif 'transfo-xl' in pretrained_model_name_or_path:
return TransfoXLModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif 'xlnet' in pretrained_model_name_or_path:
return XLNetModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif 'xlm' in pretrained_model_name_or_path:
return XLMModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif 'ctrl' in pretrained_model_name_or_path:
return CTRLModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
raise ValueError("Unrecognized model identifier in {}. Should contain one of "
"'bert', 'openai-gpt', 'gpt2', 'transfo-xl', 'xlnet', "
"'xlm', 'roberta', 'ctrl', 'distilbert', 'camembert', 'albert'".format(pretrained_model_name_or_path))
class AutoModelWithLMHead(object):
r"""
:class:`~transformers.AutoModelWithLMHead` is a generic model class
that will be instantiated as one of the language modeling model classes of the library
when created with the `AutoModelWithLMHead.from_pretrained(pretrained_model_name_or_path)`
class method.
The `from_pretrained()` method takes care of returning the correct model class instance
using pattern matching on the `pretrained_model_name_or_path` string.
The model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `t5`: T5ModelWithLMHead (T5 model)
- contains `distilbert`: DistilBertForMaskedLM (DistilBERT model)
- contains `albert`: AlbertForMaskedLM (ALBERT model)
- contains `camembert`: CamembertForMaskedLM (CamemBERT model)
- contains `roberta`: RobertaForMaskedLM (RoBERTa model)
- contains `bert`: BertForMaskedLM (Bert model)
- contains `openai-gpt`: OpenAIGPTLMHeadModel (OpenAI GPT model)
- contains `gpt2`: GPT2LMHeadModel (OpenAI GPT-2 model)
- contains `transfo-xl`: TransfoXLLMHeadModel (Transformer-XL model)
- contains `xlnet`: XLNetLMHeadModel (XLNet model)
- contains `xlm`: XLMWithLMHeadModel (XLM model)
- contains `ctrl`: CTRLLMHeadModel (Salesforce CTRL model)
This class cannot be instantiated using `__init__()` (throws an error).
"""
def __init__(self):
raise EnvironmentError("AutoModelWithLMHead is designed to be instantiated "
"using the `AutoModelWithLMHead.from_pretrained(pretrained_model_name_or_path)` method.")
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r""" Instantiates one of the language modeling model classes of the library
from a pre-trained model configuration.
The `from_pretrained()` method takes care of returning the correct model class instance
using pattern matching on the `pretrained_model_name_or_path` string.
The model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `t5`: T5ModelWithLMHead (T5 model)
- contains `distilbert`: DistilBertForMaskedLM (DistilBERT model)
- contains `albert`: AlbertForMaskedLM (ALBERT model)
- contains `camembert`: CamembertForMaskedLM (CamemBERT model)
- contains `roberta`: RobertaForMaskedLM (RoBERTa model)
- contains `bert`: BertForMaskedLM (Bert model)
- contains `openai-gpt`: OpenAIGPTLMHeadModel (OpenAI GPT model)
- contains `gpt2`: GPT2LMHeadModel (OpenAI GPT-2 model)
- contains `transfo-xl`: TransfoXLLMHeadModel (Transformer-XL model)
- contains `xlnet`: XLNetLMHeadModel (XLNet model)
- contains `xlm`: XLMWithLMHeadModel (XLM model)
- contains `ctrl`: CTRLLMHeadModel (Salesforce CTRL model)
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)
To train the model, you should first set it back in training mode with `model.train()`
Params:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
model_args: (`optional`) Sequence of positional arguments:
All remaining positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
- the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.
- the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
an optional state dictionary for the model to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
Force to (re-)download the model weights and configuration files and override the cached versions if they exist.
resume_download: (`optional`) boolean, default False:
Do not delete incompletely received file. Attempt to resume the download if such a file exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments:
Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.
Examples::
model = AutoModelWithLMHead.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = AutoModelWithLMHead.from_pretrained('./test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
model = AutoModelWithLMHead.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
model = AutoModelWithLMHead.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
if 't5' in pretrained_model_name_or_path:
return T5WithLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif 'distilbert' in pretrained_model_name_or_path:
return DistilBertForMaskedLM.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif 'albert' in pretrained_model_name_or_path:
return AlbertForMaskedLM.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif 'camembert' in pretrained_model_name_or_path:
return CamembertForMaskedLM.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif 'roberta' in pretrained_model_name_or_path:
return RobertaForMaskedLM.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif 'bert' in pretrained_model_name_or_path:
return BertForMaskedLM.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif 'openai-gpt' in pretrained_model_name_or_path:
return OpenAIGPTLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif 'gpt2' in pretrained_model_name_or_path:
return GPT2LMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif 'transfo-xl' in pretrained_model_name_or_path:
return TransfoXLLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif 'xlnet' in pretrained_model_name_or_path:
return XLNetLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif 'xlm' in pretrained_model_name_or_path:
return XLMWithLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif 'ctrl' in pretrained_model_name_or_path:
return CTRLLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
raise ValueError("Unrecognized model identifier in {}. Should contain one of "
"'bert', 'openai-gpt', 'gpt2', 'transfo-xl', 'xlnet', "
"'xlm', 'roberta', 'ctrl', 'distilbert', 'camembert', 'albert'".format(pretrained_model_name_or_path))
class AutoModelForSequenceClassification(object):
r"""
:class:`~transformers.AutoModelForSequenceClassification` is a generic model class
that will be instantiated as one of the sequence classification model classes of the library
when created with the `AutoModelForSequenceClassification.from_pretrained(pretrained_model_name_or_path)`
class method.
The `from_pretrained()` method takes care of returning the correct model class instance
using pattern matching on the `pretrained_model_name_or_path` string.
The model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `distilbert`: DistilBertForSequenceClassification (DistilBERT model)
- contains `albert`: AlbertForSequenceClassification (ALBERT model)
- contains `camembert`: CamembertForSequenceClassification (CamemBERT model)
- contains `roberta`: RobertaForSequenceClassification (RoBERTa model)
- contains `bert`: BertForSequenceClassification (Bert model)
- contains `xlnet`: XLNetForSequenceClassification (XLNet model)
- contains `xlm`: XLMForSequenceClassification (XLM model)
This class cannot be instantiated using `__init__()` (throws an error).
"""
def __init__(self):
raise EnvironmentError("AutoModelForSequenceClassification is designed to be instantiated "
"using the `AutoModelForSequenceClassification.from_pretrained(pretrained_model_name_or_path)` method.")
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r""" Instantiates one of the sequence classification model classes of the library
from a pre-trained model configuration.
The `from_pretrained()` method takes care of returning the correct model class instance
using pattern matching on the `pretrained_model_name_or_path` string.
The model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `distilbert`: DistilBertForSequenceClassification (DistilBERT model)
- contains `albert`: AlbertForSequenceClassification (ALBERT model)
- contains `camembert`: CamembertForSequenceClassification (CamemBERT model)
- contains `roberta`: RobertaForSequenceClassification (RoBERTa model)
- contains `bert`: BertForSequenceClassification (Bert model)
- contains `xlnet`: XLNetForSequenceClassification (XLNet model)
- contains `xlm`: XLMForSequenceClassification (XLM model)
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)
To train the model, you should first set it back in training mode with `model.train()`
Params:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
model_args: (`optional`) Sequence of positional arguments:
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
                Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
                - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.
                - the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
                an optional state dictionary for the model to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
                Force a (re-)download of the model weights and configuration files, overriding the cached versions if they exist.
resume_download: (`optional`) boolean, default False:
                Do not delete an incompletely received file. Attempt to resume the download if such a file exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
                Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments:
                Can be used to update the configuration object (after it has been loaded) and to initialize the model (e.g. ``output_attention=True``). They behave differently depending on whether a `config` is provided or automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.
Examples::
model = AutoModelForSequenceClassification.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = AutoModelForSequenceClassification.from_pretrained('./test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
model = AutoModelForSequenceClassification.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
model = AutoModelForSequenceClassification.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
if 'distilbert' in pretrained_model_name_or_path:
return DistilBertForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif 'albert' in pretrained_model_name_or_path:
return AlbertForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif 'camembert' in pretrained_model_name_or_path:
return CamembertForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif 'roberta' in pretrained_model_name_or_path:
return RobertaForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif 'bert' in pretrained_model_name_or_path:
return BertForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif 'xlnet' in pretrained_model_name_or_path:
return XLNetForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif 'xlm' in pretrained_model_name_or_path:
return XLMForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
raise ValueError("Unrecognized model identifier in {}. Should contains one of "
"'bert', 'xlnet', 'xlm', 'roberta', 'distilbert', 'camembert', 'albert'".format(pretrained_model_name_or_path))
class AutoModelForQuestionAnswering(object):
r"""
:class:`~transformers.AutoModelForQuestionAnswering` is a generic model class
that will be instantiated as one of the question answering model classes of the library
when created with the `AutoModelForQuestionAnswering.from_pretrained(pretrained_model_name_or_path)`
class method.
The `from_pretrained()` method takes care of returning the correct model class instance
using pattern matching on the `pretrained_model_name_or_path` string.
The model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `distilbert`: DistilBertForQuestionAnswering (DistilBERT model)
- contains `albert`: AlbertForQuestionAnswering (ALBERT model)
- contains `bert`: BertForQuestionAnswering (Bert model)
- contains `xlnet`: XLNetForQuestionAnswering (XLNet model)
- contains `xlm`: XLMForQuestionAnswering (XLM model)
This class cannot be instantiated using `__init__()` (throws an error).
"""
def __init__(self):
raise EnvironmentError("AutoModelWithLMHead is designed to be instantiated "
"using the `AutoModelWithLMHead.from_pretrained(pretrained_model_name_or_path)` method.")
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r""" Instantiates one of the question answering model classes of the library
from a pre-trained model configuration.
The `from_pretrained()` method takes care of returning the correct model class instance
using pattern matching on the `pretrained_model_name_or_path` string.
The model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `distilbert`: DistilBertForQuestionAnswering (DistilBERT model)
- contains `albert`: AlbertForQuestionAnswering (ALBERT model)
- contains `bert`: BertForQuestionAnswering (Bert model)
- contains `xlnet`: XLNetForQuestionAnswering (XLNet model)
- contains `xlm`: XLMForQuestionAnswering (XLM model)
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)
To train the model, you should first set it back in training mode with `model.train()`
Params:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
model_args: (`optional`) Sequence of positional arguments:
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
                Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
                - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.
                - the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
                an optional state dictionary for the model to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
                Force a (re-)download of the model weights and configuration files, overriding the cached versions if they exist.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
                Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments:
                Can be used to update the configuration object (after it has been loaded) and to initialize the model (e.g. ``output_attention=True``). They behave differently depending on whether a `config` is provided or automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.
Examples::
model = AutoModelForQuestionAnswering.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = AutoModelForQuestionAnswering.from_pretrained('./test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
model = AutoModelForQuestionAnswering.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
model = AutoModelForQuestionAnswering.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
if 'distilbert' in pretrained_model_name_or_path:
return DistilBertForQuestionAnswering.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif 'albert' in pretrained_model_name_or_path:
return AlbertForQuestionAnswering.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif 'bert' in pretrained_model_name_or_path:
return BertForQuestionAnswering.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif 'xlnet' in pretrained_model_name_or_path:
return XLNetForQuestionAnswering.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
elif 'xlm' in pretrained_model_name_or_path:
return XLMForQuestionAnswering.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
raise ValueError("Unrecognized model identifier in {}. Should contains one of "
"'bert', 'xlnet', 'xlm', 'distilbert', 'albert'".format(pretrained_model_name_or_path))
|
py | 1a30ea9efd97c86ae7beb4c6844d41e1b16c8874 | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Dirk van der Laarse and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class ProductLineProcessFlowWorkstation(Document):
pass
|
py | 1a30eabab07b631bdf05a2a6304a39b2b7499eb8 | import discord
from discord.ext import commands
from Core.Commands.Settings.Functions.change_prefix import ChangePrefix
from Core.Commands.Settings.Functions.change_status import ChangeStatus
class Settings(commands.Cog):
"""Class contains commands with settings
"""
def __init__(self, bot):
"""Constructor method
"""
self.bot = bot
self.bot.add_cog(ChangePrefix(bot))
self.bot.add_cog(ChangeStatus(bot))
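# A typical registration sketch (assumes `bot` is the commands.Bot instance created elsewhere):
#     bot.add_cog(Settings(bot))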
|
py | 1a30ead097047a3eb1e86ebf29d6a99bffac034f | from datetime import datetime
from vortexasdk import FleetUtilisationOriginBreakdown
from docs.utils import to_markdown
from tests.testcases import TestCaseUsingRealAPI
class TestFleetUtilisationOriginBreakdownReal(TestCaseUsingRealAPI):
def test_search_returns_one_day(self):
date = datetime(2019, 11, 10)
result = FleetUtilisationOriginBreakdown().search(
filter_time_min=date,
filter_time_max=date
)
assert len(result) > 0
def test_to_df(self):
start = datetime(2019, 11, 1)
end = datetime(2019, 11, 10)
df = (
FleetUtilisationOriginBreakdown()
.search(
filter_time_min=start,
filter_time_max=end
)
.to_df()
)
assert list(df.columns) == ["key", "label", "value", "count"]
def test_with_params(self):
start = datetime(2020, 10, 18)
end = datetime(2021, 1, 18)
df = (
FleetUtilisationOriginBreakdown()
.search(
filter_time_min=start,
filter_time_max=end,
breakdown_size='5',
breakdown_geography='country'
)
.to_df()
)
assert len(df) == 5
def test_to_list(self):
start = datetime(2019, 11, 1)
end = datetime(2019, 11, 10)
time_series_list = (
FleetUtilisationOriginBreakdown()
.search(
filter_time_min=start,
filter_time_max=end
)
.to_list()
)
assert len(time_series_list) > 0
|
py | 1a30eb0238537dabf2d2fec11f2651c297c645a4 | #!/usr/bin/python -u
#
# Indexes the examples and build an XML description
#
import string
import glob
import sys
try:
import libxml2
except:
sys.exit(1)
sys.path.insert(0, "..")
from apibuild import CParser, escape
examples = []
extras = ['examples.xsl', 'index.py']
tests = []
sections = {}
symbols = {}
api_dict = None
api_doc = None
def load_api():
global api_dict
global api_doc
if api_dict != None:
return
api_dict = {}
try:
print "loading ../libxml2-api.xml"
api_doc = libxml2.parseFile("../libxml2-api.xml")
except:
print "failed to parse ../libxml2-api.xml"
sys.exit(1)
def find_symbol(name):
global api_dict
global api_doc
if api_doc == None:
load_api()
if name == None:
return
if api_dict.has_key(name):
return api_dict[name]
ctxt = api_doc.xpathNewContext()
res = ctxt.xpathEval("/api/symbols/*[@name = '%s']" % (name))
if type(res) == type([]) and len(res) >= 1:
if len(res) > 1:
print "Found %d references to %s in the API" % (len(res), name)
node = res[0]
typ = node.name
file = node.xpathEval("string(@file)")
info = node.xpathEval("string(info)")
else:
print "Reference %s not found in the API" % (name)
return None
ret = (typ, file, info)
api_dict[name] = ret
return ret
def parse_top_comment(filename, comment):
res = {}
lines = string.split(comment, "\n")
item = None
for line in lines:
while line != "" and (line[0] == ' ' or line[0] == '\t'):
line = line[1:]
while line != "" and line[0] == '*':
line = line[1:]
while line != "" and (line[0] == ' ' or line[0] == '\t'):
line = line[1:]
try:
(it, line) = string.split(line, ":", 1)
item = it
while line != "" and (line[0] == ' ' or line[0] == '\t'):
line = line[1:]
if res.has_key(item):
res[item] = res[item] + " " + line
else:
res[item] = line
except:
if item != None:
if res.has_key(item):
res[item] = res[item] + " " + line
else:
res[item] = line
return res
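# A hypothetical top-of-file comment block in the form parse_top_comment() expects;
# each "key: value" line starts a new dict entry and lines without a colon are
# appended to the most recent key:
#
#  * synopsis: Parse an XML file to a tree and free it
#  * purpose: demonstrate the basic parsing API
#  * usage: parse1 test1.xml
#  * author: the libxml2 project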
def parse(filename, output):
global symbols
global sections
parser = CParser(filename)
parser.collect_references()
idx = parser.parse()
info = parse_top_comment(filename, parser.top_comment)
output.write(" <example filename='%s'>\n" % filename)
try:
synopsis = info['synopsis']
output.write(" <synopsis>%s</synopsis>\n" % escape(synopsis));
except:
print "Example %s lacks a synopsis description" % (filename)
try:
purpose = info['purpose']
output.write(" <purpose>%s</purpose>\n" % escape(purpose));
except:
print "Example %s lacks a purpose description" % (filename)
try:
usage = info['usage']
output.write(" <usage>%s</usage>\n" % escape(usage));
except:
print "Example %s lacks an usage description" % (filename)
try:
test = info['test']
output.write(" <test>%s</test>\n" % escape(test));
progname=filename[0:-2]
command=string.replace(test, progname, './' + progname, 1)
tests.append(command)
except:
pass
try:
author = info['author']
output.write(" <author>%s</author>\n" % escape(author));
except:
print "Example %s lacks an author description" % (filename)
try:
copy = info['copy']
output.write(" <copy>%s</copy>\n" % escape(copy));
except:
print "Example %s lacks a copyright description" % (filename)
try:
section = info['section']
output.write(" <section>%s</section>\n" % escape(section));
if sections.has_key(section):
sections[section].append(filename)
else:
sections[section] = [filename]
except:
print "Example %s lacks a section description" % (filename)
for topic in info.keys():
if topic != "purpose" and topic != "usage" and \
topic != "author" and topic != "copy" and \
topic != "section" and topic != "synopsis" and topic != "test":
str = info[topic]
output.write(" <extra topic='%s'>%s</extra>\n" % (
escape(topic), escape(str)))
output.write(" <includes>\n")
for include in idx.includes.keys():
if include.find("libxml") != -1:
output.write(" <include>%s</include>\n" % (escape(include)))
output.write(" </includes>\n")
output.write(" <uses>\n")
for ref in idx.references.keys():
id = idx.references[ref]
name = id.get_name()
line = id.get_lineno()
if symbols.has_key(name):
sinfo = symbols[name]
refs = sinfo[0]
            # gather at most 5 references per symbol
if refs > 5:
continue
sinfo.append(filename)
sinfo[0] = refs + 1
else:
symbols[name] = [1, filename]
info = find_symbol(name)
if info != None:
type = info[0]
file = info[1]
output.write(" <%s line='%d' file='%s' name='%s'/>\n" % (type,
line, file, name))
else:
type = id.get_type()
output.write(" <%s line='%d' name='%s'/>\n" % (type,
line, name))
output.write(" </uses>\n")
output.write(" </example>\n")
return idx
def dump_symbols(output):
global symbols
output.write(" <symbols>\n")
keys = symbols.keys()
keys.sort()
for symbol in keys:
output.write(" <symbol name='%s'>\n" % (symbol))
info = symbols[symbol]
i = 1
while i < len(info):
output.write(" <ref filename='%s'/>\n" % (info[i]))
i = i + 1
output.write(" </symbol>\n")
output.write(" </symbols>\n")
def dump_sections(output):
global sections
output.write(" <sections>\n")
keys = sections.keys()
keys.sort()
for section in keys:
output.write(" <section name='%s'>\n" % (section))
info = sections[section]
i = 0
while i < len(info):
output.write(" <example filename='%s'/>\n" % (info[i]))
i = i + 1
output.write(" </section>\n")
output.write(" </sections>\n")
def dump_Makefile():
for file in glob.glob('*.xml'):
extras.append(file)
for file in glob.glob('*.res'):
extras.append(file)
Makefile="""# Beware this is autogenerated by index.py
INCLUDES = -I$(top_builddir)/include -I$(top_srcdir)/include -I@srcdir@/include @THREAD_CFLAGS@ @Z_CFLAGS@
DEPS = $(top_builddir)/libxml2rr.la
LDADDS = @STATIC_BINARIES@ $(top_builddir)/libxml2rr.la @THREAD_LIBS@ @Z_LIBS@ $(ICONV_LIBS) -lm @WIN32_EXTRA_LIBADD@
rebuild: examples.xml index.html
examples.xml: index.py *.c
-@($(srcdir)/index.py)
index.html: examples.xml examples.xsl
-@(xsltproc examples.xsl examples.xml && echo "Rebuilt web page" && xmllint --valid --noout index.html)
install-data-local:
$(mkinstalldirs) $(DESTDIR)$(HTML_DIR)
-@INSTALL@ -m 0644 $(srcdir)/*.html $(srcdir)/*.c $(srcdir)/*.xml $(srcdir)/*.xsl $(srcdir)/*.res $(DESTDIR)$(HTML_DIR)
"""
EXTRA_DIST=""
for extra in extras:
EXTRA_DIST = EXTRA_DIST + extra + " "
Makefile = Makefile + "EXTRA_DIST=%s\n\n" % (EXTRA_DIST)
noinst_PROGRAMS=""
for example in examples:
noinst_PROGRAMS = noinst_PROGRAMS + example + " "
Makefile = Makefile + "noinst_PROGRAMS=%s\n\n" % (noinst_PROGRAMS)
for example in examples:
Makefile = Makefile + "%s_SOURCES=%s.c\n%s_LDFLAGS=\n%s_DEPENDENCIES= $(DEPS)\n%s_LDADD= @RDL_LIBS@ $(LDADDS)\n\n" % (example, example, example,
example, example)
Makefile = Makefile + "valgrind: \n\t$(MAKE) CHECKER='valgrind' tests\n\n"
Makefile = Makefile + "tests: $(noinst_PROGRAMS)\n"
Makefile = Makefile + "\t@(echo '## examples regression tests')\n"
Makefile = Makefile + "\t@(echo > .memdump)\n"
for test in tests:
Makefile = Makefile + "\t@($(CHECKER) %s)\n" % (test)
Makefile = Makefile + '\t@(grep "MORY ALLO" .memdump | grep -v "MEMORY ALLOCATED : 0" ; exit 0)\n'
Makefile = Makefile + "\n\n"
try:
old = open("Makefile.am", "r").read()
if old != Makefile:
n = open("Makefile.am", "w").write(Makefile)
print "Updated Makefile.am"
except:
print "Failed to read or save Makefile.am"
#
# Autogenerate the .cvsignore too ...
#
ignore = """.memdump
Makefile.in
Makefile
"""
for example in examples:
ignore = ignore + "%s\n" % (example)
try:
old = open(".cvsignore", "r").read()
if old != ignore:
n = open(".cvsignore", "w").write(ignore)
print "Updated .cvsignore"
except:
print "Failed to read or save .cvsignore"
if __name__ == "__main__":
load_api()
output = open("examples.xml", "w")
output.write("<examples>\n")
for file in glob.glob('*.c'):
parse(file, output)
examples.append(file[:-2])
dump_symbols(output)
dump_sections(output)
output.write("</examples>\n")
output.close()
dump_Makefile()
|
py | 1a30eb5f6102248048690b72aa1c08654e618e5e | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import functools
import os
import pprint
import re
import sys
import subprocess
perr = functools.partial(print, file=sys.stderr)
def dump_env_vars(prefix, pattern=None):
if pattern is not None:
match = lambda s: re.search(pattern, s)
else:
match = lambda s: True
for name in sorted(os.environ):
if name.startswith(prefix) and match(name):
perr("- {0}: {1!r}".format(name, os.environ[name]))
def run_cmd(cmdline):
proc = subprocess.Popen(cmdline,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
if proc.returncode != 0:
raise RuntimeError("Command {cmdline} failed with code {returncode}, "
"stderr was:\n{stderr}\n"
.format(cmdline=cmdline, returncode=proc.returncode,
stderr=err.decode()))
return out
def get_commit_description(commit):
"""
Return the textual description (title + body) of the given git commit.
"""
out = run_cmd(["git", "show", "--no-patch", "--pretty=format:%B",
commit])
return out.decode('utf-8', 'ignore')
def list_affected_files(commit_range):
"""
Return a list of files changed by the given git commit range.
"""
perr("Getting affected files from", repr(commit_range))
out = run_cmd(["git", "diff", "--name-only", commit_range])
return list(filter(None, (s.strip() for s in out.decode().splitlines())))
def get_travis_head_commit():
return os.environ['TRAVIS_COMMIT']
def get_travis_commit_range():
if os.environ['TRAVIS_EVENT_TYPE'] == 'pull_request':
# TRAVIS_COMMIT_RANGE is too pessimistic for PRs, as it may contain
# unrelated changes. Instead, use the same strategy as on AppVeyor
# below.
run_cmd(["git", "fetch", "-q", "origin",
"+refs/heads/{0}".format(os.environ['TRAVIS_BRANCH'])])
merge_base = run_cmd(["git", "merge-base",
"HEAD", "FETCH_HEAD"]).decode().strip()
return "{0}..HEAD".format(merge_base)
else:
cr = os.environ['TRAVIS_COMMIT_RANGE']
# See
# https://github.com/travis-ci/travis-ci/issues/4596#issuecomment-139811122
return cr.replace('...', '..')
def get_travis_commit_description():
# Prefer this to get_commit_description(get_travis_head_commit()),
# as rebasing or other repository events may make TRAVIS_COMMIT invalid
# at the time we inspect it
return os.environ['TRAVIS_COMMIT_MESSAGE']
def list_travis_affected_files():
"""
Return a list of files affected in the current Travis build.
"""
commit_range = get_travis_commit_range()
try:
return list_affected_files(commit_range)
except RuntimeError:
# TRAVIS_COMMIT_RANGE can contain invalid revisions when
# building a branch (not a PR) after rebasing:
# https://github.com/travis-ci/travis-ci/issues/2668
if os.environ['TRAVIS_EVENT_TYPE'] == 'pull_request':
raise
# If it's a rebase, it's probably enough to use the last commit only
commit_range = '{0}^..'.format(get_travis_head_commit())
return list_affected_files(commit_range)
def list_appveyor_affected_files():
"""
Return a list of files affected in the current AppVeyor build.
This only works for PR builds.
"""
# Re-fetch PR base branch (e.g. origin/master), pointing FETCH_HEAD to it
run_cmd(["git", "fetch", "-q", "origin",
"+refs/heads/{0}".format(os.environ['APPVEYOR_REPO_BRANCH'])])
# Compute base changeset between FETCH_HEAD (PR base) and HEAD (PR head)
merge_base = run_cmd(["git", "merge-base",
"HEAD", "FETCH_HEAD"]).decode().strip()
    # Compute changed files between the base changeset and HEAD
return list_affected_files("{0}..HEAD".format(merge_base))
LANGUAGE_TOPICS = ['c_glib', 'cpp', 'docs', 'go', 'java', 'js', 'python',
'r', 'ruby', 'rust', 'csharp']
ALL_TOPICS = LANGUAGE_TOPICS + ['integration', 'site', 'dev']
AFFECTED_DEPENDENCIES = {
'java': ['integration', 'python'],
'js': ['integration'],
'ci': ALL_TOPICS,
'cpp': ['python', 'c_glib', 'r', 'ruby', 'integration'],
'format': LANGUAGE_TOPICS,
'.travis.yml': ALL_TOPICS,
'c_glib': ['ruby']
}
COMPONENTS = {'cpp', 'java', 'c_glib', 'r', 'ruby', 'integration', 'js',
'rust', 'csharp', 'site', 'go', 'docs', 'python', 'dev'}
def get_affected_topics(affected_files):
"""
Return a dict of topics affected by the given files.
Each dict value is True if affected, False otherwise.
"""
affected = dict.fromkeys(ALL_TOPICS, False)
for path in affected_files:
parts = []
head = path
while head:
head, tail = os.path.split(head)
parts.append(tail)
parts.reverse()
assert parts
p = parts[0]
fn = parts[-1]
if fn.startswith('README'):
continue
if p in COMPONENTS:
affected[p] = True
_path_already_affected = {}
def _affect_dependencies(component):
if component in _path_already_affected:
# For circular dependencies, terminate
return
for topic in AFFECTED_DEPENDENCIES.get(component, ()):
affected[topic] = True
_affect_dependencies(topic)
_path_already_affected[topic] = True
_affect_dependencies(p)
return affected
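# For example, a change under cpp/ marks 'cpp' as affected and, via AFFECTED_DEPENDENCIES,
# also fans out to 'python', 'c_glib', 'r', 'ruby' and 'integration'
# (see test_get_affected_topics below for the full expected dict).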
def make_env_for_topics(affected):
return {'ARROW_CI_{0}_AFFECTED'.format(k.upper()): '1' if v else '0'
for k, v in affected.items()}
def get_unix_shell_eval(env):
"""
Return a shell-evalable string to setup some environment variables.
"""
return "; ".join(("export {0}='{1}'".format(k, v)
for k, v in env.items()))
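# For instance (hypothetical input), make_env_for_topics({'cpp': True, 'go': False}) yields
# {'ARROW_CI_CPP_AFFECTED': '1', 'ARROW_CI_GO_AFFECTED': '0'}, and get_unix_shell_eval()
# turns that into "export ARROW_CI_CPP_AFFECTED='1'; export ARROW_CI_GO_AFFECTED='0'"
# for the calling shell to eval.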
def get_windows_shell_eval(env):
"""
Return a shell-evalable string to setup some environment variables.
"""
return "\n".join(('set "{0}={1}"'.format(k, v)
for k, v in env.items()))
def run_from_travis():
perr("Environment variables (excerpt):")
dump_env_vars('TRAVIS_', '(BRANCH|COMMIT|PULL)')
if (os.environ['TRAVIS_REPO_SLUG'] == 'apache/arrow' and
os.environ['TRAVIS_BRANCH'] == 'master' and
os.environ['TRAVIS_EVENT_TYPE'] != 'pull_request'):
# Never skip anything on master builds in the official repository
affected = dict.fromkeys(ALL_TOPICS, True)
else:
desc = get_travis_commit_description()
if '[skip travis]' in desc:
# Skip everything
affected = dict.fromkeys(ALL_TOPICS, False)
elif '[force ci]' in desc or '[force travis]' in desc:
# Test everything
affected = dict.fromkeys(ALL_TOPICS, True)
else:
# Test affected topics
affected_files = list_travis_affected_files()
perr("Affected files:", affected_files)
affected = get_affected_topics(affected_files)
assert set(affected) <= set(ALL_TOPICS), affected
perr("Affected topics:")
perr(pprint.pformat(affected))
return get_unix_shell_eval(make_env_for_topics(affected))
def run_from_appveyor():
perr("Environment variables (excerpt):")
dump_env_vars('APPVEYOR_', '(PULL|REPO)')
if not os.environ.get('APPVEYOR_PULL_REQUEST_HEAD_COMMIT'):
# Not a PR build, test everything
affected = dict.fromkeys(ALL_TOPICS, True)
else:
affected_files = list_appveyor_affected_files()
perr("Affected files:", affected_files)
affected = get_affected_topics(affected_files)
assert set(affected) <= set(ALL_TOPICS), affected
perr("Affected topics:")
perr(pprint.pformat(affected))
return get_windows_shell_eval(make_env_for_topics(affected))
def test_get_affected_topics():
affected_topics = get_affected_topics(['cpp/CMakeLists.txt'])
assert affected_topics == {
'c_glib': True,
'cpp': True,
'docs': False,
'go': False,
'java': False,
'js': False,
'python': True,
'r': True,
'ruby': True,
'rust': False,
'csharp': False,
'integration': True,
'site': False,
'dev': False
}
affected_topics = get_affected_topics(['format/Schema.fbs'])
assert affected_topics == {
'c_glib': True,
'cpp': True,
'docs': True,
'go': True,
'java': True,
'js': True,
'python': True,
'r': True,
'ruby': True,
'rust': True,
'csharp': True,
'integration': True,
'site': False,
'dev': False
}
if __name__ == "__main__":
# This script should have its output evaluated by a shell,
# e.g. "eval `python ci/detect-changes.py`"
if os.environ.get('TRAVIS'):
try:
print(run_from_travis())
except Exception:
# Make sure the enclosing eval will return an error
print("exit 1")
raise
elif os.environ.get('APPVEYOR'):
try:
print(run_from_appveyor())
except Exception:
print("exit 1")
raise
else:
sys.exit("Script must be run under Travis-CI or AppVeyor")
|
py | 1a30ed830b6ac8e84a7afbb611f1e85fd85aad12 | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from biocontainers_flask.server.models.base_model_ import Model
from biocontainers_flask.server import util
class Checksum(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, checksum: str=None, type: str=None): # noqa: E501
"""Checksum - a model defined in Swagger
:param checksum: The checksum of this Checksum. # noqa: E501
:type checksum: str
:param type: The type of this Checksum. # noqa: E501
:type type: str
"""
self.swagger_types = {
'checksum': str,
'type': str
}
self.attribute_map = {
'checksum': 'checksum',
'type': 'type'
}
self._checksum = checksum
self._type = type
@classmethod
def from_dict(cls, dikt) -> 'Checksum':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The Checksum of this Checksum. # noqa: E501
:rtype: Checksum
"""
return util.deserialize_model(dikt, cls)
@property
def checksum(self) -> str:
"""Gets the checksum of this Checksum.
The hex-string encoded checksum for the data. # noqa: E501
:return: The checksum of this Checksum.
:rtype: str
"""
return self._checksum
@checksum.setter
def checksum(self, checksum: str):
"""Sets the checksum of this Checksum.
The hex-string encoded checksum for the data. # noqa: E501
:param checksum: The checksum of this Checksum.
:type checksum: str
"""
if checksum is None:
raise ValueError("Invalid value for `checksum`, must not be `None`") # noqa: E501
self._checksum = checksum
@property
def type(self) -> str:
"""Gets the type of this Checksum.
The digest method used to create the checksum. The value (e.g. `sha-256`) SHOULD be listed as `Hash Name String` in the https://github.com/ga4gh-discovery/ga4gh-checksum/blob/master/hash-alg.csv[GA4GH Checksum Hash Algorithm Registry]. Other values MAY be used, as long as implementors are aware of the issues discussed in https://tools.ietf.org/html/rfc6920#section-9.4[RFC6920]. GA4GH may provide more explicit guidance for use of non-IANA-registered algorithms in the future. # noqa: E501
:return: The type of this Checksum.
:rtype: str
"""
return self._type
@type.setter
def type(self, type: str):
"""Sets the type of this Checksum.
The digest method used to create the checksum. The value (e.g. `sha-256`) SHOULD be listed as `Hash Name String` in the https://github.com/ga4gh-discovery/ga4gh-checksum/blob/master/hash-alg.csv[GA4GH Checksum Hash Algorithm Registry]. Other values MAY be used, as long as implementors are aware of the issues discussed in https://tools.ietf.org/html/rfc6920#section-9.4[RFC6920]. GA4GH may provide more explicit guidance for use of non-IANA-registered algorithms in the future. # noqa: E501
:param type: The type of this Checksum.
:type type: str
"""
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
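# A hypothetical round trip with this model (values are illustrative only):
#
#     c = Checksum.from_dict({'checksum': '0a1b2c3d', 'type': 'sha-256'})
#     assert c.type == 'sha-256'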
|
py | 1a30ee20ff75f166ba6f3f1669d8414f9ffd9869 | from .config import UTILS1_LOGLEVEL
import logging
from log_utils.utils import get_logger_with_file_handler
formatter = 'logger name : %(name)s ,%(levelname)s , func : %(funcName)s , %(message)s , module : %(module)s ,line : %(lineno)d , %(asctime)s'
logger = get_logger_with_file_handler(__name__,UTILS1_LOGLEVEL,formatter)
stream_handler = logging.StreamHandler()
logger.addHandler(stream_handler)
def add(num1 : float,num2 : float)->float:
logger.warning(f'args : {num1} , {num2}')
return num1+num2 |
py | 1a30f0555ab7f6904a9b7f6b36c59c9a49211b1a | import asyncio
import contextlib
from types import TracebackType
from typing import Optional, Type, Dict, Any
import aiojobs
from aiojobs import Scheduler
from .client import ChaosIQClient
from .log import logger
from .types import Config
__all__ = ["Heartbeat"]
class Heartbeat:
def __init__(self, config: Config) -> None:
self.sched: Scheduler = None
self.config = config
self._running = False
self.aiojob = None
async def __aenter__(self) -> 'Heartbeat':
await self.setup()
return self
async def __aexit__(self, exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType]) -> None:
await self.cleanup()
@property
def running(self) -> bool:
"""
Flag that is set when the heartbeat is active.
"""
return self._running
async def setup(self) -> None:
"""
Create the underlying scheduler to periodically send the heartbeat.
"""
logger.info("Creating heartbeat loop")
self.sched = await asyncio.wait_for(
aiojobs.create_scheduler(
exception_handler=self.aiojobs_exception), None)
period = self.config.heartbeat_interval
if not period:
logger.critical(f"Heartbeat is not properly configured; "
f"interval '{period}' is not valid")
return
logger.info("Spawning the heartbeat...")
self.aiojob = await self.sched.spawn(self.send_pulse())
async def cleanup(self) -> None:
"""
Gracefully terminate the scheduler.
"""
if self.aiojob:
logger.info("Stopping heartbeat pulse...")
await self.aiojob.close()
if not self.sched.closed:
logger.info("Closing heartbeat loop")
await asyncio.wait_for(self.sched.close(), None)
self._running = False
async def send_pulse(self) -> None:
"""
        Send the heartbeat to the console periodically.
        The loop must be interruptible right away, not only once the wait completes;
        we cannot wait for the end of the idle period before leaving the loop.
"""
self._running = True
wait = self.config.heartbeat_interval
logger.info(f"Sending heartbeat every {wait} seconds")
while self._running and not self.sched.closed:
await asyncio.sleep(wait)
with contextlib.suppress(Exception):
async with ChaosIQClient(self.config) as client:
await client.post(
"/agent/actions", json={"action": "heartbeat"})
@staticmethod
def aiojobs_exception(
scheduler: Scheduler,
context: Dict[str, Any]) -> None: # pragma: no cover
logger.error(context)
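# A minimal usage sketch (hypothetical `config` and `do_agent_work`; only the fields this
# module actually reads, e.g. `heartbeat_interval`, are assumed on the Config object):
#
#     async def run_agent(config: Config) -> None:
#         async with Heartbeat(config):
#             await do_agent_work()  # heartbeats are posted to /agent/actions in the background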
|
py | 1a30f0602c7f03d5176c176c9d096e25d9ea9968 | # (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
# This file is autogenerated.
# To change this file you should edit assets/configuration/spec.yaml and then run the following commands:
# ddev -x validate config -s <INTEGRATION_NAME>
# ddev -x validate models -s <INTEGRATION_NAME>
from __future__ import annotations
from typing import Any, Mapping, Optional, Sequence, Union
from pydantic import BaseModel, root_validator, validator
from datadog_checks.base.utils.functions import identity
from datadog_checks.base.utils.models import validation
from . import defaults, validators
class ObfuscatorOptions(BaseModel):
class Config:
allow_mutation = False
collect_commands: Optional[bool]
collect_comments: Optional[bool]
collect_metadata: Optional[bool]
collect_tables: Optional[bool]
replace_digits: Optional[bool]
class QueryActivity(BaseModel):
class Config:
allow_mutation = False
collection_interval: Optional[float]
enabled: Optional[bool]
payload_row_limit: Optional[float]
class QueryMetrics(BaseModel):
class Config:
allow_mutation = False
collection_interval: Optional[float]
enabled: Optional[bool]
class QuerySamples(BaseModel):
class Config:
allow_mutation = False
collection_interval: Optional[float]
enabled: Optional[bool]
explain_function: Optional[str]
explained_queries_cache_maxsize: Optional[int]
explained_queries_per_hour_per_query: Optional[int]
samples_per_hour_per_query: Optional[int]
seen_samples_cache_maxsize: Optional[int]
class Relation(BaseModel):
class Config:
allow_mutation = False
relation_name: Optional[str]
relation_regex: Optional[str]
relation_schema: Optional[str]
relkind: Optional[Sequence[str]]
schemas: Optional[Sequence[str]]
class InstanceConfig(BaseModel):
class Config:
allow_mutation = False
application_name: Optional[str]
collect_activity_metrics: Optional[bool]
collect_bloat_metrics: Optional[bool]
collect_count_metrics: Optional[bool]
collect_database_size_metrics: Optional[bool]
collect_default_database: Optional[bool]
collect_function_metrics: Optional[bool]
collect_wal_metrics: Optional[bool]
custom_queries: Optional[Sequence[Mapping[str, Any]]]
data_directory: Optional[str]
dbm: Optional[bool]
dbname: Optional[str]
dbstrict: Optional[bool]
disable_generic_tags: Optional[bool]
empty_default_hostname: Optional[bool]
host: str
ignore_databases: Optional[Sequence[str]]
max_relations: Optional[int]
min_collection_interval: Optional[float]
obfuscator_options: Optional[ObfuscatorOptions]
password: Optional[str]
pg_stat_statements_view: Optional[str]
port: Optional[int]
query_activity: Optional[QueryActivity]
query_metrics: Optional[QueryMetrics]
query_samples: Optional[QuerySamples]
query_timeout: Optional[int]
relations: Optional[Sequence[Union[str, Relation]]]
reported_hostname: Optional[str]
service: Optional[str]
ssl: Optional[str]
ssl_cert: Optional[str]
ssl_key: Optional[str]
ssl_password: Optional[str]
ssl_root_cert: Optional[str]
table_count_limit: Optional[int]
tag_replication_role: Optional[bool]
tags: Optional[Sequence[str]]
username: str
@root_validator(pre=True)
def _initial_validation(cls, values):
return validation.core.initialize_config(getattr(validators, 'initialize_instance', identity)(values))
@validator('*', pre=True, always=True)
def _ensure_defaults(cls, v, field):
if v is not None or field.required:
return v
return getattr(defaults, f'instance_{field.name}')(field, v)
@validator('*')
def _run_validations(cls, v, field):
if not v:
return v
return getattr(validators, f'instance_{field.name}', identity)(v, field=field)
@root_validator(pre=False)
def _final_validation(cls, values):
return validation.core.finalize_config(getattr(validators, 'finalize_instance', identity)(values))
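# A minimal construction sketch (hypothetical values; `host` and `username` are the only
# fields declared without Optional in this model):
#
#     InstanceConfig(host='localhost', username='datadog')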
|
py | 1a30f0d9cda97bfc18e9641607afe1ecb74c6d95 | # Kendall Jackson MIT License
# GetScraped Private v1
#parse_listing from https://github.com/scrapehero/yellowpages-scraper
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
from lxml import html
import unicodecsv as csv
#import argparse
final_list = []
state = 'AZ'
categories = ['Abrasive Dealers','Abundant Life Churches','AC Repairs','Accommodation','Acupuncture','Adhesive','Adoption Centres','Adventure Clubs','Advertising','Advertising Agencies','Advocates','Aerobics','Aeronautical Engineering Colleges','Air And Train Ambulance Services','Air Cargo Agents','Air Conditioners','Air Coolers','Air Hostess Training Institutes','Air Pollution Control Equipment Dealers','Alliance Churches','Alloy, Iron & Steel Industries','Alternative Fuels Stations','Alternative Medicines','Aluminium Extrusion Industry','Ambulance Services','Ammonia Gas Dealers','Amusement Parks','Anglican Churches','Animation Training Institutes','Apostolic Churches','Apparels & Accessories','Apple Product Repair','Aquarium','Architects','Area Rugs & Mats','Armenian Churches','Arms & Ammunition Dealer','Arms And Ammunitions','Art Gallery','Art Paintings','Artificial Grass','Artificial Turf','Arts & Craft Classes','Astrologers','ATM Centres','Audio Video Systems','Auditoriums','Auto Dealers','Auto Service Centres','Automobile Engine Oil Dealers','Automobiles','Aviation Academies','Ayurvedic Food','Ayurvedic Medicines','Ayurvedic Treatment','B 2 B','B Pharmacy Colleges','B.Ed. Colleges','Baby Foods','Baby Store','Bakeries','Bakery Equipments','Balloon Decorations','Bamboo Flooring','Bangles','Banks','Banquet Halls','Baptist Churches','Bar Coding Machine Dealer','Bars','Bathroom Linen','Battery Dealers','BDS Colleges','Bean Bags','Beautician Training Institutes','Beauty & Wellness','Beauty And Cosmetic Products','Beauty Parlours','Bed Linen','Bed Room Furniture','Beef Shops','Belts & Wallets','Bicycle Stores','Bike Rentals','Billing Machine Dealers','Binding','Binoculars & Telescope','Birth Certificate Offices','Blocks Material','Blood Donation Centres','Blow Moulding Machine Dealer','Body Massage Parlours','Boilers','Book Publishers','Books Stores','Bore Well Drilling','Boutiques','Bowling','Brick Materials','Bridal Makeup','Budget Hotels','Building and Construction','Building Demolition','Building Materials','Bulk SMS Modems','Bulk Sms Services','Burqa Retailers','Business Cards','Business Consultants','Business Hotels','CA & ICWA Training Institutes','Cable Manufacturers','Cable Tv Operators','Cabs Services','Cafes','Cake Shops','Calvary Chapel Churches','Camera Accessories','Camera Lens','Cameras','Candles','Caps & Hats','Car Ac Repairs','Car Accessories','Car Dealers','Car Rentals','Car Repairs & Services','Carbon Dioxide Gas Dealers','Cargo & Logistics','Cargo Agents','Carpenters','Carpet & Rugs','Carpet And Carpet Tiles','Casual Dining','Catering Services','Catholic Churches','CBSC Schools','Cement Materials','Central Government Offices','Centreing Materials','Chairs','Chandeliers','Charitable Trusts','Chartered Accountants','Chartered Bus','Chat & Snacks','Chicken Shops','Children Wear','Childrens Hospitals','Chimneys','Snacks','Chit Funds','Chocolate Shops','Churches','Cinema Theaters','Citric Acid Dealers','City Clerk Offices','City Government Offices','Civil Contractors','Cleaning Tools & Accessories','Clinics','Clocks','Cloud Software','Clubs','CNG Pump Stations','Coarse Aggregates','Commercial Kitchen Equipment Dealers','Communication','Competitive Exams','Computer Accessories & Peripherals','Computers','Computers, Tablets & Mobiles','Conference Hall','Construction & Renovation','Construction Companies','Consultants','Contact Lenses','Content Writers','Contractors','Convention Centres','Cooking Classes','Cooks On Hire','Cooktops','Cookware','Corporate Catering 
Services','Corporate Gifts','Cosmetic Surgery','Couriers','Courts','CPAP & BIPAP Systems','Crackers','Crane Services','Cremation Grounds','Cremation Services','Curtain Accessories','Cushion & Cushion Covers','Cutlery','Dance Academies','Dead Body Freezer Box On Hire','Decor & Lightings','Decor & Show Pieces','Decoration Materials','Degree Colleges','Dental Clinics','Designing & Wood Carving','Detective Agencies','Dhaba','Diagnostic Centres','Diesel Gas Stations','Dietician','Digital Cameras','Digital Printers','Digital Weighing Scale Dealers','Dining','Dining Room Furniture','Dining Sets','Disc Jockey Training Institutes','Dishwasher','Diwan Sets','Doctors','Dog Training','Doors, Windows & Partitions','Drama Theaters','Dress Materials','Drilling Equipments','Driver Service Agents','Dry Fruits','Dry Ice Dealer','DSLR Cameras','DTP Services','Dvd & Vcd','Eastern Orthodox Churches','Education','Education Colleges','Education Consultants','Education Councils & Board Offices','Education Schools','Egg Shops','Electrical Contractors','Electrical Sub-Stations','Electrical Suppliers','Electricians','Electronic Accessories','Electronic Display Boards Manufacturer','Electronic Weighing Scale Dealers','Electronics','Elevators','Email Marketing','Embroidery Works','Emergency Services','Engineering Colleges','ENT Hospitals','Entrance Exams Coaching Centres','Establishments','Ethnic Wear','Evangelical Churches','Event Decorators','Event Management','Event Organizers','Event Venues','Events Catering Services','Excavation','Eye Hospitals','Eyeglasses','Fabrication & Welding Works','False Ceiling','Family Clubs','Fans','Farm Houses','Fashion Designers','Fashion Designing Training Institutes','Fast Food Centre','Fertility & Infertility Clinics','Fertilizer & Soil','Film And Television Institute Of India','Film Studios','Financial Planners','Financial Services','Fine Dining','Fire Alarms','Fire And Safety Course Training','Fire Extinguishers','Fire Protection Systems','Fire Safety Equipments','Fire Stations','Fish & Sea Food Shops','Fitness Centres','Flex Printing Services','Flooring','Flooring Installations','Flooring Tools & Materials','Florists','Flower Decorations','Food & Beverage Outlets','Food & Beverages','Food Courts','Food Machinery Manufacturer','Food Processing Equipment Manufacturer','Food Stores','Footwear','Foreign Exchange','Foursquare Churches','Frames','Fruit Juice Processing Machine Manufacture','Fruits','Full Gospel Churches','Function Halls','Funeral Band','Funeral Materials','Furnishings','Furniture','Furniture on Hire','Furniture Storage','Gaming Centres','Gardening Tools','Garments','Gas Dealers','Gas Stations','Gemological Institute Of India','General Hospitals','General order suppliers','General Pharmacies','GI Pipe Dealer','Gifts And Novelties','Glass Fitting Hardware','Glasswares','Go Karting','Goldsmiths','Gospel Churches','Government Hospitals','Government Offices','Graphic Designers','GRE & TOEFL Coaching Centres','Greek Orthodox Churches','Groceries','Groundwater Surveyors','Guest Houses','Gurudwaras','Water Heater Repair','Gymnasium','Gymnasium Equipments','Hair Fall Treatments','Hair Stylists','Hair Transplantation','Hair Treatments','Hall Decorations','Handicraft Items','Handlooms','Hardware And Network Training Institutes','Hardware And Networking','Hardware Stores','Hardware Tools','Hardwood Flooring','HD Cameras','Health','Health Clubs','Hearse Services','Heavy Vehicle Dealers','Helmet Dealers','Hispanic Churches','Home Appliances','Home Builders','Home Delivery 
Restaurants','Home Furniture','Home Needs','Home Theater Systems','Homeopathy Clinics','Homeopathy Medicines','Hosiery Store','Hospitals','Hotels','House Painters','Housekeeping Services','Hr Consultancies','Hydraulic & Pulley Equipment Dealers','Hydrochloric Acid Dealers','Hypermarkets','IB Schools','Ice Cream & Dessert Parlors','ICSE Schools','IGCSE Schools','Immigration Consultants','Income Tax Offices','Industrial Bearing Dealers','Industrial Belt Dealers','Industrial Burner Dealers','Industrial Chemical Dealers','Industrial Electronic Components Dealers','Industrial Equipments','Industrial Fan Dealers','Industrial Fire Extinguisher Dealers','industrial machine dealers','Industrial Safety Equipment Dealers','Industrial Spring Dealers','Industrial Trolleys Manufacturer','Innerwear And Loungewear','Institute Of Hotel Management','Insurance Agents','Interior Design Courses','Interior Designers','Internet Service Providers','Inverters','Investment Advisors','Irrigation Equipment Dealers','ITI Training','Jain Temples','Jeans','Jewellery','Jewellery Box Manufacturers','Journalism Training Institutes','Juice Centre','Junior Colleges','Kalyana Mandapam','Kennels','Kitchen & Dining','Kitchen Storage Containers','Lab Equipment And Chemical Suppliers','Labor Contractors','Laboratories','Ladies Bags & Purses','Ladies Dresses','Laminate Flooring','Language Training Institutes','Laptop Repair','Laptops','Lathe Machine Dealers','Laundry Services','Law Colleges','Lawn & Garden','Leather Goods Manufacturer','Legal & Financial Services','Legal Services','Libraries','Lifestyle Accessories','Lightings','Living Room Furniture','Loan Agencies','Loan Agents','Local Government Offices','Locks','Lodges','Logistic Companies','Logistics Services','Lounges','Luxury Hotels','Maggam Job Works','Makeup Artists','Manufacturer of Power Generators','Marriage Bureaus','Marriage Halls','Mass Communication & Journalism Colleges','Matching Centres','Maternity Hospitals','Mattresses','Meat & Poultry Shops','Media Advertising','Medical Coding Training Institutes','Medical Colleges','Medical Equipments','Medical Stockings','Meditation Centres','Mehandi Artists','Mennonite Churches','Mens Hostels','Mesh Dealers','Metal Industries','Methodist Churches','Metro Rail Stations','Microbreweries','Microwave Repairs','Military Recruiting Offices','Milk & Milk Products','Mineral Water Suppliers','Mobile Phones','Mobile Repairs','Mobile Repairs','Modular Furniture','Modular Kitchen Dealers','Money Transfer Agencies','Montessori Training Institutes','Moravian Churches','Morgues Services','Mormon Churches','Mosques','Motor Driving Schools','Mould Dies Manufacturer','Moving Media Ads','Mp3 Players','MS Pipe Dealer','Multispecialty Hospitals','Museums','Music Academies','Musical Instruments','Mutton Shops','Natural Flooring','Nature Cure Centers','Naturopathy','Network Securities','Networking Devices','New Age Churches','Newspaper Ads','NGO Clubs','NGOs & Social Service Organisations','Night Clubs','Night Life','Night Wears','Nitric Acid Dealers','Notary Services','Number Plate Manufacturers','Nursing Colleges','Nutritional Supplement Dealers','Office Furniture','Offices','Offset Printers','Old Age Homes','Old Cut Notes Exchange Services','Online Classes','Optics & Eyewear','Organ Donation Centres','Orphanages & Shelters','Orthodox Churches','Other Vehicles','Outdoor Advertising','Outdoor Catering Services','Outdoor Furniture','Overseas Education Consultants','Oxygen Concentrators','Oxygen Gas Dealers','P R P Hair Treatments','Packers And 
Movers','Packing Machine Manufacturers','Painters','Painting Suppliers','Pan Shops','Pants','Paper Rolls Manufacturers','Paper Stores','Parks','Part Time Jobs Consultancies','Party Halls','Passport Offices','Pawn Brokers','Pcs & Desktops','Pedicure & Manicure','Pen Stores','Pentecostal Churches','Perforated Sheet Manufacturers','Perfumes','Personal Fitness Trainers','Personality Development Training Institutes','Pest Control Services','Pet Shops','Pets','PG Colleges','Pharmaceutical Companies','Pharmaceutical Packaging Material Dealers','Pharmacies','Pharmacy Colleges','Photo Frames','Photo Studios','Photocopiers','Photographers','Photography Training Institutes','physiotherapist','Physiotherapy Clinics','Piercing','Pilot Training Institutes','Pipe Dealers','Pizza Restaurants','Placement Consultants','Plants','Plastic & Disposable Items','Plastic Injection Moulding Machine Dealer','Plastic Products Manufacturers','Play Schools','Play Stations','Playground Equipments','Playgrounds','Plumbers','Plumbing','Plywood & Laminates','Police Stations','Political Party Offices','Pollution Inspection Stations','Polymers & Asbestos Products Dealer','Polytechnic Colleges','Pork Shops','Post Offices','Power Generator Suppliers','Power Stations','Power Tools Dealers','Presbyterian Churches','Printed Circuit Board Dealers','Printers','Printing & Stationaries','Printing Machines','Printing Materials','Printing Press','Professional Services','Professionals','Project Management Training Institutes','Projectors','Promotional Products','Property Consultants','Property Dealers','Protestant Churches','Public Safety Offices','Pubs','Pumps & Controllers','Pundits','PVC Pipe Dealer','Quaker Churches','Quick Bites','Radio Jockey Training Institutes','Radio Stations','Radium Works','Railings','Railway Cargo Agents','Railway Stations','Ready Made Garments','Ready Mix Concrete','Real Estate','Real Estate Agents','Real Estate Developers','Real Estate Loans & Mortgages','Recording Studios','Reformed Churches','Refrigerator Repair','Refrigerators','Registry Offices','Rehabilitation Centres','Religion','Research Institutes','Residential Designers','Resins & Chemicals Manufacturer','Resorts','Restaurant Test','Restaurants','RO Water Purifier','Road Cargo Agents','Robotics Engineering','Robotics Training Institutes','Roofing Sheets','RTA Offices','Rubber Oil Seals Dealer','Rubber Product Dealer','Rubber Product Manufacturers','Rubber Stamps','Rudraksha','Russian Orthodox Churches','Sand Materials','Sandals & Floaters','Sanitaryware & Bathroom Accessories','Sarees & Blouses','Scalp Treatments','School District Offices','School For Mentally Challenged','Scrap Dealers','Screen Printers','Sea Cargo Agents','Seat Cover & Rexine Works','Security Guard Services','Security Services','Security Systems','Seeds','SelfDefence Training Services','Servers','Service Centres','Serviced Apartments','Seventh-Day Adventist Churches','Sewing Machine Dealers','Share Brokers','Shipping Companies','Shirts','Shoes','Shopping Malls','Shorts & Cargo','Sign Boards','Signage','Singing Classes','Skin Care Clinics','Snooker Parlours','Socks','Sofa Sets','Software & IT Services','Software Certifications','Software Dealers','Software Development','Software Training Institutes','Solar Products Manufacturers','Sound And Lighting On Hire','Sound Systems','Spa & Saloon','Spare Part Dealers','Spare Parts & Accessories','Speakers','Spiritual And Pooja Accessories','Spiritual Centres','Spoken English Institutes','Sports','Sports Academies','Sports Clubs','Sports 
Equipments','Sports Stores','Sports Wear','Sports, Entertainment & Hobbies','Stadiums','Stage Decorations','Stainless Steel Pipe Dealer','Stamp Papers','Standees & Demo Tents','State Board Schools','State Government Offices','Stationaries','Stationary Stores','Stations','Steel Wires & Ropes Manufacturers','Stem Cell Banking','Stock Brokers','Studios','Study Hall Centre','Sub-Registrar Offices','Suitings & Shirtings','Suits And Blazers','Sulphuric Acid Dealers','Sunglasses','Super Specialty Hospitals','Supermarkets','Surgical Instruments','Sweet Shops','Swimming Pools','Table Accessories','Tailoring Materials','Tailors','Tailors & Designers','Take Away Restaurants','Tattoo Makers','Telecommunications','Television Installation','Televisions','Temples','Tent Houses','Textiles','Theaters','Theme Parks','Thermocol Dealers','Ticketing','Tiles','Timber Depot','Tmt Iron & Steel Bars','Tours And Travels','Toy Shops','Trading Consultants','Training Institutes','Transportation','Travel Agencies','Travel Goods','Travel Services','Trophy & Momento Dealers','Trousers','T-Shirts','Tuitions','Tv Accessories','TV Studio','Two Wheelers Dealers','Two Wheelers Service Centres','Typing Institutes','Tyre Dealers','Unani Treatment','Underground Stations','Uniforms','Unitarian Universalist Churches','United Churches Of Christ','Unity Churches','Universities','UPS','UPSC & IAS Coaching Centres','Used Auto Dealers','Used Bike Dealers','Used Cars Dealers','Utensils','UV Water Purifier','Valve Dealer','Vegetables','Vehicle Glass Dealers','Vehicle On Hire','Vending Machine Manufacturer','Veterinary Hospitals','Veterinary Medicines','Video Editing Studios','Video Gaming Centres','Videographers','Vineyard Churches','Vinyl Flooring','Vocational Colleges','Wall Papers','Washing Machine Repair','Washing Machines','Water Cooler Suppliers','Water Parks','Water Purifier Dealers','Water Purifier Repairs','Water Softeners','Water Suppliers','Water Tank Suppliers','Waterproofing','Waterproofing Materials','Weather Stations','Web Designing Companies','Web Hosting Companies','Wedding & Events','Wedding Bands','Wedding Cards','Wedding Catering Services','Wedding Decorations','Wedding Planners','Weight Loss & Gain Centres','Welding Equipment','Welfare Offices','Wesleyan Churches','Wet Grinder Dealers','Wine Shops','Winter Wear','Wire Mesh Dealers','Womens Hostels','Wooden Flooring','Wrist Watch Repairs and Services','Wrist Watches','Xerox Shops','Yoga Centres','Zoo Parks','Zumba Fitness']
cities = ['Ajo','Ak-Chin Village','Amado','Apache Junction','Arizona City','Arizona Village','Ash Fork','Avondale','Avra Valley','Bagdad','Benson','Big Park','Bisbee','Bitter Springs','Black Canyon City','Blackwater','Bluewater','Bouse','Buckeye','Bullhead City','Burnside','Cameron','Camp Verde','Canyon Day','Carefree','Casa Grande','Casas Adobes','Catalina','Catalina Foothills','Cave Creek','Central Heights-Midland City','Chandler','Chilchinbito','Chinle','Chino Valley','Chuichu','Cibecue','Cibola','Clarkdale','Claypool','Clifton','Colorado City','Congress','Coolidge','Cordes Lakes','Cornville','Corona de Tucson','Cottonwood','Cottonwood-Verde Village','Dennehotso','Desert Hills','Dewey-Humboldt','Dilkon','Dolan Springs','Douglas','Drexel-Alvernon','Drexel Heights','Dudleyville','Duncan','Eagar','East Fork','East Sahuarita','Ehrenberg','Elgin','El Mirage','Eloy','First Mesa','Flagstaff','Florence','Flowing Wells','Fort Defiance','Fortuna Foothills','Fountain Hills','Fredonia','Gadsden','Ganado','Gila Bend','Gilbert','Gisela','Glendale','Globe','Gold Camp','Golden Valley','Goodyear','Grand Canyon Village','Greasewood','Green Valley','Guadalupe','Hayden','Heber-Overgaard','Holbrook','Hotevilla-Bacavi','Houck','Huachuca City','Jeddito','Jerome','Kachina Village','Kaibab','Kaibito','Kayenta','Keams Canyon','Kearny','Kingman','Kykotsmovi Village','Lake Havasu City','Lake Montezuma','Lechee','Leupp','Litchfield Park','Littletown','Lukachukai','McNary','Mammoth','Many Farms','Marana','Maricopa','Mayer','Mesa','Mesquite Creek','Miami','Moenkopi','Mohave Valley','Mojave Ranch Estates','Morenci','Mountainaire','Munds Park','Naco','Nazlini','New Kingman-Butler','New River','Nogales','Oljato-Monument Valley','Oracle','Oro Valley','Page','Paradise Valley','Parker','Parker Strip','Parks','Patagonia','Paulden','Payson','Peach Springs','Peeples Valley','Peoria','Peridot','Phoenix','Picture Rocks','Pima','Pine','Pinetop-Lakeside','Pinon','Pirtleville','Pisinemo','Poston','Prescott','Prescott Valley','Quartzsite','Queen Creek','Queen Valley','Red Mesa','Rio Rico Northeast','Rio Rico Northwest','Rio Rico Southeast','Rio Rico Southwest','Rio Verde','Rock Point','Rough Rock','Round Rock','Sacaton','Safford','Sahuarita','St. David','St. Johns','St. Michaels','Salome','San Carlos','San Luis','San Manuel','Santan','Santa Rosa','Sawmill','Scottsdale','Second Mesa','Sedona','Seligman','Sells','Shongopovi','Shonto','Show Low','Sierra Vista','Sierra Vista Southeast','Snowflake','Somerton','Sonoita','South Tucson','Springerville','Spring Valley','Stanfield','Steamboat','Strawberry','Summit','Sun City','Sun City West','Sun Lakes','Sun Valley','Supai','Superior','Surprise','Swift Trail Junction','Tacna','Tanque Verde','Taylor','Teec Nos Pos','Tempe','Thatcher','Three Points','Tolleson','Tombstone','Tonalea','Tonto Basin','Top-of-the-World','Tortolita','Tsaile','Tubac','Tuba City','Tucson','Tucson Estates','Tumacacori-Carmen','Tusayan','Vail','Valencia West','Wellton','Wenden','Whetstone','Whiteriver','Wickenburg','Wilhoit','Willcox','Williams','Williamson','Willow Valley','Window Rock','Winkelman','Winslow','Winslow West','Yarnell','Young','Youngtown','Yuma']
def parse_listing(keyword, place):
    """
    Process a Yellow Pages search-results page and extract business listings.
    :param keyword: search query (business category)
    :param place: place name, e.g. "Phoenix,AZ" (an example call is sketched in the comment after this function)
    """
url = "https://www.yellowpages.com/search?search_terms={0}&geo_location_terms={1}".format(keyword,place)
print("retrieving ",url)
headers = {'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Encoding':'gzip, deflate, br',
'Accept-Language':'en-GB,en;q=0.9,en-US;q=0.8,ml;q=0.7',
'Cache-Control':'max-age=0',
'Connection':'keep-alive',
'Host':'www.yellowpages.com',
'Upgrade-Insecure-Requests':'1',
'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36'
}
# Adding retries
for retry in range(10):
try:
            response = requests.get(url, verify=True, headers=headers)
            print("parsing page")
            if response.status_code == 200:
parser = html.fromstring(response.text)
#making links absolute
base_url = "https://www.yellowpages.com"
parser.make_links_absolute(base_url)
XPATH_LISTINGS = "//div[@class='search-results organic']//div[@class='v-card']"
listings = parser.xpath(XPATH_LISTINGS)
scraped_results = []
for results in listings:
XPATH_BUSINESS_NAME = ".//a[@class='business-name']//text()"
XPATH_WEBSITE = ".//div[@class='info']//div[contains(@class,'info-section')]//div[@class='links']//a[contains(@class,'website')]/@href"
raw_business_name = results.xpath(XPATH_BUSINESS_NAME)
raw_website = results.xpath(XPATH_WEBSITE)
business_name = ''.join(raw_business_name).strip() if raw_business_name else None
website = ''.join(raw_website).strip() if raw_website else None
                    business_details = {
                        'business_name': business_name,
                        'website': website,
                        'industry': keyword,
                        'city': place.split(',')[0],  # place is "City,ST"; record just the city
                        'state': 'AZ'
                    }
                    if website:
scraped_results.append(business_details)
#print(scraped_results)
return scraped_results
            elif response.status_code == 404:
print("Could not find a location matching",place)
#no need to retry for non existing page
return []
else:
print("Failed to process page")
return []
        except Exception:
            print("Failed to process page, retrying")
            continue
    # all retries exhausted without a successful parse
    return []
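# Illustrative example (not executed): parse_listing('Restaurants', 'Phoenix,AZ') returns a list of
# dicts of the form {'business_name': ..., 'website': ..., 'industry': 'Restaurants', 'city': 'Phoenix', 'state': 'AZ'}.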
def runtime(word, place):
    scraped_data = parse_listing(word, place)
    return scraped_data if scraped_data else []
if __name__=="__main__":
for city in cities:
final_list = []
for elem in categories:
final_list.append(runtime(elem, city + ',' + state))
print('STARTING FILE WRITE')
print("Writing scraped data to %s-%s-yellowpages-scraped-links.csv"%(city, state))
        # csv.DictWriter needs a text-mode file on Python 3; newline='' avoids blank rows on Windows
        with open('%s-%s-yellowpages-scraped-links.csv' % (city, state), 'a', newline='') as csvfile:
            fieldnames = ['business_name', 'website', 'industry', 'city', 'state']
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames, quoting=csv.QUOTE_ALL)
            writer.writeheader()
            for data in final_list:
                for row in data:
                    writer.writerow(row)
print('DONE. Kendall is Awesome.')
|
py | 1a30f161892ee8ec35435409b368ed30822db5e1 | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class AvailableRegionSummary(object):
"""
The summary of region availability for a subscription.
"""
def __init__(self, **kwargs):
"""
Initializes a new AvailableRegionSummary object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param region_name:
The value to assign to the region_name property of this AvailableRegionSummary.
:type region_name: str
"""
self.swagger_types = {
'region_name': 'str'
}
self.attribute_map = {
'region_name': 'regionName'
}
self._region_name = None
@property
def region_name(self):
"""
**[Required]** Gets the region_name of this AvailableRegionSummary.
Region availability for the subscription.
:return: The region_name of this AvailableRegionSummary.
:rtype: str
"""
return self._region_name
@region_name.setter
def region_name(self, region_name):
"""
Sets the region_name of this AvailableRegionSummary.
Region availability for the subscription.
:param region_name: The region_name of this AvailableRegionSummary.
:type: str
"""
self._region_name = region_name
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
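# Usage sketch (the region name below is illustrative, not taken from this module):
#   summary = AvailableRegionSummary(region_name='us-ashburn-1')
#   print(summary.region_name)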
|
py | 1a30f2427bb8864840fc93ce3c057964da01f0a0 | # Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package containing tests for Melange GHOP module.
"""
|
py | 1a30f2586e14f93b93da054d4543528d1063ac2c | # Copyright 2018-2019 The glTF-Blender-IO authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..com.gltf2_io import gltf_from_dict
from ..com.gltf2_io_debug import Log
import logging
import json
import struct
import base64
from os.path import dirname, join, isfile, basename
from urllib.parse import unquote
class glTFImporter():
"""glTF Importer class."""
def __init__(self, filename, import_settings):
"""initialization."""
self.filename = filename
self.import_settings = import_settings
self.glb_buffer = None
self.buffers = {}
self.accessor_cache = {}
if 'loglevel' not in self.import_settings.keys():
self.import_settings['loglevel'] = logging.ERROR
log = Log(import_settings['loglevel'])
self.log = log.logger
self.log_handler = log.hdlr
self.SIMPLE = 1
self.TEXTURE = 2
self.TEXTURE_FACTOR = 3
# TODO: move to a com place?
self.extensions_managed = [
'KHR_materials_pbrSpecularGlossiness',
'KHR_lights_punctual',
'KHR_materials_unlit',
'KHR_texture_transform'
]
# TODO : merge with io_constants
self.fmt_char_dict = {}
self.fmt_char_dict[5120] = 'b' # Byte
self.fmt_char_dict[5121] = 'B' # Unsigned Byte
self.fmt_char_dict[5122] = 'h' # Short
self.fmt_char_dict[5123] = 'H' # Unsigned Short
self.fmt_char_dict[5125] = 'I' # Unsigned Int
self.fmt_char_dict[5126] = 'f' # Float
self.component_nb_dict = {}
self.component_nb_dict['SCALAR'] = 1
self.component_nb_dict['VEC2'] = 2
self.component_nb_dict['VEC3'] = 3
self.component_nb_dict['VEC4'] = 4
self.component_nb_dict['MAT2'] = 4
self.component_nb_dict['MAT3'] = 9
self.component_nb_dict['MAT4'] = 16
@staticmethod
def bad_json_value(val):
"""Bad Json value."""
raise ValueError('Json contains some unauthorized values')
def checks(self):
"""Some checks."""
if self.data.asset.version != "2.0":
return False, "glTF version must be 2"
if self.data.extensions_required is not None:
for extension in self.data.extensions_required:
if extension not in self.data.extensions_used:
return False, "Extension required must be in Extension Used too"
if extension not in self.extensions_managed:
return False, "Extension " + extension + " is not available on this addon version"
if self.data.extensions_used is not None:
for extension in self.data.extensions_used:
if extension not in self.extensions_managed:
                    # Non-blocking error. TODO: log it.
pass
return True, None
def load_glb(self):
"""Load binary glb."""
header = struct.unpack_from('<4sII', self.content)
self.format = header[0]
self.version = header[1]
self.file_size = header[2]
if self.format != b'glTF':
return False, "This file is not a glTF/glb file"
if self.version != 2:
return False, "GLB version %d unsupported" % self.version
if self.file_size != len(self.content):
return False, "Bad GLB: file size doesn't match"
offset = 12 # header size = 12
# JSON chunk is first
type_, len_, json_bytes, offset = self.load_chunk(offset)
if type_ != b"JSON":
return False, "Bad GLB: first chunk not JSON"
if len_ != len(json_bytes):
return False, "Bad GLB: length of json chunk doesn't match"
try:
json_str = str(json_bytes, encoding='utf-8')
json_ = json.loads(json_str, parse_constant=glTFImporter.bad_json_value)
self.data = gltf_from_dict(json_)
except ValueError as e:
return False, e.args[0]
# BIN chunk is second (if it exists)
if offset < len(self.content):
type_, len_, data, offset = self.load_chunk(offset)
if type_ == b"BIN\0":
if len_ != len(data):
return False, "Bad GLB: length of BIN chunk doesn't match"
self.glb_buffer = data
return True, None
def load_chunk(self, offset):
"""Load chunk."""
chunk_header = struct.unpack_from('<I4s', self.content, offset)
data_length = chunk_header[0]
data_type = chunk_header[1]
data = self.content[offset + 8: offset + 8 + data_length]
return data_type, data_length, data, offset + 8 + data_length
def read(self):
"""Read file."""
# Check this is a file
if not isfile(self.filename):
return False, "Please select a file"
# Check if file is gltf or glb
with open(self.filename, 'rb') as f:
self.content = memoryview(f.read())
self.is_glb_format = self.content[:4] == b'glTF'
# glTF file
if not self.is_glb_format:
content = str(self.content, encoding='utf-8')
self.content = None
try:
self.data = gltf_from_dict(json.loads(content, parse_constant=glTFImporter.bad_json_value))
return True, None
except ValueError as e:
return False, e.args[0]
# glb file
else:
# Parsing glb file
success, txt = self.load_glb()
self.content = None
return success, txt
def is_node_joint(self, node_idx):
"""Check if node is a joint."""
if not self.data.skins: # if no skin in gltf file
return False, None
for skin_idx, skin in enumerate(self.data.skins):
if node_idx in skin.joints:
return True, skin_idx
return False, None
def load_buffer(self, buffer_idx):
"""Load buffer."""
buffer = self.data.buffers[buffer_idx]
if buffer.uri:
data, _file_name = self.load_uri(buffer.uri)
if data is not None:
self.buffers[buffer_idx] = data
else:
# GLB-stored buffer
if buffer_idx == 0 and self.glb_buffer is not None:
self.buffers[buffer_idx] = self.glb_buffer
def load_uri(self, uri):
"""Loads a URI.
Returns the data and the filename of the resource, if there is one.
"""
sep = ';base64,'
if uri.startswith('data:'):
idx = uri.find(sep)
if idx != -1:
data = uri[idx + len(sep):]
return memoryview(base64.b64decode(data)), None
path = join(dirname(self.filename), unquote(uri))
try:
with open(path, 'rb') as f_:
return memoryview(f_.read()), basename(path)
except Exception:
self.log.error("Couldn't read file: " + path)
return None, None
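# Usage sketch (illustrative only; the file name and settings dict below are hypothetical,
# and in practice the Blender add-on supplies the real import settings):
#   importer = glTFImporter('scene.glb', {'loglevel': logging.ERROR})
#   success, error = importer.read()
#   if success:
#       success, error = importer.checks()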
|
py | 1a30f28c186d1cd68412be7a4f52b0844039bfa1 | from sys import path
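# Extend sys.path with the local packages directory (machine-specific path) so that the
# 'extra' package imported below can be found.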
path.append('/home/joerojas/Desarrollo/Curso-Basico-Python/102_misPaquetes/packages')
import extra.iota
print(extra.iota.FunI()) |
py | 1a30f29428a6eeb9c34a6ecedb3de29fa9f068db | # Copyright (C) 2021-present MongoDB, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the Server Side Public License, version 1,
# as published by MongoDB, Inc.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Server Side Public License for more details.
#
# You should have received a copy of the Server Side Public License
# along with this program. If not, see
# <http://www.mongodb.com/licensing/server-side-public-license>.
#
# As a special exception, the copyright holders give permission to link the
# code of portions of this program with the OpenSSL library under certain
# conditions as described in each individual source file and distribute
# linked combinations including the program with the OpenSSL library. You
# must comply with the Server Side Public License in all respects for
# all of the code used other than as permitted herein. If you modify file(s)
# with this exception, you may extend this exception to your version of the
# file(s), but you are not obligated to do so. If you do not wish to do so,
# delete this exception statement from your version. If you delete this
# exception statement from all source files in the program, then also delete
# it in the license file.
#
# pylint: disable=too-many-lines
"""Checks compatibility of old and new IDL files.
In order to support user-selectable API versions for the server, server commands are now
defined using IDL files. This script checks that old and new commands are compatible with each
other, which allows commands to be updated without breaking the API specifications within a
specific API version.
This script accepts two directories as arguments, the "old" and the "new" IDL directory.
Before running this script, run checkout_idl_files_from_past_releases.py to find and create
directories containing the old IDL files from previous releases.
"""
import argparse
import os
import sys
from dataclasses import dataclass
from enum import Enum
from typing import Dict, List, Set, Optional, Tuple, Union
from idl import parser, syntax, errors, common
from idl.compiler import CompilerImportResolver
from idl_compatibility_errors import IDLCompatibilityContext, IDLCompatibilityErrorCollection
ALLOW_ANY_TYPE_LIST: List[str] = [
    # This list is only used in unit tests.
"commandAllowedAnyTypes",
"commandAllowedAnyTypes-param-anyTypeParam",
"commandAllowedAnyTypes-reply-anyTypeField",
"oldTypeBsonAnyAllowList",
"newTypeBsonAnyAllowList",
"oldReplyFieldTypeBsonAnyAllowList-reply-oldBsonSerializationTypeAnyReplyField",
"newReplyFieldTypeBsonAnyAllowList-reply-newBsonSerializationTypeAnyReplyField",
"oldParamTypeBsonAnyAllowList-param-bsonTypeAnyParam",
"newParamTypeBsonAnyAllowList-param-bsonTypeAnyParam",
"commandAllowedAnyTypesWithVariant-reply-anyTypeField",
"replyFieldTypeBsonAnyWithVariant-reply-bsonSerializationTypeAnyStructField",
"replyFieldTypeBsonAnyWithVariantWithArray-reply-bsonSerializationTypeAnyStructField",
"parameterFieldTypeBsonAnyWithVariant-param-bsonSerializationTypeAnyStructField",
"parameterFieldTypeBsonAnyWithVariantWithArray-param-bsonSerializationTypeAnyStructField",
"commandTypeBsonAnyWithVariant",
"commandTypeBsonAnyWithVariantWithArray",
"replyFieldCppTypeNotEqual-reply-cppTypeNotEqualReplyField",
"commandCppTypeNotEqual",
"commandParameterCppTypeNotEqual-param-cppTypeNotEqualParam",
"replyFieldSerializerNotEqual-reply-serializerNotEqualReplyField",
"commandSerializerNotEqual",
"commandParameterSerializerNotEqual-param-serializerNotEqualParam",
"replyFieldDeserializerNotEqual-reply-deserializerNotEqualReplyField",
"commandDeserializerNotEqual",
"commandParameterDeserializerNotEqual-param-deserializerNotEqualParam",
"newlyAddedReplyFieldTypeBsonAnyAllowed-reply-newlyAddedBsonSerializationTypeAnyReplyField",
"replyFieldTypeBsonAnyWithVariantUnstable-reply-bsonSerializationTypeWithVariantAnyUnstableReplyField",
"newlyAddedParamBsonAnyAllowList-param-newlyAddedBsonAnyAllowListParam",
"newlyAddedTypeFieldBsonAnyAllowList",
"parameterFieldTypeBsonAnyWithVariantUnstable-param-bsonSerializationTypeAnyStructField",
"commandTypeBsonAnyWithVariantUnstable",
"commandParameterCppTypeNotEqualUnstable-param-cppTypeNotEqualParam",
"replyFieldCppTypeNotEqualUnstable-reply-cppTypeNotEqualReplyUnstableField",
"commandCppTypeNotEqualUnstable",
"commandParameterSerializerNotEqualUnstable-param-serializerNotEqualParam",
"replyFieldSerializerNotEqualUnstable-reply-serializerNotEqualReplyUnstableField",
"commandSerializerNotEqualUnstable",
"commandParameterDeserializerNotEqualUnstable-param-deserializerNotEqualParam",
"replyFieldDeserializerNotEqualUnstable-reply-deserializerNotEqualReplyUnstableField",
"commandDeserializerNotEqualUnstable",
'create-param-backwards',
'saslStart-param-payload',
'saslStart-param-payload',
'saslStart-reply-payload',
'saslContinue-param-payload',
'saslContinue-reply-payload',
# These commands (aggregate, find, update, delete, findAndModify, explain) might contain some
# fields with type `any`. Currently, it's not possible to avoid the `any` type in those cases.
# Instead, here are the preventive measures in-place to catch unintentional breaking changes:
# 1- Added comments on top of custom serializers/deserializers (related to these fields) to
# let the future developers know that their modifications to these methods might lead to
# a breaking change in the API.
# 2- Added proper unit-tests to catch accidental changes to the custom serializers/deserializers
# by over-fitting on the current implementation of these custom serializers/deserializers.
# 3- Added further checks to the current script (idl_check_compatibility.py) to check for
# changing a custom serializer/deserializer and considering it as a potential breaking
# change.
'aggregate-param-pipeline',
'aggregate-param-explain',
'aggregate-param-allowDiskUse',
'aggregate-param-cursor',
'aggregate-param-hint',
'aggregate-param-needsMerge',
'aggregate-param-fromMongos',
'aggregate-param-$_requestReshardingResumeToken',
'aggregate-param-isMapReduceCommand',
'count-param-hint',
'count-param-limit',
'count-param-maxTimeMS',
'find-param-filter',
'find-param-projection',
'find-param-sort',
'find-param-hint',
'find-param-collation',
'find-param-singleBatch',
'find-param-allowDiskUse',
'find-param-min',
'find-param-max',
'find-param-returnKey',
'find-param-showRecordId',
'find-param-$queryOptions',
'find-param-tailable',
'find-param-oplogReplay',
'find-param-noCursorTimeout',
'find-param-awaitData',
'find-param-allowPartialResults',
'find-param-readOnce',
'find-param-allowSpeculativeMajorityRead',
'find-param-$_requestResumeToken',
'find-param-$_resumeAfter',
'find-param-maxTimeMS',
'update-param-u',
'update-param-hint',
'update-param-upsertSupplied',
'update-reply-_id',
'delete-param-limit',
'delete-param-hint',
'findAndModify-param-hint',
'findAndModify-param-update',
'findAndModify-reply-upserted',
'insert-reply-opTime',
'update-reply-opTime',
'delete-reply-opTime',
'aggregate-reply-partialResultsReturned',
'aggregate-reply-invalidated',
'find-reply-partialResultsReturned',
'find-reply-invalidated',
'getMore-reply-partialResultsReturned',
'getMore-reply-invalidated',
]
# Do not add user visible fields already released in earlier versions.
IGNORE_UNSTABLE_LIST: List[str] = [
# The 'originalSpec' field was introduced in v5.1 behind a disabled feature flag and is not user
# visible. This is part of the listIndexes output when executed against system.bucket.*
# collections, which users should avoid doing.
'listIndexes-reply-originalSpec',
# The 'vars' field was introduced to facilitate communication between mongot and mongod and is
# not user visible.
'find-reply-vars',
'aggregate-reply-vars',
# The 'cursor' field is now optional in a reply, as inter-node communication in aggregation
# can return one or more cursors. Multiple cursors are covered under the 'cursors' field.
'find-reply-cursor',
'aggregate-reply-cursor',
# The 'recordPreImages' field is only used by Realm and is not documented to users.
'collMod-param-recordPreImages',
# The 'ignoreUnknownIndexOptions' field is for internal use only and is not documented to users.
'createIndexes-param-ignoreUnknownIndexOptions',
# The 'runtimeConstants' field is a legacy field for internal use only and is not documented to
# users.
'delete-param-runtimeConstants',
]
SKIPPED_FILES = [
"unittest.idl", "mozILocalization.idl", "mozILocaleService.idl", "mozIOSPreferences.idl",
"nsICollation.idl", "nsIStringBundle.idl", "nsIScriptableUConv.idl", "nsITextToSubURI.idl"
]
# Do not add commands that were visible to users in previously released versions.
IGNORE_COMMANDS_LIST: List[str] = [
# The following commands were released behind a feature flag in 5.3 but were shelved in
# favor of getClusterParameter and setClusterParameter. Since the feature flag was not enabled
# in 5.3, they were effectively unusable and so can be safely removed from the strict API.
'getChangeStreamOptions',
'setChangeStreamOptions',
]
class FieldCompatibility:
"""Information about a Field to check compatibility."""
def __init__(self, field_type: Optional[Union[syntax.Enum, syntax.Struct, syntax.Type]],
idl_file: syntax.IDLParsedSpec, idl_file_path: str, unstable: Optional[bool],
optional: bool) -> None:
"""Initialize data members and hand special cases, such as optionalBool type."""
self.field_type = field_type
self.idl_file = idl_file
self.idl_file_path = idl_file_path
self.unstable = unstable
self.optional = optional
if isinstance(self.field_type, syntax.Type) and self.field_type.name == "optionalBool":
# special case for optionalBool type, because it is compatible
# with bool type, but has bson_serialization_type == 'any'
# which is not supported by many checks
self.field_type = syntax.Type(field_type.file_name, field_type.line, field_type.column)
self.field_type.name = "bool"
self.field_type.bson_serialization_type = ["bool"]
self.optional = True
@dataclass
class FieldCompatibilityPair:
"""Information about an old and new Field pair to check compatibility."""
old: FieldCompatibility
new: FieldCompatibility
cmd_name: str
field_name: str
class ArrayTypeCheckResult(Enum):
"""Enumeration representing different return values of check_array_type."""
INVALID = 0
TRUE = 1
FALSE = 2
def get_new_commands(
ctxt: IDLCompatibilityContext, new_idl_dir: str, import_directories: List[str]
) -> Tuple[Dict[str, syntax.Command], Dict[str, syntax.IDLParsedSpec], Dict[str, str]]:
"""Get new IDL commands and check validity."""
new_commands: Dict[str, syntax.Command] = dict()
new_command_file: Dict[str, syntax.IDLParsedSpec] = dict()
new_command_file_path: Dict[str, str] = dict()
for dirpath, _, filenames in os.walk(new_idl_dir):
for new_filename in filenames:
if not new_filename.endswith('.idl') or new_filename in SKIPPED_FILES:
continue
new_idl_file_path = os.path.join(dirpath, new_filename)
with open(new_idl_file_path) as new_file:
new_idl_file = parser.parse(
new_file, new_idl_file_path,
CompilerImportResolver(import_directories + [new_idl_dir]))
if new_idl_file.errors:
new_idl_file.errors.dump_errors()
raise ValueError(f"Cannot parse {new_idl_file_path}")
for new_cmd in new_idl_file.spec.symbols.commands:
# Ignore imported commands as they will be processed in their own file.
if new_cmd.api_version == "" or new_cmd.imported:
continue
if new_cmd.api_version != "1":
# We're not ready to handle future API versions yet.
ctxt.add_command_invalid_api_version_error(
new_cmd.command_name, new_cmd.api_version, new_idl_file_path)
continue
if new_cmd.command_name in new_commands:
ctxt.add_duplicate_command_name_error(new_cmd.command_name, new_idl_dir,
new_idl_file_path)
continue
new_commands[new_cmd.command_name] = new_cmd
new_command_file[new_cmd.command_name] = new_idl_file
new_command_file_path[new_cmd.command_name] = new_idl_file_path
return new_commands, new_command_file, new_command_file_path
def get_chained_type_or_struct(
chained_type_or_struct: Union[syntax.ChainedType, syntax.ChainedStruct],
idl_file: syntax.IDLParsedSpec,
idl_file_path: str) -> Optional[Union[syntax.Enum, syntax.Struct, syntax.Type]]:
"""Resolve and get chained type or struct from the IDL file."""
parser_ctxt = errors.ParserContext(idl_file_path, errors.ParserErrorCollection())
resolved = idl_file.spec.symbols.resolve_type_from_name(parser_ctxt, chained_type_or_struct,
chained_type_or_struct.name,
chained_type_or_struct.name)
if parser_ctxt.errors.has_errors():
parser_ctxt.errors.dump_errors()
return resolved
def get_field_type(field: Union[syntax.Field, syntax.Command], idl_file: syntax.IDLParsedSpec,
idl_file_path: str) -> Optional[Union[syntax.Enum, syntax.Struct, syntax.Type]]:
"""Resolve and get field type of a field from the IDL file."""
parser_ctxt = errors.ParserContext(idl_file_path, errors.ParserErrorCollection())
field_type = idl_file.spec.symbols.resolve_field_type(parser_ctxt, field, field.name,
field.type)
if parser_ctxt.errors.has_errors():
parser_ctxt.errors.dump_errors()
return field_type
def check_subset(ctxt: IDLCompatibilityContext, cmd_name: str, field_name: str, type_name: str,
sub_list: List[Union[str, syntax.EnumValue]],
super_list: List[Union[str, syntax.EnumValue]], file_path: str):
# pylint: disable=too-many-arguments
"""Check if sub_list is a subset of the super_list and log an error if not."""
if not set(sub_list).issubset(super_list):
ctxt.add_reply_field_not_subset_error(cmd_name, field_name, type_name, file_path)
def check_superset(ctxt: IDLCompatibilityContext, cmd_name: str, type_name: str,
super_list: List[Union[str, syntax.EnumValue]],
sub_list: List[Union[str, syntax.EnumValue]], file_path: str,
param_name: Optional[str], is_command_parameter: bool):
# pylint: disable=too-many-arguments
"""Check if super_list is a superset of the sub_list and log an error if not."""
if not set(super_list).issuperset(sub_list):
ctxt.add_command_or_param_type_not_superset_error(cmd_name, type_name, file_path,
param_name, is_command_parameter)
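# Illustrative note on direction: a new reply enum may only drop values relative to the old one
# (its values must be a subset, checked by check_subset), while a new command-parameter enum must
# keep accepting every old value (its values must be a superset, checked by check_superset).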
def check_reply_field_type_recursive(ctxt: IDLCompatibilityContext,
field_pair: FieldCompatibilityPair) -> None:
# pylint: disable=too-many-branches
"""Check compatibility between old and new reply field type if old field type is a syntax.Type instance."""
old_field = field_pair.old
new_field = field_pair.new
old_field_type = old_field.field_type
new_field_type = new_field.field_type
cmd_name = field_pair.cmd_name
field_name = field_pair.field_name
# If the old field is unstable, we only add errors related to the use of 'any' as the
# bson_serialization_type. For all other errors, we check that the old field is stable
# before adding an error.
if not isinstance(new_field_type, syntax.Type):
if not old_field.unstable:
ctxt.add_new_reply_field_type_enum_or_struct_error(
cmd_name, field_name, new_field_type.name, old_field_type.name,
new_field.idl_file_path)
return
# If bson_serialization_type switches from 'any' to non-any type.
if "any" in old_field_type.bson_serialization_type and "any" not in new_field_type.bson_serialization_type:
ctxt.add_old_reply_field_bson_any_error(cmd_name, field_name, old_field_type.name,
new_field_type.name, old_field.idl_file_path)
return
# If bson_serialization_type switches from non-any to 'any' type.
if "any" not in old_field_type.bson_serialization_type and "any" in new_field_type.bson_serialization_type:
ctxt.add_new_reply_field_bson_any_error(cmd_name, field_name, old_field_type.name,
new_field_type.name, new_field.idl_file_path)
return
allow_name: str = cmd_name + "-reply-" + field_name
if "any" in old_field_type.bson_serialization_type:
# If 'any' is not explicitly allowed as the bson_serialization_type.
if allow_name not in ALLOW_ANY_TYPE_LIST:
ctxt.add_old_reply_field_bson_any_not_allowed_error(
cmd_name, field_name, old_field_type.name, old_field.idl_file_path)
return
# If cpp_type is changed, it's a potential breaking change.
if old_field_type.cpp_type != new_field_type.cpp_type:
ctxt.add_reply_field_cpp_type_not_equal_error(cmd_name, field_name, new_field_type.name,
new_field.idl_file_path)
# If serializer is changed, it's a potential breaking change.
if (not old_field.unstable) and old_field_type.serializer != new_field_type.serializer:
ctxt.add_reply_field_serializer_not_equal_error(
cmd_name, field_name, new_field_type.name, new_field.idl_file_path)
# If deserializer is changed, it's a potential breaking change.
if (not old_field.unstable) and old_field_type.deserializer != new_field_type.deserializer:
ctxt.add_reply_field_deserializer_not_equal_error(
cmd_name, field_name, new_field_type.name, new_field.idl_file_path)
if isinstance(old_field_type, syntax.VariantType):
# If the new type is not variant just check the single type.
new_variant_types = new_field_type.variant_types if isinstance(
new_field_type, syntax.VariantType) else [new_field_type]
old_variant_types = old_field_type.variant_types
# Check that new variant types are a subset of old variant types.
for new_variant_type in new_variant_types:
for old_variant_type in old_variant_types:
if old_variant_type.name == new_variant_type.name:
# Check that the old and new version of each variant type is also compatible.
old = FieldCompatibility(old_variant_type, old_field.idl_file,
old_field.idl_file_path, old_field.unstable,
old_field.optional)
new = FieldCompatibility(new_variant_type, new_field.idl_file,
new_field.idl_file_path, new_field.unstable,
new_field.optional)
check_reply_field_type(ctxt,
FieldCompatibilityPair(old, new, cmd_name, field_name))
break
else:
# new_variant_type was not found in old_variant_types.
if not old_field.unstable:
ctxt.add_new_reply_field_variant_type_not_subset_error(
cmd_name, field_name, new_variant_type.name, new_field.idl_file_path)
# If new type is variant and has a struct as a variant type, compare old and new variant_struct_type.
# Since enums can't be part of variant types, we don't explicitly check for enums.
if isinstance(new_field_type,
syntax.VariantType) and new_field_type.variant_struct_type is not None:
if old_field_type.variant_struct_type is None and not old_field.unstable:
ctxt.add_new_reply_field_variant_type_not_subset_error(
cmd_name, field_name, new_field_type.variant_struct_type.name,
new_field.idl_file_path)
else:
check_reply_fields(ctxt, old_field_type.variant_struct_type,
new_field_type.variant_struct_type, cmd_name, old_field.idl_file,
new_field.idl_file, old_field.idl_file_path,
new_field.idl_file_path)
elif not old_field.unstable:
if isinstance(new_field_type, syntax.VariantType):
ctxt.add_new_reply_field_variant_type_error(cmd_name, field_name, old_field_type.name,
new_field.idl_file_path)
else:
check_subset(ctxt, cmd_name, field_name, new_field_type.name,
new_field_type.bson_serialization_type,
old_field_type.bson_serialization_type, new_field.idl_file_path)
def check_reply_field_type(ctxt: IDLCompatibilityContext, field_pair: FieldCompatibilityPair):
"""Check compatibility between old and new reply field type."""
# pylint: disable=too-many-branches
old_field = field_pair.old
new_field = field_pair.new
array_check = check_array_type(ctxt, "reply_field", old_field.field_type, new_field.field_type,
field_pair.cmd_name, 'type', old_field.idl_file_path,
new_field.idl_file_path, old_field.unstable)
if array_check == ArrayTypeCheckResult.INVALID:
return
if array_check == ArrayTypeCheckResult.TRUE:
old_field.field_type = old_field.field_type.element_type
new_field.field_type = new_field.field_type.element_type
old_field_type = old_field.field_type
new_field_type = new_field.field_type
cmd_name = field_pair.cmd_name
field_name = field_pair.field_name
if old_field_type is None:
ctxt.add_reply_field_type_invalid_error(cmd_name, field_name, old_field.idl_file_path)
ctxt.errors.dump_errors()
sys.exit(1)
if new_field_type is None:
ctxt.add_reply_field_type_invalid_error(cmd_name, field_name, new_field.idl_file_path)
ctxt.errors.dump_errors()
sys.exit(1)
if isinstance(old_field_type, syntax.Type):
check_reply_field_type_recursive(ctxt, field_pair)
elif isinstance(old_field_type, syntax.Enum) and not old_field.unstable:
if isinstance(new_field_type, syntax.Enum):
check_subset(ctxt, cmd_name, field_name, new_field_type.name, new_field_type.values,
old_field_type.values, new_field.idl_file_path)
else:
ctxt.add_new_reply_field_type_not_enum_error(cmd_name, field_name, new_field_type.name,
old_field_type.name,
new_field.idl_file_path)
elif isinstance(old_field_type, syntax.Struct):
if isinstance(new_field_type, syntax.Struct):
check_reply_fields(ctxt, old_field_type, new_field_type, cmd_name, old_field.idl_file,
new_field.idl_file, old_field.idl_file_path, new_field.idl_file_path)
else:
if not old_field.unstable:
ctxt.add_new_reply_field_type_not_struct_error(
cmd_name, field_name, new_field_type.name, old_field_type.name,
new_field.idl_file_path)
def check_array_type(ctxt: IDLCompatibilityContext, symbol: str,
old_type: Optional[Union[syntax.Enum, syntax.Struct, syntax.Type]],
new_type: Optional[Union[syntax.Enum, syntax.Struct, syntax.Type]],
cmd_name: str, symbol_name: str, old_idl_file_path: str,
new_idl_file_path: str, old_field_unstable: bool) -> ArrayTypeCheckResult:
"""
Check compatibility between old and new ArrayTypes.
:returns:
- ArrayTypeCheckResult.TRUE : when the old type and new type are of array type.
- ArrayTypeCheckResult.FALSE : when the old type and new type aren't of array type.
- ArrayTypeCheckResult.INVALID : when one of the types is not of array type while the other one is.
"""
# pylint: disable=too-many-arguments,too-many-branches
old_is_array = isinstance(old_type, syntax.ArrayType)
new_is_array = isinstance(new_type, syntax.ArrayType)
if not old_is_array and not new_is_array:
return ArrayTypeCheckResult.FALSE
if (not old_is_array or not new_is_array) and not old_field_unstable:
ctxt.add_type_not_array_error(symbol, cmd_name, symbol_name, new_type.name, old_type.name,
new_idl_file_path if old_is_array else old_idl_file_path)
return ArrayTypeCheckResult.INVALID
return ArrayTypeCheckResult.TRUE
def check_reply_field(ctxt: IDLCompatibilityContext, old_field: syntax.Field,
new_field: syntax.Field, cmd_name: str, old_idl_file: syntax.IDLParsedSpec,
new_idl_file: syntax.IDLParsedSpec, old_idl_file_path: str,
new_idl_file_path: str):
"""Check compatibility between old and new reply field."""
# pylint: disable=too-many-arguments
old_field_type = get_field_type(old_field, old_idl_file, old_idl_file_path)
new_field_type = get_field_type(new_field, new_idl_file, new_idl_file_path)
old_field_optional = old_field.optional or (old_field_type
and old_field_type.name == "optionalBool")
new_field_optional = new_field.optional or (new_field_type
and new_field_type.name == "optionalBool")
field_name: str = cmd_name + "-reply-" + new_field.name
if not old_field.unstable and field_name not in IGNORE_UNSTABLE_LIST:
if new_field.unstable and field_name not in IGNORE_UNSTABLE_LIST:
ctxt.add_new_reply_field_unstable_error(cmd_name, new_field.name, new_idl_file_path)
if new_field_optional and not old_field_optional:
ctxt.add_new_reply_field_optional_error(cmd_name, new_field.name, new_idl_file_path)
if new_field.validator:
if old_field.validator:
if new_field.validator != old_field.validator:
ctxt.add_reply_field_validators_not_equal_error(cmd_name, new_field.name,
new_idl_file_path)
else:
ctxt.add_reply_field_contains_validator_error(cmd_name, new_field.name,
new_idl_file_path)
old_field_compatibility = FieldCompatibility(old_field_type, old_idl_file, old_idl_file_path,
old_field.unstable, old_field.optional)
new_field_compatibility = FieldCompatibility(new_field_type, new_idl_file, new_idl_file_path,
new_field.unstable, new_field.optional)
field_pair = FieldCompatibilityPair(old_field_compatibility, new_field_compatibility, cmd_name,
old_field.name)
check_reply_field_type(ctxt, field_pair)
def check_reply_fields(ctxt: IDLCompatibilityContext, old_reply: syntax.Struct,
new_reply: syntax.Struct, cmd_name: str, old_idl_file: syntax.IDLParsedSpec,
new_idl_file: syntax.IDLParsedSpec, old_idl_file_path: str,
new_idl_file_path: str):
"""Check compatibility between old and new reply fields."""
# pylint: disable=too-many-arguments,too-many-branches
for new_chained_type in new_reply.chained_types or []:
resolved_new_chained_type = get_chained_type_or_struct(new_chained_type, new_idl_file,
new_idl_file_path)
if resolved_new_chained_type is not None:
for old_chained_type in old_reply.chained_types or []:
resolved_old_chained_type = get_chained_type_or_struct(
old_chained_type, old_idl_file, old_idl_file_path)
if (resolved_old_chained_type is not None
and resolved_old_chained_type.name == resolved_new_chained_type.name):
# Check that the old and new version of each chained type is also compatible.
old = FieldCompatibility(resolved_old_chained_type, old_idl_file,
old_idl_file_path, unstable=False, optional=False)
new = FieldCompatibility(resolved_new_chained_type, new_idl_file,
new_idl_file_path, unstable=False, optional=False)
check_reply_field_type(
ctxt, FieldCompatibilityPair(old, new, cmd_name, old_reply.name))
break
else:
# new chained type was not found in old chained types.
ctxt.add_new_reply_chained_type_not_subset_error(
cmd_name, new_reply.name, resolved_new_chained_type.name, new_idl_file_path)
old_reply_fields = get_all_struct_fields(old_reply, old_idl_file, old_idl_file_path)
new_reply_fields = get_all_struct_fields(new_reply, new_idl_file, new_idl_file_path)
for old_field in old_reply_fields or []:
new_field_exists = False
for new_field in new_reply_fields or []:
if new_field.name == old_field.name:
new_field_exists = True
check_reply_field(ctxt, old_field, new_field, cmd_name, old_idl_file, new_idl_file,
old_idl_file_path, new_idl_file_path)
break
if not new_field_exists and not old_field.unstable:
ctxt.add_new_reply_field_missing_error(cmd_name, old_field.name, old_idl_file_path)
for new_field in new_reply_fields or []:
# Check that all fields in the new IDL have specified the 'unstable' field.
if new_field.unstable is None:
ctxt.add_new_reply_field_requires_unstable_error(cmd_name, new_field.name,
new_idl_file_path)
# Check that newly added fields do not have an unallowed use of 'any' as the
# bson_serialization_type.
newly_added = True
for old_field in old_reply_fields or []:
if new_field.name == old_field.name:
newly_added = False
if newly_added:
allow_name: str = cmd_name + "-reply-" + new_field.name
new_field_type = get_field_type(new_field, new_idl_file, new_idl_file_path)
# If we encounter a bson_serialization_type of None, we skip checking if 'any' is used.
if isinstance(
new_field_type, syntax.Type
) and new_field_type.bson_serialization_type is not None and "any" in new_field_type.bson_serialization_type:
# If 'any' is not explicitly allowed as the bson_serialization_type.
any_allow = allow_name in ALLOW_ANY_TYPE_LIST or new_field_type.name == 'optionalBool'
if not any_allow:
ctxt.add_new_reply_field_bson_any_not_allowed_error(
cmd_name, new_field.name, new_field_type.name, new_idl_file_path)
def check_param_or_command_type_recursive(ctxt: IDLCompatibilityContext,
field_pair: FieldCompatibilityPair,
is_command_parameter: bool):
# pylint: disable=too-many-branches,too-many-locals
"""
Check compatibility between old and new command or param type recursively.
If the old type is a syntax.Type instance, check the compatibility between the old and new
command type or parameter type recursively.
"""
old_field = field_pair.old
new_field = field_pair.new
old_type = old_field.field_type
new_type = new_field.field_type
cmd_name = field_pair.cmd_name
param_name = field_pair.field_name
# If the old field is unstable, we only add errors related to the use of 'any' as the
# bson_serialization_type. For all other errors, we check that the old field is stable
# before adding an error.
if not isinstance(new_type, syntax.Type):
if not old_field.unstable:
ctxt.add_new_command_or_param_type_enum_or_struct_error(
cmd_name, new_type.name, old_type.name, new_field.idl_file_path, param_name,
is_command_parameter)
return
allow_name: str = cmd_name + "-param-" + param_name if is_command_parameter else cmd_name
# If bson_serialization_type switches from 'any' to non-any type.
if "any" in old_type.bson_serialization_type and "any" not in new_type.bson_serialization_type:
ctxt.add_old_command_or_param_type_bson_any_error(cmd_name, old_type.name, new_type.name,
old_field.idl_file_path, param_name,
is_command_parameter)
return
# If bson_serialization_type switches from non-any to 'any' type.
if "any" not in old_type.bson_serialization_type and "any" in new_type.bson_serialization_type:
ctxt.add_new_command_or_param_type_bson_any_error(cmd_name, old_type.name, new_type.name,
new_field.idl_file_path, param_name,
is_command_parameter)
return
if "any" in old_type.bson_serialization_type:
# If 'any' is not explicitly allowed as the bson_serialization_type.
if allow_name not in ALLOW_ANY_TYPE_LIST:
ctxt.add_old_command_or_param_type_bson_any_not_allowed_error(
cmd_name, old_type.name, old_field.idl_file_path, param_name, is_command_parameter)
return
# If cpp_type is changed, it's a potential breaking change.
if old_type.cpp_type != new_type.cpp_type:
ctxt.add_command_or_param_cpp_type_not_equal_error(
cmd_name, new_type.name, new_field.idl_file_path, param_name, is_command_parameter)
# If serializer is changed, it's a potential breaking change.
if (not old_field.unstable) and old_type.serializer != new_type.serializer:
ctxt.add_command_or_param_serializer_not_equal_error(
cmd_name, new_type.name, new_field.idl_file_path, param_name, is_command_parameter)
# If deserializer is changed, it's a potential breaking change.
if (not old_field.unstable) and old_type.deserializer != new_type.deserializer:
ctxt.add_command_or_param_deserializer_not_equal_error(
cmd_name, new_type.name, new_field.idl_file_path, param_name, is_command_parameter)
if isinstance(old_type, syntax.VariantType):
if not isinstance(new_type, syntax.VariantType):
if not old_field.unstable:
ctxt.add_new_command_or_param_type_not_variant_type_error(
cmd_name, new_type.name, new_field.idl_file_path, param_name,
is_command_parameter)
else:
new_variant_types = new_type.variant_types
old_variant_types = old_type.variant_types
# Check that new variant types are a superset of old variant types.
for old_variant_type in old_variant_types:
for new_variant_type in new_variant_types:
                    # object and object_owned serialize to the same bson type. object_owned->object is
                    # not always safe, so we limit this special case to object->object_owned.
if (old_variant_type.name == "object" and new_variant_type.name == "object_owned") or \
old_variant_type.name == new_variant_type.name:
# Check that the old and new version of each variant type is also compatible.
old = FieldCompatibility(old_variant_type, old_field.idl_file,
old_field.idl_file_path, old_field.unstable,
old_field.optional)
new = FieldCompatibility(new_variant_type, new_field.idl_file,
new_field.idl_file_path, new_field.unstable,
new_field.optional)
check_param_or_command_type(
ctxt, FieldCompatibilityPair(old, new, cmd_name, param_name),
is_command_parameter)
break
else:
if not old_field.unstable:
# old_variant_type was not found in new_variant_types.
ctxt.add_new_command_or_param_variant_type_not_superset_error(
cmd_name, old_variant_type.name, new_field.idl_file_path, param_name,
is_command_parameter)
# If old and new types both have a struct as a variant type, compare old and new variant_struct_type.
# Since enums can't be part of variant types, we don't explicitly check for enums.
if old_type.variant_struct_type is not None:
if new_type.variant_struct_type is not None:
check_command_params_or_type_struct_fields(
ctxt, old_type.variant_struct_type, new_type.variant_struct_type, cmd_name,
old_field.idl_file, new_field.idl_file, old_field.idl_file_path,
new_field.idl_file_path, is_command_parameter)
# If old type has a variant struct type and new type does not have a variant struct type.
elif not old_field.unstable:
ctxt.add_new_command_or_param_variant_type_not_superset_error(
cmd_name, old_type.variant_struct_type.name, new_field.idl_file_path,
param_name, is_command_parameter)
elif not old_field.unstable:
check_superset(ctxt, cmd_name, new_type.name, new_type.bson_serialization_type,
old_type.bson_serialization_type, new_field.idl_file_path, param_name,
is_command_parameter)
def check_param_or_command_type(ctxt: IDLCompatibilityContext, field_pair: FieldCompatibilityPair,
is_command_parameter: bool):
"""Check compatibility between old and new command parameter type or command type."""
# pylint: disable=too-many-branches
old_field = field_pair.old
new_field = field_pair.new
array_check = check_array_type(
ctxt, "command_parameter" if is_command_parameter else "command_namespace",
old_field.field_type, new_field.field_type, field_pair.cmd_name,
field_pair.field_name if is_command_parameter else "type", old_field.idl_file_path,
new_field.idl_file_path, old_field.unstable)
if array_check == ArrayTypeCheckResult.INVALID:
return
if array_check == ArrayTypeCheckResult.TRUE:
old_field.field_type = old_field.field_type.element_type
new_field.field_type = new_field.field_type.element_type
old_type = old_field.field_type
new_type = new_field.field_type
if old_type is None:
ctxt.add_command_or_param_type_invalid_error(field_pair.cmd_name, old_field.idl_file_path,
field_pair.field_name, is_command_parameter)
ctxt.errors.dump_errors()
sys.exit(1)
if new_type is None:
ctxt.add_command_or_param_type_invalid_error(field_pair.cmd_name, new_field.idl_file_path,
field_pair.field_name, is_command_parameter)
ctxt.errors.dump_errors()
sys.exit(1)
if isinstance(old_type, syntax.Type):
check_param_or_command_type_recursive(ctxt, field_pair, is_command_parameter)
# Only add type errors if the old field is stable.
elif isinstance(old_type, syntax.Enum) and not old_field.unstable:
if isinstance(new_type, syntax.Enum):
check_superset(ctxt, field_pair.cmd_name, new_type.name, new_type.values,
old_type.values, new_field.idl_file_path, field_pair.field_name,
is_command_parameter)
else:
ctxt.add_new_command_or_param_type_not_enum_error(
field_pair.cmd_name, new_type.name, old_type.name, new_field.idl_file_path,
field_pair.field_name, is_command_parameter)
elif isinstance(old_type, syntax.Struct):
if isinstance(new_type, syntax.Struct):
check_command_params_or_type_struct_fields(
ctxt, old_type, new_type, field_pair.cmd_name, old_field.idl_file,
new_field.idl_file, old_field.idl_file_path, new_field.idl_file_path,
is_command_parameter)
else:
if not old_field.unstable:
ctxt.add_new_command_or_param_type_not_struct_error(
field_pair.cmd_name, new_type.name, old_type.name, new_field.idl_file_path,
field_pair.field_name, is_command_parameter)
def check_param_or_type_validator(ctxt: IDLCompatibilityContext, old_field: syntax.Field,
new_field: syntax.Field, cmd_name: str, new_idl_file_path: str,
type_name: Optional[str], is_command_parameter: bool):
"""
Check compatibility between old and new validators.
Check compatibility between old and new validators in command parameter type and command type
struct fields.
"""
# pylint: disable=too-many-arguments
if new_field.validator:
if old_field.validator:
if new_field.validator != old_field.validator:
ctxt.add_command_or_param_type_validators_not_equal_error(
cmd_name, new_field.name, new_idl_file_path, type_name, is_command_parameter)
else:
ctxt.add_command_or_param_type_contains_validator_error(
cmd_name, new_field.name, new_idl_file_path, type_name, is_command_parameter)
def get_all_struct_fields(struct: syntax.Struct, idl_file: syntax.IDLParsedSpec,
idl_file_path: str):
"""Get all the fields of a struct, including the chained struct fields."""
all_fields = struct.fields or []
for chained_struct in struct.chained_structs or []:
resolved_chained_struct = get_chained_type_or_struct(chained_struct, idl_file,
idl_file_path)
if resolved_chained_struct is not None:
for field in resolved_chained_struct.fields:
all_fields.append(field)
return all_fields
def check_command_params_or_type_struct_fields(
ctxt: IDLCompatibilityContext, old_struct: syntax.Struct, new_struct: syntax.Struct,
cmd_name: str, old_idl_file: syntax.IDLParsedSpec, new_idl_file: syntax.IDLParsedSpec,
old_idl_file_path: str, new_idl_file_path: str, is_command_parameter: bool):
"""Check compatibility between old and new parameters or command type fields."""
# pylint: disable=too-many-arguments,too-many-branches
# Check chained types.
for old_chained_type in old_struct.chained_types or []:
resolved_old_chained_type = get_chained_type_or_struct(old_chained_type, old_idl_file,
old_idl_file_path)
if resolved_old_chained_type is not None:
for new_chained_type in new_struct.chained_types or []:
resolved_new_chained_type = get_chained_type_or_struct(
new_chained_type, new_idl_file, new_idl_file_path)
if (resolved_new_chained_type is not None
and resolved_old_chained_type.name == resolved_new_chained_type.name):
# Check that the old and new version of each chained type is also compatible.
old = FieldCompatibility(resolved_old_chained_type, old_idl_file,
old_idl_file_path, unstable=False, optional=False)
new = FieldCompatibility(resolved_new_chained_type, new_idl_file,
new_idl_file_path, unstable=False, optional=False)
check_param_or_command_type(
ctxt, FieldCompatibilityPair(old, new, cmd_name, old_struct.name),
is_command_parameter=False)
break
else:
# old chained type was not found in new chained types.
ctxt.add_new_command_or_param_chained_type_not_superset_error(
cmd_name, old_chained_type.name, new_idl_file_path, old_struct.name,
is_command_parameter)
old_struct_fields = get_all_struct_fields(old_struct, old_idl_file, old_idl_file_path)
new_struct_fields = get_all_struct_fields(new_struct, new_idl_file, new_idl_file_path)
# We need to special-case the stmtId parameter because it was removed. However, it's not a
# breaking change to the API because it was added and removed behind a feature flag, so it was
# never officially released.
allow_list = ["endSessions-param-stmtId", "refreshSessions-param-stmtId"]
for old_field in old_struct_fields or []:
new_field_exists = False
for new_field in new_struct_fields or []:
if new_field.name == old_field.name:
new_field_exists = True
check_command_param_or_type_struct_field(
ctxt, old_field, new_field, cmd_name, old_idl_file, new_idl_file,
old_idl_file_path, new_idl_file_path, old_struct.name, is_command_parameter)
break
allow_name: str = cmd_name + "-param-" + old_field.name
if not new_field_exists and not old_field.unstable and allow_name not in allow_list:
ctxt.add_new_param_or_command_type_field_missing_error(
cmd_name, old_field.name, old_idl_file_path, old_struct.name, is_command_parameter)
# Check if a new field has been added to the parameters or type struct.
# If so, it must be optional.
for new_field in new_struct_fields or []:
# Check that all fields in the new IDL have specified the 'unstable' field.
if new_field.unstable is None:
ctxt.add_new_param_or_command_type_field_requires_unstable_error(
cmd_name, new_field.name, new_idl_file_path, is_command_parameter)
newly_added = True
for old_field in old_struct_fields or []:
if new_field.name == old_field.name:
newly_added = False
if newly_added:
new_field_type = get_field_type(new_field, new_idl_file, new_idl_file_path)
new_field_optional = new_field.optional or (new_field_type
and new_field_type.name == 'optionalBool')
if not new_field_optional and not new_field.unstable:
ctxt.add_new_param_or_command_type_field_added_required_error(
cmd_name, new_field.name, new_idl_file_path, new_struct.name,
is_command_parameter)
# Check that a new field does not have an unallowed use of 'any' as the bson_serialization_type.
any_allow_name: str = (cmd_name + "-param-" + new_field.name
if is_command_parameter else cmd_name)
# If we encounter a bson_serialization_type of None, we skip checking if 'any' is used.
if isinstance(
new_field_type, syntax.Type
) and new_field_type.bson_serialization_type is not None and "any" in new_field_type.bson_serialization_type:
# If 'any' is not explicitly allowed as the bson_serialization_type.
any_allow = any_allow_name in ALLOW_ANY_TYPE_LIST or new_field_type.name == 'optionalBool'
if not any_allow:
ctxt.add_new_command_or_param_type_bson_any_not_allowed_error(
cmd_name, new_field_type.name, old_idl_file_path, new_field.name,
is_command_parameter)
def check_command_param_or_type_struct_field(
ctxt: IDLCompatibilityContext, old_field: syntax.Field, new_field: syntax.Field,
cmd_name: str, old_idl_file: syntax.IDLParsedSpec, new_idl_file: syntax.IDLParsedSpec,
old_idl_file_path: str, new_idl_file_path: str, type_name: Optional[str],
is_command_parameter: bool):
"""Check compatibility between the old and new command parameter or command type struct field."""
# pylint: disable=too-many-arguments
field_name: str = cmd_name + "-param-" + new_field.name
if not old_field.unstable and new_field.unstable and field_name not in IGNORE_UNSTABLE_LIST:
ctxt.add_new_param_or_command_type_field_unstable_error(
cmd_name, old_field.name, old_idl_file_path, type_name, is_command_parameter)
# If old field is unstable and new field is stable, the new field should either be optional or
# have a default value.
old_field_type = get_field_type(old_field, old_idl_file, old_idl_file_path)
new_field_type = get_field_type(new_field, new_idl_file, new_idl_file_path)
old_field_optional = old_field.optional or (old_field_type
and old_field_type.name == "optionalBool")
new_field_optional = new_field.optional or (new_field_type
and new_field_type.name == "optionalBool")
if old_field.unstable and not new_field.unstable and not new_field_optional and new_field.default is None:
ctxt.add_new_param_or_command_type_field_stable_required_no_default_error(
cmd_name, old_field.name, old_idl_file_path, type_name, is_command_parameter)
if old_field_optional and not new_field_optional:
ctxt.add_new_param_or_command_type_field_required_error(
cmd_name, old_field.name, old_idl_file_path, type_name, is_command_parameter)
if not old_field.unstable:
check_param_or_type_validator(ctxt, old_field, new_field, cmd_name, new_idl_file_path,
type_name, is_command_parameter)
old_field_compatibility = FieldCompatibility(old_field_type, old_idl_file, old_idl_file_path,
old_field.unstable, old_field.optional)
new_field_compatibility = FieldCompatibility(new_field_type, new_idl_file, new_idl_file_path,
new_field.unstable, new_field.optional)
field_pair = FieldCompatibilityPair(old_field_compatibility, new_field_compatibility, cmd_name,
old_field.name)
check_param_or_command_type(ctxt, field_pair, is_command_parameter)
def check_namespace(ctxt: IDLCompatibilityContext, old_cmd: syntax.Command, new_cmd: syntax.Command,
old_idl_file: syntax.IDLParsedSpec, new_idl_file: syntax.IDLParsedSpec,
old_idl_file_path: str, new_idl_file_path: str):
"""Check compatibility between old and new namespace."""
# pylint: disable=too-many-arguments
old_namespace = old_cmd.namespace
new_namespace = new_cmd.namespace
# IDL parser already checks that namespace must be one of these 4 types.
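# Summary of the transitions allowed by the checks below: an 'ignored'
# namespace must stay 'ignored'; 'concatenate_with_db_or_uuid' may only
# become 'ignored' or stay the same; 'concatenate_with_db' may become
# anything except 'type'; and a 'type' namespace may only change to
# 'ignored', unless the old type is 'namespacestring', in which case any
# namespace is accepted (when both are 'type', the types themselves are
# checked for compatibility).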
if old_namespace == common.COMMAND_NAMESPACE_IGNORED:
if new_namespace != common.COMMAND_NAMESPACE_IGNORED:
ctxt.add_new_namespace_incompatible_error(old_cmd.command_name, old_namespace,
new_namespace, new_idl_file_path)
elif old_namespace == common.COMMAND_NAMESPACE_CONCATENATE_WITH_DB_OR_UUID:
if new_namespace not in (common.COMMAND_NAMESPACE_IGNORED,
common.COMMAND_NAMESPACE_CONCATENATE_WITH_DB_OR_UUID):
ctxt.add_new_namespace_incompatible_error(old_cmd.command_name, old_namespace,
new_namespace, new_idl_file_path)
elif old_namespace == common.COMMAND_NAMESPACE_CONCATENATE_WITH_DB:
if new_namespace == common.COMMAND_NAMESPACE_TYPE:
ctxt.add_new_namespace_incompatible_error(old_cmd.command_name, old_namespace,
new_namespace, new_idl_file_path)
elif old_namespace == common.COMMAND_NAMESPACE_TYPE:
old_type = get_field_type(old_cmd, old_idl_file, old_idl_file_path)
if new_namespace == common.COMMAND_NAMESPACE_TYPE:
new_type = get_field_type(new_cmd, new_idl_file, new_idl_file_path)
old = FieldCompatibility(old_type, old_idl_file, old_idl_file_path, unstable=False,
optional=False)
new = FieldCompatibility(new_type, new_idl_file, new_idl_file_path, unstable=False,
optional=False)
check_param_or_command_type(ctxt,
FieldCompatibilityPair(old, new, old_cmd.command_name, ""),
is_command_parameter=False)
# If old type is "namespacestring", the new namespace can be changed to any
# of the other namespace types.
elif old_type.name != "namespacestring":
# Otherwise, the new namespace can only be changed to "ignored".
if new_namespace != common.COMMAND_NAMESPACE_IGNORED:
ctxt.add_new_namespace_incompatible_error(old_cmd.command_name, old_namespace,
new_namespace, new_idl_file_path)
else:
assert False, 'unrecognized namespace option'
def check_error_reply(old_basic_types_path: str, new_basic_types_path: str,
old_import_directories: List[str],
new_import_directories: List[str]) -> IDLCompatibilityErrorCollection:
"""Check IDL compatibility between old and new ErrorReply."""
old_idl_dir = os.path.dirname(old_basic_types_path)
new_idl_dir = os.path.dirname(new_basic_types_path)
ctxt = IDLCompatibilityContext(old_idl_dir, new_idl_dir, IDLCompatibilityErrorCollection())
with open(old_basic_types_path) as old_file:
old_idl_file = parser.parse(old_file, old_basic_types_path,
CompilerImportResolver(old_import_directories))
if old_idl_file.errors:
old_idl_file.errors.dump_errors()
raise ValueError(f"Cannot parse {old_basic_types_path}")
old_error_reply_struct = old_idl_file.spec.symbols.get_struct("ErrorReply")
if old_error_reply_struct is None:
ctxt.add_missing_error_reply_struct_error(old_basic_types_path)
else:
with open(new_basic_types_path) as new_file:
new_idl_file = parser.parse(new_file, new_basic_types_path,
CompilerImportResolver(new_import_directories))
if new_idl_file.errors:
new_idl_file.errors.dump_errors()
raise ValueError(f"Cannot parse {new_basic_types_path}")
new_error_reply_struct = new_idl_file.spec.symbols.get_struct("ErrorReply")
if new_error_reply_struct is None:
ctxt.add_missing_error_reply_struct_error(new_basic_types_path)
else:
check_reply_fields(ctxt, old_error_reply_struct, new_error_reply_struct, "n/a",
old_idl_file, new_idl_file, old_basic_types_path,
new_basic_types_path)
ctxt.errors.dump_errors()
return ctxt.errors
def split_complex_checks(
complex_checks: List[syntax.AccessCheck]) -> Tuple[List[str], List[syntax.Privilege]]:
"""Split a list of AccessCheck into checks and privileges."""
checks = [x.check for x in complex_checks if x.check is not None]
privileges = [x.privilege for x in complex_checks if x.privilege is not None]
# Sort the list of privileges by the length of the action_type list, in decreasing order
# so that two lists of privileges can be compared later.
return checks, sorted(privileges, key=lambda x: len(x.action_type), reverse=True)
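# Hypothetical illustration (names invented for clarity, not taken from a
# real IDL file): given one access check whose 'check' is 'is_authenticated'
# and another whose 'privilege' has resource_pattern 'cluster' and
# action_type ['a', 'b'], this returns (['is_authenticated'], [privilege]),
# with privileges ordered by decreasing len(action_type).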
def check_complex_checks(ctxt: IDLCompatibilityContext,
old_complex_checks: List[syntax.AccessCheck],
new_complex_checks: List[syntax.AccessCheck], cmd: syntax.Command,
new_idl_file_path: str) -> None:
"""Check the compatibility between complex access checks of the old and new command."""
cmd_name = cmd.command_name
if len(new_complex_checks) > len(old_complex_checks):
ctxt.add_new_additional_complex_access_check_error(cmd_name, new_idl_file_path)
else:
old_checks, old_privileges = split_complex_checks(old_complex_checks)
new_checks, new_privileges = split_complex_checks(new_complex_checks)
if not set(new_checks).issubset(old_checks):
ctxt.add_new_complex_checks_not_subset_error(cmd_name, new_idl_file_path)
if len(new_privileges) > len(old_privileges):
ctxt.add_new_complex_privileges_not_subset_error(cmd_name, new_idl_file_path)
else:
# Check that each new_privilege matches an old_privilege (the resource_pattern is
# equal and the action_types are a subset of the old action_types).
for new_privilege in new_privileges:
for old_privilege in old_privileges:
if (new_privilege.resource_pattern == old_privilege.resource_pattern
and set(new_privilege.action_type).issubset(old_privilege.action_type)):
old_privileges.remove(old_privilege)
break
else:
ctxt.add_new_complex_privileges_not_subset_error(cmd_name, new_idl_file_path)
def split_complex_checks_agg_stages(
complex_checks: List[syntax.AccessCheck]) -> Dict[str, List[syntax.AccessCheck]]:
"""Split a list of AccessChecks into a map keyed by aggregation stage (defaults to None)."""
complex_checks_agg_stages: Dict[str, List[syntax.AccessCheck]] = dict()
for access_check in complex_checks:
agg_stage = None
if access_check.privilege is not None:
# x.privilege.agg_stage can still be None.
agg_stage = access_check.privilege.agg_stage
if agg_stage not in complex_checks_agg_stages:
complex_checks_agg_stages[agg_stage] = []
complex_checks_agg_stages[agg_stage].append(access_check)
return complex_checks_agg_stages
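# Hypothetical illustration: if one access check carries a privilege with
# agg_stage '$collStats' and another carries no privilege (or one whose
# agg_stage is None), the returned map looks like
# {'$collStats': [check_a], None: [check_b]}, so each aggregation stage can
# be compared independently by check_complex_checks_agg_stages below.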
def check_complex_checks_agg_stages(ctxt: IDLCompatibilityContext,
old_complex_checks: List[syntax.AccessCheck],
new_complex_checks: List[syntax.AccessCheck],
cmd: syntax.Command, new_idl_file_path: str) -> None:
"""Check the compatibility between complex access checks of the old and new agggreation stages."""
new_complex_checks_agg_stages = split_complex_checks_agg_stages(new_complex_checks)
old_complex_checks_agg_stages = split_complex_checks_agg_stages(old_complex_checks)
for agg_stage in new_complex_checks_agg_stages:
# Aggregation stages are considered separate commands in the context of validating the
# Stable API. Therefore, it is okay to skip recently added aggregation stages that are
# not present in the previous release.
if agg_stage not in old_complex_checks_agg_stages:
continue
check_complex_checks(ctxt, old_complex_checks_agg_stages[agg_stage],
new_complex_checks_agg_stages[agg_stage], cmd, new_idl_file_path)
def check_security_access_checks(ctxt: IDLCompatibilityContext,
old_access_checks: syntax.AccessChecks,
new_access_checks: syntax.AccessChecks, cmd: syntax.Command,
new_idl_file_path: str) -> None:
"""Check the compatibility between security access checks of the old and new command."""
# pylint:disable=too-many-locals,too-many-branches,too-many-nested-blocks
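# Rules enforced below: the access check type must not change; for simple
# checks, the 'check' value and resource_pattern must be equal and the new
# action_type list must be a subset of the old one; complex checks are
# compared per aggregation stage; removing the access_check field is always
# an error, while adding one is an error only for API version '1' commands.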
cmd_name = cmd.command_name
if old_access_checks is not None and new_access_checks is not None:
old_access_check_type = old_access_checks.get_access_check_type()
new_access_check_type = new_access_checks.get_access_check_type()
if old_access_check_type != new_access_check_type:
ctxt.add_access_check_type_not_equal_error(cmd_name, old_access_check_type,
new_access_check_type, new_idl_file_path)
else:
old_simple_check = old_access_checks.simple
new_simple_check = new_access_checks.simple
if old_simple_check is not None and new_simple_check is not None:
if old_simple_check.check != new_simple_check.check:
ctxt.add_check_not_equal_error(cmd_name, old_simple_check.check,
new_simple_check.check, new_idl_file_path)
else:
old_privilege = old_simple_check.privilege
new_privilege = new_simple_check.privilege
if old_privilege is not None and new_privilege is not None:
if old_privilege.resource_pattern != new_privilege.resource_pattern:
ctxt.add_resource_pattern_not_equal_error(
cmd_name, old_privilege.resource_pattern,
new_privilege.resource_pattern, new_idl_file_path)
if not set(new_privilege.action_type).issubset(old_privilege.action_type):
ctxt.add_new_action_types_not_subset_error(cmd_name, new_idl_file_path)
old_complex_checks = old_access_checks.complex
new_complex_checks = new_access_checks.complex
if old_complex_checks is not None and new_complex_checks is not None:
check_complex_checks_agg_stages(ctxt, old_complex_checks, new_complex_checks, cmd,
new_idl_file_path)
elif new_access_checks is None and old_access_checks is not None:
ctxt.add_removed_access_check_field_error(cmd_name, new_idl_file_path)
elif old_access_checks is None and new_access_checks is not None and cmd.api_version == '1':
ctxt.add_added_access_check_field_error(cmd_name, new_idl_file_path)
def check_compatibility(old_idl_dir: str, new_idl_dir: str, old_import_directories: List[str],
new_import_directories: List[str]) -> IDLCompatibilityErrorCollection:
"""Check IDL compatibility between old and new IDL commands."""
# pylint: disable=too-many-locals
ctxt = IDLCompatibilityContext(old_idl_dir, new_idl_dir, IDLCompatibilityErrorCollection())
new_commands, new_command_file, new_command_file_path = get_new_commands(
ctxt, new_idl_dir, new_import_directories)
# Check new commands' compatibility with old ones.
# Note: a command can be added to V1 at any time; it's OK if a
# new command has no corresponding old command.
old_commands: Dict[str, syntax.Command] = dict()
for dirpath, _, filenames in os.walk(old_idl_dir):
for old_filename in filenames:
if not old_filename.endswith('.idl') or old_filename in SKIPPED_FILES:
continue
old_idl_file_path = os.path.join(dirpath, old_filename)
with open(old_idl_file_path) as old_file:
old_idl_file = parser.parse(
old_file, old_idl_file_path,
CompilerImportResolver(old_import_directories + [old_idl_dir]))
if old_idl_file.errors:
old_idl_file.errors.dump_errors()
raise ValueError(f"Cannot parse {old_idl_file_path}")
for old_cmd in old_idl_file.spec.symbols.commands:
# Ignore imported commands as they will be processed in their own file.
if old_cmd.api_version == "" or old_cmd.imported:
continue
# Ignore select commands that were removed after being added to the strict API.
# Only commands that were never visible to the end-user in previous releases
# (i.e., hidden behind a feature flag) should be allowed here.
if old_cmd.command_name in IGNORE_COMMANDS_LIST:
continue
if old_cmd.api_version != "1":
# We're not ready to handle future API versions yet.
ctxt.add_command_invalid_api_version_error(
old_cmd.command_name, old_cmd.api_version, old_idl_file_path)
continue
if old_cmd.command_name in old_commands:
ctxt.add_duplicate_command_name_error(old_cmd.command_name, old_idl_dir,
old_idl_file_path)
continue
old_commands[old_cmd.command_name] = old_cmd
if old_cmd.command_name not in new_commands:
# Can't remove a command from V1
ctxt.add_command_removed_error(old_cmd.command_name, old_idl_file_path)
continue
new_cmd = new_commands[old_cmd.command_name]
new_idl_file = new_command_file[old_cmd.command_name]
new_idl_file_path = new_command_file_path[old_cmd.command_name]
if not old_cmd.strict and new_cmd.strict:
ctxt.add_command_strict_true_error(new_cmd.command_name, new_idl_file_path)
# Check compatibility of command's parameters.
check_command_params_or_type_struct_fields(
ctxt, old_cmd, new_cmd, old_cmd.command_name, old_idl_file, new_idl_file,
old_idl_file_path, new_idl_file_path, is_command_parameter=True)
check_namespace(ctxt, old_cmd, new_cmd, old_idl_file, new_idl_file,
old_idl_file_path, new_idl_file_path)
old_reply = old_idl_file.spec.symbols.get_struct(old_cmd.reply_type)
new_reply = new_idl_file.spec.symbols.get_struct(new_cmd.reply_type)
check_reply_fields(ctxt, old_reply, new_reply, old_cmd.command_name,
old_idl_file, new_idl_file, old_idl_file_path,
new_idl_file_path)
check_security_access_checks(ctxt, old_cmd.access_check, new_cmd.access_check,
old_cmd, new_idl_file_path)
ctxt.errors.dump_errors()
return ctxt.errors
def get_generic_arguments(gen_args_file_path: str) -> Tuple[Set[str], Set[str]]:
"""Get arguments and reply fields from generic_argument.idl and check validity."""
arguments: Set[str] = set()
reply_fields: Set[str] = set()
with open(gen_args_file_path) as gen_args_file:
parsed_idl_file = parser.parse(gen_args_file, gen_args_file_path,
CompilerImportResolver([]))
if parsed_idl_file.errors:
parsed_idl_file.errors.dump_errors()
raise ValueError(f"Cannot parse {gen_args_file_path}")
for argument in parsed_idl_file.spec.symbols.get_generic_argument_list(
"generic_args_api_v1").fields:
arguments.add(argument.name)
for reply_field in parsed_idl_file.spec.symbols.get_generic_reply_field_list(
"generic_reply_fields_api_v1").fields:
reply_fields.add(reply_field.name)
return arguments, reply_fields
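# Illustration (contents are indicative only and depend on the actual
# 'generic_args_api_v1' and 'generic_reply_fields_api_v1' lists in the
# file): the returned sets might look like ({'comment', ...}, {'ok', ...}).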
def check_generic_arguments_compatibility(old_gen_args_file_path: str, new_gen_args_file_path: str
) -> IDLCompatibilityErrorCollection:
"""Check IDL compatibility between old and new generic_argument.idl files."""
# IDLCompatibilityContext takes in both 'old_idl_dir' and 'new_idl_dir',
# but for generic_argument.idl, the parent directories aren't helpful for logging purposes.
# Instead, we pass in "old generic_argument.idl" and "new generic_argument.idl"
# to make error messages clearer.
ctxt = IDLCompatibilityContext("old generic_argument.idl", "new generic_argument.idl",
IDLCompatibilityErrorCollection())
old_arguments, old_reply_fields = get_generic_arguments(old_gen_args_file_path)
new_arguments, new_reply_fields = get_generic_arguments(new_gen_args_file_path)
for old_argument in old_arguments:
if old_argument not in new_arguments:
ctxt.add_generic_argument_removed(old_argument, new_gen_args_file_path)
for old_reply_field in old_reply_fields:
if old_reply_field not in new_reply_fields:
ctxt.add_generic_argument_removed_reply_field(old_reply_field, new_gen_args_file_path)
return ctxt.errors
def main():
"""Run the script."""
arg_parser = argparse.ArgumentParser(description=__doc__)
arg_parser.add_argument("-v", "--verbose", action="count", help="Enable verbose logging")
arg_parser.add_argument("--old-include", dest="old_include", type=str, action="append",
default=[], help="Directory to search for old IDL import files")
arg_parser.add_argument("--new-include", dest="new_include", type=str, action="append",
default=[], help="Directory to search for new IDL import files")
arg_parser.add_argument("old_idl_dir", metavar="OLD_IDL_DIR",
help="Directory where old IDL files are located")
arg_parser.add_argument("new_idl_dir", metavar="NEW_IDL_DIR",
help="Directory where new IDL files are located")
args = arg_parser.parse_args()
error_coll = check_compatibility(args.old_idl_dir, args.new_idl_dir, args.old_include,
args.new_include)
if error_coll.has_errors():
sys.exit(1)
old_basic_types_path = os.path.join(args.old_idl_dir, "mongo/idl/basic_types.idl")
new_basic_types_path = os.path.join(args.new_idl_dir, "mongo/idl/basic_types.idl")
error_reply_coll = check_error_reply(old_basic_types_path, new_basic_types_path,
args.old_include, args.new_include)
if error_reply_coll.has_errors():
sys.exit(1)
old_generic_args_path = os.path.join(args.old_idl_dir, "mongo/idl/generic_argument.idl")
new_generic_args_path = os.path.join(args.new_idl_dir, "mongo/idl/generic_argument.idl")
error_gen_args_coll = check_generic_arguments_compatibility(old_generic_args_path,
new_generic_args_path)
if error_gen_args_coll.has_errors():
sys.exit(1)
if __name__ == "__main__":
main()
|
py | 1a30f45ca26af266dfc1cf05df3ed7b0940f5bc4 | # 7094
# ^([a-zA-Z0-9]+[._-])*[a-zA-Z0-9]+@(([a-zA-Z0-9]+|([a-zA-Z0-9]+[.-])+)[a-zA-Z0-9]+\.[a-zA-Z]{2,4}|([a-zA-Z]\.com))$
# POLYNOMIAL
# nums:5
# POLYNOMIAL AttackString:"a@"+"a"*10000+"!1 _SLQ_2"
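# Note: re2 compiles the pattern into a linear-time automaton rather than
# backtracking, so this attack string is not expected to trigger the
# polynomial slowdown that a backtracking engine (such as Python's built-in
# 're' module) could exhibit on this pattern.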
import re2 as re
from time import perf_counter
regex = """^([a-zA-Z0-9]+[._-])*[a-zA-Z0-9]+@(([a-zA-Z0-9]+|([a-zA-Z0-9]+[.-])+)[a-zA-Z0-9]+\.[a-zA-Z]{2,4}|([a-zA-Z]\.com))$"""
REGEX = re.compile(regex)
for i in range(0, 150000):
ATTACK = "a@" + "a" * i * 10000 + "!1 _SLQ_2"
LEN = len(ATTACK)
BEGIN = perf_counter()
m = REGEX.search(ATTACK)
# m = REGEX.match(ATTACK)
DURATION = perf_counter() - BEGIN
print(f"{i *10000}: took {DURATION} seconds!") |
py | 1a30f4879cacb25046c87c21c2b57560b724e379 | # Copyright 2014-2017 Insight Software Consortium.
# Copyright 2004-2009 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt
import unittest
import logging
from . import parser_test_case
from pygccxml import utils
class Test(parser_test_case.parser_test_case_t):
mock_logger = logging.getLogger("Test")
def test_old_xml_generators(self):
"""
Tests for the xml_generators class.
This is for gccxml and for castxml using the gccxml xml file format
"""
self._test_impl("0.6", False, "is_gccxml_06")
self._test_impl("1.114", False, "is_gccxml_07")
self._test_impl("1.115", False, "is_gccxml_09_buggy")
self._test_impl("1.126", False, "is_gccxml_09_buggy")
self._test_impl("1.127", False, "is_gccxml_09")
self._test_impl("1.136", True, "is_castxml")
def test_castxml_epic_version_1(self):
"""
Test with the castxml epic version set to 1
"""
gen = utils.xml_generators(
self.mock_logger, castxml_format="1.1.0")
self.assertFalse(gen.is_gccxml)
self.assertTrue(gen.is_castxml)
self.assertTrue(gen.is_castxml1)
self.assertEqual(gen.xml_output_version, "1.1.0")
self.assertRaises(RuntimeError, lambda: utils.xml_generators(
self.mock_logger, "1.136", "1.1.0"))
self.assertRaises(RuntimeError, lambda: utils.xml_generators(
self.mock_logger, None, None))
def _test_impl(
self, gccxml_cvs_revision, is_castxml,
expected_gccxml_cvs_revision):
"""
Implementation detail for the test
Args:
gccxml_cvs_revision (str|None) : a known cvs revision
is_castxml (bool): check for castxml
expected_gccxml_cvs_revision (str): will be used to check if the
attribute is set to True.
"""
gen = utils.xml_generators(
self.mock_logger, gccxml_cvs_revision)
if is_castxml:
self.assertFalse(gen.is_gccxml)
self.assertTrue(gen.is_castxml)
else:
self.assertTrue(gen.is_gccxml)
self.assertFalse(gen.is_castxml)
self.assertTrue(getattr(gen, expected_gccxml_cvs_revision))
self.assertEqual(gen.xml_output_version, gccxml_cvs_revision)
def create_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(Test))
return suite
def run_suite():
unittest.TextTestRunner(verbosity=2).run(create_suite())
if __name__ == "__main__":
run_suite()
|
py | 1a30f4a6d80dec42a68c7b27633063e1e23f7f05 | # -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# File: transformer.py
import inspect
import numpy as np
import pprint
import sys
from abc import ABCMeta, abstractmethod
from fvcore.transforms.transform import (
BlendTransform,
CropTransform,
HFlipTransform,
NoOpTransform,
Transform,
TransformList,
VFlipTransform,
)
from PIL import Image
from .transform import ExtentTransform, ResizeTransform, RotationTransform
__all__ = [
"RandomApply",
"RandomBrightness",
"RandomContrast",
"RandomCrop",
"RandomExtent",
"RandomFlip",
"RandomSaturation",
"RandomLighting",
"RandomRotation",
"Resize",
"ResizeShortestEdge",
"TransformGen",
"apply_transform_gens",
]
def check_dtype(img):
assert isinstance(img, np.ndarray), "[TransformGen] Needs an numpy array, but got a {}!".format(
type(img)
)
assert not isinstance(img.dtype, np.integer) or (
img.dtype == np.uint8
), "[TransformGen] Got image of type {}, use uint8 or floating points instead!".format(
img.dtype
)
assert img.ndim in [2, 3], img.ndim
class TransformGen(metaclass=ABCMeta):
"""
TransformGen takes an image of type uint8 in range [0, 255], or
floating point in range [0, 1] or [0, 255] as input.
It creates a :class:`Transform` based on the given image, sometimes with randomness.
The transform can then be used to transform images
or other data (boxes, points, annotations, etc.) associated with it.
The assumption made in this class
is that the image itself is sufficient to instantiate a transform.
When this assumption is not true, you need to create the transforms by your own.
A list of `TransformGen` can be applied with :func:`apply_transform_gens`.
"""
def _init(self, params=None):
if params:
for k, v in params.items():
if k != "self" and not k.startswith("_"):
setattr(self, k, v)
@abstractmethod
def get_transform(self, img):
pass
def _rand_range(self, low=1.0, high=None, size=None):
"""
Uniform float random number between low and high.
"""
if high is None:
low, high = 0, low
if size is None:
size = []
return np.random.uniform(low, high, size)
def __repr__(self):
"""
Produce something like:
"MyTransformGen(field1={self.field1}, field2={self.field2})"
"""
try:
sig = inspect.signature(self.__init__)
classname = type(self).__name__
argstr = []
for name, param in sig.parameters.items():
assert (
param.kind != param.VAR_POSITIONAL and param.kind != param.VAR_KEYWORD
), "The default __repr__ doesn't support *args or **kwargs"
assert hasattr(self, name), (
"Attribute {} not found! "
"Default __repr__ only works if attributes match the constructor.".format(name)
)
attr = getattr(self, name)
default = param.default
if default is attr:
continue
argstr.append("{}={}".format(name, pprint.pformat(attr)))
return "{}({})".format(classname, ", ".join(argstr))
except AssertionError:
return super().__repr__()
__str__ = __repr__
class RandomApply(TransformGen):
"""
Randomly apply the wrapper transformation with a given probability.
"""
def __init__(self, transform, prob=0.5):
"""
Args:
transform (Transform, TransformGen): the transform to be wrapped
by the `RandomApply`. The `transform` can either be a
`Transform` or `TransformGen` instance.
prob (float): probability between 0.0 and 1.0 that
the wrapper transformation is applied
"""
super().__init__()
assert isinstance(transform, (Transform, TransformGen)), (
f"The given transform must either be a Transform or TransformGen instance. "
f"Not {type(transform)}"
)
assert 0.0 <= prob <= 1.0, f"Probability must be between 0.0 and 1.0 (given: {prob})"
self.prob = prob
self.transform = transform
def get_transform(self, img):
do = self._rand_range() < self.prob
if do:
if isinstance(self.transform, TransformGen):
return self.transform.get_transform(img)
else:
return self.transform
else:
return NoOpTransform()
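# Illustrative usage sketch (not part of the original module; assumes 'img'
# is an HxWxC uint8 numpy array):
#
#   aug = RandomApply(RandomContrast(0.8, 1.2), prob=0.3)
#   tfm = aug.get_transform(img)   # BlendTransform or NoOpTransform
#   img = tfm.apply_image(img)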
class RandomFlip(TransformGen):
"""
Flip the image horizontally or vertically with the given probability.
"""
def __init__(self, prob=0.5, *, horizontal=True, vertical=False):
"""
Args:
prob (float): probability of flip.
horizontal (boolean): whether to apply horizontal flipping
vertical (boolean): whether to apply vertical flipping
"""
super().__init__()
if horizontal and vertical:
raise ValueError("Cannot do both horiz and vert. Please use two Flip instead.")
if not horizontal and not vertical:
raise ValueError("At least one of horiz or vert has to be True!")
self._init(locals())
def get_transform(self, img):
h, w = img.shape[:2]
do = self._rand_range() < self.prob
if do:
if self.horizontal:
return HFlipTransform(w)
elif self.vertical:
return VFlipTransform(h)
else:
return NoOpTransform()
class Resize(TransformGen):
""" Resize image to a target size"""
def __init__(self, shape, interp=Image.BILINEAR):
"""
Args:
shape: (h, w) tuple or a int
interp: PIL interpolation method
"""
if isinstance(shape, int):
shape = (shape, shape)
shape = tuple(shape)
self._init(locals())
def get_transform(self, img):
return ResizeTransform(
img.shape[0], img.shape[1], self.shape[0], self.shape[1], self.interp
)
class ResizeShortestEdge(TransformGen):
"""
Scale the shorter edge to the given size, with a limit of `max_size` on the longer edge.
If `max_size` is reached, then downscale so that the longer edge does not exceed max_size.
"""
def __init__(
self, short_edge_length, max_size=sys.maxsize, sample_style="range", interp=Image.BILINEAR
):
"""
Args:
short_edge_length (list[int]): If ``sample_style=="range"``,
a [min, max] interval from which to sample the shortest edge length.
If ``sample_style=="choice"``, a list of shortest edge lengths to sample from.
max_size (int): maximum allowed longest edge length.
sample_style (str): either "range" or "choice".
"""
super().__init__()
assert sample_style in ["range", "choice"], sample_style
self.is_range = sample_style == "range"
if isinstance(short_edge_length, int):
short_edge_length = (short_edge_length, short_edge_length)
self._init(locals())
def get_transform(self, img):
h, w = img.shape[:2]
if self.is_range:
size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
else:
size = np.random.choice(self.short_edge_length)
if size == 0:
return NoOpTransform()
scale = size * 1.0 / min(h, w)
if h < w:
newh, neww = size, scale * w
else:
newh, neww = scale * h, size
if max(newh, neww) > self.max_size:
scale = self.max_size * 1.0 / max(newh, neww)
newh = newh * scale
neww = neww * scale
neww = int(neww + 0.5)
newh = int(newh + 0.5)
return ResizeTransform(h, w, newh, neww, self.interp)
class RandomRotation(TransformGen):
"""
This transform returns a copy of the image, rotated by the given
number of degrees counter-clockwise around the given center.
"""
def __init__(self, angle, expand=True, center=None, sample_style="range", interp=None):
"""
Args:
angle (list[float]): If ``sample_style=="range"``,
a [min, max] interval from which to sample the angle (in degrees).
If ``sample_style=="choice"``, a list of angles to sample from
expand (bool): choose if the image should be resized to fit the whole
rotated image (default), or simply cropped
center (list[[float, float]]): If ``sample_style=="range"``,
a [[minx, miny], [maxx, maxy]] relative interval from which to sample the center,
[0, 0] being the top left of the image and [1, 1] the bottom right.
If ``sample_style=="choice"``, a list of centers to sample from
Default: None, which means that the center of rotation is the center of the image
center has no effect if expand=True because it only affects shifting
"""
super().__init__()
assert sample_style in ["range", "choice"], sample_style
self.is_range = sample_style == "range"
if isinstance(angle, (float, int)):
angle = (angle, angle)
if center is not None and isinstance(center[0], (float, int)):
center = (center, center)
self._init(locals())
def get_transform(self, img):
h, w = img.shape[:2]
center = None
if self.is_range:
angle = np.random.uniform(self.angle[0], self.angle[1])
if self.center is not None:
center = (
np.random.uniform(self.center[0][0], self.center[1][0]),
np.random.uniform(self.center[0][1], self.center[1][1]),
)
else:
angle = np.random.choice(self.angle)
if self.center is not None:
center = np.random.choice(self.center)
if center is not None:
center = (w * center[0], h * center[1]) # Convert to absolute coordinates
return RotationTransform(h, w, angle, expand=self.expand, center=center, interp=self.interp)
class RandomCrop(TransformGen):
"""
Randomly crop a subimage out of an image.
"""
def __init__(self, crop_type: str, crop_size):
"""
Args:
crop_type (str): one of "relative_range", "relative", "absolute".
See `config/defaults.py` for explanation.
crop_size (tuple[float]): the relative ratio or absolute pixels of
height and width
"""
super().__init__()
assert crop_type in ["relative_range", "relative", "absolute"]
self._init(locals())
def get_transform(self, img):
h, w = img.shape[:2]
croph, cropw = self.get_crop_size((h, w))
assert h >= croph and w >= cropw, "Shape computation in {} has bugs.".format(self)
h0 = np.random.randint(h - croph + 1)
w0 = np.random.randint(w - cropw + 1)
return CropTransform(w0, h0, cropw, croph)
def get_crop_size(self, image_size):
"""
Args:
image_size (tuple): height, width
Returns:
crop_size (tuple): height, width in absolute pixels
"""
h, w = image_size
if self.crop_type == "relative":
ch, cw = self.crop_size
return int(h * ch + 0.5), int(w * cw + 0.5)
elif self.crop_type == "relative_range":
crop_size = np.asarray(self.crop_size, dtype=np.float32)
ch, cw = crop_size + np.random.rand(2) * (1 - crop_size)
return int(h * ch + 0.5), int(w * cw + 0.5)
elif self.crop_type == "absolute":
return (min(self.crop_size[0], h), min(self.crop_size[1], w))
else:
NotImplementedError("Unknown crop type {}".format(self.crop_type))
class RandomExtent(TransformGen):
"""
Outputs an image by cropping a random "subrect" of the source image.
The subrect can be parameterized to include pixels outside the source image,
in which case they will be set to zeros (i.e. black). The size of the output
image will vary with the size of the random subrect.
"""
def __init__(self, scale_range, shift_range):
"""
Args:
output_size (h, w): Dimensions of output image
scale_range (l, h): Range of input-to-output size scaling factor
shift_range (x, y): Range of shifts of the cropped subrect. The rect
is shifted by [w / 2 * Uniform(-x, x), h / 2 * Uniform(-y, y)],
where (w, h) is the (width, height) of the input image. Set each
component to zero to crop at the image's center.
"""
super().__init__()
self._init(locals())
def get_transform(self, img):
img_h, img_w = img.shape[:2]
# Initialize src_rect to fit the input image.
src_rect = np.array([-0.5 * img_w, -0.5 * img_h, 0.5 * img_w, 0.5 * img_h])
# Apply a random scaling to the src_rect.
src_rect *= np.random.uniform(self.scale_range[0], self.scale_range[1])
# Apply a random shift to the coordinates origin.
src_rect[0::2] += self.shift_range[0] * img_w * (np.random.rand() - 0.5)
src_rect[1::2] += self.shift_range[1] * img_h * (np.random.rand() - 0.5)
# Map src_rect coordinates into image coordinates (center at corner).
src_rect[0::2] += 0.5 * img_w
src_rect[1::2] += 0.5 * img_h
return ExtentTransform(
src_rect=(src_rect[0], src_rect[1], src_rect[2], src_rect[3]),
output_size=(int(src_rect[3] - src_rect[1]), int(src_rect[2] - src_rect[0])),
)
class RandomContrast(TransformGen):
"""
Randomly transforms image contrast.
Contrast intensity is uniformly sampled in (intensity_min, intensity_max).
- intensity < 1 will reduce contrast
- intensity = 1 will preserve the input image
- intensity > 1 will increase contrast
See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html
"""
def __init__(self, intensity_min, intensity_max):
"""
Args:
intensity_min (float): Minimum augmentation
intensity_max (float): Maximum augmentation
"""
super().__init__()
self._init(locals())
def get_transform(self, img):
w = np.random.uniform(self.intensity_min, self.intensity_max)
return BlendTransform(src_image=img.mean(), src_weight=1 - w, dst_weight=w)
class RandomBrightness(TransformGen):
"""
Randomly transforms image brightness.
Brightness intensity is uniformly sampled in (intensity_min, intensity_max).
- intensity < 1 will reduce brightness
- intensity = 1 will preserve the input image
- intensity > 1 will increase brightness
See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html
"""
def __init__(self, intensity_min, intensity_max):
"""
Args:
intensity_min (float): Minimum augmentation
intensity_max (float): Maximum augmentation
"""
super().__init__()
self._init(locals())
def get_transform(self, img):
w = np.random.uniform(self.intensity_min, self.intensity_max)
return BlendTransform(src_image=0, src_weight=1 - w, dst_weight=w)
class RandomSaturation(TransformGen):
"""
Randomly transforms image saturation.
Saturation intensity is uniformly sampled in (intensity_min, intensity_max).
- intensity < 1 will reduce saturation (make the image more grayscale)
- intensity = 1 will preserve the input image
- intensity > 1 will increase saturation
See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html
"""
def __init__(self, intensity_min, intensity_max):
"""
Args:
intensity_min (float): Minimum augmentation (1 preserves input).
intensity_max (float): Maximum augmentation (1 preserves input).
"""
super().__init__()
self._init(locals())
def get_transform(self, img):
assert img.shape[-1] == 3, "Saturation only works on RGB images"
w = np.random.uniform(self.intensity_min, self.intensity_max)
grayscale = img.dot([0.299, 0.587, 0.114])[:, :, np.newaxis]
return BlendTransform(src_image=grayscale, src_weight=1 - w, dst_weight=w)
class RandomLighting(TransformGen):
"""
Randomly transforms image color using fixed PCA over ImageNet.
The degree of color jittering is randomly sampled via a normal distribution,
with standard deviation given by the scale parameter.
"""
def __init__(self, scale):
"""
Args:
scale (float): Standard deviation of principal component weighting.
"""
super().__init__()
self._init(locals())
self.eigen_vecs = np.array(
[[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140], [-0.5836, -0.6948, 0.4203]]
)
self.eigen_vals = np.array([0.2175, 0.0188, 0.0045])
def get_transform(self, img):
assert img.shape[-1] == 3, "Saturation only works on RGB images"
weights = np.random.normal(scale=self.scale, size=3)
return BlendTransform(
src_image=self.eigen_vecs.dot(weights * self.eigen_vals), src_weight=1.0, dst_weight=1.0
)
def apply_transform_gens(transform_gens, img):
"""
Apply a list of :class:`TransformGen` on the input image, and
returns the transformed image and a list of transforms.
We cannot simply create and return all transforms without
applying them to the image, because a subsequent transform may
need the output of the previous one.
Args:
transform_gens (list): list of :class:`TransformGen` instance to
be applied.
img (ndarray): uint8 or floating point images with 1 or 3 channels.
Returns:
ndarray: the transformed image
TransformList: contain the transforms that's used.
"""
for g in transform_gens:
assert isinstance(g, TransformGen), g
check_dtype(img)
tfms = []
for g in transform_gens:
tfm = g.get_transform(img)
assert isinstance(
tfm, Transform
), "TransformGen {} must return an instance of Transform! Got {} instead".format(g, tfm)
img = tfm.apply_image(img)
tfms.append(tfm)
return img, TransformList(tfms)
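# Illustrative usage sketch (not part of the original module; assumes 'img'
# is an HxWxC uint8 numpy array):
#
#   gens = [ResizeShortestEdge((640, 800), max_size=1333),
#           RandomFlip(prob=0.5)]
#   img, tfms = apply_transform_gens(gens, img)
#   # 'tfms' can then be applied to the associated data, e.g. boxes.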
|
py | 1a30f55afc55ffd9d76cc7cffb4a3b2c47ee0a06 | def independence():
# Simply check that extensions can be imported. This is run in a test
# flagged as "local" since we want extensions to be possible to import in
# standalone unit tests.
#
# Nothing in extensions can actually be used, of course, but that's not a
# problem; the unit tests simply need to make sure not to depend on that.
import extensions
print "independence: ok"
|
py | 1a30f7e940e9bfb29cf40041df665831c84cc02d | from custom_src.NodeInstance import NodeInstance
from custom_src.Node import Node
# USEFUL
# self.input(index) <- access to input data
# self.outputs[index].set_val(val) <- set output data port value
# self.main_widget <- access to main widget
# self.exec_output(index) <- executes an execution output
# self.create_new_input(type_, label, append=True, widget_type='', widget_name='', widget_pos='under')
# self.delete_input(input or index)
# self.create_new_output(type_, label, append=True)
# self.delete_output(output or index)
# self.update_shape()
class %NODE_TITLE%_NodeInstance(NodeInstance):
def __init__(self, parent_node: Node, flow, configuration=None):
super(%NODE_TITLE%_NodeInstance, self).__init__(parent_node, flow, configuration)
self.initialized()
def update_event(self, input_called=-1):
if input_called == 0:
while self.input(1):
self.exec_output(0)
def get_data(self):
data = {}
# ...
return data
def set_data(self, data):
pass
# ...
# optional - important for threading - stop everything here
def removing(self):
pass
|
py | 1a30fa90bed29cfd4968dd1a9d15b59b9ffad0ab | # pylint: disable=C0302
"""
@file
@brief Implements a class able to compute the predictions
from on an :epkg:`ONNX` model.
"""
from collections import OrderedDict
from io import BytesIO
from time import perf_counter
import warnings
import textwrap
import pprint
import numpy
from scipy.sparse import coo_matrix
from onnx import load, load_model, checker, shape_inference
from onnx import onnx_pb as onnx_proto
from onnx.helper import make_model
from ..tools.code_helper import make_callable, print_code
from ..onnx_tools.onnx2py_helper import (
_var_as_dict, numpy_min, numpy_max, guess_numpy_type_from_string)
from ..onnx_tools.onnx_manipulations import (
select_model_inputs_outputs, enumerate_model_node_outputs,
overwrite_opset, insert_results_into_onnx)
from ..onnx_tools.optim import onnx_remove_node_unused
from .onnx_inference_node import OnnxInferenceNode
from .onnx_inference_exports import OnnxInferenceExport
from .shape_object import ShapeObject
from .type_object import SequenceType
class OnnxInference:
"""
Loads an :epkg:`ONNX` file or object or stream.
Computes the output of the :epkg:`ONNX` graph.
Several runtimes are available.
* ``'python'``: the runtime implements every onnx operator
needed to run a :epkg:`scikit-learn` model by using :epkg:`numpy`
or C++ code.
* ``'python_compiled'``: it is the same runtime as the previous
one except every operator is called from a compiled function
(@see me _build_compile_run) instead of a method going through
the list of operators
* ``'onnxruntime1'``: uses :epkg:`onnxruntime`
* ``'onnxruntime2'``: this mode is mostly used for debugging:
python handles calling every operator but :epkg:`onnxruntime`
is called for every one of them; this process may fail due to
wrong inference types, especially if the graph includes
custom nodes. In that case, it is better to compute the output
of intermediate nodes. It is much slower because, for every output,
every node is computed, but it is more robust.
:param onnx_or_bytes_or_stream: :epkg:`onnx` object,
bytes, or filename or stream
:param runtime: runtime options
:param skip_run: do not build the runtime
:param inplace: use inplace computation as much as possible
:param input_inplace: the computation is allowed
to overwrite the input, see :meth:`_guess_inplace
<mlprodict.onnxrt.onnx_inference.OnnxInference._guess_inplace>`
:param ir_version: if not None, overwrite the default version
:param target_opset: used to overwrite *target_opset*
:param runtime_options: specific options for the runtime
:param inside_loop: tells the runtime the graph is meant to
be repeated multiple times (in that case, inputs and
outputs may share the same name)
:param static_inputs: Loop can use static variables,
variables from the graph which runs the loop
(enumerate of strings)
:param new_outputs: if the loading fails, it might be worth
cutting the graph; if not None, the graph will
be cut to have these new_outputs as the final outputs
:param new_opset: overwrite the main opset and replaces
by this new one
:param device: device, a string `cpu`, `cuda`, `cuda:0`...,
this option is only available with runtime *onnxruntime1*
Among the possible runtime_options, there are:
* *enable_profiling*: enables profiling for :epkg:`onnxruntime`
* *session_options*: an instance of *SessionOptions* from
:epkg:`onnxruntime`
* *ir_version*: change ir_version
.. versionchanged:: 0.7
Parameters *new_outputs*, *new_opset* were added.
.. versionchanged:: 0.8
Parameters *static_inputs*, *device* were added.
"""
def __init__(self, onnx_or_bytes_or_stream, runtime=None,
skip_run=False, inplace=True,
input_inplace=False, ir_version=None,
target_opset=None, runtime_options=None,
session_options=None, inside_loop=False,
static_inputs=None, new_outputs=None, new_opset=None,
device=None):
if isinstance(onnx_or_bytes_or_stream, bytes):
self.obj = load_model(BytesIO(onnx_or_bytes_or_stream))
elif isinstance(onnx_or_bytes_or_stream, BytesIO):
self.obj = load_model(onnx_or_bytes_or_stream)
elif isinstance(onnx_or_bytes_or_stream, str):
self.obj = load(onnx_or_bytes_or_stream)
elif hasattr(onnx_or_bytes_or_stream, 'graph'):
self.obj = onnx_or_bytes_or_stream
elif isinstance(onnx_or_bytes_or_stream, onnx_proto.GraphProto):
self.obj = make_model(onnx_or_bytes_or_stream,
producer_name='mlprodict')
else:
raise TypeError("Unable to handle type {}.".format( # pragma: no cover
type(onnx_or_bytes_or_stream)))
if ir_version is not None:
self.obj.ir_version = ir_version
if new_outputs is not None:
self.obj = select_model_inputs_outputs(
self.obj, outputs=new_outputs, infer_shapes=True)
if new_opset is not None:
self.obj = overwrite_opset(self.obj, new_opset)
if device is not None and runtime != 'onnxruntime1':
raise ValueError(
"Incompatible values, device can be specified with "
"runtime 'onnxruntime1', not %r." % runtime)
self.runtime = runtime
self.skip_run = skip_run
self.input_inplace = input_inplace
self.inplace = inplace
self.force_target_opset = target_opset
self.runtime_options = runtime_options
self.inside_loop = inside_loop
self.static_inputs = static_inputs
self.device = device
self._init()
def __getstate__(self):
"""
To pickle the object.
"""
return {'onnx': self.obj.SerializeToString(),
'runtime': self.runtime,
'runtime_options': self.runtime_options,
'skip_run': self.skip_run,
'input_inplace': self.input_inplace,
'inplace': self.inplace,
'force_target_opset': self.force_target_opset,
'static_inputs': self.static_inputs,
'inside_loop': self.inside_loop,
'device': self.device}
def __setstate__(self, state):
"""
To unpickle the object.
"""
onx = state['onnx']
self.obj = load_model(BytesIO(onx))
self.runtime = state['runtime']
self.runtime_options = state['runtime_options']
self.skip_run = state['skip_run']
self.input_inplace = state['input_inplace']
self.inplace = state['inplace']
self.force_target_opset = state['force_target_opset']
self.static_inputs = state['static_inputs']
self.inside_loop = state['inside_loop']
self.device = state['device']
self._init()
def _init(self):
"""
Prepares the instance to deliver predictions.
"""
self.graph_ = self.to_sequence()
if len(self.graph_['sequence']) == 0:
raise RuntimeError( # pragma: no cover
"No runnable nodes was found in the ONNX graph.")
self.outputs_ = self.graph_['outputs']
self.inputs_ = self.graph_['inputs']
for ino in [self.obj.graph.input, self.obj.graph.output]:
for xy in ino:
shape = xy.type.tensor_type.shape
for d in shape.dim:
if d.dim_value == 0 and "0" in str(d) and 'dim_param' not in str(d):
# d.dim_value returns 0 whether it is 0 or empty.
# it may be a parameter as well
raise RuntimeError( # pragma: no cover
"Wrong ONNX file, one input or output has an empty shape: "
"{}.".format(xy))
self.target_opset_ = self.graph_['targets']
if self.force_target_opset is not None:
if isinstance(self.force_target_opset, dict):
self.target_opset_ = self.force_target_opset # pragma: no cover
else:
self.target_opset_ = {'': self.force_target_opset}
self.ir_version_ = self.graph_['ir_version']
if not self.skip_run:
if self.runtime == 'onnxruntime1':
# Loads the onnx with onnxruntime as a single file.
del self.graph_
from .ops_whole.session import OnnxWholeSession
self._whole = OnnxWholeSession(
self.obj, self.runtime, self.runtime_options,
self.device)
self._run = self._run_whole_runtime
else:
self.sequence_ = self.graph_['sequence']
self.inits_ = self.graph_['inits']
self.statics_ = self.graph_['statics']
dtype = self._guess_input_dtype()
variables = self.inits_.copy()
for node in self.sequence_:
domain = node.onnx_node.domain
target_opset = self.target_opset_.get(domain, None)
if self.runtime in ('onnxruntime2', 'empty'):
node.setup_runtime(self.runtime, variables, self.__class__,
target_opset=target_opset, dtype=dtype,
domain=domain, ir_version=self.ir_version_,
runtime_options=self.runtime_options)
else:
node.setup_runtime(self.runtime, variables, self.__class__,
target_opset=target_opset, domain=domain,
ir_version=self.ir_version_,
runtime_options=self.runtime_options)
if hasattr(node, 'ops_') and hasattr(node.ops_, 'typed_outputs_'):
for k, v in node.ops_.typed_outputs_:
variables[k] = v
self._run = self._run_sequence_runtime
if not self.skip_run and self.runtime in ('python', None):
self.shapes_ = self._set_shape_inference_runtime()
if self.inplace:
self.inplaces_ = self._guess_inplace(self.input_inplace)
self.exporters_ = OnnxInferenceExport(self)
self.to_json = self.exporters_.to_json
self.to_dot = self.exporters_.to_dot
self.to_python = self.exporters_.to_python
self.to_text = self.exporters_.to_text
self.to_onnx_code = self.exporters_.to_onnx_code
if self.runtime in ('python_compiled', 'python_compiled_debug'):
# switch the inference method to the compiled one
_, fct, code = self._build_compile_run('debug' in self.runtime)
setattr(self, '_run_compiled', fct)
setattr(self, '_run_compiled_code', code)
self._run = self._run_sequence_runtime_compiled
def _run_sequence_runtime_compiled(
self, inputs, clean_right_away=False, intermediate=False,
verbose=0, node_time=False, yield_ops=None, fLOG=None):
"""
Executes a compiled version of @see me _run_sequence_runtime,
compiled with method @see me _build_compile_run.
Every parameter with a default value is ignored.
Switch to ``runtime='python'`` to enable those.
"""
try:
return self._run_compiled( # pylint: disable=E1101
inputs, yield_ops=yield_ops)
except NameError as e:
raise RuntimeError( # pragma: no cover
"Unable to compute prediction due to %r. Code:\n%s"
"" % (e, print_code(
self._run_compiled_code))) from e # pylint: disable=E1101
def _guess_input_dtype(self):
for _, v in self.graph_['inputs'].items():
if 'type' not in v:
continue # pragma: no cover
t = v['type']
if 'elem' not in t:
continue
if t['elem'] == 'double':
return numpy.float64
return numpy.float32
def __str__(self):
"""
usual
"""
rows = ['OnnxInference(...)']
if hasattr(self, '_run_compiled_code'):
rows.append(
textwrap.indent(
self._run_compiled_code, ' ')) # pylint: disable=E1101
else:
rows.append(textwrap.indent(str(self.obj), ' '))
return "\n".join(rows)
def __repr__(self):
"""
usual
"""
return "OnnxInference(...)" # pragma: no cover
def check_model(self):
"""
Checks the model follow :epkg:`ONNX` conventions.
"""
checker.check_model(self.obj)
def shape_inference(self):
"""
Infers the shape of the outputs
with :epkg:`onnx` package.
@return A new :epkg:`ONNX` graph which defined outputs.
"""
return shape_inference.infer_shapes(self.obj)
@property
def input_names(self):
"""
Returns the names of all inputs.
It does not include the optional inputs.
.. versionchanged:: 0.6
The list does not include optional inputs anymore.
"""
inits = set(_.name for _ in self.obj.graph.initializer)
return [_.name for _ in self.obj.graph.input if _.name not in inits]
@property
def input_names_shapes(self):
"""
Returns the names and shapes of all inputs.
This method assumes all inputs are tensors.
It does not include the optional inputs.
.. versionchanged:: 0.6
The list does not include optional inputs anymore.
"""
names = set(self.input_names)
return [(_.name, _var_as_dict(_)['type']['shape'])
for _ in self.obj.graph.input if _.name in names]
@staticmethod
def _get_type_property(info, prop):
if prop in info:
return info[prop]
if 'kind' in info and info['kind'] == 'sequence':
if prop == 'shape':
return ('?', )
raise NotImplementedError(
"Unable to retrieve property %r from %r."
"" % (prop, info))
@property
def input_names_shapes_types(self):
"""
Returns the names, shapes, types of all inputs.
This method assumes all inputs are tensors.
It does not include the optional inputs.
.. versionchanged:: 0.6
The list does not include optional inputs anymore.
"""
f = OnnxInference._get_type_property
names = set(self.input_names)
return [(_.name, f(_var_as_dict(_)['type'], 'shape'),
'tensor(%s)' % f(_var_as_dict(_)['type'], 'elem'))
for _ in self.obj.graph.input if _.name in names]
@property
def output_names(self):
"""
Returns the names of all outputs.
"""
return [_.name for _ in self.obj.graph.output]
@property
def output_names_shapes(self):
"""
Returns the names and shapes of all outputs.
This method assumes all inputs are tensors.
"""
f = OnnxInference._get_type_property
return [(_.name, f(_var_as_dict(_)['type'], 'shape'))
for _ in self.obj.graph.output]
@property
def output_names_shapes_types(self):
"""
Returns the names, shapes, types of all outputs.
This method assumes all inputs are tensors.
It does not include the optional outputs.
.. versionadded:: 0.7
"""
names = set(self.output_names)
f = OnnxInference._get_type_property
return [(_.name, f(_var_as_dict(_)['type'], 'shape'),
'tensor(%s)' % f(_var_as_dict(_)['type'], 'elem'))
for _ in self.obj.graph.output if _.name in names]
def global_index(self, name):
"""
Maps every name to one integer to avoid using dictionaries
when running the predictions.
@param name outputs name
@return integer
"""
if not hasattr(self, '_global_index'):
self._global_index = {}
if name in self._global_index:
return self._global_index[name]
self._global_index[name] = len(self._global_index)
return self._global_index[name]
def to_sequence(self):
"""
Produces a graph to facilitate the execution.
One example:
.. exref::
:title: Convert ONNX into graph
An example on how to convert an :epkg:`ONNX`
graph into a graph.
.. runpython::
:showcode:
:warningout: DeprecationWarning
import pprint
import numpy
from skl2onnx.algebra.onnx_ops import OnnxLinearRegressor
from skl2onnx.common.data_types import FloatTensorType
from mlprodict.onnxrt import OnnxInference
pars = dict(coefficients=numpy.array([1., 2.]),
intercepts=numpy.array([1.]),
post_transform='NONE')
onx = OnnxLinearRegressor('X', output_names=['Y'], **pars)
model_def = onx.to_onnx({'X': pars['coefficients'].astype(numpy.float32)},
outputs=[('Y', FloatTensorType([1]))],
target_opset=12)
oinf = OnnxInference(model_def)
pprint.pprint(oinf.to_sequence())
See an example of representation in notebook
:ref:`onnxvisualizationrst`.
"""
inits = {}
variables = {}
outputs = {}
nodes = {}
statics = {}
targets = {}
for o in self.obj.opset_import:
targets[o.domain] = o.version
# static variables
if self.static_inputs is not None:
for n in self.static_inputs:
statics[n] = {'name': n}
self.global_index(n)
# inputs
for obj in self.obj.graph.input:
variables[obj.name] = _var_as_dict(obj)
self.global_index(obj.name)
# outputs
for obj in self.obj.graph.output:
if hasattr(obj, 'type') and str(obj.type) != '':
outputs[obj.name] = _var_as_dict(obj)
else:
outputs[obj.name] = {'name': obj.name}
self.global_index(obj.name)
# initializer
for obj in self.obj.graph.initializer:
init_obj = _var_as_dict(obj)
if init_obj is None:
raise RuntimeError( # pragma: no cover
"Unable to convert an initializer\n{}".format(obj))
inits[obj.name] = init_obj
self.global_index(obj.name)
if 'value' not in inits[obj.name]:
raise RuntimeError( # pragma: no cover
"One initializer has no value: '{}'\n{}\n{}".format(
obj.name, inits[obj.name], obj))
# nodes
for node in self.obj.graph.node:
dobj = _var_as_dict(node)
if dobj is None:
raise RuntimeError( # pragma: no cover
"Unable to convert a node\n{}".format(node))
if 'atts' in dobj:
atts = dobj['atts']
for k, v in atts.items():
if not isinstance(v, dict) or 'value' not in v:
raise RuntimeError( # pragma: no cover
"A parameter has no (sparse) value '{}' "
"for node '{}'\nv={}\ndobj=[{}]".format(
k, node.name, v, node))
if node.name in nodes: # pragma: no cover
i = 2
while True:
new_name = "%s_n%i" % (node.name, i)
if new_name not in nodes:
break
i += 1
else:
new_name = node.name
nodes[new_name] = OnnxInferenceNode(node, dobj, self.global_index)
# names
names = {}
for k, v in statics.items():
if (k, 0) in names:
raise RuntimeError( # pragma: no cover
"Static variables '{}' already exists (tag='{}').".format(
k, names[k, 0][0]))
names[k, 0] = ('S', v)
for k, v in inits.items():
if (k, 0) in names:
raise RuntimeError( # pragma: no cover
"Initializer '{}' already exists (tag='{}').".format(
k, names[k, 0][0]))
names[k, 0] = ('C', v)
for k, v in variables.items():
if (k, 0) in names:
if k in inits:
# Kind of default value for an input
continue
raise RuntimeError( # pragma: no cover
"Variable '{}' already exists (tag='{}').".format(
k, names[k, 0][0]))
names[k, 0] = ('I', v)
for k, v in outputs.items():
if (k, 0) in names and self.runtime != 'empty':
if not self.inside_loop or names[k, 0][0] != 'I':
raise RuntimeError( # pragma: no cover
"Output '{}' already exists (tag='{}').".format(
k, names[k, 0][0]))
else:
# For input, output sharing the same name, we marked the name
# as an input.
continue
names[k, 0] = ('O', v)
for k, v in nodes.items():
if (k, 1) in names:
raise RuntimeError( # pragma: no cover
"Node '{}' already exists (tag='{}'). "
"Use inside_loop=True to bypass this exception.".format(
k, names[k, 0][0]))
names[k, 1] = ('N', v)
# ordering
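# Fixed-point iteration: keep sweeping over all names, marking a data node
# as ready once it is an input, initializer or static variable, and
# scheduling an operator node once all of its inputs are ready; its outputs
# then become ready in turn. The loop stops when a sweep adds nothing new.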
order = {}
modif = 1
intermediate = {}
while modif > 0:
modif = 0
for (k, _), v in names.items():
if (k, 1) in order:
# The operator node is already processed.
continue
if v[0] in {'I', 'C', 'S'}:
if (k, 0) not in order:
order[k, 0] = len(order) # A data node.
modif += 1
continue
if v[0] == 'O':
continue
if all((inp, 0) in order for inp in v[1].inputs):
# If all inputs are available,
# We tell the operator node is processed.
order[k, 1] = len(order)
modif += 1
for o in v[1].outputs:
if (o, 0) in order:
raise RuntimeError( # pragma: no cover
"Two nodes share the same output '{}' "
"or an operator and an output "
"share the same name. "
"(node: {}).".format(o, v[1]))
# We add a data node.
order[o, 0] = len(order)
intermediate[o] = None
modif += 1
# compute
rev = [(v, k[0], k[1]) for k, v in order.items()]
rev.sort()
sequence = []
for _, name, node_kind in rev:
if name not in nodes:
continue
if node_kind == 0:
# It is an output which shares the same name
# as a node.
continue
node = nodes[name]
node.set_order(len(sequence))
sequence.append(node)
if len(sequence) == 0:
raise RuntimeError( # pragma: no cover
"No runnable nodes was found in the ONNX graph"
"\n--rev--\n{}"
"\n--order--\n{}"
"\n--nodes--\n{}"
"\n---".format(
"\n".join([str(_) for _ in names.items()]),
"\n".join([str(_) for _ in order.items()]),
"\n".join([str(_) for _ in nodes.items()])))
        # defines where an intermediate output is no longer needed
last_used = {}
for node in sequence:
for inp in node.inputs:
last_used[inp] = node.order
for k, ord in last_used.items():
sequence[ord].add_variable_to_clean(k)
results = dict(inits=inits, inputs=variables, outputs=outputs,
nodes=nodes, sequence=sequence,
intermediate=intermediate,
targets=targets, ir_version=self.obj.ir_version,
statics=statics)
if len(sequence) < len(nodes):
            # Not all nodes will be executed.
raise RuntimeError( # pragma: no cover
"Unable to run all nodes.\n--Nodes--\n%s\n--Sequence--\n%s"
"\n--Inputs--\n%s\n--Inits--\n%s\n--Statics\n%s"
"" % (pprint.pformat(nodes), pprint.pformat(sequence),
pprint.pformat(list(variables)),
pprint.pformat(list(inits)),
pprint.pformat(list(statics))))
return results
def run(self, inputs, clean_right_away=False,
intermediate=False, verbose=0, node_time=False,
overwrite_types=None, yield_ops=None, fLOG=None):
"""
Computes the predictions for this :epkg:`onnx` graph.
:param inputs: inputs as dictionary or a dataframe
:param clean_right_away: clean the intermediate outputs
as soon as they are not needed
:param intermediate: returns a dictionary of intermediate
variables instead of the results only
:param verbose: display information while predicting
:param node_time: measure time of each node
:param overwrite_types: shape inference does not work all the time,
            this allows forcing types when building intermediate
results, see @see fn select_model_inputs_outputs
:param yield_ops: dictionary to overwrite the output of
operator *YieldOp*
:param fLOG: logging function if *verbose > 0*
:return: outputs as dictionary
and a second dictionary of the time spent
in each node if *node_time* is True
.. exref::
:title: Computes predictions with any runtime
The following example compares predictions
between :epkg:`scikit-learn` and this runtime
for the python runtime.
.. runpython::
:showcode:
:warningout: DeprecationWarning
import numpy
from sklearn.linear_model import LinearRegression
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from mlprodict.onnxrt import OnnxInference
from mlprodict.onnx_conv import to_onnx
iris = load_iris()
X, y = iris.data, iris.target
X_train, X_test, y_train, _ = train_test_split(X, y)
clr = LinearRegression()
clr.fit(X_train, y_train)
exp = clr.predict(X_test[:5])
print(exp)
model_def = to_onnx(clr, X_train.astype(numpy.float32),
target_opset=12)
oinf = OnnxInference(model_def)
y = oinf.run({'X': X_test[:5]})
print(y)
        The function returns all intermediate outputs
        if *intermediate* is True. In case of runtime
        *onnxruntime1*, if *intermediate* is True,
        the class builds, for every intermediate output,
        an :epkg:`ONNX` graph cut at that output and
        converted into its own *OnnxInference*.
.. versionchanged:: 0.8
Parameter *yield_ops* was added.
"""
def retype(col_array):
if (hasattr(col_array, 'categories') and
hasattr(col_array, 'from_codes')):
# isinstance(col_array, pandas.Categorical):
return col_array.astype(numpy.int64)
return col_array
if hasattr(inputs, 'columns') and hasattr(inputs, 'iloc'):
# == isinstance(inputs, pandas.DataFrame)
inputs = OrderedDict((
name, retype(numpy.expand_dims(inputs[name].values, axis=1)))
for name in inputs.columns)
if intermediate:
if self.inplace:
raise RuntimeError( # pragma: no cover
"inplace must be False if intermediate is True, a container "
"might be used by several nodes.")
return self._run(inputs, clean_right_away=False,
intermediate=intermediate,
verbose=verbose, node_time=node_time,
overwrite_types=overwrite_types,
yield_ops=yield_ops, fLOG=fLOG)
if overwrite_types is not None:
raise RuntimeError( # pragma: no cover
"overwrite_types is not used if intermediate is False.")
return self._run(inputs, clean_right_away=False,
intermediate=intermediate,
verbose=verbose, node_time=node_time,
yield_ops=yield_ops, fLOG=fLOG)
def run2onnx(self, inputs, verbose=0, fLOG=None,
as_parameter=True, suffix='_DBG',
param_name=None, node_type='DEBUG',
domain='DEBUG', domain_opset=1):
"""
Executes the graphs with the given inputs, then adds the intermediate
results into ONNX nodes in the original graph. Once saved, it can be
looked with a tool such as :epkg:`netron`.
:param inputs: inputs as dictionary or a dataframe
:param verbose: display information while predicting
:param fLOG: logging function if *verbose > 0*
:param as_parameter: add new nodes with results as one parameter
(True) or as initializer (False)
:param suffix: suffix to add to new results
:param param_name: name of the parameter to add
(by default the result name), it can be a function
            `param_name(result_name) -> parameter_name`
:param node_type: type of the new node
:param domain: domain the new node
:param domain_opset: opset for *domain*
:return: outputs as dictionary
and the onnx graph with new nodes
The following example shows how to use it.
.. gdot::
:script: DOT-SECTION
from sklearn.linear_model import LinearRegression
from sklearn.datasets import load_iris
from mlprodict.onnxrt import OnnxInference
import numpy
iris = load_iris()
X = iris.data[:, :2]
y = iris.target
lr = LinearRegression()
lr.fit(X, y)
from mlprodict.onnx_conv import to_onnx
model_onnx = to_onnx(lr, X.astype(numpy.float32))
oinf = OnnxInference(model_onnx, inplace=False)
model_onnx_debug = oinf.run2onnx({'X': X[:3].astype(numpy.float32)})
oinf_debug = OnnxInference(model_onnx_debug[1])
print("DOT-SECTION", oinf_debug.to_dot())
.. versionadded:: 0.7
"""
intermediate = self.run(inputs, verbose=verbose, fLOG=fLOG,
intermediate=True)
for name in self.input_names:
del intermediate[name]
new_onx = insert_results_into_onnx(
self.obj, intermediate, as_parameter=as_parameter,
suffix=suffix, param_name=param_name, node_type=node_type,
domain=domain, domain_opset=domain_opset)
return intermediate, new_onx
def display_sequence(self, verbose=1):
"""
Shows the sequence of nodes to run if ``runtime=='python'``.
"""
rows = []
rows.append("#node: {}".format(len(self.sequence_)))
for i, node in enumerate(self.sequence_):
if verbose >= 1:
rows.append("{}: {}".format(i, str(node)))
return "\n".join(rows)
def _run_sequence_runtime(self, inputs, clean_right_away=False,
intermediate=False, verbose=0, node_time=False,
overwrite_types=None, yield_ops=None,
fLOG=None):
if overwrite_types is not None:
raise NotImplementedError( # pragma: no cover
"overwrite_types != None not implemented.")
if clean_right_away:
raise NotImplementedError( # pragma: no cover
"clean_right_away=true not implemented.")
if node_time:
mtime = []
if verbose >= 1 and fLOG is not None:
printed = set()
if hasattr(self, "_values_init"):
values = self._values_init.copy() # pylint: disable=E0203
else:
values = [None] * len(self._global_index)
if verbose >= 1 and fLOG is not None:
for k, v in self.inits_.items():
values[self._global_index[k]] = v['value']
if verbose < 3:
fLOG("+ki='{}': {} (dtype={} min={} max={})".format(
k, v['value'].shape, v['value'].dtype,
numpy_min(v['value']), numpy_max(v['value'])))
else:
fLOG("+ki='{}': {} (dtype={} min={} max={}\n{}".format(
k, v['value'].shape, v['value'].dtype,
numpy_min(v['value']), numpy_max(v['value']),
v['value']))
printed.add(k)
else:
for k, v in self.inits_.items():
values[self._global_index[k]] = v['value']
        # stores the array to skip initializing a second time
if verbose == 0 or fLOG is None:
self._values_init = values.copy()
for name, value in inputs.items():
values[self._global_index[name]] = value
if verbose == 0 or fLOG is None:
if node_time:
for i, node in enumerate(self.sequence_):
if yield_ops is not None and node.onnx_node.op_type == 'YieldOp':
out = node.onnx_node.output[0]
if out in yield_ops:
                            values[node.outputs_indices[0]] = yield_ops[out]
continue
raise RuntimeError( # pragma: no cover
"YieldOp output %r could not be found in "
"yield_ops: %r (node=%r)." % (
out, list(sorted(yield_ops)), node.onnx_node))
t = perf_counter()
node.run(values)
t2 = perf_counter()
mtime.append(dict(i=i, name=node.onnx_node.name,
op_type=node.onnx_node.op_type,
time=t2 - t))
else:
for node in self.sequence_:
node.run(values)
else:
def dispsimple(arr):
if hasattr(arr, 'shape'):
if len(arr.shape) <= 1:
threshold = 8
else:
threshold = min(
50, min(50 // max(arr.shape[1], 1), 8) * arr.shape[1])
if hasattr(arr, 'todense'):
fLOG( # pragma: no cover
numpy.array2string(arr.todense(), max_line_width=120,
suppress_small=True, threshold=threshold))
else:
fLOG(numpy.array2string(arr, max_line_width=120,
suppress_small=True,
threshold=threshold))
else: # pragma: no cover
s = str(arr)
if len(s) > 50:
s = s[:50] + "..."
fLOG(s)
if verbose >= 2:
for k in sorted(self._global_index):
if values[self._global_index[k]] is None:
continue
obj = values[self._global_index[k]]
if k not in printed:
printed.add(k)
if hasattr(obj, 'shape'):
fLOG("-kv='{}' shape={} dtype={} min={} max={}{}".format(
k, obj.shape, obj.dtype, numpy_min(obj),
numpy_max(obj),
' (sparse)' if isinstance(obj, coo_matrix) else ''))
elif (isinstance(obj, list) and len(obj) > 0 and
not isinstance(obj[0], dict)): # pragma: no cover
fLOG("-kv='{}' list len={}".format(k, len(obj)))
if verbose >= 3 and len(obj) > 0:
fLOG("first={} last={}".format(
obj[0], obj[-1]))
else: # pragma: no cover
fLOG("-kv='{}' type={}".format(k, type(obj)))
keys = set(k for k in range(len(values)) if values[k] is not None)
if verbose >= 1:
fLOG("-- OnnxInference: run {} nodes".format(len(self.sequence_)))
for i, node in enumerate(self.sequence_):
if verbose >= 1:
fLOG(node)
if yield_ops is not None and node.onnx_node.op_type == 'YieldOp':
out = node.onnx_node.output[0]
if out in yield_ops:
fLOG("+yo=%r" % out)
values[node.outputs_indices[0]] = yield_ops[out]
else:
raise RuntimeError( # pragma: no cover
"YieldOp output %r could not be found in "
"yield_ops: %r (node=%r)." % (
out, list(sorted(yield_ops)), node.onnx_node))
elif node_time:
t = perf_counter()
node.run(values)
t2 = perf_counter()
mtime.append(dict(i=i, name=node.onnx_node.name,
op_type=node.onnx_node.op_type,
time=t2 - t))
else:
node.run(values)
added = 0
for k in range(len(values)): # pylint: disable=C0200
if values[k] is None:
continue
if k not in keys and k not in printed:
added += 1
printed.add(k)
name = list(
name for name in self._global_index # pylint: disable=C0206
if self._global_index[name] == k)
if isinstance(values[k], (numpy.ndarray, coo_matrix)):
name = name[0]
mini = numpy_min(values[k])
maxi = numpy_max(values[k])
fLOG("+kr{}'{}': {} (dtype={} min={} max={}{})".format(
"=" if len(values[k].shape) == 0 or min(
values[k].shape) > 0 else "*",
name, values[k].shape, values[k].dtype,
mini, maxi,
' sparse' if isinstance(values[k], coo_matrix) else ''))
if verbose >= 3:
dispsimple(values[k])
else:
fLOG("+kr='{}': {}".format(
name, type(values[k])))
if verbose >= 3: # pragma: no cover
dispsimple(values[k])
if added == 0:
fLOG("? no new result") # pragma: no cover
if intermediate:
values = [(v, k, values[v]) for k, v in self._global_index.items()]
values.sort()
values = OrderedDict((k, v) for _, k, v in values)
return (values, mtime) if node_time else values
try:
res = {k: values[self._global_index[k]] for k in self.outputs_}
except KeyError as e: # pragma: no cover
raise RuntimeError("Unable to find one output [{}]\n in [{}]"
".".format(", ".join(sorted(self.outputs_)),
", ".join(sorted(values)))) from e
return (res, mtime) if node_time else res
def build_intermediate(self, outputs=None, verbose=0, overwrite_types=None,
fLOG=None):
"""
Builds every possible :epkg:`ONNX` file
which computes one specific intermediate output
from the inputs.
:param outputs: subsets of outputs to get,
None to get all outputs,
:param overwrite_types: shape inference does not work all the time,
            this allows forcing types when building intermediate
results, see @see fn select_model_inputs_outputs
:param verbose: displays intermediate information
:param fLOG: logging function
:return: :epkg:`*py:collections:OrderedDict`
        .. versionchanged:: 0.6
"""
if verbose > 0:
fLOG('[build_intermediate] BEGIN.')
if outputs is not None:
if isinstance(outputs, str):
outputs = [outputs]
if not isinstance(outputs, set):
outputs = set(outputs)
ord = OrderedDict()
for output in enumerate_model_node_outputs(self.obj, order=True):
if outputs is not None and output not in outputs:
continue
subonx = select_model_inputs_outputs(
self.obj, outputs=output, infer_shapes=True,
overwrite=overwrite_types)
subonx = onnx_remove_node_unused(subonx)
if verbose > 0:
fLOG( # pragma: no cover
'[build_intermediate] + {}'.format(output))
ord[output] = OnnxInference(subonx, runtime=self.runtime,
skip_run=self.skip_run,
runtime_options=self.runtime_options,
inplace=self.inplace,
input_inplace=self.input_inplace)
if verbose > 0:
fLOG( # pragma: no cover
'[build_intermediate] END.')
return ord
def _run_whole_runtime(self, inputs, clean_right_away=False,
intermediate=False, verbose=0, node_time=False,
overwrite_types=None, yield_ops=None, fLOG=None):
# node_time is unused
if clean_right_away:
raise RuntimeError( # pragma: no cover
"clean_right_away=true does not work with this runtime.")
if intermediate:
if hasattr(self, "intermediate_onnx_inference_"):
inter_run = self.intermediate_onnx_inference_ # pylint: disable=E0203
else:
if verbose > 0:
fLOG( # pragma: no cover
"-- OnnxInference: build intermediate")
inter_run = self.build_intermediate(
verbose=verbose, fLOG=fLOG, overwrite_types=overwrite_types)
self.intermediate_onnx_inference_ = inter_run
graph = self.to_sequence()
self.inits_ = graph['inits']
if verbose >= 1:
fLOG( # pragma: no cover
"-- OnnxInference: run {} nodes".format(
len(self.intermediate_onnx_inference_)))
values = OrderedDict(inputs)
for k, v in self.inits_.items():
values[k] = v['value']
if verbose >= 2: # pragma: no cover
for k in sorted(values):
fLOG("-k='{}' shape={} dtype={}".format(
k, values[k].shape, values[k].dtype))
for node, oinf in self.intermediate_onnx_inference_.items():
if verbose >= 4: # pragma: no cover
fLOG('[intermediate] %r' % node)
if verbose >= 5: # pragma: no cover
fLOG(oinf.obj)
if yield_ops is not None and node.onnx_node.op_type == 'YieldOp':
out = node.onnx_node.output[0]
if out in yield_ops:
values[out] = yield_ops[out]
continue
raise RuntimeError( # pragma: no cover
"YieldOp output %r could not be found in "
"yield_ops: %r (node=%r)." % (
out, list(sorted(yield_ops)), node.onnx_node))
output = oinf.run(inputs)[node]
values[node] = output
if verbose >= 1:
if verbose >= 4: # pragma: no cover
for k, v in inputs.items():
if isinstance(output, numpy.ndarray):
fLOG("-i='{}': {} (dtype={}) {}".format(
k, v.shape, v.dtype, v.ravel().tolist()))
else:
fLOG("-i='{}': {} (dtype={}) - ?".format(
k, v.shape, v.dtype))
if isinstance(output, numpy.ndarray):
fLOG("+k='{}': {} (dtype={})".format(
node, output.shape, output.dtype))
if verbose >= 2: # pragma: no cover
fLOG(output)
else:
fLOG("+k='{}': {}".format( # pragma: no cover
node, type(output)))
if verbose >= 2: # pragma: no cover
fLOG(output)
return values
if verbose != 0:
warnings.warn(
"verbose option not implemented if runtime is 'onnxruntime1'")
res = self._whole.run(inputs)
return {k: v for k, v in zip(self.outputs_, res)}
def __getitem__(self, item):
"""
        Returns an ONNX node by name, or one of its attributes
        if *item* is a tuple ``(node_name, attribute_name)``.
"""
if isinstance(item, tuple):
node_name, att_name = item
else:
node_name = item
att_name = None
node_ = None
for node in self.obj.graph.node:
if node.name == node_name:
node_ = node
break
if node_ is None:
raise IndexError( # pragma: no cover
"Unable to get node name '{}'.\n{}".format(
node_name, "\n".join(node.name for node in self.obj.graph.node)))
if att_name is None:
return node_
for att in node_.attribute:
if att.name == att_name:
return att
raise IndexError( # pragma: no cover
"Unable to find attribute '{}' from node "
"'{}'.".format(att_name, node_name))
def switch_initializers_dtype(self, model=None,
dtype_in=numpy.float32,
dtype_out=numpy.float64):
"""
        Switches all initializers from *dtype_in* to *dtype_out*
        (``numpy.float64`` by default). If *model* is None, a simple
        cast is done. Otherwise, the function assumes the model is
        a :epkg:`scikit-learn` pipeline.
This only works if the runtime is ``'python'``.
@param model :epkg:`scikit-learn` model or None
@param dtype_in previous type
@param dtype_out next type
@return done operations
"""
from ..onnx_tools.optim.sklearn_helper import enumerate_fitted_arrays, pairwise_array_distances
if self.runtime != 'python': # pragma: no cover
raise RuntimeError("Initializers can be casted only if the "
"runtime is 'python' not '{}'.".format(self.runtime))
if hasattr(self, '_values_init'):
del self._values_init
# first pass: simple cast
done = []
initializer = self.inits_
for k, v in initializer.items():
if isinstance(v['value'], numpy.ndarray):
if v['value'].dtype == dtype_in:
v['value'] = v['value'].astype(dtype_out)
done.append(("pass1", "+", "init", k, v['value']))
else:
done.append(("pass1", "-", "init", k,
v['value'])) # pragma: no cover
for k, v in self.graph_['nodes'].items():
res = v.switch_initializers_dtype(dtype_in=dtype_in,
dtype_out=dtype_out)
for r in res:
done.append(("pass1", "node", k) + r)
for k, v in self.graph_['intermediate'].items():
if v is None:
continue
res = v.switch_initializers_dtype(dtype_in=dtype_in,
dtype_out=dtype_out)
for r in res:
done.append(("pass1", "sub", k) + r)
if model is not None:
# Second pass, we compare all arrays from the model
# to the arrays in the converted models.
def dist(a):
cast = a.astype(dtype_in).astype(dtype_out)
d = pairwise_array_distances([cast], [a])[0, 0]
return d
done_ = [(c, c[-1]) for c in done]
moda_ = [(a, a[-2][-1]) for a in enumerate_fitted_arrays(model)
if dist(a[-2][-1]) > 0]
aconv = [_[-1] for _ in done_]
amoda = [_[-1] for _ in moda_]
distances = pairwise_array_distances(aconv, amoda)
for i in range(distances.shape[0]):
j = numpy.argmin(distances[i])
d = distances[i, j]
if d < 0.1:
numpy.copyto(aconv[i], amoda[j])
done.append(("pass2", d) + done_[i][0])
return done
def _set_shape_inference_runtime(self):
"""
Set shapes based on shape inference
relying on the runtime.
The values are stored in every node.
"""
if not hasattr(self, 'sequence_') or not hasattr(self, 'inputs_'):
raise RuntimeError( # pragma: no cover
"This method only works if the runtime is 'python' not "
"'{}'.".format(self.runtime))
values = OrderedDict()
for k, v in self.inputs_.items():
# The function assumes the first dimension is unknown
# and is the batch size.
try:
values[k] = ShapeObject(v, use_n1=True, name=k)
except TypeError as e: # pragma: no cover
raise TypeError(
"Unable to guess shape for %r (shape=%r)." % (k, v)) from e
impossible = False
for k, v in self.statics_.items():
# static inputs should be known.
if k not in values:
try:
values[k] = ShapeObject(v)
except TypeError:
# default value is wrong
impossible = True
values[k] = None
for k, v in self.inits_.items():
values[k] = ShapeObject(v['value'], name=k)
last = None
for i, node in enumerate(self.sequence_):
try:
s = node._set_shape_inference_runtime(values)
last = s
except (IndexError, TypeError, KeyError,
AttributeError) as e: # pragma: no cover
rows = []
if last is not None:
for k, v in last.items():
rows.append("{}: {}".format(k, v))
for k in range(i + 1):
rows.append("{} --> {}".format(k, self.sequence_[k]))
if not impossible:
raise RuntimeError("Unable to infer shape of node {}\n{}".format(
i, '\n'.join(rows))) from e
return values
def infer_shapes(self):
"""
Computes expected shapes.
:return: dictionary of shapes
"""
return self._set_shape_inference_runtime()
def _set_type_inference_runtime(self):
"""
Set types based on type inference
relying on the runtime.
The values are stored in every node.
"""
if not hasattr(self, 'sequence_') or not hasattr(self, 'inputs_'):
raise RuntimeError( # pragma: no cover
"This method only works if the runtime is 'python' not "
"'{}'.".format(self.runtime))
values = OrderedDict()
for k, v in self.statics_.items():
values[k] = None
for k, v in self.inputs_.items():
# The function assumes the first dimension is unknown
# and is the batch size.
if isinstance(v['type']['elem'], dict):
# sequence
values[k] = SequenceType()
else:
values[k] = guess_numpy_type_from_string(v['type']['elem'])
for k, v in self.inits_.items():
values[k] = v['value'].dtype
last = None
for i, node in enumerate(self.sequence_):
try:
s = node._set_type_inference_runtime(values)
last = s
except IndexError as e: # pragma: no cover
rows = []
if last is not None:
for k, v in last.items():
rows.append("{}: {}".format(k, v))
for k in range(i + 1):
rows.append("{} --> {}".format(k, self.sequence_[k]))
raise RuntimeError("Unable to infer type of node {}\n{}".format(
i, '\n'.join(rows))) from e
return values
def infer_types(self):
"""
        Computes expected types.
:return: dictionary of types
"""
return self._set_type_inference_runtime()
def _set_size_inference_runtime(self, inputs, context=None):
"""
Set sizes allocated during inference
relying on the runtime.
The values are stored in every node.
"""
if not hasattr(self, 'sequence_') or not hasattr(self, 'inputs_'):
raise RuntimeError( # pragma: no cover
"This method only works if the runtime is 'python' not "
"'{}'.".format(self.runtime))
values = OrderedDict()
for k, v in self.statics_.items():
if context is None:
raise RuntimeError( # pragma: no cover
"static variable but context is None.")
values[k] = context[k]
for k, v in self.inits_.items():
values[k] = v['value']
for k, v in self.inputs_.items():
if k in inputs:
values[k] = inputs[k]
last = None
for i, node in enumerate(self.sequence_):
try:
s = node._set_size_inference_runtime(values)
last = s
except IndexError as e: # pragma: no cover
rows = []
if last is not None:
for k, v in last.items():
rows.append("{}: {}".format(k, v))
for k in range(i + 1):
rows.append("{} --> {}".format(k, self.sequence_[k]))
raise RuntimeError("Unable to infer size of node {}\n{}".format(
i, '\n'.join(rows))) from e
return values
def infer_sizes(self, inputs, context=None):
"""
Computes expected sizes.
:param inputs: inputs as a dictionary
:return: dictionary of dictionary of sizes
"""
res = self._set_size_inference_runtime(inputs, context=context)
return {k: v for k, v in res.items() if k.startswith('#')}
def _guess_inplace(self, input_inplace=False):
"""
Looks into every node of the graph to see
if there is a way to do the computation
inplace. By default (*input_inplace=False*),
the function assumes inputs cannot be modified
so the first node cannot do inplace computation.
This function only works with the python runtime.
@param input_inplace the computation is allowed
to overwrite the input
        This function checks whether a result is used only once;
        if so, it can be modified in place by the next node.
Nodes `A`, `C` can be overwritten by the computation.
Node `B` cannot as it is used by two nodes.
.. blockdiag::
diagram {
A -> B -> C -> E;
B -> D;
}
        It does not handle the specific case where node `B` is
        overwritten by node `C` without changing its shape
        while node `D` only needs the shape of `B`. In that case,
        `B` could be overwritten as well.
"""
forbid = {}
values = OrderedDict()
for k in self.statics_:
values[k] = dict(inplace=False, to=[], fr=[])
for k in self.inputs_:
values[k] = dict(inplace=input_inplace, to=[], fr=[])
for k in self.inits_:
values[k] = dict(inplace=False, to=[], fr=[])
for node in self.sequence_:
for n in node.inputs:
values[n]['to'].append(node)
for n in node.outputs:
if node.op_type == 'Constant':
# We cannot modify constant.
forbid[n] = node
if n not in values:
values[n] = dict(inplace=None, to=[], fr=[])
values[n]['fr'].append(node)
# checks the number of outputs
outputs = set(self.output_names)
modif = 1
while modif > 0:
modif = 0
for n, v in values.items():
if v['inplace'] is not None:
continue
if n in forbid:
continue
if len(v['to']) == 1:
v['inplace'] = True
modif += 1
# convey the information to every node
inplaces = {}
for n, v in values.items():
if v['inplace']:
inplaces[n] = v
for node in v['to']:
if n in outputs:
continue
node.enable_inplace_compute(n)
return inplaces
def _build_compile_run(self, debug=False):
"""
        Rewrites the run function in python,
compiles it, and adds it as a method.
@param debug insert debugging code
@return method name, callable object
.. exref::
:title: Run a model with runtime 'python_compiled'
        The following code trains a model and computes
the predictions with runtime ``'python_compiled'``.
It converts the onnx graph into a python function
which calls every operator. Its code is printed
below.
.. runpython::
:showcode:
:warningout: DeprecationWarning
import numpy
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from skl2onnx import to_onnx
from mlprodict.onnxrt import OnnxInference
iris = load_iris()
X, y = iris.data, iris.target
X_train, X_test, y_train, __ = train_test_split(X, y, random_state=11)
y_train = y_train.astype(numpy.float32)
clr = AdaBoostClassifier(
base_estimator=DecisionTreeClassifier(max_depth=3),
n_estimators=3)
clr.fit(X_train, y_train)
model_def = to_onnx(clr, X_train.astype(numpy.float32),
target_opset=12)
oinf2 = OnnxInference(model_def, runtime='python_compiled')
print(oinf2.run({'X': X_test[:5]}))
# prints out the python function equivalent
# to the onnx graph
print(oinf2)
"""
def clean_name(name):
return name.replace(":", "_").replace('.', '_').replace('/', '_')
# inits
inputs = self.input_names
code = ['def compiled_run(dict_inputs, yield_ops=None):']
code.append(" if yield_ops is not None:")
code.append(
" raise NotImplementedError('yields_ops should be None.')")
if debug:
code.append(" printed = {}")
context = {}
# static variables
for k in sorted(self.statics_):
code.append(" # static: {0}".format(k))
code.append(" {0} = dict_inputs['{1}']".format(
clean_name(k), k))
if debug:
code.append(
" debug_print('i.{0}', {1}, printed)".format(
clean_name(k), k))
# initializers
for k, v in sorted(self.inits_.items()):
if k.startswith("_OPT_"):
raise RuntimeError( # pragma: no cover
"The runtime cannot handle any constant name "
"starting with '_OPT_': '{}'.".format(k))
if k in inputs:
context["_OPT_" + clean_name(k)] = v['value']
code.append(" # init: _OPT_{0} ({1})".format(
clean_name(k), k))
if debug:
code.append(
" debug_print('c.[_OPT_{0}]', _OPT_{1}, printed)".format(
clean_name(k), k))
else:
context[clean_name(k)] = v['value']
code.append(" # init: {0} ({1})".format(
clean_name(k), k))
if debug:
code.append(
" debug_print('c.[{0}]', {1}, printed)".format(
clean_name(k), k))
# method signature
code.append(" # inputs")
for inp in inputs:
if '_OPT_' + inp in context:
# optional inputs
code.append(
" {0} = dict_inputs.get('{1}', _OPT_{0})".format(
clean_name(inp), inp))
else:
code.append(" {0} = dict_inputs['{1}']".format(
clean_name(inp), inp))
if debug:
code.append(
" debug_print('i.{0}', {1}, printed)".format(
clean_name(inp), inp))
# code
for i, node in enumerate(self.sequence_):
name = "n{}_{}".format(i, node.ops_.__class__.__name__.lower())
context[name] = node.ops_._run
if (node.ops_.__class__.__name__ == 'Loop' and
node.ops_.need_context()):
# Adding context.
ctx = "{%s}" % ", ".join(
"'%s': %s" % (n, n) for n in node.ops_.additional_inputs)
code.append(' ({1}, ) = {2}({0}, context={3})'.format(
', '.join(map(clean_name, node.inputs)),
', '.join(map(clean_name, node.outputs)),
name, ctx))
else:
code.append(' ({1}, ) = {2}({0})'.format(
', '.join(map(clean_name, node.inputs)),
', '.join(map(clean_name, node.outputs)),
name))
if debug:
code.append(" print('''# {}''')".format(code[-1][4:]))
for o in node.outputs:
code.append(
" debug_print('o.{0}', {1}, printed)".format(
clean_name(o), o))
# return
code.append(' return {')
for out in self.output_names:
code.append(" '{1}': {0},".format(
clean_name(out), out))
code.append(' }')
final_code = '\n'.join(code)
# compile the outcome
context['self'] = self
try:
obj = compile(final_code, "<string>", 'exec')
except SyntaxError as e: # pragma: no cover
raise SyntaxError(
"Unable to compile\n#####\n{}".format(final_code)) from e
fcts_obj = [_ for _ in obj.co_consts
if _ is not None and not isinstance(_, (bool, str, int))]
fct = make_callable(
"compiled_run", fcts_obj[0], final_code, context, debug)
# end
return "compiled_run", fct, final_code
def reduce_size(self, pickable=False):
"""
Reduces the memory footprint as much as possible.
@param pickable keeps a pickle object?
"""
import gc
del self.graph_
if not pickable:
del self.obj
if self.runtime in ('python_compiled', 'python_compiled_debug'):
del self.sequence_
gc.collect()
def get_profiling(self, as_df=False):
"""
        Returns the profiling after a couple of executions.
:param as_df: return the results as a dataframe (True)
:return: dataframe or list of dictionaries
.. versionadded:: 0.6
"""
if (self.runtime_options is None or
not self.runtime_options.get('enable_profiling', False)):
raise RuntimeError(
"Profiling is available if options 'enable_profiling' "
"is set to true in 'runtime_options' but is %r." % self.runtime_options)
prof = None
if hasattr(self, '_whole'):
prof = self._whole.get_profiling()
if prof is None:
raise NotImplementedError( # pragma: no cover
"profiling is only implemented for runtime 'onnxruntime1'.")
if as_df:
import pandas
return pandas.DataFrame(prof)
return prof
def get_execution_order(self):
"""
        This function returns a dictionary `{(kind, name): (order, op)}`,
        *name* can be a node name or a result name. In the latter case,
        it gets the execution order of the node which created it.
        The function returns None if the order is not available
        (the selected runtime does not return it). *kind* is either
        `'node'` or `'res'`. If two nodes have the same name, the
        returned order is the last one. Initializers get an execution
        order equal to -1, inputs 0, and all other results are >= 1.
.. versionadded:: 0.7
"""
if not hasattr(self, "sequence_"):
return None
res = {}
for k, v in self.inits_.items():
res['res', k] = (-1, v)
for name, shape in self.input_names_shapes:
res['res', name] = (0, shape)
for i, node in enumerate(self.sequence_):
key = ('node', node.onnx_node.name)
res[key] = (i + 1, node)
for out in node.onnx_node.output:
key = ('res', out)
if key in res:
raise RuntimeError( # pragma: no cover
"Output %r of node name %r already registered."
"" % (out, node.onnx_node.name))
res[key] = (i + 1, None)
return res
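# Minimal usage sketch for the introspection helpers above (display_sequence,
# infer_shapes, get_execution_order). This is only an illustration, assuming
# scikit-learn and mlprodict.onnx_conv.to_onnx are available as in the
# docstring examples; it is not part of the runtime API itself.
if __name__ == "__main__":
    import numpy
    from sklearn.datasets import load_iris
    from sklearn.linear_model import LinearRegression
    from mlprodict.onnx_conv import to_onnx
    X, y = load_iris(return_X_y=True)
    clr = LinearRegression().fit(X, y)
    model_def = to_onnx(clr, X.astype(numpy.float32), target_opset=12)
    oinf = OnnxInference(model_def)
    # Ordered list of nodes the python runtime will execute.
    print(oinf.display_sequence())
    # Expected shape of every result, inferred by the runtime.
    print(oinf.infer_shapes())
    # Execution order: {(kind, name): (order, op)} as documented above.
    print(oinf.get_execution_order())
    # Profiling would additionally require runtime='onnxruntime1' and
    # runtime_options=dict(enable_profiling=True); see get_profiling above.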
|
py | 1a30fb40600d4666bd50a5b75878d708873ffa34 | from typing import Iterable
import re
from dbt.clients.jinja import get_rendered
from dbt.contracts.graph.parsed import ParsedDocumentation
from dbt.node_types import NodeType
from dbt.parser.base import Parser
from dbt.parser.search import (
BlockContents, FileBlock, BlockSearcher
)
SHOULD_PARSE_RE = re.compile(r'{[{%]')
class DocumentationParser(Parser[ParsedDocumentation]):
@property
def resource_type(self) -> NodeType:
return NodeType.Documentation
@classmethod
def get_compiled_path(cls, block: FileBlock):
return block.path.relative_path
def generate_unique_id(self, resource_name: str) -> str:
# because docs are in their own graph namespace, node type doesn't
# need to be part of the unique ID.
return '{}.{}'.format(self.project.project_name, resource_name)
def parse_block(
self, block: BlockContents
) -> Iterable[ParsedDocumentation]:
unique_id = self.generate_unique_id(block.name)
contents = get_rendered(block.contents, {}).strip()
doc = ParsedDocumentation(
root_path=self.project.project_root,
path=block.file.path.relative_path,
original_file_path=block.path.original_file_path,
package_name=self.project.project_name,
unique_id=unique_id,
name=block.name,
block_contents=contents,
)
return [doc]
def parse_file(self, file_block: FileBlock):
searcher: Iterable[BlockContents] = BlockSearcher(
source=[file_block],
allowed_blocks={'docs'},
source_tag_factory=BlockContents,
)
for block in searcher:
for parsed in self.parse_block(block):
self.manifest.add_doc(file_block.file, parsed)
|
py | 1a30fb8fd266cca93f016ed29de1fa7e5726d0c1 | """
* Binary Exponentiation for Powers
* This is a method to find a^b in a time complexity of O(log b)
* This is one of the most commonly used methods of finding powers.
* Also useful in cases where the solution to (a^b)%c is required,
* where a, b, c can be numbers beyond the computer's usual calculation limits.
* Done using iteration, can also be done using recursion
* @author chinmoy159
* @version 1.0 dated 10/08/2017
"""
def b_expo(a, b):
res = 1
while b > 0:
if b&1:
res *= a
a *= a
b >>= 1
return res
def b_expo_mod(a, b, c):
res = 1
while b > 0:
if b&1:
res = ((res%c) * (a%c)) % c
a *= a
b >>= 1
return res
"""
* Wondering how this method works!
* It's pretty simple.
* Let's say you need to calculate a ^ b
* RULE 1 : a ^ b = (a*a) ^ (b/2) ---- example : 4 ^ 4 = (4*4) ^ (4/2) = 16 ^ 2
* RULE 2 : IF b is ODD, then ---- a ^ b = a * (a ^ (b - 1)) :: where (b - 1) is even.
* Once b is even, repeat the process to get a ^ b
* Repeat the process till b = 1 OR b = 0, because a^1 = a AND a^0 = 1
*
* As far as the modulo is concerned,
* the fact : (a*b) % c = ((a%c) * (b%c)) % c
* Now apply RULE 1 OR 2 whichever is required.
"""
|
py | 1a30fde460e2706385b779e11d788fa9d0704901 | from django.views.generic import TemplateView
class FrontEndView(TemplateView):
template_name = 'home.html'
|
py | 1a30fe1917e267e3092bbbdc0f86dec8f73778cb | """
WSGI config for testappauto614_dev_23545 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'testappauto614_dev_23545.settings')
application = get_wsgi_application()
|
py | 1a30ff06ccd24823958ce627b5d8cdaae7ac1a7b | #!/usr/bin/env python3
# Copyright (c) 2020-2021 The Eleccoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Functionality to build scripts, as well as signature hash functions.
This file is modified from python-eleccoinlib.
"""
from collections import namedtuple
import hashlib
import struct
import unittest
from typing import List, Dict
from .key import TaggedHash, tweak_add_pubkey
from .messages import (
CTransaction,
CTxOut,
hash256,
ser_string,
ser_uint256,
sha256,
uint256_from_str,
)
MAX_SCRIPT_ELEMENT_SIZE = 520
LOCKTIME_THRESHOLD = 500000000
ANNEX_TAG = 0x50
OPCODE_NAMES = {} # type: Dict[CScriptOp, str]
LEAF_VERSION_TAPSCRIPT = 0xc0
def hash160(s):
return hashlib.new('ripemd160', sha256(s)).digest()
def bn2vch(v):
"""Convert number to eleccoin-specific little endian format."""
# We need v.bit_length() bits, plus a sign bit for every nonzero number.
n_bits = v.bit_length() + (v != 0)
# The number of bytes for that is:
n_bytes = (n_bits + 7) // 8
# Convert number to absolute value + sign in top bit.
encoded_v = 0 if v == 0 else abs(v) | ((v < 0) << (n_bytes * 8 - 1))
# Serialize to bytes
return encoded_v.to_bytes(n_bytes, 'little')
_opcode_instances = [] # type: List[CScriptOp]
class CScriptOp(int):
"""A single script opcode"""
__slots__ = ()
@staticmethod
def encode_op_pushdata(d):
"""Encode a PUSHDATA op, returning bytes"""
if len(d) < 0x4c:
return b'' + bytes([len(d)]) + d # OP_PUSHDATA
elif len(d) <= 0xff:
return b'\x4c' + bytes([len(d)]) + d # OP_PUSHDATA1
elif len(d) <= 0xffff:
return b'\x4d' + struct.pack(b'<H', len(d)) + d # OP_PUSHDATA2
elif len(d) <= 0xffffffff:
return b'\x4e' + struct.pack(b'<I', len(d)) + d # OP_PUSHDATA4
else:
raise ValueError("Data too long to encode in a PUSHDATA op")
@staticmethod
def encode_op_n(n):
"""Encode a small integer op, returning an opcode"""
if not (0 <= n <= 16):
raise ValueError('Integer must be in range 0 <= n <= 16, got %d' % n)
if n == 0:
return OP_0
else:
return CScriptOp(OP_1 + n - 1)
def decode_op_n(self):
"""Decode a small integer opcode, returning an integer"""
if self == OP_0:
return 0
if not (self == OP_0 or OP_1 <= self <= OP_16):
raise ValueError('op %r is not an OP_N' % self)
return int(self - OP_1 + 1)
def is_small_int(self):
"""Return true if the op pushes a small integer to the stack"""
if 0x51 <= self <= 0x60 or self == 0:
return True
else:
return False
def __str__(self):
return repr(self)
def __repr__(self):
if self in OPCODE_NAMES:
return OPCODE_NAMES[self]
else:
return 'CScriptOp(0x%x)' % self
def __new__(cls, n):
try:
return _opcode_instances[n]
except IndexError:
assert len(_opcode_instances) == n
_opcode_instances.append(super().__new__(cls, n))
return _opcode_instances[n]
# Populate opcode instance table
for n in range(0xff + 1):
CScriptOp(n)
# push value
OP_0 = CScriptOp(0x00)
OP_FALSE = OP_0
OP_PUSHDATA1 = CScriptOp(0x4c)
OP_PUSHDATA2 = CScriptOp(0x4d)
OP_PUSHDATA4 = CScriptOp(0x4e)
OP_1NEGATE = CScriptOp(0x4f)
OP_RESERVED = CScriptOp(0x50)
OP_1 = CScriptOp(0x51)
OP_TRUE = OP_1
OP_2 = CScriptOp(0x52)
OP_3 = CScriptOp(0x53)
OP_4 = CScriptOp(0x54)
OP_5 = CScriptOp(0x55)
OP_6 = CScriptOp(0x56)
OP_7 = CScriptOp(0x57)
OP_8 = CScriptOp(0x58)
OP_9 = CScriptOp(0x59)
OP_10 = CScriptOp(0x5a)
OP_11 = CScriptOp(0x5b)
OP_12 = CScriptOp(0x5c)
OP_13 = CScriptOp(0x5d)
OP_14 = CScriptOp(0x5e)
OP_15 = CScriptOp(0x5f)
OP_16 = CScriptOp(0x60)
# control
OP_NOP = CScriptOp(0x61)
OP_VER = CScriptOp(0x62)
OP_IF = CScriptOp(0x63)
OP_NOTIF = CScriptOp(0x64)
OP_VERIF = CScriptOp(0x65)
OP_VERNOTIF = CScriptOp(0x66)
OP_ELSE = CScriptOp(0x67)
OP_ENDIF = CScriptOp(0x68)
OP_VERIFY = CScriptOp(0x69)
OP_RETURN = CScriptOp(0x6a)
# stack ops
OP_TOALTSTACK = CScriptOp(0x6b)
OP_FROMALTSTACK = CScriptOp(0x6c)
OP_2DROP = CScriptOp(0x6d)
OP_2DUP = CScriptOp(0x6e)
OP_3DUP = CScriptOp(0x6f)
OP_2OVER = CScriptOp(0x70)
OP_2ROT = CScriptOp(0x71)
OP_2SWAP = CScriptOp(0x72)
OP_IFDUP = CScriptOp(0x73)
OP_DEPTH = CScriptOp(0x74)
OP_DROP = CScriptOp(0x75)
OP_DUP = CScriptOp(0x76)
OP_NIP = CScriptOp(0x77)
OP_OVER = CScriptOp(0x78)
OP_PICK = CScriptOp(0x79)
OP_ROLL = CScriptOp(0x7a)
OP_ROT = CScriptOp(0x7b)
OP_SWAP = CScriptOp(0x7c)
OP_TUCK = CScriptOp(0x7d)
# splice ops
OP_CAT = CScriptOp(0x7e)
OP_SUBSTR = CScriptOp(0x7f)
OP_LEFT = CScriptOp(0x80)
OP_RIGHT = CScriptOp(0x81)
OP_SIZE = CScriptOp(0x82)
# bit logic
OP_INVERT = CScriptOp(0x83)
OP_AND = CScriptOp(0x84)
OP_OR = CScriptOp(0x85)
OP_XOR = CScriptOp(0x86)
OP_EQUAL = CScriptOp(0x87)
OP_EQUALVERIFY = CScriptOp(0x88)
OP_RESERVED1 = CScriptOp(0x89)
OP_RESERVED2 = CScriptOp(0x8a)
# numeric
OP_1ADD = CScriptOp(0x8b)
OP_1SUB = CScriptOp(0x8c)
OP_2MUL = CScriptOp(0x8d)
OP_2DIV = CScriptOp(0x8e)
OP_NEGATE = CScriptOp(0x8f)
OP_ABS = CScriptOp(0x90)
OP_NOT = CScriptOp(0x91)
OP_0NOTEQUAL = CScriptOp(0x92)
OP_ADD = CScriptOp(0x93)
OP_SUB = CScriptOp(0x94)
OP_MUL = CScriptOp(0x95)
OP_DIV = CScriptOp(0x96)
OP_MOD = CScriptOp(0x97)
OP_LSHIFT = CScriptOp(0x98)
OP_RSHIFT = CScriptOp(0x99)
OP_BOOLAND = CScriptOp(0x9a)
OP_BOOLOR = CScriptOp(0x9b)
OP_NUMEQUAL = CScriptOp(0x9c)
OP_NUMEQUALVERIFY = CScriptOp(0x9d)
OP_NUMNOTEQUAL = CScriptOp(0x9e)
OP_LESSTHAN = CScriptOp(0x9f)
OP_GREATERTHAN = CScriptOp(0xa0)
OP_LESSTHANOREQUAL = CScriptOp(0xa1)
OP_GREATERTHANOREQUAL = CScriptOp(0xa2)
OP_MIN = CScriptOp(0xa3)
OP_MAX = CScriptOp(0xa4)
OP_WITHIN = CScriptOp(0xa5)
# crypto
OP_RIPEMD160 = CScriptOp(0xa6)
OP_SHA1 = CScriptOp(0xa7)
OP_SHA256 = CScriptOp(0xa8)
OP_HASH160 = CScriptOp(0xa9)
OP_HASH256 = CScriptOp(0xaa)
OP_CODESEPARATOR = CScriptOp(0xab)
OP_CHECKSIG = CScriptOp(0xac)
OP_CHECKSIGVERIFY = CScriptOp(0xad)
OP_CHECKMULTISIG = CScriptOp(0xae)
OP_CHECKMULTISIGVERIFY = CScriptOp(0xaf)
# expansion
OP_NOP1 = CScriptOp(0xb0)
OP_CHECKLOCKTIMEVERIFY = CScriptOp(0xb1)
OP_CHECKSEQUENCEVERIFY = CScriptOp(0xb2)
OP_NOP4 = CScriptOp(0xb3)
OP_NOP5 = CScriptOp(0xb4)
OP_NOP6 = CScriptOp(0xb5)
OP_NOP7 = CScriptOp(0xb6)
OP_NOP8 = CScriptOp(0xb7)
OP_NOP9 = CScriptOp(0xb8)
OP_NOP10 = CScriptOp(0xb9)
# BIP 342 opcodes (Tapscript)
OP_CHECKSIGADD = CScriptOp(0xba)
OP_INVALIDOPCODE = CScriptOp(0xff)
OPCODE_NAMES.update({
OP_0: 'OP_0',
OP_PUSHDATA1: 'OP_PUSHDATA1',
OP_PUSHDATA2: 'OP_PUSHDATA2',
OP_PUSHDATA4: 'OP_PUSHDATA4',
OP_1NEGATE: 'OP_1NEGATE',
OP_RESERVED: 'OP_RESERVED',
OP_1: 'OP_1',
OP_2: 'OP_2',
OP_3: 'OP_3',
OP_4: 'OP_4',
OP_5: 'OP_5',
OP_6: 'OP_6',
OP_7: 'OP_7',
OP_8: 'OP_8',
OP_9: 'OP_9',
OP_10: 'OP_10',
OP_11: 'OP_11',
OP_12: 'OP_12',
OP_13: 'OP_13',
OP_14: 'OP_14',
OP_15: 'OP_15',
OP_16: 'OP_16',
OP_NOP: 'OP_NOP',
OP_VER: 'OP_VER',
OP_IF: 'OP_IF',
OP_NOTIF: 'OP_NOTIF',
OP_VERIF: 'OP_VERIF',
OP_VERNOTIF: 'OP_VERNOTIF',
OP_ELSE: 'OP_ELSE',
OP_ENDIF: 'OP_ENDIF',
OP_VERIFY: 'OP_VERIFY',
OP_RETURN: 'OP_RETURN',
OP_TOALTSTACK: 'OP_TOALTSTACK',
OP_FROMALTSTACK: 'OP_FROMALTSTACK',
OP_2DROP: 'OP_2DROP',
OP_2DUP: 'OP_2DUP',
OP_3DUP: 'OP_3DUP',
OP_2OVER: 'OP_2OVER',
OP_2ROT: 'OP_2ROT',
OP_2SWAP: 'OP_2SWAP',
OP_IFDUP: 'OP_IFDUP',
OP_DEPTH: 'OP_DEPTH',
OP_DROP: 'OP_DROP',
OP_DUP: 'OP_DUP',
OP_NIP: 'OP_NIP',
OP_OVER: 'OP_OVER',
OP_PICK: 'OP_PICK',
OP_ROLL: 'OP_ROLL',
OP_ROT: 'OP_ROT',
OP_SWAP: 'OP_SWAP',
OP_TUCK: 'OP_TUCK',
OP_CAT: 'OP_CAT',
OP_SUBSTR: 'OP_SUBSTR',
OP_LEFT: 'OP_LEFT',
OP_RIGHT: 'OP_RIGHT',
OP_SIZE: 'OP_SIZE',
OP_INVERT: 'OP_INVERT',
OP_AND: 'OP_AND',
OP_OR: 'OP_OR',
OP_XOR: 'OP_XOR',
OP_EQUAL: 'OP_EQUAL',
OP_EQUALVERIFY: 'OP_EQUALVERIFY',
OP_RESERVED1: 'OP_RESERVED1',
OP_RESERVED2: 'OP_RESERVED2',
OP_1ADD: 'OP_1ADD',
OP_1SUB: 'OP_1SUB',
OP_2MUL: 'OP_2MUL',
OP_2DIV: 'OP_2DIV',
OP_NEGATE: 'OP_NEGATE',
OP_ABS: 'OP_ABS',
OP_NOT: 'OP_NOT',
OP_0NOTEQUAL: 'OP_0NOTEQUAL',
OP_ADD: 'OP_ADD',
OP_SUB: 'OP_SUB',
OP_MUL: 'OP_MUL',
OP_DIV: 'OP_DIV',
OP_MOD: 'OP_MOD',
OP_LSHIFT: 'OP_LSHIFT',
OP_RSHIFT: 'OP_RSHIFT',
OP_BOOLAND: 'OP_BOOLAND',
OP_BOOLOR: 'OP_BOOLOR',
OP_NUMEQUAL: 'OP_NUMEQUAL',
OP_NUMEQUALVERIFY: 'OP_NUMEQUALVERIFY',
OP_NUMNOTEQUAL: 'OP_NUMNOTEQUAL',
OP_LESSTHAN: 'OP_LESSTHAN',
OP_GREATERTHAN: 'OP_GREATERTHAN',
OP_LESSTHANOREQUAL: 'OP_LESSTHANOREQUAL',
OP_GREATERTHANOREQUAL: 'OP_GREATERTHANOREQUAL',
OP_MIN: 'OP_MIN',
OP_MAX: 'OP_MAX',
OP_WITHIN: 'OP_WITHIN',
OP_RIPEMD160: 'OP_RIPEMD160',
OP_SHA1: 'OP_SHA1',
OP_SHA256: 'OP_SHA256',
OP_HASH160: 'OP_HASH160',
OP_HASH256: 'OP_HASH256',
OP_CODESEPARATOR: 'OP_CODESEPARATOR',
OP_CHECKSIG: 'OP_CHECKSIG',
OP_CHECKSIGVERIFY: 'OP_CHECKSIGVERIFY',
OP_CHECKMULTISIG: 'OP_CHECKMULTISIG',
OP_CHECKMULTISIGVERIFY: 'OP_CHECKMULTISIGVERIFY',
OP_NOP1: 'OP_NOP1',
OP_CHECKLOCKTIMEVERIFY: 'OP_CHECKLOCKTIMEVERIFY',
OP_CHECKSEQUENCEVERIFY: 'OP_CHECKSEQUENCEVERIFY',
OP_NOP4: 'OP_NOP4',
OP_NOP5: 'OP_NOP5',
OP_NOP6: 'OP_NOP6',
OP_NOP7: 'OP_NOP7',
OP_NOP8: 'OP_NOP8',
OP_NOP9: 'OP_NOP9',
OP_NOP10: 'OP_NOP10',
OP_CHECKSIGADD: 'OP_CHECKSIGADD',
OP_INVALIDOPCODE: 'OP_INVALIDOPCODE',
})
class CScriptInvalidError(Exception):
"""Base class for CScript exceptions"""
pass
class CScriptTruncatedPushDataError(CScriptInvalidError):
"""Invalid pushdata due to truncation"""
def __init__(self, msg, data):
self.data = data
super().__init__(msg)
# This is used, eg, for blockchain heights in coinbase scripts (bip34)
class CScriptNum:
__slots__ = ("value",)
def __init__(self, d=0):
self.value = d
@staticmethod
def encode(obj):
r = bytearray(0)
if obj.value == 0:
return bytes(r)
neg = obj.value < 0
absvalue = -obj.value if neg else obj.value
while (absvalue):
r.append(absvalue & 0xff)
absvalue >>= 8
if r[-1] & 0x80:
r.append(0x80 if neg else 0)
elif neg:
r[-1] |= 0x80
return bytes([len(r)]) + r
@staticmethod
def decode(vch):
result = 0
# We assume valid push_size and minimal encoding
value = vch[1:]
if len(value) == 0:
return result
for i, byte in enumerate(value):
result |= int(byte) << 8 * i
if value[-1] >= 0x80:
# Mask for all but the highest result bit
num_mask = (2**(len(value) * 8) - 1) >> 1
result &= num_mask
result *= -1
return result
class CScript(bytes):
"""Serialized script
A bytes subclass, so you can use this directly whenever bytes are accepted.
Note that this means that indexing does *not* work - you'll get an index by
byte rather than opcode. This format was chosen for efficiency so that the
general case would not require creating a lot of little CScriptOP objects.
iter(script) however does iterate by opcode.
"""
__slots__ = ()
@classmethod
def __coerce_instance(cls, other):
# Coerce other into bytes
if isinstance(other, CScriptOp):
other = bytes([other])
elif isinstance(other, CScriptNum):
if (other.value == 0):
other = bytes([CScriptOp(OP_0)])
else:
other = CScriptNum.encode(other)
elif isinstance(other, int):
if 0 <= other <= 16:
other = bytes([CScriptOp.encode_op_n(other)])
elif other == -1:
other = bytes([OP_1NEGATE])
else:
other = CScriptOp.encode_op_pushdata(bn2vch(other))
elif isinstance(other, (bytes, bytearray)):
other = CScriptOp.encode_op_pushdata(other)
return other
def __add__(self, other):
# add makes no sense for a CScript()
raise NotImplementedError
def join(self, iterable):
# join makes no sense for a CScript()
raise NotImplementedError
def __new__(cls, value=b''):
if isinstance(value, bytes) or isinstance(value, bytearray):
return super().__new__(cls, value)
else:
def coerce_iterable(iterable):
for instance in iterable:
yield cls.__coerce_instance(instance)
# Annoyingly on both python2 and python3 bytes.join() always
# returns a bytes instance even when subclassed.
return super().__new__(cls, b''.join(coerce_iterable(value)))
def raw_iter(self):
"""Raw iteration
Yields tuples of (opcode, data, sop_idx) so that the different possible
PUSHDATA encodings can be accurately distinguished, as well as
determining the exact opcode byte indexes. (sop_idx)
"""
i = 0
while i < len(self):
sop_idx = i
opcode = self[i]
i += 1
if opcode > OP_PUSHDATA4:
yield (opcode, None, sop_idx)
else:
datasize = None
pushdata_type = None
if opcode < OP_PUSHDATA1:
pushdata_type = 'PUSHDATA(%d)' % opcode
datasize = opcode
elif opcode == OP_PUSHDATA1:
pushdata_type = 'PUSHDATA1'
if i >= len(self):
raise CScriptInvalidError('PUSHDATA1: missing data length')
datasize = self[i]
i += 1
elif opcode == OP_PUSHDATA2:
pushdata_type = 'PUSHDATA2'
if i + 1 >= len(self):
raise CScriptInvalidError('PUSHDATA2: missing data length')
datasize = self[i] + (self[i + 1] << 8)
i += 2
elif opcode == OP_PUSHDATA4:
pushdata_type = 'PUSHDATA4'
if i + 3 >= len(self):
raise CScriptInvalidError('PUSHDATA4: missing data length')
datasize = self[i] + (self[i + 1] << 8) + (self[i + 2] << 16) + (self[i + 3] << 24)
i += 4
else:
assert False # shouldn't happen
data = bytes(self[i:i + datasize])
# Check for truncation
if len(data) < datasize:
raise CScriptTruncatedPushDataError('%s: truncated data' % pushdata_type, data)
i += datasize
yield (opcode, data, sop_idx)
def __iter__(self):
"""'Cooked' iteration
Returns either a CScriptOP instance, an integer, or bytes, as
appropriate.
See raw_iter() if you need to distinguish the different possible
PUSHDATA encodings.
"""
for (opcode, data, sop_idx) in self.raw_iter():
if data is not None:
yield data
else:
opcode = CScriptOp(opcode)
if opcode.is_small_int():
yield opcode.decode_op_n()
else:
yield CScriptOp(opcode)
def __repr__(self):
def _repr(o):
if isinstance(o, bytes):
return "x('%s')" % o.hex()
else:
return repr(o)
ops = []
i = iter(self)
while True:
op = None
try:
op = _repr(next(i))
except CScriptTruncatedPushDataError as err:
op = '%s...<ERROR: %s>' % (_repr(err.data), err)
break
except CScriptInvalidError as err:
op = '<ERROR: %s>' % err
break
except StopIteration:
break
finally:
if op is not None:
ops.append(op)
return "CScript([%s])" % ', '.join(ops)
def GetSigOpCount(self, fAccurate):
"""Get the SigOp count.
fAccurate - Accurately count CHECKMULTISIG, see BIP16 for details.
Note that this is consensus-critical.
"""
n = 0
lastOpcode = OP_INVALIDOPCODE
for (opcode, data, sop_idx) in self.raw_iter():
if opcode in (OP_CHECKSIG, OP_CHECKSIGVERIFY):
n += 1
elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY):
if fAccurate and (OP_1 <= lastOpcode <= OP_16):
n += opcode.decode_op_n()
else:
n += 20
lastOpcode = opcode
return n
SIGHASH_DEFAULT = 0 # Taproot-only default, semantics same as SIGHASH_ALL
SIGHASH_ALL = 1
SIGHASH_NONE = 2
SIGHASH_SINGLE = 3
SIGHASH_ANYONECANPAY = 0x80
def FindAndDelete(script, sig):
"""Consensus critical, see FindAndDelete() in Electron codebase"""
r = b''
last_sop_idx = sop_idx = 0
skip = True
for (opcode, data, sop_idx) in script.raw_iter():
if not skip:
r += script[last_sop_idx:sop_idx]
last_sop_idx = sop_idx
if script[sop_idx:sop_idx + len(sig)] == sig:
skip = True
else:
skip = False
if not skip:
r += script[last_sop_idx:]
return CScript(r)
def LegacySignatureHash(script, txTo, inIdx, hashtype):
"""Consensus-correct SignatureHash
Returns (hash, err) to precisely match the consensus-critical behavior of
the SIGHASH_SINGLE bug. (inIdx is *not* checked for validity)
"""
HASH_ONE = b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
if inIdx >= len(txTo.vin):
return (HASH_ONE, "inIdx %d out of range (%d)" % (inIdx, len(txTo.vin)))
txtmp = CTransaction(txTo)
for txin in txtmp.vin:
txin.scriptSig = b''
txtmp.vin[inIdx].scriptSig = FindAndDelete(script, CScript([OP_CODESEPARATOR]))
if (hashtype & 0x1f) == SIGHASH_NONE:
txtmp.vout = []
for i in range(len(txtmp.vin)):
if i != inIdx:
txtmp.vin[i].nSequence = 0
elif (hashtype & 0x1f) == SIGHASH_SINGLE:
outIdx = inIdx
if outIdx >= len(txtmp.vout):
return (HASH_ONE, "outIdx %d out of range (%d)" % (outIdx, len(txtmp.vout)))
tmp = txtmp.vout[outIdx]
txtmp.vout = []
for _ in range(outIdx):
txtmp.vout.append(CTxOut(-1))
txtmp.vout.append(tmp)
for i in range(len(txtmp.vin)):
if i != inIdx:
txtmp.vin[i].nSequence = 0
if hashtype & SIGHASH_ANYONECANPAY:
tmp = txtmp.vin[inIdx]
txtmp.vin = []
txtmp.vin.append(tmp)
s = txtmp.serialize_without_witness()
s += struct.pack(b"<I", hashtype)
hash = hash256(s)
return (hash, None)
# TODO: Allow cached hashPrevouts/hashSequence/hashOutputs to be provided.
# Performance optimization probably not necessary for python tests, however.
# Note that this corresponds to sigversion == 1 in EvalScript, which is used
# for version 0 witnesses.
def SegwitV0SignatureHash(script, txTo, inIdx, hashtype, amount):
hashPrevouts = 0
hashSequence = 0
hashOutputs = 0
if not (hashtype & SIGHASH_ANYONECANPAY):
serialize_prevouts = bytes()
for i in txTo.vin:
serialize_prevouts += i.prevout.serialize()
hashPrevouts = uint256_from_str(hash256(serialize_prevouts))
if (not (hashtype & SIGHASH_ANYONECANPAY) and (hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
serialize_sequence = bytes()
for i in txTo.vin:
serialize_sequence += struct.pack("<I", i.nSequence)
hashSequence = uint256_from_str(hash256(serialize_sequence))
if ((hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
serialize_outputs = bytes()
for o in txTo.vout:
serialize_outputs += o.serialize()
hashOutputs = uint256_from_str(hash256(serialize_outputs))
elif ((hashtype & 0x1f) == SIGHASH_SINGLE and inIdx < len(txTo.vout)):
serialize_outputs = txTo.vout[inIdx].serialize()
hashOutputs = uint256_from_str(hash256(serialize_outputs))
ss = bytes()
ss += struct.pack("<i", txTo.nVersion)
ss += ser_uint256(hashPrevouts)
ss += ser_uint256(hashSequence)
ss += txTo.vin[inIdx].prevout.serialize()
ss += ser_string(script)
ss += struct.pack("<q", amount)
ss += struct.pack("<I", txTo.vin[inIdx].nSequence)
ss += ser_uint256(hashOutputs)
ss += struct.pack("<i", txTo.nLockTime)
ss += struct.pack("<I", hashtype)
return hash256(ss)
class TestFrameworkScript(unittest.TestCase):
def test_bn2vch(self):
self.assertEqual(bn2vch(0), bytes([]))
self.assertEqual(bn2vch(1), bytes([0x01]))
self.assertEqual(bn2vch(-1), bytes([0x81]))
self.assertEqual(bn2vch(0x7F), bytes([0x7F]))
self.assertEqual(bn2vch(-0x7F), bytes([0xFF]))
self.assertEqual(bn2vch(0x80), bytes([0x80, 0x00]))
self.assertEqual(bn2vch(-0x80), bytes([0x80, 0x80]))
self.assertEqual(bn2vch(0xFF), bytes([0xFF, 0x00]))
self.assertEqual(bn2vch(-0xFF), bytes([0xFF, 0x80]))
self.assertEqual(bn2vch(0x100), bytes([0x00, 0x01]))
self.assertEqual(bn2vch(-0x100), bytes([0x00, 0x81]))
self.assertEqual(bn2vch(0x7FFF), bytes([0xFF, 0x7F]))
self.assertEqual(bn2vch(-0x8000), bytes([0x00, 0x80, 0x80]))
self.assertEqual(bn2vch(-0x7FFFFF), bytes([0xFF, 0xFF, 0xFF]))
self.assertEqual(bn2vch(0x80000000), bytes([0x00, 0x00, 0x00, 0x80, 0x00]))
self.assertEqual(bn2vch(-0x80000000), bytes([0x00, 0x00, 0x00, 0x80, 0x80]))
self.assertEqual(bn2vch(0xFFFFFFFF), bytes([0xFF, 0xFF, 0xFF, 0xFF, 0x00]))
self.assertEqual(bn2vch(123456789), bytes([0x15, 0xCD, 0x5B, 0x07]))
self.assertEqual(bn2vch(-54321), bytes([0x31, 0xD4, 0x80]))
def test_cscriptnum_encoding(self):
# round-trip negative and multi-byte CScriptNums
values = [0, 1, -1, -2, 127, 128, -255, 256, (1 << 15) - 1, -(1 << 16), (1 << 24) - 1, (1 << 31), 1 - (1 << 32), 1 << 40, 1500, -1500]
for value in values:
self.assertEqual(CScriptNum.decode(CScriptNum.encode(CScriptNum(value))), value)
def TaprootSignatureHash(txTo, spent_utxos, hash_type, input_index = 0, scriptpath = False, script = CScript(), codeseparator_pos = -1, annex = None, leaf_ver = LEAF_VERSION_TAPSCRIPT):
assert (len(txTo.vin) == len(spent_utxos))
assert (input_index < len(txTo.vin))
out_type = SIGHASH_ALL if hash_type == 0 else hash_type & 3
in_type = hash_type & SIGHASH_ANYONECANPAY
spk = spent_utxos[input_index].scriptPubKey
ss = bytes([0, hash_type]) # epoch, hash_type
ss += struct.pack("<i", txTo.nVersion)
ss += struct.pack("<I", txTo.nLockTime)
if in_type != SIGHASH_ANYONECANPAY:
ss += sha256(b"".join(i.prevout.serialize() for i in txTo.vin))
ss += sha256(b"".join(struct.pack("<q", u.nValue) for u in spent_utxos))
ss += sha256(b"".join(ser_string(u.scriptPubKey) for u in spent_utxos))
ss += sha256(b"".join(struct.pack("<I", i.nSequence) for i in txTo.vin))
if out_type == SIGHASH_ALL:
ss += sha256(b"".join(o.serialize() for o in txTo.vout))
spend_type = 0
if annex is not None:
spend_type |= 1
if (scriptpath):
spend_type |= 2
ss += bytes([spend_type])
if in_type == SIGHASH_ANYONECANPAY:
ss += txTo.vin[input_index].prevout.serialize()
ss += struct.pack("<q", spent_utxos[input_index].nValue)
ss += ser_string(spk)
ss += struct.pack("<I", txTo.vin[input_index].nSequence)
else:
ss += struct.pack("<I", input_index)
if (spend_type & 1):
ss += sha256(ser_string(annex))
if out_type == SIGHASH_SINGLE:
if input_index < len(txTo.vout):
ss += sha256(txTo.vout[input_index].serialize())
else:
ss += bytes(0 for _ in range(32))
if (scriptpath):
ss += TaggedHash("TapLeaf", bytes([leaf_ver]) + ser_string(script))
ss += bytes([0])
ss += struct.pack("<i", codeseparator_pos)
assert len(ss) == 175 - (in_type == SIGHASH_ANYONECANPAY) * 49 - (out_type != SIGHASH_ALL and out_type != SIGHASH_SINGLE) * 32 + (annex is not None) * 32 + scriptpath * 37
return TaggedHash("TapSighash", ss)
def taproot_tree_helper(scripts):
if len(scripts) == 0:
return ([], bytes(0 for _ in range(32)))
if len(scripts) == 1:
# One entry: treat as a leaf
script = scripts[0]
assert(not callable(script))
if isinstance(script, list):
return taproot_tree_helper(script)
assert(isinstance(script, tuple))
version = LEAF_VERSION_TAPSCRIPT
name = script[0]
code = script[1]
if len(script) == 3:
version = script[2]
assert version & 1 == 0
assert isinstance(code, bytes)
h = TaggedHash("TapLeaf", bytes([version]) + ser_string(code))
if name is None:
return ([], h)
return ([(name, version, code, bytes())], h)
elif len(scripts) == 2 and callable(scripts[1]):
# Two entries, and the right one is a function
left, left_h = taproot_tree_helper(scripts[0:1])
right_h = scripts[1](left_h)
left = [(name, version, script, control + right_h) for name, version, script, control in left]
right = []
else:
# Two or more entries: descend into each side
split_pos = len(scripts) // 2
left, left_h = taproot_tree_helper(scripts[0:split_pos])
right, right_h = taproot_tree_helper(scripts[split_pos:])
left = [(name, version, script, control + right_h) for name, version, script, control in left]
right = [(name, version, script, control + left_h) for name, version, script, control in right]
if right_h < left_h:
right_h, left_h = left_h, right_h
h = TaggedHash("TapBranch", left_h + right_h)
return (left + right, h)
TaprootInfo = namedtuple("TaprootInfo", "scriptPubKey,inner_pubkey,negflag,tweak,leaves")
TaprootLeafInfo = namedtuple("TaprootLeafInfo", "script,version,merklebranch")
def taproot_construct(pubkey, scripts=None):
"""Construct a tree of Taproot spending conditions
pubkey: an ECPubKey object for the internal pubkey
scripts: a list of items; each item is either:
- a (name, CScript) tuple
- a (name, CScript, leaf version) tuple
- another list of items (with the same structure)
- a function, which specifies how to compute the hashing partner
in function of the hash of whatever it is combined with
Returns: script (sPK or redeemScript), tweak, {name:(script, leaf version, negation flag, innerkey, merklepath), ...}
"""
if scripts is None:
scripts = []
ret, h = taproot_tree_helper(scripts)
tweak = TaggedHash("TapTweak", pubkey + h)
tweaked, negated = tweak_add_pubkey(pubkey, tweak)
leaves = dict((name, TaprootLeafInfo(script, version, merklebranch)) for name, version, script, merklebranch in ret)
return TaprootInfo(CScript([OP_1, tweaked]), pubkey, negated + 0, tweak, leaves)
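# Illustrative usage sketch (commented out; `internal_pubkey` and the leaf names are
# hypothetical placeholders, and the internal pubkey must be in whatever form this
# helper expects per its docstring):
#
#   scripts = [
#       ("csa", CScript([OP_1])),                           # (name, CScript) leaf
#       ("alt", CScript([OP_1]), LEAF_VERSION_TAPSCRIPT),   # (name, CScript, leaf version) leaf
#   ]
#   info = taproot_construct(internal_pubkey, scripts)
#   spk = info.scriptPubKey          # tweaked v1 witness program
#   leaf = info.leaves["csa"]        # TaprootLeafInfo(script, version, merklebranch)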
def is_op_success(o):
return o == 0x50 or o == 0x62 or o == 0x89 or o == 0x8a or o == 0x8d or o == 0x8e or (o >= 0x7e and o <= 0x81) or (o >= 0x83 and o <= 0x86) or (o >= 0x95 and o <= 0x99) or (o >= 0xbb and o <= 0xfe)
|
py | 1a30fffb107d8e9918a26f257e964aec9b855a08 | # num = int(input())
#
# if num > 5:
# print(">5")
#
# elif num < 5:
# print("<5")
#
# elif num == 5:
# print("=5")
#
# else:
# print("none")
#
# ---------------------------------------
day = input()
print("Enter number: ")
if day == "Monday":
num1 = float(input())
else:
num2 = float(input())
|
py | 1a3101290aefac2c3fe498e530eaa3edb98b8225 | from django.contrib.gis.sitemaps import KMLSitemap, KMZSitemap
from .models import City, Country
sitemaps = {'kml': KMLSitemap([City, Country]),
'kmz': KMZSitemap([City, Country]),
}
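# One plausible way to wire these up in a urls.py (illustrative only; assumes the
# stock django.contrib.sitemaps.views.sitemap view):
#
#   from django.contrib.sitemaps.views import sitemap
#   from django.urls import path
#
#   urlpatterns = [
#       path("sitemap.xml", sitemap, {"sitemaps": sitemaps}),
#   ]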
|
py | 1a31015e3e28cb3d3542d5909701e2db8a488feb | from aiohttp import web
from utils.utils import *
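# Illustrative request/response shape for the handler below (route wiring and the
# example IDs are hypothetical):
#
#   GET /members?ids=123456789012345678,234567890123456789
#   -> {"member_data": [{"name": "...", "id": 123, "discriminator": "0001"}, ...]}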
def get_members(request, client):
try:
role_ids = request.query["ids"].split(",")
guild = client.get_guild(client.config["main_guild_id"])
roles = [get(guild.roles, id=int(role_id)) for role_id in role_ids]
members = [role.members for role in roles]
except KeyError:
return web.json_response({"error": "You did not provide the correct query param ids"})
except AttributeError:
return web.json_response({"error": "Invalid Guild ID or Role ID provided"})
member_data = [
{
"name": member.name,
"id": member.id,
"discriminator": member.discriminator,
        } for role_members in members for member in role_members
]
return web.json_response({"member_data": member_data}) |
py | 1a310169466228b42bef45a2100691bcc0b57baa |
import os
import cv2
cascPath = "./haarcascades/haarcascade_frontalface_alt.xml"
input_dir = './lfw'
output_dir = './other_faces'
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# classifiers
faceCascade = cv2.CascadeClassifier(cascPath)
index = 1
for (path,dirnames,filenames) in os.walk(input_dir):
for filename in filenames:
if filename.endswith('.jpg'):
            print('Processing picture %s' % index)
image = cv2.imread(path + '/' + filename)
gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
# Detect faces in the image
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.3,
minNeighbors=5,
minSize=(30, 30)
)
# Draw a rectangle around the faces
for (x, y, w, h) in faces:
cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)
                # Crop the detected face into its own variable so `image`
                # still refers to the full frame for any remaining faces
                face_img = image[y:y+h, x:x+w]
                face_img = cv2.resize(face_img, (64, 64))
                cv2.imshow('image', face_img)
                cv2.imwrite(output_dir + '/' + str(index) + '.jpg', face_img)
index +=1
if cv2.waitKey(30) & 0xFF == ord('q'):
break
cv2.destroyAllWindows()
|
py | 1a3102afdbde15af0544f1b7b4352399787fd819 | #######################
# Dennis MUD #
# remake_item.py #
# Copyright 2018-2020 #
# Michael D. Reiley #
#######################
# **********
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# **********
NAME = "remake item"
CATEGORIES = ["items"]
USAGE = "remake item <item_id>"
DESCRIPTION = """Resets the item <item_id> in your inventory.
Name and ID are untouched, you must be the primary owner of the item.
Owners are reset to the primary owner only. Wizards can remake any item.
Ex. `remake item 3`"""
def COMMAND(console, args):
# Perform initial checks.
if not COMMON.check(NAME, console, args, argmin=1, argmax=1):
return False
# Perform argument type checks and casts.
itemid = COMMON.check_argtypes(NAME, console, args, checks=[[0, int]], retargs=0)
if itemid is None:
return False
# Lookup the target item and perform item checks.
thisitem = COMMON.check_item(NAME, console, itemid, owner=True, primary=True, holding=True)
if not thisitem:
return False
# remake the item.
if len(thisitem["container"]["inventory"])>0:
console.msg("{0} is not empty, please empty it before remaking.".format(thisitem["name"]))
return False
if thisitem["duplified"]:
console.msg("Please unduplify this item before remaking.")
return False
thisitem["desc"] = ""
thisitem["action"] = ""
thisitem["message"] = ""
thisitem["mlang"] = None
thisitem["lang"] = None
thisitem["owners"] = [console.user["name"]]
thisitem["glued"] = console.database.defaults["items"]["glued"]
thisitem["hidden"] = False
thisitem["truehide"] = False
thisitem["chance"] = 1
thisitem["container"]["enabled"] = False
thisitem["container"]["inventory"] = []
thisitem["telekey"] = None
console.database.upsert_item(thisitem)
# Finished.
console.msg("{0}: Done.".format(NAME))
return True
|
py | 1a3103ac288270399cc51fa60266f9cce7700cd7 | import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
import numpy as np
import pandas as pd
def to_H2OFrame():
# TODO: negative testing
## 1. list
# a. single col
python_obj = [1, "a", 2.5, "bcd", 0]
the_frame = h2o.H2OFrame(python_obj)
pyunit_utils.check_dims_values(python_obj, the_frame, rows=1, cols=5)
# b. 1 col, 5 rows
python_obj = [[1], [2], [3.7], [8], [9]]
the_frame = h2o.H2OFrame(python_obj)
pyunit_utils.check_dims_values(python_obj, the_frame, rows=5, cols=1)
# c. 5 cols, 3 rows
python_obj = [[6,7,8,9,10], [1,2,3,4,5], [3,2,2,2,2]]
the_frame = h2o.H2OFrame(python_obj)
pyunit_utils.check_dims_values(python_obj, the_frame, rows=3, cols=5)
python_obj = [["a", "b"], ["c", "d"]]
the_frame = h2o.H2OFrame(python_obj)
pyunit_utils.check_dims_values(python_obj, the_frame, rows=2, cols=2)
# d. jagged
python_obj = [[6,7,8,9,10], [1,2,3,4], [3,2,2]]
the_frame = h2o.H2OFrame(python_obj)
pyunit_utils.check_dims_values(python_obj, the_frame, rows=3, cols=5, dim_only=True)
## 2. tuple
# a. single row
python_obj = (1, "a", 2.5, "bcd", 0)
the_frame = h2o.H2OFrame(python_obj)
pyunit_utils.check_dims_values(python_obj, the_frame, rows=1, cols=5)
# b. single column
python_obj = ((1,), (2,), (3.7,), (8,), (9,))
the_frame = h2o.H2OFrame(python_obj)
pyunit_utils.check_dims_values(python_obj, the_frame, rows=5, cols=1)
# c. multiple rows, columns
python_obj = ((6,7,8,9,10), (1,2,3,4,5), (3,2,2,2,2))
the_frame = h2o.H2OFrame(python_obj)
pyunit_utils.check_dims_values(python_obj, the_frame, rows=3, cols=5)
# d. jagged
python_obj = ((6,7,8,9,10), (1,2,3,4), (3,2,2))
the_frame = h2o.H2OFrame(python_obj)
pyunit_utils.check_dims_values(python_obj, the_frame, rows=3, cols=5, dim_only=True)
## 3. list-tuple mixed
# a. single column
python_obj = ((1,), [2], (3.7,), [8], (9,))
the_frame = h2o.H2OFrame(python_obj)
pyunit_utils.check_dims_values(python_obj, the_frame, rows=5, cols=1)
# b. single column
python_obj = [(1,), [2], (3.7,), [8], (9,)]
the_frame = h2o.H2OFrame(python_obj)
pyunit_utils.check_dims_values(python_obj, the_frame, rows=5, cols=1)
# c. multiple rows, columns
python_obj = ([6,7,8,9,10], (1,2,3,4,5), [3,2,2,2,2])
the_frame = h2o.H2OFrame(python_obj)
pyunit_utils.check_dims_values(python_obj, the_frame, rows=3, cols=5)
# d. multiple rows, columns
python_obj = [(6,7,8,9,10), [1,2,3,4,5], (3,2,2,2,2)]
the_frame = h2o.H2OFrame(python_obj)
pyunit_utils.check_dims_values(python_obj, the_frame, rows=3, cols=5)
# e. jagged
python_obj = [(6,7,8,9,10), [1,2,3,4], (3,2,2)]
the_frame = h2o.H2OFrame(python_obj)
pyunit_utils.check_dims_values(python_obj, the_frame, rows=3, cols=5, dim_only=True)
# f. jagged
python_obj = ((6,7,8,9,10), [1,2,3,4], (3,2,2))
the_frame = h2o.H2OFrame(python_obj)
pyunit_utils.check_dims_values(python_obj, the_frame, rows=3, cols=5, dim_only=True)
# 4. dictionary
# a. single row
python_obj = {"a":1, "b":"a", "c":2.5, "d":"bcd", "e":0}
the_frame = h2o.H2OFrame(python_obj)
pyunit_utils.check_dims_values(python_obj, the_frame, rows=1, cols=5)
assert set(the_frame.names) == set(python_obj.keys()), "H2OFrame header is hosed. Got {0}, but should have got " \
"{1}".format(the_frame.names, python_obj.keys())
python_obj = {"a":[1], "b":["a"], "c":[2.5], "d":["bcd"], "e":[0]}
the_frame = h2o.H2OFrame(python_obj)
pyunit_utils.check_dims_values(python_obj, the_frame, rows=1, cols=5)
assert set(the_frame.names) == set(python_obj.keys()), "H2OFrame header is hosed. Got {0}, but should have got " \
"{1}".format(the_frame.names, python_obj.keys())
# b. single column
python_obj = {"foo":(1,2,3.7,8,9)}
the_frame = h2o.H2OFrame(python_obj)
pyunit_utils.check_dims_values(python_obj, the_frame, rows=5, cols=1)
assert set(the_frame.names) == set(python_obj.keys()), "H2OFrame header is hosed. Got {0}, but should have got " \
"{1}".format(the_frame.names, python_obj.keys())
# c. multiple rows, columns
python_obj = {"foo":[6,7,8,9,10], "bar":(1,2,3,4,5), "baz":(3,2,2,2,2)}
the_frame = h2o.H2OFrame(python_obj)
pyunit_utils.check_dims_values(python_obj, the_frame, rows=5, cols=3)
assert set(the_frame.names) == set(python_obj.keys()), "H2OFrame header is hosed. Got {0}, but should have got " \
"{1}".format(the_frame.names, python_obj.keys())
# d. jagged
python_obj = {"foo":(6,7), "bar":(1,2,3,4), "baz":(3,2,2)}
the_frame = h2o.H2OFrame(python_obj)
pyunit_utils.check_dims_values(python_obj, the_frame, rows=4, cols=3, dim_only=True)
assert set(the_frame.names) == set(python_obj.keys()), "H2OFrame header is hosed. Got {0}, but should have got " \
"{1}".format(the_frame.names, python_obj.keys())
# 5. numpy.ndarray
# a. single row
python_obj = np.array([1, "a", 2.5, "bcd", 0])
the_frame = h2o.H2OFrame(python_obj)
pyunit_utils.check_dims_values(python_obj, the_frame, rows=1, cols=5)
# b. single column
python_obj = np.array([[1], [2], [3.7], [8], [9]])
the_frame = h2o.H2OFrame(python_obj)
pyunit_utils.check_dims_values(python_obj, the_frame, rows=5, cols=1)
# c. multiple rows, columns
python_obj = np.array([[6,7,8,9,10], [1,2,3,4,5], [3,2,2,2,2]])
the_frame = h2o.H2OFrame(python_obj)
pyunit_utils.check_dims_values(python_obj, the_frame, rows=3, cols=5)
# d. jagged
python_obj = np.array([[6,7,8,9,10], [1,2,3,4], [3,2,2]])
the_frame = h2o.H2OFrame(python_obj)
pyunit_utils.check_dims_values(python_obj, the_frame, rows=3, cols=5)
## 6. pandas.DataFrame
# a. single row
python_obj = pd.DataFrame({'foo' : pd.Series([1]), 'bar' : pd.Series([6]), 'baz' : pd.Series(["a"]) })
the_frame = h2o.H2OFrame(python_obj)
pyunit_utils.check_dims_values(python_obj, the_frame, rows=1, cols=3)
# b. single column
python_obj = pd.DataFrame({'foo' : pd.Series([1, 2, 3, 7.8, 9])})
the_frame = h2o.H2OFrame(python_obj)
pyunit_utils.check_dims_values(python_obj, the_frame, rows=5, cols=1)
# c. multiple rows, columns
python_obj = pd.DataFrame({'foo' : pd.Series([6,7,8,9,10]), 'bar' : pd.Series([1,2,3,4,5]),
'baz' : pd.Series([3,2,2,2,2])})
the_frame = h2o.H2OFrame(python_obj)
pyunit_utils.check_dims_values(python_obj, the_frame, rows=5, cols=3)
# d. jagged
python_obj = pd.DataFrame({'foo' : pd.Series([6,7,8]), 'bar' : pd.Series([1,2,3,4,5]), 'baz' : pd.Series([3,2,2,2])})
the_frame = h2o.H2OFrame(python_obj)
pyunit_utils.check_dims_values(python_obj, the_frame, rows=5, cols=3)
if __name__ == "__main__":
pyunit_utils.standalone_test(to_H2OFrame)
else:
to_H2OFrame()
|
py | 1a3103c1fcfad091e66562acbb5d1c324b6e4c8f | # -*- coding: utf-8 -*-
"""
mslib.msui.performance_settings
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module defines the performance settings dialog
This file is part of mss.
:copyright: Copyright 2017 Joern Ungermann
:copyright: Copyright 2017-2022 by the mss team, see AUTHORS.
:license: APACHE-2.0, see LICENSE for details.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
from PyQt5 import QtCore, QtWidgets
from mslib.utils import FatalUserError
from mslib.msui import aircrafts, constants
from mslib.msui.mss_qt import get_open_filename
from mslib.msui.mss_qt import ui_performance_dockwidget as ui_dw
DEFAULT_PERFORMANCE = {
"aircraft": aircrafts.SimpleAircraft(aircrafts.AIRCRAFT_DUMMY),
"visible": False,
"takeoff_weight": 0,
"takeoff_time": QtCore.QDateTime.currentDateTimeUtc(),
"empty_weight": 0,
"ceiling_alt": [410],
}
class MSS_PerformanceSettingsWidget(QtWidgets.QWidget, ui_dw.Ui_PerformanceDockWidget):
"""
This class implements setting the performance settings as a dockable widget.
"""
def __init__(self, parent=None, view=None, settings_dict=None):
"""
Arguments:
parent -- Qt widget that is parent to this widget.
view -- reference to mpl canvas class
settings_dict -- dictionary containing topview options
"""
super(MSS_PerformanceSettingsWidget, self).__init__(parent)
self.setupUi(self)
self.view = view
self.parent = parent
if not settings_dict:
settings_dict = DEFAULT_PERFORMANCE
self.aircraft = settings_dict["aircraft"]
self.lbAircraftName.setText(self.aircraft.name)
self.cbShowPerformance.setChecked(settings_dict["visible"])
self.dsbTakeoffWeight.setValue(settings_dict["takeoff_weight"])
self.dsbEmptyWeight.setValue(
settings_dict.get("empty_weight", settings_dict["takeoff_weight"] - settings_dict.get("fuel", 0)))
self.dteTakeoffTime.setDateTime(settings_dict["takeoff_time"])
# Connecting signals
self.pbLoadPerformance.clicked.connect(self.load_performance)
self.cbShowPerformance.stateChanged.connect(self.update_parent_performance)
self.dteTakeoffTime.dateTimeChanged.connect(self.update_parent_performance)
self.dsbTakeoffWeight.valueChanged.connect(self.update_parent_performance)
self.dsbEmptyWeight.valueChanged.connect(self.update_parent_performance)
def get_settings(self):
"""
Encapsulates GUI selections in a python dictionary.
:return:
Dictionary of all setting informations
"""
settings_dict = {
"aircraft": self.aircraft,
"visible": self.cbShowPerformance.isChecked(),
"takeoff_time": self.dteTakeoffTime.dateTime(),
"takeoff_weight": self.dsbTakeoffWeight.value(),
"empty_weight": self.dsbEmptyWeight.value()
}
return settings_dict
def update_parent_performance(self):
self.parent.setPerformance(self.get_settings())
def load_performance(self):
"""
Gets a filename for a JSON file specifying aircraft performance and initializes an SimpleAircraft model.
"""
filename = get_open_filename(
self, "Open Aircraft Performance JSON File", constants.MSS_CONFIG_PATH,
"Performance File (*.json)", pickertag="filepicker_default")
if filename is not None:
try:
with open(filename) as tf:
performance = json.load(tf)
self.aircraft = aircrafts.SimpleAircraft(performance)
self.lbAircraftName.setText(self.aircraft.name)
self.dsbTakeoffWeight.setValue(self.aircraft.takeoff_weight)
if not any(hasattr(self.aircraft, _x) for _x in ("fuel", "empty_weight")):
raise KeyError("empty_weight")
if hasattr(self.aircraft, "empty_weight"):
self.dsbEmptyWeight.setValue(self.aircraft.empty_weight)
else:
self.dsbEmptyWeight.setValue(self.aircraft.takeoff_weight - self.aircraft.fuel)
self.update_parent_performance()
except KeyError as ex:
QtWidgets.QMessageBox.critical(self, self.tr("Performance JSON Load"),
self.tr(f"JSON File missing '{ex}' entry"))
except (FatalUserError, ValueError) as ex:
QtWidgets.QMessageBox.critical(self, self.tr("Performance JSON Load"),
self.tr(f"JSON File has Syntax Problems:\n{ex}"))
|
py | 1a31053b4ca3d68aa8882dcde6023d66256beb2d | from decimal import Decimal
import graphene
from django_filters import FilterSet, OrderingFilter
from graphene import relay
from graphene_django.filter import DjangoFilterConnectionField
from graphene_django.types import DjangoObjectType
from graphene_file_upload.scalars import Upload
from graphql import GraphQLError
from graphql_jwt.decorators import login_required
from graphql_relay.node.node import from_global_id, to_global_id
from .models import BilbyJob, Label, FileDownloadToken, BilbyJobUploadToken
from .status import JobStatus
from .types import JobStatusType, BilbyJobCreationResult, JobParameterInput, JobParameterOutput, JobIniInput, \
JobDetailsInput
from .utils.db_search.db_search import perform_db_search
from .utils.derive_job_status import derive_job_status
from .utils.gen_parameter_output import generate_parameter_output
from .utils.jobs.request_file_download_id import request_file_download_ids
from .utils.jobs.request_job_filter import request_job_filter
from .views import create_bilby_job, update_bilby_job, create_bilby_job_from_ini_string, upload_bilby_job
class LabelType(DjangoObjectType):
class Meta:
model = Label
interfaces = (relay.Node,)
class UserBilbyJobFilter(FilterSet):
class Meta:
model = BilbyJob
fields = '__all__'
order_by = OrderingFilter(
fields=(
('last_updated', 'lastUpdated'),
('name', 'name'),
)
)
@property
def qs(self):
return BilbyJob.user_bilby_job_filter(super(UserBilbyJobFilter, self).qs, self)
class PublicBilbyJobFilter(FilterSet):
class Meta:
model = BilbyJob
fields = '__all__'
order_by = OrderingFilter(
fields=(
('last_updated', 'last_updated'),
('name', 'name'),
)
)
@property
def qs(self):
return BilbyJob.public_bilby_job_filter(super(PublicBilbyJobFilter, self).qs, self)
class BilbyJobNode(DjangoObjectType):
class Meta:
model = BilbyJob
convert_choices_to_enum = False
interfaces = (relay.Node,)
job_status = graphene.Field(JobStatusType)
last_updated = graphene.String()
params = graphene.Field(JobParameterOutput)
labels = graphene.List(LabelType)
@classmethod
def get_queryset(parent, queryset, info):
return BilbyJob.bilby_job_filter(queryset, info)
def resolve_last_updated(parent, info):
return parent.last_updated.strftime("%Y-%m-%d %H:%M:%S UTC")
def resolve_params(parent, info):
return generate_parameter_output(parent)
def resolve_labels(parent, info):
return parent.labels.all()
def resolve_job_status(parent, info):
# Uploaded jobs are always complete
if parent.is_uploaded_job:
return {
"name": JobStatus.display_name(JobStatus.COMPLETED),
"number": JobStatus.COMPLETED,
"date": parent.creation_time
}
try:
# Get job details from the job controller
_, jc_jobs = request_job_filter(
info.context.user.user_id,
ids=[parent.job_controller_id]
)
status_number, status_name, status_date = derive_job_status(jc_jobs[0]["history"])
return {
"name": status_name,
"number": status_number,
"date": status_date.strftime("%Y-%m-%d %H:%M:%S UTC")
}
except Exception:
return {
"name": "Unknown",
"number": 0,
"data": "Unknown"
}
class UserDetails(graphene.ObjectType):
username = graphene.String()
def resolve_username(parent, info):
return "Todo"
class BilbyResultFile(graphene.ObjectType):
path = graphene.String()
is_dir = graphene.Boolean()
file_size = graphene.Decimal()
download_token = graphene.String()
class BilbyResultFiles(graphene.ObjectType):
class Meta:
interfaces = (relay.Node,)
class Input:
job_id = graphene.ID()
files = graphene.List(BilbyResultFile)
is_uploaded_job = graphene.Boolean()
class BilbyPublicJobNode(graphene.ObjectType):
user = graphene.String()
name = graphene.String()
job_status = graphene.Field(JobStatusType)
labels = graphene.List(LabelType)
description = graphene.String()
timestamp = graphene.String()
id = graphene.ID()
class BilbyPublicJobConnection(relay.Connection):
class Meta:
node = BilbyPublicJobNode
class GenerateBilbyJobUploadToken(graphene.ObjectType):
token = graphene.String()
class Query(object):
bilby_job = relay.Node.Field(BilbyJobNode)
bilby_jobs = DjangoFilterConnectionField(BilbyJobNode, filterset_class=UserBilbyJobFilter)
public_bilby_jobs = relay.ConnectionField(
BilbyPublicJobConnection,
search=graphene.String(),
time_range=graphene.String()
)
all_labels = graphene.List(LabelType)
bilby_result_files = graphene.Field(BilbyResultFiles, job_id=graphene.ID(required=True))
gwclouduser = graphene.Field(UserDetails)
generate_bilby_job_upload_token = graphene.Field(GenerateBilbyJobUploadToken)
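    # Example client query (illustrative; graphene's default camelCase field
    # conversion is assumed):
    #
    #   query {
    #     generateBilbyJobUploadToken {
    #       token
    #     }
    #   }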
@login_required
def resolve_generate_bilby_job_upload_token(self, info, **kwargs):
user = info.context.user
# Create a job upload token
token = BilbyJobUploadToken.create(user)
# Return the generated token
return GenerateBilbyJobUploadToken(token=str(token.token))
@login_required
def resolve_all_labels(self, info, **kwargs):
return Label.all()
@login_required
def resolve_public_bilby_jobs(self, info, **kwargs):
# Perform the database search
success, jobs = perform_db_search(info.context.user, kwargs)
if not success:
return []
# Parse the result in to graphql objects
result = []
for job in jobs:
bilby_job = BilbyJob.get_by_id(job['job']['id'], info.context.user)
result.append(
BilbyPublicJobNode(
user=f"{job['user']['firstName']} {job['user']['lastName']}",
name=job['job']['name'],
description=job['job']['description'],
job_status=JobStatusType(
name=JobStatus.display_name(
JobStatus.COMPLETED if bilby_job.is_uploaded_job else job['history'][0]['state']
),
number=JobStatus.COMPLETED if bilby_job.is_uploaded_job else job['history'][0]['state'],
date=bilby_job.creation_time if bilby_job.is_uploaded_job else job['history'][0]['timestamp']
),
labels=bilby_job.labels.all(),
timestamp=bilby_job.creation_time if bilby_job.is_uploaded_job else job['history'][0]['timestamp'],
id=to_global_id("BilbyJobNode", job['job']['id'])
)
)
# Nb. The perform_db_search function currently requests one extra record than kwargs['first'].
# This triggers the ArrayConnection used by returning the result array to correctly set
# hasNextPage correctly, such that infinite scroll works as expected.
return result
@login_required
def resolve_gwclouduser(self, info, **kwargs):
return info.context.user
@login_required
def resolve_bilby_result_files(self, info, **kwargs):
# Get the model id of the bilby job
_, job_id = from_global_id(kwargs.get("job_id"))
# Try to look up the job with the id provided
job = BilbyJob.get_by_id(job_id, info.context.user)
# Fetch the file list from the job controller
success, files = job.get_file_list()
if not success:
raise Exception("Error getting file list. " + str(files))
# Generate download tokens for the list of files
paths = [f['path'] for f in filter(lambda x: not x['isDir'], files)]
tokens = FileDownloadToken.create(job, paths)
# Generate a dict that can be used to query the generated tokens
token_dict = {tk.path: tk.token for tk in tokens}
# Build the resulting file list and send it back to the client
result = [
BilbyResultFile(
path=f["path"],
is_dir=f["isDir"],
file_size=Decimal(f["fileSize"]),
download_token=token_dict[f["path"]] if f["path"] in token_dict else None
)
for f in files
]
return BilbyResultFiles(
files=result,
is_uploaded_job=job.is_uploaded_job
)
class BilbyJobMutation(relay.ClientIDMutation):
class Input:
params = JobParameterInput()
result = graphene.Field(BilbyJobCreationResult)
@classmethod
@login_required
def mutate_and_get_payload(cls, root, info, params):
user = info.context.user
# Create the bilby job
bilby_job = create_bilby_job(user, params)
# Convert the bilby job id to a global id
job_id = to_global_id("BilbyJobNode", bilby_job.id)
# Return the bilby job id to the client
return BilbyJobMutation(
result=BilbyJobCreationResult(job_id=job_id)
)
class BilbyJobFromIniStringMutation(relay.ClientIDMutation):
class Input:
params = JobIniInput()
result = graphene.Field(BilbyJobCreationResult)
@classmethod
@login_required
def mutate_and_get_payload(cls, root, info, params):
user = info.context.user
# Create the bilby job
bilby_job = create_bilby_job_from_ini_string(user, params)
# Convert the bilby job id to a global id
job_id = to_global_id("BilbyJobNode", bilby_job.id)
# Return the bilby job id to the client
return BilbyJobFromIniStringMutation(
result=BilbyJobCreationResult(job_id=job_id)
)
class UpdateBilbyJobMutation(relay.ClientIDMutation):
class Input:
job_id = graphene.ID(required=True)
private = graphene.Boolean(required=False)
labels = graphene.List(graphene.String, required=False)
result = graphene.String()
@classmethod
@login_required
def mutate_and_get_payload(cls, root, info, **kwargs):
user = info.context.user
job_id = kwargs.pop("job_id")
# Update privacy of bilby job
message = update_bilby_job(from_global_id(job_id)[1], user, **kwargs)
# Return the bilby job id to the client
return UpdateBilbyJobMutation(
result=message
)
class GenerateFileDownloadIds(relay.ClientIDMutation):
class Input:
job_id = graphene.ID(required=True)
download_tokens = graphene.List(graphene.String, required=True)
result = graphene.List(graphene.String)
@classmethod
@login_required
def mutate_and_get_payload(cls, root, info, job_id, download_tokens):
user = info.context.user
# Get the job these file downloads are for
job = BilbyJob.get_by_id(from_global_id(job_id)[1], user)
# Verify the download tokens and get the paths
paths = FileDownloadToken.get_paths(job, download_tokens)
# Check that all tokens were found
if None in paths:
raise GraphQLError("At least one token was invalid or expired.")
# For uploaded jobs, we can just return the exact some download tokens - this function is basically a no-op
# for uploaded jobs
if job.is_uploaded_job:
return GenerateFileDownloadIds(
result=download_tokens
)
# Request the list of file download ids from the list of paths
# Only the original job author may generate a file download id
success, result = request_file_download_ids(
job,
paths
)
# Report the error if there is one
if not success:
raise GraphQLError(result)
# Return the list of file download ids
return GenerateFileDownloadIds(
result=result
)
class UploadBilbyJobMutation(relay.ClientIDMutation):
class Input:
upload_token = graphene.String()
details = JobDetailsInput()
job_file = Upload(required=True)
result = graphene.Field(BilbyJobCreationResult)
@classmethod
def mutate_and_get_payload(cls, root, info, upload_token, details, job_file):
# Get the token being used to perform the upload - this will return None if the token doesn't exist or
# is expired
token = BilbyJobUploadToken.get_by_token(upload_token)
if not token:
raise GraphQLError("Job upload token is invalid or expired.")
# Try uploading the bilby job
bilby_job = upload_bilby_job(token, details, job_file)
# Convert the bilby job id to a global id
job_id = to_global_id("BilbyJobNode", bilby_job.id)
# Return the bilby job id to the client
        return UploadBilbyJobMutation(
result=BilbyJobCreationResult(job_id=job_id)
)
class Mutation(graphene.ObjectType):
new_bilby_job = BilbyJobMutation.Field()
new_bilby_job_from_ini_string = BilbyJobFromIniStringMutation.Field()
update_bilby_job = UpdateBilbyJobMutation.Field()
generate_file_download_ids = GenerateFileDownloadIds.Field()
upload_bilby_job = UploadBilbyJobMutation.Field()
|
py | 1a310622a120177fc756747cbe7a63947853cc94 | from grpc.beta import implementations
import numpy
import traceback
import tensorflow as tf
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2
from flask_restplus import Resource, abort
from monocker_api.api.restplus import restplus_api
from monocker_api.api.models import getModel
from monocker_api.db.data_models import PredictionRequest
from monocker_api import settings
#==============================================================================
# helper functions
#==============================================================================
def getMonockerModelStub(host, port):
try:
channel = implementations.insecure_channel(host, int(port))
stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
except Exception as e:
print("===========================================================")
print("Encountered error while requesting gRPC connection.")
print("Error: ")
print(e)
traceback.print_exc()
print("===========================================================")
stub = None
return stub
def getServingRequest(model, payload):
request = predict_pb2.PredictRequest()
request.model_spec.name = model['model_name']
request.model_spec.signature_name = model['model_signature']
request.inputs['images'].CopyFrom(
tf.contrib.util.make_tensor_proto(
payload['model_input'],
shape=payload['model_input_shape']
)
)
return request
#==============================================================================
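# Illustrative example of the JSON body the /predict endpoint expects (field names
# are taken from the handlers below; the concrete values are hypothetical):
#
#   {
#       "model_name": "mnist",
#       "model_signature": "predict_images",
#       "model_input": [0.0, 0.1, 0.2],
#       "model_input_shape": [1, 3]
#   }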
#==============================================================================
# Models API
#==============================================================================
# define namespace
api = restplus_api.namespace(
'predict',
description="Operations related to requesting model evaluations"
)
# Define /models route handlers
@api.route('/')
class Models(Resource):
@api.response(501, 'Error in model computation')
@api.response(403, 'Could not connect to tf serving server')
@api.response(404, 'Model not found.')
@api.response(201, 'Successfully retrieved model evaluation.')
@api.expect(PredictionRequest, validate=False, required=True)
def post(self):
# get inputs
payload = restplus_api.payload
# get model
model = getModel(payload['model_name'])
if model is None:
return 'Model not found.', 404
# get request
model['model_signature'] = payload['model_signature']
serving_request = getServingRequest(model, payload)
# get stub
stub = getMonockerModelStub(model['ip_address'], model['port'])
if stub is None:
return 'Could not connect to tf serving server', 403
# make grpc prediction request then return results
try:
prediction = stub.Predict(serving_request, 5.0)
model_response = list(prediction.outputs['scores'].float_val)
return {'model_response': model_response}, 201
except Exception as e:
return str(e), 501
#============================================================================== |
py | 1a31065f212693721a6525e296f7ea11f5f304b5 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import torch
import torch.utils.data as data
from torchvision import transforms
import os
import pickle
import random
import pdb
import sys
import json
from PIL import Image
sys.path.insert(0, '.')
import numpy as np
from skimage import io
def Context_dataset(args, embedding_size):
# Random seed
np.random.seed(args.seed)
# Getting the classes and annotations
# ******
data_path = args.data_path
with open(data_path+'/Context/data/split_'+ str(args.split) +'.json','r') as fp:
gt_annotations = json.load(fp)
# Load Embedding according to OCR
if args.embedding == 'w2vec' or args.embedding == 'fasttext' or args.embedding == 'glove' or args.embedding == 'bert':
if args.ocr == 'google_ocr':
with open(data_path + '/Context/' + args.ocr + '/text_embeddings/Context_' + args.embedding + '.json', 'r') as fp:
text_embedding = json.load(fp)
else:
with open(data_path + '/Context/' + args.ocr + '/text_embeddings/Context_' + args.embedding + '.pickle','rb') as fp:
text_embedding = pickle.load(fp)
elif args.embedding =='phoc':
text_embedding = {'embedding':'phoc'}
elif args.embedding == 'fisher':
text_embedding = {'embedding':'fisher'}
else:
print('OCR SELECTED NOT IMPLEMENTED')
# Load Local features from Faster R-CNN VG
with open(args.data_path + '/Context/context_local_feats.npy', 'rb') as fp:
local_feats = np.load(fp, encoding='bytes')
# Create img_name to index of local features
with open(args.data_path + '/Context/context_local_feats_image_ids.txt', 'r') as fp:
image_ids = fp.readlines()
image_name2features_index = {}
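    # Each line of the ids file is assumed to look roughly like
    # "('some/path/image_name.jpg', 42)" (illustrative; inferred from the parsing below)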
for item in image_ids:
img_name = item.strip().split(',')[0].split('/')[-1].replace('\'', '')
idx = item.strip().split(',')[1].replace(')', '').replace(' ','')
image_name2features_index[img_name] = idx
# BBOXES LOADING FOR TEXT FEATURES
# Load BBOXES of Scene Text
with open(data_path + '/Context/google_ocr/bboxes/Context_bboxes.json', 'r') as fp:
text_bboxes = json.load(fp)
# Load BBOXES of Local Visual Features
with open(data_path + '/Context/context_bboxes.npy', 'rb') as fp:
local_bboxes = np.load(fp, encoding='bytes')
# Data Loaders
train_transform = transforms.Compose([
transforms.Resize((256, 256)),
transforms.RandomRotation(degrees=15),
transforms.ColorJitter(),
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
train_loader = Context_Train(args, gt_annotations, text_embedding, embedding_size, local_feats, image_name2features_index, text_bboxes, local_bboxes, train_transform)
test_transform = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
test_loader = Context_Test(args, gt_annotations, text_embedding, embedding_size, local_feats, image_name2features_index, text_bboxes, local_bboxes, test_transform)
return train_loader, test_loader, gt_annotations, text_embedding
class Context_Train(data.Dataset):
def __init__(self, args, gt_annotations, text_embedding, embedding_size, local_feats, image_name2features_index, text_bboxes, local_bboxes, transform=None):
self.args = args
self.gt_annotations = gt_annotations
self.text_embedding = text_embedding
self.embedding_size = embedding_size
self.transform = transform
self.image_list = list(gt_annotations['train'].keys())
self.image_name2features_index = image_name2features_index
self.local_feats = local_feats
self.text_bboxes = text_bboxes
self.local_bboxes = local_bboxes
def __len__(self):
return len(self.gt_annotations['train'])
def __getitem__(self, index):
data_path = self.args.data_path
assert index <= len(self), 'index range error'
image_name = self.image_list[index].rstrip()
image_path = data_path+'/Context/data/JPEGImages/' + image_name
img = Image.open(image_path).convert('RGB')
if self.transform:
img = self.transform(img)
img_class = self.gt_annotations['train'][image_name]
label = np.zeros(28)
label[int(img_class) - 1] = 1
label = torch.from_numpy(label)
label = label.type(torch.FloatTensor)
if self.args.embedding == 'w2vec' or self.args.embedding == 'fasttext' or self.args.embedding == 'glove' or self.args.embedding == 'bert':
text_embedding = np.asarray(self.text_embedding[image_name])
elif self.args.embedding == 'phoc':
with open (data_path + '/Context/yolo_phoc/'+image_name[:-3]+'json') as fp:
phocs = json.load(fp)
text_embedding = np.resize(phocs, (np.shape(phocs)[0], 604))
elif self.args.embedding == 'fisher':
if self.args.ocr == 'yolo_phoc':
relative_path = '/Context/old_fisher_vectors/'
elif self.args.ocr == 'e2e_mlt':
relative_path = '/Context/fasttext_fisher/'
else: print('Not Implemented')
with open (data_path + relative_path +image_name[:-3]+'json')as fp:
fisher_vector = json.load(fp)
text_embedding = np.resize(fisher_vector, (1, 38400))
# FISHER VECTORS DO NOT NEED MAX TEXTUAL
if self.args.embedding != 'fisher':
text_features = np.zeros((self.args.max_textual, self.embedding_size))
if np.shape(text_embedding)[0] == 0:
text_embedding = np.zeros((1,self.embedding_size))
elif np.shape(text_embedding)[0] > self.args.max_textual:
text_embedding = text_embedding[0:self.args.max_textual]
text_features[:len(text_embedding)] = text_embedding
else:
text_features = text_embedding
text_features = torch.from_numpy(text_features)
text_features = text_features.type(torch.FloatTensor)
# SCENE TEXT BBOXES ONLY FOR GOOGLE OCR
text_bboxes = np.asarray(self.text_bboxes[image_name])
if self.args.ocr == 'google_ocr':
text_bboxes_features = np.zeros((self.args.max_textual, 4))
if np.shape(text_bboxes)[0] == 0:
text_bboxes = np.zeros((1, 4))
elif np.shape(text_bboxes)[0] > self.args.max_textual:
text_bboxes = text_bboxes[0:self.args.max_textual]
text_bboxes_features[:len(text_bboxes)] = text_bboxes
else:
# NO BBOXES FOR OTHER OCRs
text_bboxes_features = np.zeros((self.args.max_textual, 4))
text_bboxes_features = torch.from_numpy(text_bboxes_features)
text_bboxes_features = text_bboxes_features.type(torch.FloatTensor)
# LOCAL VISUAL FEATURES
local_features_index = self.image_name2features_index[image_name]
local_features = self.local_feats[int(local_features_index)]
local_features = torch.from_numpy(local_features[:int(self.args.max_visual)][:])
local_features = local_features.type(torch.FloatTensor)
# LOCAL VISUAL BBOXES
local_bboxes_features = self.local_bboxes[int(local_features_index)]
local_bboxes_features = torch.from_numpy(local_bboxes_features[:int(self.args.max_visual)][:])
local_bboxes_features = local_bboxes_features.type(torch.FloatTensor)
return img, label, text_features, local_features, text_bboxes_features, local_bboxes_features, image_name
class Context_Test(data.Dataset):
def __init__(self, args, gt_annotations, text_embedding, embedding_size, local_feats, image_name2features_index, text_bboxes, local_bboxes, transform=None):
self.args = args
self.gt_annotations = gt_annotations
self.text_embedding = text_embedding
self.embedding_size = embedding_size
self.transform = transform
self.image_list = list(gt_annotations['test'].keys())
self.image_name2features_index = image_name2features_index
self.local_feats = local_feats
self.text_bboxes = text_bboxes
self.local_bboxes = local_bboxes
def __len__(self):
return len(self.gt_annotations['test'])
def __getitem__(self, index):
data_path = self.args.data_path
assert index <= len(self), 'index range error'
image_name = self.image_list[index].rstrip()
image_path = data_path+ '/Context/data/JPEGImages/' + image_name
img = Image.open(image_path).convert('RGB')
if self.transform:
img = self.transform(img)
img_class = self.gt_annotations['test'][image_name]
label = np.zeros(28)
label[int(img_class) - 1] = 1
label = torch.from_numpy(label)
label = label.type(torch.FloatTensor)
if self.args.embedding == 'w2vec' or self.args.embedding == 'fasttext' or self.args.embedding == 'glove' or self.args.embedding == 'bert':
text_embedding = np.asarray(self.text_embedding[image_name])
elif self.args.embedding == 'phoc':
with open (data_path + '/Context/yolo_phoc/'+image_name[:-3]+'json') as fp:
phocs = json.load(fp)
text_embedding = np.resize(phocs, (np.shape(phocs)[0], 604))
elif self.args.embedding == 'fisher':
if self.args.ocr == 'yolo_phoc':
relative_path = '/Context/old_fisher_vectors/'
elif self.args.ocr == 'e2e_mlt':
relative_path = '/Context/fasttext_fisher/'
else: print('Not Implemented')
with open (data_path + relative_path +image_name[:-3]+'json')as fp:
fisher_vector = json.load(fp)
text_embedding = np.resize(fisher_vector, (1, 38400))
# FISHER VECTORS DO NOT NEED MAX TEXTUAL
if self.args.embedding != 'fisher':
text_features = np.zeros((self.args.max_textual, self.embedding_size))
if np.shape(text_embedding)[0] == 0:
text_embedding = np.zeros((1,self.embedding_size))
elif np.shape(text_embedding)[0] > self.args.max_textual:
text_embedding = text_embedding[0:self.args.max_textual]
text_features[:len(text_embedding)] = text_embedding
else:
text_features = text_embedding
text_features = torch.from_numpy(text_features)
text_features = text_features.type(torch.FloatTensor)
# SCENE TEXT BBOXES ONLY FOR GOOGLE OCR
text_bboxes = np.asarray(self.text_bboxes[image_name])
if self.args.ocr == 'google_ocr':
text_bboxes_features = np.zeros((self.args.max_textual, 4))
if np.shape(text_bboxes)[0] == 0:
text_bboxes = np.zeros((1, 4))
elif np.shape(text_bboxes)[0] > self.args.max_textual:
text_bboxes = text_bboxes[0:self.args.max_textual]
text_bboxes_features[:len(text_bboxes)] = text_bboxes
else:
# NO BBOXES FOR OTHER OCRs
text_bboxes_features = np.zeros((self.args.max_textual, 4))
text_bboxes_features = torch.from_numpy(text_bboxes_features)
text_bboxes_features = text_bboxes_features.type(torch.FloatTensor)
# LOCAL VISUAL FEATURES
local_features_index = self.image_name2features_index[image_name]
local_features = self.local_feats[int(local_features_index)]
local_features = torch.from_numpy(local_features[:int(self.args.max_visual)][:])
local_features = local_features.type(torch.FloatTensor)
# LOCAL VISUAL BBOXES
local_bboxes_features = self.local_bboxes[int(local_features_index)]
local_bboxes_features = torch.from_numpy(local_bboxes_features[:int(self.args.max_visual)][:])
local_bboxes_features = local_bboxes_features.type(torch.FloatTensor)
return img, label, text_features, local_features, text_bboxes_features, local_bboxes_features, image_name
|
py | 1a31067bd02cd02548837e213163e0acd785d8c6 | from .voxel_set_abstraction import VoxelSetAbstraction
from .def_voxel_set_abstraction import DefVoxelSetAbstraction
__all__ = {
'VoxelSetAbstraction': VoxelSetAbstraction,
'DefVoxelSetAbstraction': DefVoxelSetAbstraction
}
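# Typical lookup pattern (illustrative; `model_cfg` is a hypothetical config object
# whose NAME field selects one of the classes registered above):
#
#   vsa_module = __all__[model_cfg.NAME](model_cfg=model_cfg, **other_kwargs)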
|
py | 1a310776abf7875f377eb507b9ab3fb7f21f81c5 | # Copyright Contributors to the Rez project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Bundle a context and its packages into a relocatable dir.
'''
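# Example invocation (illustrative; the exact entry point name depends on how rez
# is installed, e.g. "rez bundle" vs "rez-bundle"):
#
#   rez bundle my_context.rxt ./bundled_context --skip-non-relocatable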
from __future__ import print_function
import os
import os.path
import sys
def setup_parser(parser, completions=False):
group = parser.add_mutually_exclusive_group()
group.add_argument(
"-s", "--skip-non-relocatable", action="store_true",
help="leave non-relocatable packages non-bundled, rather than raise an error")
group.add_argument(
"-f", "--force", action="store_true",
help="bundle package even if it isn't relocatable (use at your own risk)")
group.add_argument(
"-n", "--no-lib-patch", action="store_true",
help="don't apply library patching within the bundle")
parser.add_argument(
"RXT",
help="context to bundle")
parser.add_argument(
"DEST_DIR",
help="directory to create bundle in; must not exist")
def command(opts, parser, extra_arg_groups=None):
from rez.utils.logging_ import print_error
from rez.bundle_context import bundle_context
from rez.resolved_context import ResolvedContext
rxt_filepath = os.path.abspath(os.path.expanduser(opts.RXT))
dest_dir = os.path.abspath(os.path.expanduser(opts.DEST_DIR))
# sanity checks
if not os.path.exists(rxt_filepath):
print_error("File does not exist: %s", rxt_filepath)
sys.exit(1)
context = ResolvedContext.load(rxt_filepath)
bundle_context(
context=context,
dest_dir=dest_dir,
force=opts.force,
skip_non_relocatable=opts.skip_non_relocatable,
verbose=opts.verbose,
patch_libs=(not opts.no_lib_patch)
)
|