ext | sha | content |
---|---|---|
py | 1a5054b196a7126b94e542b4fcd25a0ab4b9de7b | #!/usr/bin/env python
"""
plot_hub.py: the plot tool
"""
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def plt_fidelity_vs_iter(fidelities,losses,config,indx=0):
fig, (axs1, axs2) = plt.subplots(1, 2)
axs1.plot(range(len(fidelities)), fidelities)
axs1.set_xlabel('Epoch')
axs1.set_ylabel('Fidelity between real and fake states')
axs2.plot(range(len(losses)), losses)
axs2.set_xlabel('Epoch')
axs2.set_ylabel('Wasserstein Loss')
plt.tight_layout()
    plt.savefig('{}/{}qubit_{}_{}.png'.format(config.figure_path, config.system_size, config.label, indx))
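# A minimal usage sketch (hypothetical config object; any object with
# figure_path, system_size and label attributes works):
#
#   from types import SimpleNamespace
#   config = SimpleNamespace(figure_path='.', system_size=3, label='demo')
#   plt_fidelity_vs_iter([0.1, 0.5, 0.9], [2.0, 1.0, 0.5], config)
|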
py | 1a5054f3d651d52b5975c1c39d164b59eaf1226d | # Generated by Django 2.2.9 on 2020-02-06 15:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0004_auto_20200206_0955'),
]
operations = [
migrations.AlterField(
model_name='uniforme',
name='categoria',
field=models.CharField(choices=[('MALHARIA', 'Peças Têxteis'), ('CALCADO', 'Calçados')], default='MALHARIA',
max_length=10),
),
]
|
py | 1a50555e2c8c0d9b867e19eda3d74a62fee31ac4 | # -*- coding: utf-8 -*-
import os
import sys
import cv2
import numpy as np
IMAGE_SIZE = 64
# Resize an image to the specified size, padding it to a square first
def resize_image(image, height = IMAGE_SIZE, width = IMAGE_SIZE):
top, bottom, left, right = (0, 0, 0, 0)
    # Get the image dimensions
    h, w, _ = image.shape
    # For a non-square image, find the longest edge
    longest_edge = max(h, w)
    # Compute how many pixels the short edge needs to be padded to match the long edge
if h < longest_edge:
dh = longest_edge - h
top = dh // 2
bottom = dh - top
elif w < longest_edge:
dw = longest_edge - w
left = dw // 2
right = dw - left
else:
pass
BLACK = [0, 0, 0]
    # Pad the image to make it square; cv2.BORDER_CONSTANT fills the border with the color given by value
    constant = cv2.copyMakeBorder(image, top, bottom, left, right, cv2.BORDER_CONSTANT, value=BLACK)
    # Resize the image and return it
return cv2.resize(constant, (height, width))
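# A minimal usage sketch (assumes a local image file 'face.jpg' exists):
#
#   img = cv2.imread('face.jpg')
#   squared = resize_image(img)   # padded to a square, then scaled to 64x64
#   print(squared.shape)          # -> (64, 64, 3)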
# Read the training data
images = []
labels = []
def read_images(path_name):
for dir_item in os.listdir(path_name):
full_path = os.path.abspath(os.path.join(path_name, dir_item))
if os.path.isdir(full_path):
read_images(full_path)
else:
if dir_item.endswith('.jpg'):
print(full_path)
image = cv2.imread(full_path)
image = resize_image(image, IMAGE_SIZE, IMAGE_SIZE)
images.append(image)
labels.append(path_name)
return images,labels
# Load the training data from the given path
def load_dataset(path_name):
images,labels = read_images(path_name)
    # Convert all input images into a 4-D array of shape (number of images, IMAGE_SIZE, IMAGE_SIZE, 3)
    # Each image is 64 x 64 pixels with 3 color values per pixel (RGB)
images = np.array(images)
labels = np.array([0 if label.endswith('yangwk') else 1 for label in labels])
return images, labels
if __name__ == '__main__':
path_name = './data/'
images, labels = load_dataset(path_name)
print(images.shape)
print(labels.shape) |
py | 1a50583c0e1e67341ae5ee4af878a1a190bd7eef | """
============================================================================
Decoding in time-frequency space data using the Common Spatial Pattern (CSP)
============================================================================
The time-frequency decomposition is estimated by iterating over raw data that
has been band-passed at different frequencies. This is used to compute a
covariance matrix over each epoch or a rolling time-window and extract the CSP
filtered signals. A linear discriminant classifier is then applied to these
signals.
"""
# Authors: Laura Gwilliams <[email protected]>
# Jean-Remi King <[email protected]>
# Alex Barachant <[email protected]>
# Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from mne import Epochs, find_events, create_info
from mne.io import concatenate_raws, read_raw_edf
from mne.datasets import eegbci
from mne.decoding import CSP
from mne.time_frequency import AverageTFR
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import StratifiedKFold, cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import LabelEncoder
###############################################################################
# Set parameters and read data
event_id = dict(hands=2, feet=3) # motor imagery: hands vs feet
subject = 1
runs = [6, 10, 14]
raw_fnames = eegbci.load_data(subject, runs)
raw_files = [read_raw_edf(f, stim_channel='auto', preload=True)
for f in raw_fnames]
raw = concatenate_raws(raw_files)
# Extract information from the raw file
sfreq = raw.info['sfreq']
events = find_events(raw, shortest_event=0, stim_channel='STI 014')
raw.pick_types(meg=False, eeg=True, stim=False, eog=False, exclude='bads')
# Assemble the classifier using scikit-learn pipeline
clf = make_pipeline(CSP(n_components=4, reg=None, log=True, norm_trace=False),
LinearDiscriminantAnalysis())
n_splits = 5 # how many folds to use for cross-validation
cv = StratifiedKFold(n_splits=n_splits, shuffle=True)
# Classification & Time-frequency parameters
tmin, tmax = -.200, 2.000
n_cycles = 10. # how many complete cycles: used to define window size
min_freq = 5.
max_freq = 25.
n_freqs = 8 # how many frequency bins to use
# Assemble list of frequency range tuples
freqs = np.linspace(min_freq, max_freq, n_freqs) # assemble frequencies
freq_ranges = list(zip(freqs[:-1], freqs[1:])) # make freqs list of tuples
# Infer window spacing from the max freq and number of cycles to avoid gaps
window_spacing = (n_cycles / np.max(freqs) / 2.)
centered_w_times = np.arange(tmin, tmax, window_spacing)[1:]
n_windows = len(centered_w_times)
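# For the defaults above: window_spacing = 10. / 25. / 2. = 0.2 s, so the window
# centers are np.arange(-0.2, 2.0, 0.2)[1:], i.e. 0.0, 0.2, ..., 1.8 -> 10 windows.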
# Instantiate label encoder
le = LabelEncoder()
###############################################################################
# Loop through frequencies, apply classifier and save scores
# init scores
freq_scores = np.zeros((n_freqs - 1,))
# Loop through each frequency range of interest
for freq, (fmin, fmax) in enumerate(freq_ranges):
# Infer window size based on the frequency being used
w_size = n_cycles / ((fmax + fmin) / 2.) # in seconds
# Apply band-pass filter to isolate the specified frequencies
raw_filter = raw.copy().filter(fmin, fmax, n_jobs=1, fir_design='firwin',
skip_by_annotation='edge')
# Extract epochs from filtered data, padded by window size
epochs = Epochs(raw_filter, events, event_id, tmin - w_size, tmax + w_size,
proj=False, baseline=None, preload=True)
epochs.drop_bad()
y = le.fit_transform(epochs.events[:, 2])
X = epochs.get_data()
# Save mean scores over folds for each frequency and time window
freq_scores[freq] = np.mean(cross_val_score(estimator=clf, X=X, y=y,
scoring='roc_auc', cv=cv,
n_jobs=1), axis=0)
###############################################################################
# Plot frequency results
plt.bar(freqs[:-1], freq_scores, width=np.diff(freqs)[0],
        align='edge', edgecolor='black')
plt.xticks(freqs)
plt.ylim([0, 1])
plt.axhline(len(epochs['feet']) / len(epochs), color='k', linestyle='--',
label='chance level')
plt.legend()
plt.xlabel('Frequency (Hz)')
plt.ylabel('Decoding Scores')
plt.title('Frequency Decoding Scores')
###############################################################################
# Loop through frequencies and time, apply classifier and save scores
# init scores
tf_scores = np.zeros((n_freqs - 1, n_windows))
# Loop through each frequency range of interest
for freq, (fmin, fmax) in enumerate(freq_ranges):
# Infer window size based on the frequency being used
w_size = n_cycles / ((fmax + fmin) / 2.) # in seconds
# Apply band-pass filter to isolate the specified frequencies
raw_filter = raw.copy().filter(fmin, fmax, n_jobs=1, fir_design='firwin',
skip_by_annotation='edge')
# Extract epochs from filtered data, padded by window size
epochs = Epochs(raw_filter, events, event_id, tmin - w_size, tmax + w_size,
proj=False, baseline=None, preload=True)
epochs.drop_bad()
y = le.fit_transform(epochs.events[:, 2])
# Roll covariance, csp and lda over time
for t, w_time in enumerate(centered_w_times):
# Center the min and max of the window
w_tmin = w_time - w_size / 2.
w_tmax = w_time + w_size / 2.
# Crop data into time-window of interest
X = epochs.copy().crop(w_tmin, w_tmax).get_data()
# Save mean scores over folds for each frequency and time window
tf_scores[freq, t] = np.mean(cross_val_score(estimator=clf, X=X, y=y,
scoring='roc_auc', cv=cv,
n_jobs=1), axis=0)
###############################################################################
# Plot time-frequency results
# Set up time frequency object
av_tfr = AverageTFR(create_info(['freq'], sfreq), tf_scores[np.newaxis, :],
centered_w_times, freqs[1:], 1)
chance = np.mean(y) # set chance level to white in the plot
av_tfr.plot([0], vmin=chance, title="Time-Frequency Decoding Scores",
cmap=plt.cm.Reds)
|
py | 1a505852c76159452dfff8aede8d932bc3b6230f | """
Copyright 2019-present NAVER Corp.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#-*- coding: utf-8 -*-
import os
import json
import math
import random
import argparse
import numpy as np
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import Levenshtein as Lev
import label_loader
from data_loader import AudioDataLoader, SpectrogramDataset, BucketingSampler
from models import EncoderRNN, DecoderRNN, Seq2Seq
# @Kwang-Ho
import time
import datetime
from initialize import initialize
char2index = dict()
index2char = dict()
SOS_token = 0
EOS_token = 0
PAD_token = 0
def label_to_string(labels):
if len(labels.shape) == 1:
sent = str()
for i in labels:
if i.item() == EOS_token:
break
sent += index2char[i.item()]
return sent
elif len(labels.shape) == 2:
sents = list()
for i in labels:
sent = str()
for j in i:
if j.item() == EOS_token:
break
sent += index2char[j.item()]
sents.append(sent)
return sents
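# e.g. with a hypothetical mapping index2char = {7: 'a', 8: 'b'} and EOS_token = 2,
# label_to_string(torch.tensor([7, 8, 2])) returns 'ab' (decoding stops at EOS).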
def char_distance(ref, hyp):
ref = ref.replace(' ', '')
hyp = hyp.replace(' ', '')
dist = Lev.distance(hyp, ref)
length = len(ref.replace(' ', ''))
return dist, length
def get_distance(ref_labels, hyp_labels):
total_dist = 0
total_length = 0
transcripts = []
for i in range(len(ref_labels)):
ref = label_to_string(ref_labels[i])
hyp = label_to_string(hyp_labels[i])
transcripts.append('{hyp}\t{ref}'.format(hyp=hyp, ref=ref))
dist, length = char_distance(ref, hyp)
total_dist += dist
total_length += length
return total_dist, total_length, transcripts
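# Worked example: with ref = 'hello' and hyp = 'hallo', Lev.distance gives 1 and
# the reference length is 5, so the CER computed in train() is 1 / 5 * 100 = 20%.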
def train(model, data_loader, criterion, optimizer, device, epoch, train_sampler, max_norm=400, teacher_forcing_ratio=1):
total_loss = 0.
total_num = 0
total_dist = 0
total_length = 0
total_sent_num = 0
model.train()
for i, (data) in enumerate(data_loader):
feats, scripts, feat_lengths, script_lengths = data
optimizer.zero_grad()
feats = feats.to(device)
scripts = scripts.to(device)
feat_lengths = feat_lengths.to(device)
src_len = scripts.size(1)
target = scripts[:, 1:]
logit = model(feats, feat_lengths, scripts, teacher_forcing_ratio=teacher_forcing_ratio)
logit = torch.stack(logit, dim=1).to(device)
y_hat = logit.max(-1)[1]
loss = criterion(logit.contiguous().view(-1, logit.size(-1)), target.contiguous().view(-1))
batch_size = logit.size(0)
loss = loss / batch_size
total_loss += loss.item()
total_num += sum(feat_lengths).item()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
optimizer.step()
dist, length, _ = get_distance(target, y_hat)
total_dist += dist
total_length += length
cer = float(dist / length) * 100
total_sent_num += target.size(0)
print('Epoch: [{0}][{1}/{2}]\t'
'Loss {loss:.4f}\t'
'Cer {cer:.4f}'.format(
(epoch + 1), (i + 1), len(train_sampler), loss=loss, cer=cer))
# return total_loss / total_num, (total_dist / total_length) * 100
return total_loss / len(data_loader), (total_dist / total_length) * 100
def evaluate(model, data_loader, criterion, device, save_output=False, teacher_forcing_ratio=0.0):
total_loss = 0.
total_num = 0
total_dist = 0
total_length = 0
total_sent_num = 0
transcripts_list = []
model.eval()
with torch.no_grad():
for i, (data) in tqdm(enumerate(data_loader), total=len(data_loader)):
feats, scripts, feat_lengths, script_lengths = data
feats = feats.to(device)
scripts = scripts.to(device)
feat_lengths = feat_lengths.to(device)
src_len = scripts.size(1)
target = scripts[:, 1:]
logit = model(feats, feat_lengths, scripts, teacher_forcing_ratio=teacher_forcing_ratio) # 3-th args: None
logit = torch.stack(logit, dim=1).to(device)
y_hat = logit.max(-1)[1]
logit = logit[:,:target.size(1),:] # cut over length to calculate loss
loss = criterion(logit.contiguous().view(-1, logit.size(-1)), target.contiguous().view(-1))
batch_size = logit.size(0)
loss = loss / batch_size
total_loss += loss.item()
total_num += sum(feat_lengths).item()
dist, length, transcripts = get_distance(target, y_hat)
cer = float(dist / length) * 100
total_dist += dist
total_length += length
if save_output == True:
transcripts_list += transcripts
total_sent_num += target.size(0)
# aver_loss = total_loss / total_num
aver_loss = total_loss / len(data_loader)
aver_cer = float(total_dist / total_length) * 100
return aver_loss, aver_cer, transcripts_list
def main():
global char2index
global index2char
global SOS_token
global EOS_token
global PAD_token
parser = argparse.ArgumentParser(description='LAS')
parser.add_argument('--model-name', type=str, default='LAS')
# Dataset
parser.add_argument('--train-file', type=str,
help='data list about train dataset', default='data/ClovaCall/train_ClovaCall.json')
parser.add_argument('--test-file-list', nargs='*',
                        help='data list about test dataset', default=['data/ClovaCall/test_ClovaCall.json'])
parser.add_argument('--labels-path', default='data/kor_syllable.json', help='Contains large characters over korean')
parser.add_argument('--dataset-path', default='data/ClovaCall/clean', help='Target dataset path')
# Hyperparameters
parser.add_argument('--rnn-type', default='lstm', help='Type of the RNN. rnn|gru|lstm are supported')
parser.add_argument('--encoder_layers', type=int, default=3, help='number of layers of model (default: 3)')
parser.add_argument('--encoder_size', type=int, default=512, help='hidden size of model (default: 512)')
parser.add_argument('--decoder_layers', type=int, default=2, help='number of pyramidal layers (default: 2)')
parser.add_argument('--decoder_size', type=int, default=512, help='hidden size of model (default: 512)')
parser.add_argument('--dropout', type=float, default=0.3, help='Dropout rate in training (default: 0.3)')
parser.add_argument('--no-bidirectional', dest='bidirectional', action='store_false', default=True, help='Turn off bi-directional RNNs, introduces lookahead convolution')
parser.add_argument('--batch_size', type=int, default=32, help='Batch size in training (default: 32)')
parser.add_argument('--num_workers', type=int, default=4, help='Number of workers in dataset loader (default: 4)')
parser.add_argument('--num_gpu', type=int, default=1, help='Number of gpus (default: 1)')
parser.add_argument('--epochs', type=int, default=100, help='Number of max epochs in training (default: 100)')
parser.add_argument('--lr', type=float, default=3e-4, help='Learning rate (default: 3e-4)')
parser.add_argument('--learning-anneal', default=1.1, type=float, help='Annealing learning rate every epoch')
parser.add_argument('--teacher_forcing', type=float, default=1.0, help='Teacher forcing ratio in decoder (default: 1.0)')
parser.add_argument('--max_len', type=int, default=80, help='Maximum characters of sentence (default: 80)')
parser.add_argument('--max-norm', default=400, type=int, help='Norm cutoff to prevent explosion of gradients')
# Audio Config
parser.add_argument('--sample-rate', default=16000, type=int, help='Sampling Rate')
parser.add_argument('--window-size', default=.02, type=float, help='Window size for spectrogram')
parser.add_argument('--window-stride', default=.01, type=float, help='Window stride for spectrogram')
# System
parser.add_argument('--save-folder', default='models', help='Location to save epoch models')
parser.add_argument('--model-path', default='models/las_final.pth', help='Location to save best validation model')
parser.add_argument('--log-path', default='log/', help='path to predict log about valid and test dataset')
    parser.add_argument('--cuda', action='store_true', default=False, help='Use CUDA for training')
parser.add_argument('--seed', type=int, default=123456, help='random seed (default: 123456)')
parser.add_argument('--mode', type=str, default='train', help='Train or Test')
parser.add_argument('--load-model', action='store_true', default=False, help='Load model')
parser.add_argument('--finetune', dest='finetune', action='store_true', default=False,
help='Finetune the model after load model')
args = parser.parse_args()
args.max_norm = 5.0
args.dropout = 0.0
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
char2index, index2char = label_loader.load_label_json(args.labels_path)
SOS_token = char2index['<s>']
EOS_token = char2index['</s>']
PAD_token = char2index['_']
device = torch.device('cuda' if args.cuda else 'cpu')
audio_conf = dict(sample_rate=args.sample_rate,
window_size=args.window_size,
window_stride=args.window_stride)
# Batch Size
batch_size = args.batch_size * args.num_gpu
print(">> Train dataset : ", args.train_file)
trainData_list = []
with open(args.train_file, 'r', encoding='utf-8') as f:
trainData_list = json.load(f)
if args.num_gpu != 1:
last_batch = len(trainData_list) % batch_size
if last_batch != 0 and last_batch < args.num_gpu:
trainData_list = trainData_list[:-last_batch]
train_dataset = SpectrogramDataset(audio_conf=audio_conf,
dataset_path=args.dataset_path,
data_list=trainData_list,
char2index=char2index, sos_id=SOS_token, eos_id=EOS_token,
normalize=True)
train_sampler = BucketingSampler(train_dataset, batch_size=batch_size)
train_loader = AudioDataLoader(train_dataset, num_workers=args.num_workers, batch_sampler=train_sampler)
print(">> Test dataset : ", args.test_file_list)
testLoader_dict = {}
for test_file in args.test_file_list:
testData_list = []
with open(test_file, 'r', encoding='utf-8') as f:
testData_list = json.load(f)
test_dataset = SpectrogramDataset(audio_conf=audio_conf,
dataset_path=args.dataset_path,
data_list=testData_list,
char2index=char2index, sos_id=SOS_token, eos_id=EOS_token,
normalize=True)
testLoader_dict[test_file] = AudioDataLoader(test_dataset, batch_size=1, num_workers=args.num_workers)
# input_size = int(math.floor((args.sample_rate * args.window_size) / 2) + 1)
input_size = 80
enc = EncoderRNN(input_size, args.encoder_size, n_layers=args.encoder_layers,
dropout_p=args.dropout, bidirectional=args.bidirectional,
rnn_cell=args.rnn_type, variable_lengths=False)
dec = DecoderRNN(len(char2index), args.max_len, args.decoder_size, args.encoder_size,
SOS_token, EOS_token, PAD_token,
n_layers=args.decoder_layers, rnn_cell=args.rnn_type,
dropout_p=args.dropout, bidirectional_encoder=args.bidirectional)
model = Seq2Seq(enc, dec)
initialize(model, init='xavier_uniform')
save_folder = args.save_folder
os.makedirs(save_folder, exist_ok=True)
optim_state = None
if args.load_model: # Starting from previous model
print("Loading checkpoint model %s" % args.model_path)
state = torch.load(args.model_path)
model.load_state_dict(state['model'])
print('Model loaded')
if not args.finetune: # Just load model
optim_state = state['optimizer']
model = model.to(device)
# optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-5)
optimizer = optim.Adadelta(model.parameters(), lr=1.0, rho=0.95, eps=1e-08, weight_decay=0)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=1, verbose=True)
if optim_state is not None:
optimizer.load_state_dict(optim_state)
# criterion = nn.CrossEntropyLoss(reduction='mean').to(device)
criterion = nn.CrossEntropyLoss(reduction='sum').to(device) # ignore_index=PAD_token
print(model)
print("Number of parameters: %d" % Seq2Seq.get_param_size(model))
train_model = nn.DataParallel(model)
if args.mode != "train":
for test_file in args.test_file_list:
test_loader = testLoader_dict[test_file]
test_loss, test_cer, transcripts_list = evaluate(model, test_loader, criterion, device, save_output=True)
for idx, line in enumerate(transcripts_list):
# print(line)
hyp, ref = line.split('\t')
print("({:3d}/{:3d}) [REF]: {}".format(idx+1, len(transcripts_list), ref))
print("({:3d}/{:3d}) [HYP]: {}".format(idx+1, len(transcripts_list), hyp))
print()
print("Test {} CER : {}".format(test_file, test_cer))
else:
best_cer = 1e10
begin_epoch = 0
# start_time = time.time()
start_time = datetime.datetime.now()
for epoch in range(begin_epoch, args.epochs):
train_loss, train_cer = train(train_model, train_loader, criterion, optimizer, device, epoch, train_sampler, args.max_norm, args.teacher_forcing)
# end_time = time.time()
# elapsed_time = end_time - start_time
elapsed_time = datetime.datetime.now() - start_time
train_log = 'Train({name}) Summary Epoch: [{0}]\tAverage Loss {loss:.3f}\tAverage CER {cer:.3f}\tTime {time:}'.format(epoch + 1, name='train', loss=train_loss, cer=train_cer, time=elapsed_time)
print(train_log)
cer_list = []
for test_file in args.test_file_list:
test_loader = testLoader_dict[test_file]
test_loss_tf, test_cer_tf, _ = evaluate(model, test_loader, criterion, device, save_output=False, teacher_forcing_ratio=1.0)
test_log = '(TF=1.0) Test({name}) Summary Epoch: [{0}]\tAverage Loss {loss:.3f}\tAverage CER {cer:.3f}\t'.format(
epoch + 1, name=test_file, loss=test_loss_tf, cer=test_cer_tf)
print(test_log)
test_loss, test_cer, _ = evaluate(model, test_loader, criterion, device, save_output=False, teacher_forcing_ratio=0.0)
test_log = '(TF=0.0) Test({name}) Summary Epoch: [{0}]\tAverage Loss {loss:.3f}\tAverage CER {cer:.3f}\t'.format(
epoch + 1, name=test_file, loss=test_loss, cer=test_cer)
print(test_log)
cer_list.append(test_cer)
if best_cer > cer_list[0]:
print("Found better validated model, saving to %s" % args.model_path)
state = {
'model': model.state_dict(),
'optimizer': optimizer.state_dict()
}
torch.save(state, args.model_path)
best_cer = cer_list[0]
print("Shuffling batches...")
train_sampler.shuffle(epoch)
scheduler.step(float(test_loss_tf))
# print('Learning rate annealed to: {lr:.6f}'.format(lr=scheduler.get_lr()))
# for g in optimizer.param_groups:
# g['lr'] = g['lr'] / args.learning_anneal
# print('Learning rate annealed to: {lr:.6f}'.format(lr=g['lr']))
if __name__ == "__main__":
main()
|
py | 1a5058be428f46409c27374ac8a8ac0c57117fbb | """
Create a class to measure the average time elapsed between mark() calls.
This is useful for measuring how frequently the price updates (i.e. we call the mark() method on every price update)
"""
import time
import random
class LatencyMetric:
def __init__(self):
self._last_received_timestamp = time.time_ns()
self._max_duration = 0
self._sum = 0
self._count = 0
def mark(self):
# calculate time lapsed
ts = time.time_ns()
duration = ts - self._last_received_timestamp
self._last_received_timestamp = ts
self._sum += duration
self._count += 1
if duration > self._max_duration:
self._max_duration = duration
def get_max(self) -> int:
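        """ get max in nanoseconds """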
return self._max_duration
    def get_mean(self) -> float:
""" get mean in milliseconds """
return self._sum / self._count / 1000000
# A simple driver class to demonstrate the usage
if __name__ == '__main__':
metric = LatencyMetric()
while True:
# a random time between 0.9 and 1.1 seconds
random_duration = float(random.randint(90, 110)) / 100.0
time.sleep(random_duration)
metric.mark()
# we expect to print an average time of ~1 second
print('Average: {}, max: {}'.format(metric.get_mean(), metric.get_max()))
|
py | 1a50594bdaa755ffc98cb9dd240762611c173211 | #
# Copyright Contributors to the OpenTimelineIO project
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
from PySide2 import QtWidgets, QtGui, QtCore
import opentimelineio as otio
class Details(QtWidgets.QTextEdit):
"""Text widget with the JSON string of the specified OTIO object."""
def __init__(self, *args, **kwargs):
super(Details, self).__init__(*args, **kwargs)
self.setReadOnly(True)
self.font = QtGui.QFontDatabase.systemFont(
QtGui.QFontDatabase.FixedFont)
self.font.setPointSize(12)
self.setFont(self.font)
self.backgroundColor = QtGui.QColor(33, 33, 33)
self.textColor = QtGui.QColor(180, 180, 180)
self.highlightColor = QtGui.QColor(255, 198, 109)
self.keywordColor = QtGui.QColor(204, 120, 50)
self.palette = QtGui.QPalette()
self.palette.setColor(QtGui.QPalette.Base, self.backgroundColor)
self.palette.setColor(QtGui.QPalette.Text, self.textColor)
self.palette.setColor(QtGui.QPalette.BrightText, self.highlightColor)
self.palette.setColor(QtGui.QPalette.Link, self.keywordColor)
self.setPalette(self.palette)
self.highlighter = OTIOSyntaxHighlighter(self.palette, self.document())
def set_item(self, item):
if item is None:
self.setPlainText('')
else:
s = otio.adapters.write_to_string(item, 'otio_json')
self.setPlainText(s)
class OTIOSyntaxHighlighter(QtGui.QSyntaxHighlighter):
def __init__(self, palette, parent=None):
super(OTIOSyntaxHighlighter, self).__init__(parent)
self.punctuation_format = QtGui.QTextCharFormat()
self.punctuation_format.setForeground(palette.link())
self.punctuation_format.setFontWeight(QtGui.QFont.Bold)
self.key_format = QtGui.QTextCharFormat()
# self.key_format.setFontItalic(True)
self.literal_format = QtGui.QTextCharFormat()
self.literal_format.setForeground(palette.brightText())
self.literal_format.setFontWeight(QtGui.QFont.Bold)
self.value_format = QtGui.QTextCharFormat()
self.value_format.setForeground(palette.brightText())
self.value_format.setFontWeight(QtGui.QFont.Bold)
self.schema_format = QtGui.QTextCharFormat()
self.schema_format.setForeground(QtGui.QColor(161, 194, 97))
self.schema_format.setFontWeight(QtGui.QFont.Bold)
def highlightBlock(self, text):
expression = QtCore.QRegExp("(\\{|\\}|\\[|\\]|\\:|\\,)")
index = expression.indexIn(text)
while index >= 0:
length = expression.matchedLength()
self.setFormat(index, length, self.punctuation_format)
index = expression.indexIn(text, index + length)
text.replace("\\\"", " ")
expression = QtCore.QRegExp("\".*\" *\\:")
expression.setMinimal(True)
index = expression.indexIn(text)
while index >= 0:
length = expression.matchedLength()
self.setFormat(index, length - 1, self.key_format)
index = expression.indexIn(text, index + length)
expression = QtCore.QRegExp("\\: *\".*\"")
expression.setMinimal(True)
index = expression.indexIn(text)
while index >= 0:
length = expression.matchedLength()
firstQuoteIndex = text.index('"', index)
valueLength = length - (firstQuoteIndex - index) - 2
self.setFormat(firstQuoteIndex + 1, valueLength, self.value_format)
index = expression.indexIn(text, index + length)
expression = QtCore.QRegExp(r"\\: (null|true|false|[0-9\.]+)")
index = expression.indexIn(text)
while index >= 0:
length = expression.matchedLength()
self.setFormat(index, length, self.literal_format)
index = expression.indexIn(text, index + length)
expression = QtCore.QRegExp(r"\"OTIO_SCHEMA\"\s*:\s*\".*\"")
index = expression.indexIn(text)
while index >= 0:
length = expression.matchedLength()
self.setFormat(index, length, self.schema_format)
index = expression.indexIn(text, index + length)
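# A minimal usage sketch (assumes a working PySide2 install); shows the JSON
# view for a simple OTIO clip.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    details = Details()
    details.set_item(otio.schema.Clip(name="example clip"))
    details.show()
    sys.exit(app.exec_())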
|
py | 1a505ca95a45d4fd64f27e6753ef81bffcac18de | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from copy import copy
import itertools
import math
import os
import pytest
import random
import shutil
import tempfile
import time
from subprocess import check_call
from tests.common.test_dimensions import create_exec_option_dimension_from_dict
from tests.common.impala_test_suite import ImpalaTestSuite, LOG
from tests.util.filesystem_utils import WAREHOUSE, get_fs_path
from tests.util.test_file_parser import QueryTestSectionReader
# Random fuzz testing of HDFS scanners. Existing tables for any HDFS file format
# are corrupted in random ways to flush out bugs with handling of corrupted data.
class TestScannersFuzzing(ImpalaTestSuite):
# Use abort_on_error = False to ensure we scan all the files.
ABORT_ON_ERROR_VALUES = [False]
# Only run on all nodes - num_nodes=1 would not provide additional coverage.
NUM_NODES_VALUES = [0]
# Limit memory to avoid causing other concurrent tests to fail.
MEM_LIMITS = ['512m']
# Test the codegen and non-codegen paths.
DISABLE_CODEGEN_VALUES = [True, False]
# Test a range of batch sizes to exercise different corner cases.
BATCH_SIZES = [0, 1, 16, 10000]
# Test with denial of reservations at varying frequency. This will affect the number
# of scanner threads that can be spun up.
  DEBUG_ACTION_VALUES = [None,
      '-1:OPEN:SET_DENY_RESERVATION_PROBABILITY@0.5',
      '-1:OPEN:SET_DENY_RESERVATION_PROBABILITY@1.0']
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestScannersFuzzing, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_dimension(
create_exec_option_dimension_from_dict({
'abort_on_error' : cls.ABORT_ON_ERROR_VALUES,
'num_nodes' : cls.NUM_NODES_VALUES,
'mem_limit' : cls.MEM_LIMITS,
'debug_action' : cls.DEBUG_ACTION_VALUES}))
# TODO: enable for more table formats once they consistently pass the fuzz test.
# TODO(IMPALA-6772): enable for ORC formats once a new version after release-1.4.3
# of ORC library is released.
cls.ImpalaTestMatrix.add_constraint(lambda v:
v.get_value('table_format').file_format in ('avro', 'parquet') or
(v.get_value('table_format').file_format == 'text' and
v.get_value('table_format').compression_codec in ('none', 'lzo')))
def test_fuzz_alltypes(self, vector, unique_database):
table_format = vector.get_value('table_format')
src_db = QueryTestSectionReader.get_db_name(table_format)
table_name = "alltypes"
self.run_fuzz_test(vector, src_db, table_name, unique_database, table_name)
def test_fuzz_decimal_tbl(self, vector, unique_database):
table_format = vector.get_value('table_format')
table_name = "decimal_tbl"
if table_format.file_format == 'avro':
table_name = "avro_decimal_tbl"
if table_format.compression_codec != 'snap' or \
table_format.compression_type != 'block':
pytest.skip()
elif table_format.file_format == 'rc' or \
table_format.file_format == 'seq':
pytest.skip()
elif table_format.file_format == 'text' and \
table_format.compression_codec != 'none':
# decimal_tbl is not present for these file formats
pytest.skip()
src_db = QueryTestSectionReader.get_db_name(table_format)
self.run_fuzz_test(vector, src_db, table_name, unique_database, table_name, 10)
def test_fuzz_nested_types(self, vector, unique_database):
table_format = vector.get_value('table_format')
table_name = "complextypestbl"
src_db = QueryTestSectionReader.get_db_name(table_format)
if table_format.file_format != 'parquet': pytest.skip()
self.run_fuzz_test(vector, src_db, table_name, unique_database, table_name, 10)
def test_fuzz_uncompressed_parquet(self, vector, unique_database):
"""Parquet tables in default schema are compressed, so in order
to do the fuzz_test on an uncompressed parquet table, this test
clones from an existing parquet table into a new table with
no compression.
"""
table_format = vector.get_value('table_format')
if vector.get_value('table_format').compression_codec != 'none': pytest.skip()
if table_format.file_format != 'parquet': pytest.skip()
"""Even when the compression_codec is none, the default compression type is snappy
so compression codec is changed explicitly to be none.
"""
self.execute_query("set compression_codec=none")
tbl_list = ["alltypes", "decimal_tbl"]
for orig_tbl_name in tbl_list:
src_table_name = "parquet_uncomp_src_" + orig_tbl_name
fuzz_table_name = "parquet_uncomp_dst_" + orig_tbl_name
fq_tbl_name = unique_database + "." + src_table_name
create_tbl = ("create table {0} stored as parquet as select * from"
" functional_parquet.{1}".format(fq_tbl_name, orig_tbl_name))
self.execute_query(create_tbl)
self.run_fuzz_test(vector, unique_database, src_table_name, unique_database,
fuzz_table_name, 10)
# TODO: add test coverage for additional data types like char and varchar
def run_fuzz_test(self, vector, src_db, src_table, fuzz_db, fuzz_table, num_copies=1):
""" Do some basic fuzz testing: create a copy of an existing table with randomly
corrupted files and make sure that we don't crash or behave in an unexpected way.
'unique_database' is used for the table, so it will be cleaned up automatically.
If 'num_copies' is set, create that many corrupted copies of each input file.
SCANNER_FUZZ_SEED can be set in the environment to reproduce the result (assuming that
input files are the same).
SCANNER_FUZZ_KEEP_FILES can be set in the environment to keep the generated files.
"""
# Create and seed a new random number generator for reproducibility.
rng = random.Random()
    random_seed = int(os.environ.get("SCANNER_FUZZ_SEED") or time.time())
    LOG.info("Using random seed %d", random_seed)
    rng.seed(random_seed)
tmp_table_dir = tempfile.mkdtemp(prefix="tmp-scanner-fuzz-%s" % fuzz_table,
dir=os.path.join(os.environ['IMPALA_HOME'], "testdata"))
self.execute_query("create table %s.%s like %s.%s" % (fuzz_db, fuzz_table,
src_db, src_table))
fuzz_table_location = get_fs_path("/test-warehouse/{0}.db/{1}".format(
fuzz_db, fuzz_table))
LOG.info("Generating corrupted version of %s in %s. Local working directory is %s",
fuzz_table, fuzz_db, tmp_table_dir)
# Find the location of the existing table and get the full table directory structure.
fq_table_name = src_db + "." + src_table
table_loc = self._get_table_location(fq_table_name, vector)
check_call(['hdfs', 'dfs', '-copyToLocal', table_loc + "/*", tmp_table_dir])
partitions = self.walk_and_corrupt_table_data(tmp_table_dir, num_copies, rng)
for partition in partitions:
self.execute_query('alter table {0}.{1} add partition ({2})'.format(
fuzz_db, fuzz_table, ','.join(partition)))
# Copy all of the local files and directories to hdfs.
to_copy = ["%s/%s" % (tmp_table_dir, file_or_dir)
for file_or_dir in os.listdir(tmp_table_dir)]
self.filesystem_client.copy_from_local(to_copy, fuzz_table_location)
if "SCANNER_FUZZ_KEEP_FILES" not in os.environ:
shutil.rmtree(tmp_table_dir)
# Querying the corrupted files should not DCHECK or crash.
self.execute_query("refresh %s.%s" % (fuzz_db, fuzz_table))
# Execute a query that tries to read all the columns and rows in the file.
# Also execute a count(*) that materializes no columns, since different code
# paths are exercised.
queries = [
'select count(*) from (select distinct * from {0}.{1}) q'.format(
fuzz_db, fuzz_table),
'select count(*) from {0}.{1} q'.format(fuzz_db, fuzz_table)]
for query, batch_size, disable_codegen in \
itertools.product(queries, self.BATCH_SIZES, self.DISABLE_CODEGEN_VALUES):
query_options = copy(vector.get_value('exec_option'))
query_options['batch_size'] = batch_size
query_options['disable_codegen'] = disable_codegen
query_options['disable_codegen_rows_threshold'] = 0
try:
result = self.execute_query(query, query_options = query_options)
LOG.info('\n'.join(result.log))
except Exception as e:
if 'memory limit exceeded' in str(e).lower():
# Memory limit error should fail query.
continue
msg = "Should not throw error when abort_on_error=0: '{0}'".format(e)
LOG.error(msg)
# Parquet and compressed text can fail the query for some parse errors.
# E.g. corrupt Parquet footer (IMPALA-3773) or a corrupt LZO index file
# (IMPALA-4013).
table_format = vector.get_value('table_format')
if table_format.file_format != 'parquet' \
and not (table_format.file_format == 'text' and \
table_format.compression_codec != 'none') \
and not table_format.file_format == 'rc' \
and not table_format.file_format == 'seq':
raise
def walk_and_corrupt_table_data(self, tmp_table_dir, num_copies, rng):
""" Walks a local copy of a HDFS table directory. Returns a list of partitions, each
as a list of "key=val" pairs. Ensures there is 'num_copies' copies of each file,
and corrupts each of the copies.
"""
partitions = []
# Iterate over the partitions and files we downloaded.
for subdir, dirs, files in os.walk(tmp_table_dir):
if '_impala_insert_staging' in subdir: continue
if len(dirs) != 0: continue # Skip non-leaf directories
rel_subdir = os.path.relpath(subdir, tmp_table_dir)
if rel_subdir != ".":
# Create metadata for any directory partitions.
partitions.append(self.partitions_from_path(rel_subdir))
# Corrupt all of the files that we find.
for filename in files:
filepath = os.path.join(subdir, filename)
copies = [filepath]
for copy_num in range(1, num_copies):
copypath = os.path.join(subdir, "copy{0}_{1}".format(copy_num, filename))
shutil.copyfile(filepath, copypath)
copies.append(copypath)
for filepath in copies:
self.corrupt_file(filepath, rng)
return partitions
def partitions_from_path(self, relpath):
""" Return a list of "key=val" parts from partitions inferred from the directory path.
"""
reversed_partitions = []
while relpath != '':
relpath, suffix = os.path.split(relpath)
reversed_partitions.append(suffix)
return reversed(reversed_partitions)
def corrupt_file(self, path, rng):
""" Corrupt the file at 'path' in the local file system in a randomised way using the
random number generator 'rng'. Rewrites the file in-place.
Logs a message to describe how the file was corrupted, so the error is reproducible.
"""
with open(path, "rb") as f:
data = bytearray(f.read())
num_corruptions = rng.randint(0, int(math.log(len(data))))
    for _ in range(num_corruptions):
flip_offset = rng.randint(0, len(data) - 1)
flip_val = rng.randint(0, 255)
LOG.info("corrupt file: Flip byte in {0} at {1} from {2} to {3}".format(
path, flip_offset, data[flip_offset], flip_val))
data[flip_offset] = flip_val
if rng.random() < 0.4:
truncation = rng.randint(0, len(data))
LOG.info("corrupt file: Truncate {0} to {1}".format(path, truncation))
data = data[:truncation]
with open(path, "wb") as f:
f.write(data)
|
py | 1a505cae0d0cb34d4705ea0719adabfeeea23a48 | import os
import warnings
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import gym
import numpy as np
from stable_baselines3.common import base_class
from stable_baselines3.common.callbacks import EvalCallback, BaseCallback
from stable_baselines3.common.vec_env import VecEnv, sync_envs_normalization
from controller.helpers.logging import merge_dicts, log_dict, get_done_or_dones
# we're adapting stable_baseline's eval function to also return averaged info dict
def evaluate_policy_with_info(
model: "base_class.BaseAlgorithm",
env: Union[gym.Env, VecEnv],
n_eval_episodes: int = 10,
deterministic: bool = True,
render: bool = False,
callback: Optional[Callable[[Dict[str, Any], Dict[str, Any]], None]] = None,
reward_threshold: Optional[float] = None,
return_episode_rewards: bool = False,
warn: bool = True,
) -> Union[Tuple[float, float, Dict[str, Any]], Tuple[List[float], List[int], Dict[str, Any]]]:
"""
Runs policy for ``n_eval_episodes`` episodes and returns average reward.
This is made to work only with one env.
.. note::
If environment has not been wrapped with ``Monitor`` wrapper, reward and
episode lengths are counted as it appears with ``env.step`` calls. If
the environment contains wrappers that modify rewards or episode lengths
(e.g. reward scaling, early episode reset), these will affect the evaluation
results as well. You can avoid this by wrapping environment with ``Monitor``
wrapper before anything else.
:param model: The RL agent you want to evaluate.
:param env: The gym environment. In the case of a ``VecEnv``
this must contain only one environment.
:param n_eval_episodes: Number of episode to evaluate the agent
:param deterministic: Whether to use deterministic or stochastic actions
:param render: Whether to render the environment or not
:param callback: callback function to do additional checks,
called after each step. Gets locals() and globals() passed as parameters.
:param reward_threshold: Minimum expected reward per episode,
this will raise an error if the performance is not met
    :param return_episode_rewards: If True, a list of rewards and episode lengths
per episode will be returned instead of the mean.
:param warn: If True (default), warns user about lack of a Monitor wrapper in the
evaluation environment.
:return: Mean reward per episode, std of reward per episode.
Returns ([float], [int]) when ``return_episode_rewards`` is True, first
list containing per-episode rewards and second containing per-episode lengths
(in number of steps).
"""
is_monitor_wrapped = False
# Avoid circular import
from stable_baselines3.common.env_util import is_wrapped
from stable_baselines3.common.monitor import Monitor
if isinstance(env, VecEnv):
assert env.num_envs == 1, "You must pass only one environment when using this function"
is_monitor_wrapped = env.env_is_wrapped(Monitor)[0]
else:
is_monitor_wrapped = is_wrapped(env, Monitor)
if not is_monitor_wrapped and warn:
warnings.warn(
"Evaluation environment is not wrapped with a ``Monitor`` wrapper. "
"This may result in reporting modified episode lengths and rewards, if other wrappers happen to modify these. "
"Consider wrapping environment first with ``Monitor`` wrapper.",
UserWarning,
)
episode_rewards, episode_lengths = [], []
not_reseted = True
all_infos = {}
while len(episode_rewards) < n_eval_episodes:
# Number of loops here might differ from true episodes
# played, if underlying wrappers modify episode lengths.
# Avoid double reset, as VecEnv are reset automatically.
if not isinstance(env, VecEnv) or not_reseted:
obs = env.reset()
not_reseted = False
done, state = False, None
episode_reward = 0.0
episode_length = 0
while not done:
action, state = model.predict(obs, state=state, deterministic=deterministic)
obs, reward, done, info = env.step(action)
episode_reward += reward
if callback is not None:
callback(locals(), globals())
episode_length += 1
if render:
env.render()
info = info[0] # access dict within list
all_infos = merge_dicts(info, all_infos)
if is_monitor_wrapped:
# Do not trust "done" with episode endings.
# Remove vecenv stacking (if any)
if isinstance(env, VecEnv):
info = info[0]
if "episode" in info.keys():
# Monitor wrapper includes "episode" key in info if environment
# has been wrapped with it. Use those rewards instead.
episode_rewards.append(info["episode"]["r"])
episode_lengths.append(info["episode"]["l"])
else:
episode_rewards.append(episode_reward)
episode_lengths.append(episode_length)
mean_reward = np.mean(episode_rewards)
std_reward = np.std(episode_rewards)
if reward_threshold is not None:
assert mean_reward > reward_threshold, "Mean reward below threshold: " f"{mean_reward:.2f} < {reward_threshold:.2f}"
if return_episode_rewards:
return episode_rewards, episode_lengths, all_infos
return mean_reward, std_reward, all_infos
class EvalCallbackWithInfo(EvalCallback):
def __init__(
self,
eval_env: Union[gym.Env, VecEnv],
callback_on_new_best: Optional[BaseCallback] = None,
n_eval_episodes: int = 5,
eval_freq: int = 10000,
log_path: str = None,
best_model_save_path: str = None,
deterministic: bool = True,
render: bool = False,
verbose: int = 1,
warn: bool = True,
exclude_infos_from_logging=["terminal_observation"],
eval_at_init=False,
eval_after_episode=True,
):
super(EvalCallbackWithInfo, self).__init__(
eval_env, callback_on_new_best, n_eval_episodes, eval_freq, log_path, best_model_save_path, deterministic, render, verbose, warn
)
self.exclude_infos_from_logging = exclude_infos_from_logging
self.eval_at_init = eval_at_init
self.eval_after_episode = eval_after_episode
        self.episode_counter = 0
def _init_callback(self) -> None:
# Does not work in some corner cases, where the wrapper is not the same
if not isinstance(self.training_env, type(self.eval_env)):
warnings.warn("Training and eval env are not of the same type" f"{self.training_env} != {self.eval_env}")
# Create folders if needed
if self.best_model_save_path is not None:
os.makedirs(self.best_model_save_path, exist_ok=True)
if self.log_path is not None:
os.makedirs(os.path.dirname(self.log_path), exist_ok=True)
# test performance right at the beginning to see how well random policy does
if self.eval_at_init:
self.eval_with_info()
def eval_with_info(self):
# Sync training and eval env if there is VecNormalize
sync_envs_normalization(self.training_env, self.eval_env)
# Reset success rate buffer
self._is_success_buffer = []
episode_rewards, episode_lengths, all_infos = evaluate_policy_with_info(
self.model,
self.eval_env,
n_eval_episodes=self.n_eval_episodes,
render=self.render,
deterministic=self.deterministic,
return_episode_rewards=True,
warn=self.warn,
callback=self._log_success_callback,
)
if self.log_path is not None:
self.evaluations_timesteps.append(self.num_timesteps)
self.evaluations_results.append(episode_rewards)
self.evaluations_length.append(episode_lengths)
kwargs = {}
# Save success log if present
if len(self._is_success_buffer) > 0:
self.evaluations_successes.append(self._is_success_buffer)
kwargs = dict(successes=self.evaluations_successes)
np.savez(
self.log_path,
timesteps=self.evaluations_timesteps,
results=self.evaluations_results,
ep_lengths=self.evaluations_length,
**kwargs,
)
mean_reward, std_reward = np.mean(episode_rewards), np.std(episode_rewards)
mean_ep_length, std_ep_length = np.mean(episode_lengths), np.std(episode_lengths)
self.last_mean_reward = mean_reward
if self.verbose > 0:
print(f"Eval num_timesteps={self.num_timesteps}, " f"episode_reward={mean_reward:.2f} +/- {std_reward:.2f}")
print(f"Episode length: {mean_ep_length:.2f} +/- {std_ep_length:.2f}")
# log mean infos from evaluation runs
log_dict(all_infos, self.logger, "eval/mean_", "mean", self.exclude_infos_from_logging)
# Add to current Logger
self.logger.record("eval/mean_reward", float(mean_reward))
self.logger.record("eval/mean_ep_length", mean_ep_length)
if len(self._is_success_buffer) > 0:
success_rate = np.mean(self._is_success_buffer)
if self.verbose > 0:
print(f"Success rate: {100 * success_rate:.2f}%")
self.logger.record("eval/success_rate", success_rate)
if mean_reward > self.best_mean_reward:
if self.verbose > 0:
print("New best mean reward!")
if self.best_model_save_path is not None:
self.model.save(os.path.join(self.best_model_save_path, "best_model"))
self.best_mean_reward = mean_reward
# Trigger callback if needed
if self.callback is not None:
return self._on_event()
def _on_step(self) -> bool:
if get_done_or_dones(self):
            self.episode_counter += 1
eval_after_step = self.n_calls % self.eval_freq == 0 and not self.eval_after_episode
        eval_after_episode = self.episode_counter % self.eval_freq == 0 and self.eval_after_episode
is_final_step = self.num_timesteps == self.model._total_timesteps
if self.eval_freq > 0 and (eval_after_step or eval_after_episode or is_final_step):
self.eval_with_info()
return True
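# A minimal usage sketch (hypothetical env id; assumes gym and stable-baselines3
# are installed):
#
#   from stable_baselines3 import PPO
#   import gym
#   eval_cb = EvalCallbackWithInfo(gym.make("CartPole-v1"), eval_freq=10, eval_after_episode=True)
#   model = PPO("MlpPolicy", "CartPole-v1").learn(10000, callback=eval_cb)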
|
py | 1a505d0fb0ea265556e4247bea3d4c9c21449508 | # -*- coding: utf-8 -*-
countries = {
"ad" : "Andorra",
"ae" : "the United Arab Emirates",
"af" : "Afghanistan",
"ag" : "Antigua and Barbuda",
"ai" : "Anguilla",
"al" : "Albania",
"am" : "Armenia",
"an" : "the Netherlands Antilles",
"ao" : "Angola",
"aq" : "Antarctica",
"ar" : "Argentina",
"as" : "American Samoa",
"at" : "Austria",
"au" : "Australia",
"aw" : "Aruba",
"ax" : "the Aland Islands",
"az" : "Azerbaijan",
"ba" : "Bosnia and Herzegovina",
"bb" : "Barbados",
"bd" : "Bangladesh",
"be" : "Belgium",
"bf" : "Burkina Faso",
"bg" : "Bulgaria",
"bh" : "Bahrain",
"bi" : "Burundi",
"bj" : "Benin",
"bl" : "Saint Bartelemey",
"bm" : "Bermuda",
"bn" : "Brunei",
"bo" : "Bolivia",
"bq" : "Bonaire, Sint Eustatius and Saba",
"br" : "Brazil",
"bs" : "the Bahamas",
"bt" : "Bhutan",
"bv" : "the Bouvet Island",
"bw" : "Botswana",
"by" : "Belarus",
"bz" : "Belize",
"ca" : "Canada",
"cc" : "the Cocos (Keeling) Islands",
"cd" : "the Democratic Republic of the Congo",
"cf" : "Central African Republic",
"cg" : "Congo",
"ch" : "Switzerland",
"ci" : u"Côte d'Ivoire",
"ck" : "the Cook Islands",
"cl" : "Chile",
"cm" : "Cameroon",
"cn" : "China",
"co" : "Colombia",
"cr" : "Costa Rica",
"cu" : "Cuba",
"cv" : "Cape Verde",
"cw" : u"Curaçao",
"cx" : "the Christmas Island",
"cy" : "Cyprus",
"cz" : "the Czech Republic",
"de" : "Germany",
"dj" : "Djibouti",
"dk" : "Denmark",
"dm" : "Dominica",
"do" : "the Dominican Republic",
"dz" : "Algeria",
"ec" : "Ecuador",
"ee" : "Estonia",
"eg" : "Egypt",
"eh" : "the Western Sahara",
"er" : "Eritrea",
"es" : "Spain",
"et" : "Ethiopia",
"fi" : "Finland",
"fj" : "Fiji",
"fk" : "the Falkland Islands (Malvinas)",
"fm" : "the Federated States of Micronesia",
"fo" : "the Faroe Islands",
"fr" : "France",
"ga" : "Gabon",
"gb" : "the United Kingdom",
"gd" : "Grenada",
"ge" : "Georgia",
"gf" : "French Guiana",
"gg" : "Guernsey",
"gh" : "Ghana",
"gi" : "Gibraltar",
"gl" : "Greenland",
"gm" : "Gambia",
"gn" : "Guinea",
"gp" : "Guadeloupe",
"gq" : "Equatorial Guinea",
"gr" : "Greece",
"gs" : "South Georgia and the South Sandwich Islands",
"gt" : "Guatemala",
"gu" : "Guam",
"gw" : "Guinea-Bissau",
"gy" : "Guyana",
"hk" : "Hong Kong",
"hm" : "Heard Island and McDonald Islands",
"hn" : "Honduras",
"hr" : "Croatia",
"ht" : "Haiti",
"hu" : "Hungary",
"id" : "Indonesia",
"ie" : "Ireland",
"il" : "Israel",
"im" : "the Isle of Man",
"in" : "India",
"io" : "the British Indian Ocean Territory",
"iq" : "Iraq",
"ir" : "Iran",
"is" : "Iceland",
"it" : "Italy",
"je" : "Jersey",
"jm" : "Jamaica",
"jo" : "Jordan",
"jp" : "Japan",
"ke" : "Kenya",
"kg" : "Kyrgyzstan",
"kh" : "Cambodia",
"ki" : "Kiribati",
"km" : "Comoros",
"kn" : "Saint Kitts and Nevis",
"kp" : "North Korea",
"kr" : "the Republic of Korea",
"kw" : "Kuwait",
"ky" : "the Cayman Islands",
"kz" : "Kazakhstan",
"la" : "Laos",
"lb" : "Lebanon",
"lc" : "Saint Lucia",
"li" : "Liechtenstein",
"lk" : "Sri Lanka",
"lr" : "Liberia",
"ls" : "Lesotho",
"lt" : "Lithuania",
"lu" : "Luxembourg",
"lv" : "Latvia",
"ly" : "Libya",
"ma" : "Morocco",
"mc" : "Monaco",
"md" : "the Republic of Moldova",
"me" : "Montenegro",
"mf" : "Saint Martin",
"mg" : "Madagascar",
"mh" : "the Marshall Islands",
"mk" : "Macedonia",
"ml" : "Mali",
"mm" : "Burma",
"mn" : "Mongolia",
"mo" : "Macau",
"mp" : "the Northern Mariana Islands",
"mq" : "Martinique",
"mr" : "Mauritania",
"ms" : "Montserrat",
"mt" : "Malta",
"mu" : "Mauritius",
"mv" : "the Maldives",
"mw" : "Malawi",
"mx" : "Mexico",
"my" : "Malaysia",
"mz" : "Mozambique",
"na" : "Namibia",
"nc" : "New Caledonia",
"ne" : "Niger",
"nf" : "Norfolk Island",
"ng" : "Nigeria",
"ni" : "Nicaragua",
"nl" : "the Netherlands",
"no" : "Norway",
"np" : "Nepal",
"nr" : "Nauru",
"nu" : "Niue",
"nz" : "New Zealand",
"om" : "Oman",
"pa" : "Panama",
"pe" : "Peru",
"pf" : "French Polynesia",
"pg" : "Papua New Guinea",
"ph" : "the Philippines",
"pk" : "Pakistan",
"pl" : "Poland",
"pm" : "Saint Pierre and Miquelon",
"pn" : "the Pitcairn Islands",
"pr" : "Puerto Rico",
"ps" : "the Palestinian Territory",
"pt" : "Portugal",
"pw" : "Palau",
"py" : "Paraguay",
"qa" : "Qatar",
"re" : "Reunion",
"ro" : "Romania",
"rs" : "Serbia",
"ru" : "Russia",
"rw" : "Rwanda",
"sa" : "Saudi Arabia",
"sb" : "the Solomon Islands",
"sc" : "the Seychelles",
"sd" : "Sudan",
"se" : "Sweden",
"sg" : "Singapore",
"sh" : "Saint Helena",
"si" : "Slovenia",
"sj" : "Svalbard and Jan Mayen",
"sk" : "Slovakia",
"sl" : "Sierra Leone",
"sm" : "San Marino",
"sn" : "Senegal",
"so" : "Somalia",
"sr" : "Suriname",
"ss" : "South Sudan",
"st" : u"São Tomé and Príncipe",
"sv" : "El Salvador",
"sx" : "Sint Maarten",
"sy" : "the Syrian Arab Republic",
"sz" : "Swaziland",
"tc" : "Turks and Caicos Islands",
"td" : "Chad",
"tf" : "the French Southern Territories",
"tg" : "Togo",
"th" : "Thailand",
"tj" : "Tajikistan",
"tk" : "Tokelau",
"tl" : "East Timor",
"tm" : "Turkmenistan",
"tn" : "Tunisia",
"to" : "Tonga",
"tr" : "Turkey",
"tt" : "Trinidad and Tobago",
"tv" : "Tuvalu",
"tw" : "Taiwan",
"tz" : "the United Republic of Tanzania",
"ua" : "Ukraine",
"ug" : "Uganda",
"um" : "the United States Minor Outlying Islands",
"us" : "the United States",
"uy" : "Uruguay",
"uz" : "Uzbekistan",
"va" : "Vatican City",
"vc" : "Saint Vincent and the Grenadines",
"ve" : "Venezuela",
"vg" : "the British Virgin Islands",
"vi" : "the United States Virgin Islands",
"vn" : "Vietnam",
"vu" : "Vanuatu",
"wf" : "Wallis and Futuna",
"ws" : "Samoa",
"xk" : "Kosovo",
"ye" : "Yemen",
"yt" : "Mayotte",
"za" : "South Africa",
"zm" : "Zambia",
"zw" : "Zimbabwe"
}
|
py | 1a505dbc4efaac7afe57495cfced1e161405e8c3 | """
======================================================================
A demo of structured Ward hierarchical clustering on an image of coins
======================================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained in order
for each segmented region to be in one piece.
"""
# Author : Vincent Michel, 2010
# Alexandre Gramfort, 2011
# License: BSD 3 clause
# %%
# Generate data
# -------------
from skimage.data import coins
orig_coins = coins()
# %%
# Resize it to 20% of the original size to speed up the processing
# Applying a Gaussian filter for smoothing prior to down-scaling
# reduces aliasing artifacts.
import numpy as np
from scipy.ndimage import gaussian_filter
from skimage.transform import rescale
smoothened_coins = gaussian_filter(orig_coins, sigma=2)
rescaled_coins = rescale(
smoothened_coins,
0.2,
mode="reflect",
anti_aliasing=False,
)
X = np.reshape(rescaled_coins, (-1, 1))
# %%
# Define structure of the data
# ----------------------------
#
# Pixels are connected to their neighbors.
from sklearn.feature_extraction.image import grid_to_graph
connectivity = grid_to_graph(*rescaled_coins.shape)
# %%
# Compute clustering
# ------------------
import time as time
from sklearn.cluster import AgglomerativeClustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 27 # number of regions
ward = AgglomerativeClustering(
n_clusters=n_clusters, linkage="ward", connectivity=connectivity
)
ward.fit(X)
label = np.reshape(ward.labels_, rescaled_coins.shape)
print(f"Elapsed time: {time.time() - st:.3f}s")
print(f"Number of pixels: {label.size}")
print(f"Number of clusters: {np.unique(label).size}")
# %%
# Plot the results on an image
# ----------------------------
#
# Agglomerative clustering is able to segment each coin; however, we have had to
# use an ``n_clusters`` larger than the number of coins because the segmentation
# also finds a large region in the background.
import matplotlib.pyplot as plt
plt.figure(figsize=(5, 5))
plt.imshow(rescaled_coins, cmap=plt.cm.gray)
for l in range(n_clusters):
plt.contour(
label == l,
colors=[
plt.cm.nipy_spectral(l / float(n_clusters)),
],
)
plt.axis("off")
plt.show()
|
py | 1a505dd13422337ccecbea02563994d94ddc2c8b | from Tkinter import *
class Test(Frame):
def printit(self):
print(self.hi_there["command"])
def createWidgets(self):
# a hello button
self.QUIT = Button(self, text='QUIT', foreground='red',
command=self.quit)
self.QUIT.pack(side=LEFT, fill=BOTH)
self.hi_there = Button(self, text='Hello',
command=self.printit)
self.hi_there.pack(side=LEFT)
# note how Packer defaults to side=TOP
self.guy2 = Button(self, text='button 2')
self.guy2.pack()
self.guy3 = Button(self, text='button 3')
self.guy3.pack()
def __init__(self, master=None):
Frame.__init__(self, master)
Pack.config(self)
self.createWidgets()
test = Test()
test.mainloop()
|
py | 1a505e2aa95532698fca8f145c2246dd34d6d4f5 | # -*- coding: utf-8 -*-
import time
from common.base_test import BaseTest
from project import INIT0_PK, INIT1_PK, INIT2_PK, INIT3_PK, INIT4_PK
import lemoncheesecake.api as lcc
from lemoncheesecake.matching import check_that, not_equal_to
SUITE = {
"description": "Operation 'committee_member_deactivate'"
}
@lcc.prop("main", "type")
@lcc.tags("operations", "committee_member_operations", "committee_member_deactivate")
@lcc.suite("Check work of operation 'committee_member_deactivate'", rank=1)
class CommitteeMemberDeactivate(BaseTest):
def __init__(self):
super().__init__()
self.__database_api_identifier = None
self.init0 = None
self.init1 = None
self.init2 = None
self.init3 = None
self.init4 = None
def setup_suite(self):
super().setup_suite()
self._connect_to_ethereum()
self._connect_to_echopy_lib()
lcc.set_step("Setup for {}".format(self.__class__.__name__))
self.__database_api_identifier = self.get_identifier("database")
self.committee_members_info = self.get_active_committee_members_info(self.__database_api_identifier)
self.init0 = self.committee_members_info[0]["account_id"]
self.init1 = self.committee_members_info[1]["account_id"]
self.init2 = self.committee_members_info[2]["account_id"]
self.init3 = self.committee_members_info[3]["account_id"]
self.init4 = self.committee_members_info[4]["account_id"]
lcc.log_info(
"Echo initial accounts: {}, {}, {}, {}, {}".format(
self.init0, self.init1, self.init2, self.init3, self.init4
)
)
def teardown_suite(self):
self._disconnect_to_echopy_lib()
super().teardown_suite()
@lcc.test("Simple work of operation 'committee_member_deactivate'")
@lcc.depends_on("Operations.CommitteeMember.CommitteeMemberActivate.CommitteeMemberActivate.method_main_check")
def method_main_check(self):
operation = self.echo_ops.get_committee_member_deactivate_operation(
echo=self.echo,
committee_member_account=self.init0,
committee_to_deactivate=self.committee_members_info[-1]["committee_id"],
signer=INIT0_PK
)
collected_operation = self.collect_operations(operation, self.__database_api_identifier)
lcc.log_info("Collected successfully")
lcc.set_step("Make proposal of deactivating new account")
operation = self.echo_ops.get_proposal_create_operation(
echo=self.echo,
fee_paying_account=self.init0,
proposed_ops=collected_operation,
expiration_time=self.get_expiration_time(15),
review_period_seconds=10,
signer=INIT0_PK
)
collected_operation = self.collect_operations(operation, self.__database_api_identifier)
        broadcast_result = self.echo_ops.broadcast(echo=self.echo, list_operations=collected_operation)
if not self.is_operation_completed(broadcast_result, expected_static_variant=1):
raise Exception("Operation 'proposal_created' failed while broadcast")
proposal_id = broadcast_result["trx"]["operation_results"][0][1]
lcc.set_step("Make voting of deactivating new account")
operation = self.echo_ops.get_proposal_update_operation(
echo=self.echo,
fee_paying_account=self.init0,
proposal=proposal_id,
active_approvals_to_add=[self.init0, self.init1, self.init2, self.init3, self.init4],
active_approvals_to_remove=[],
key_approvals_to_add=[],
key_approvals_to_remove=[],
signer=[INIT0_PK, INIT1_PK, INIT2_PK, INIT3_PK, INIT4_PK]
)
collected_operation = self.collect_operations(operation, self.__database_api_identifier)
broadcast_result = self.echo_ops.broadcast(echo=self.echo, list_operations=collected_operation)
if not self.is_operation_completed(broadcast_result, expected_static_variant=0):
raise Exception("Operation 'proposal_update' failed while broadcast")
lcc.log_info("All committee member has voted")
lcc.set_step(
"Waiting for maintenance and release of two blocks and check that new committee member were deactivated"
)
self.produce_block(self.__database_api_identifier)
time.sleep(15)
self.produce_block(self.__database_api_identifier)
check_that(
"acitve committee member",
self.committee_members_info[-1]["account_id"],
not_equal_to(self.get_active_committee_members_info(self.__database_api_identifier)[-1]["account_id"]),
quiet=True
)
|
py | 1a50602a71266d11227582a7cbd30ef665d0ebd9 | # Copyright (c) 2015, Dataent Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import dataent
import os
import unittest
from dataent.utils.file_manager import save_file, get_file, get_files_path
test_content1 = 'Hello'
test_content2 = 'Hello World'
def make_test_doc():
d = dataent.new_doc('ToDo')
d.description = 'Test'
d.save()
return d.doctype, d.name
class TestSimpleFile(unittest.TestCase):
def setUp(self):
self.attached_to_doctype, self.attached_to_docname = make_test_doc()
self.test_content = test_content1
self.saved_file = save_file('hello.txt', self.test_content, self.attached_to_doctype, self.attached_to_docname)
self.saved_filename = get_files_path(self.saved_file.file_name)
def test_save(self):
filename, content = get_file(self.saved_file.name)
self.assertEqual(content, self.test_content)
def tearDown(self):
# File gets deleted on rollback, so blank
pass
class TestSameFileName(unittest.TestCase):
def setUp(self):
self.attached_to_doctype, self.attached_to_docname = make_test_doc()
self.test_content1 = test_content1
self.test_content2 = test_content2
self.saved_file1 = save_file('hello.txt', self.test_content1, self.attached_to_doctype, self.attached_to_docname)
self.saved_file2 = save_file('hello.txt', self.test_content2, self.attached_to_doctype, self.attached_to_docname)
self.saved_filename1 = get_files_path(self.saved_file1.file_name)
self.saved_filename2 = get_files_path(self.saved_file2.file_name)
def test_saved_content(self):
filename1, content1 = get_file(self.saved_file1.name)
self.assertEqual(content1, self.test_content1)
filename2, content2 = get_file(self.saved_file2.name)
self.assertEqual(content2, self.test_content2)
def tearDown(self):
# File gets deleted on rollback, so blank
pass
class TestSameContent(unittest.TestCase):
def setUp(self):
self.attached_to_doctype1, self.attached_to_docname1 = make_test_doc()
self.attached_to_doctype2, self.attached_to_docname2 = make_test_doc()
self.test_content1 = test_content1
self.test_content2 = test_content1
self.orig_filename = 'hello.txt'
self.dup_filename = 'hello2.txt'
self.saved_file1 = save_file(self.orig_filename, self.test_content1, self.attached_to_doctype1, self.attached_to_docname1)
self.saved_file2 = save_file(self.dup_filename, self.test_content2, self.attached_to_doctype2, self.attached_to_docname2)
self.saved_filename1 = get_files_path(self.saved_file1.file_name)
self.saved_filename2 = get_files_path(self.saved_file2.file_name)
def test_saved_content(self):
filename1, content1 = get_file(self.saved_file1.name)
filename2, content2 = get_file(self.saved_file2.name)
self.assertEqual(filename1, filename2)
self.assertFalse(os.path.exists(get_files_path(self.dup_filename)))
def tearDown(self):
# File gets deleted on rollback, so blank
pass
|
py | 1a5061fa8eaa75165019221d64224e6c9602ed66 | #!/usr/bin/env python
def sum_even_fib(limit):
fib_list = [1, 2]
even_fib_list = [2]
next_fib = fib_list[-1] + fib_list[-2]
while(next_fib <= limit):
next_fib = fib_list[-1] + fib_list[-2]
fib_list.append(next_fib)
if next_fib % 2 == 0:
even_fib_list.append(next_fib)
return sum(even_fib_list)
print sum_even_fib(4*10**6)
def sum_even_fib(limit):
even_fib_list = [2]
last_fib = 2
sec_last_fib = 1
next_fib = last_fib + sec_last_fib
while(next_fib <= limit):
next_fib = last_fib + sec_last_fib
sec_last_fib = last_fib
last_fib = next_fib
if last_fib % 2 == 0:
even_fib_list.append(last_fib)
return sum(even_fib_list)
print sum_even_fib(4*10**6)
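# Aside (not part of the original solution, added as a hedged sketch): every
# third Fibonacci number is even, and the even terms obey their own recurrence
# E(n) = 4*E(n-1) + E(n-2) (2, 8, 34, 144, ...), so for limit >= 2 the even
# sum can be computed without touching the odd terms at all.
def sum_even_fib_direct(limit):
    prev, cur = 2, 8  # the first two even Fibonacci numbers
    total = prev
    while cur <= limit:
        total += cur
        prev, cur = cur, 4 * cur + prev
    return total
assert sum_even_fib_direct(4 * 10**6) == sum_even_fib(4 * 10**6)
print(sum_even_fib_direct(4 * 10**6))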
|
py | 1a5062db634952b09bf2921c99cf52cef5d60fb5 | from flaskapp.models import Question
from test.main.base_classes import BaseUnit
from test.main.utils import test_post_request
class AddQuestionTestCase(BaseUnit):
def test_add_sub_question(self):
# Test valid data
new_question = dict(
question="Is it okay?",
mark=8,
difficulty="Easy",
cognitive_level="Application",
imp=True,
submit="submit",
)
_, question = test_post_request(self,
"/course/1/unit/1/question/sub/new/",
new_question, Question, 1)
# Testing if repr method is working
self.assertEqual(
str(question),
"Question(Is it okay?, 8, Easy, Application, sub, True)",
)
# Test invalid data
new_question = dict(
question="Isn't it okay?",
mark=None,
imp=False,
difficulty="Easy",
cognitive_level="Application",
submit="submit",
)
self.assertRaises(
AttributeError,
test_post_request,
self,
"/course/1/unit/1/question/sub/new/",
new_question,
Question,
2,
)
def test_add_mcq_question(self):
# test valid data
new_mcq = dict(
question="Rate it",
mark=8,
difficulty="Easy",
cognitive_level="Application",
imp=None,
option1="10",
option2="9",
option3="8",
option4="7",
)
_, mcq = test_post_request(self, "/course/1/unit/1/question/mcq/new/",
new_mcq, Question, 1)
# test repr method
self.assertEqual(
str(mcq),
"Question(Rate it, 8, Easy, Application, mcq, False)",
)
# test invalid data
new_mcq = dict(
question=None,
mark=8,
difficulty="Easy",
cognitive_level="Application",
imp=True,
submit="submit",
option1="A",
option2="B",
option3="C",
option4="D",
)
self.assertRaises(
AttributeError,
test_post_request,
self,
"/course/1/unit/1/question/mcq/new/",
new_mcq,
Question,
2,
)
|
py | 1a5063640af479e08d6762eb2ca4cd99fc16093d | import keras.metrics
import tensorflow as tf
def weighted_crossentropy(y_true, y_pred):
class_weights = tf.constant([[[[1., 1., 10.]]]])
unweighted_losses = tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_true, logits=y_pred)
weights = tf.reduce_sum(class_weights * y_true, axis=-1)
weighted_losses = weights * unweighted_losses
loss = tf.reduce_mean(weighted_losses)
return loss
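# Hedged usage sketch (not part of the original file): any Keras model whose
# final layer emits raw 3-channel logits (no softmax -- the loss applies the
# softmax itself) can be compiled with this loss. A deliberately tiny example,
# assuming the same TF 1.x / standalone Keras setup as the code above:
if __name__ == '__main__':
    from keras.models import Sequential
    from keras.layers import Conv2D
    model = Sequential([
        Conv2D(3, 1, input_shape=(None, None, 4)),  # per-pixel logits, 3 classes
    ])
    model.compile(optimizer='adam', loss=weighted_crossentropy)
    model.summary()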
|
py | 1a5063a23abd1c9c30c96e9eee1a5d3823383a83 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
"""
import numpy as np
from scipy.integrate._ivp.ivp import OdeResult
import matplotlib.pyplot as plt
plt.style.use('seaborn')
def solve_ivp(fun, t_span, y0, t_eval=None, dt=0.01):
t0, tf = float(t_span[0]), float(t_span[-1])
if t_eval is not None:
assert t0 == t_eval[0]
assert tf == t_eval[-1]
# these variables are only needed if t_eval is not None
i = 1
tp = t0
yp = y0
t = t0
y = y0
ts = [t]
ys = [y]
    while t < tf:
        # One explicit (forward) Euler step.
        y = y + dt*fun(t,y)
        t = t + dt
        if t_eval is not None:
            # Emit every requested output time that this step reached or crossed.
            while i < len(t_eval) and t >= t_eval[i]:
                if t == t_eval[i]:
                    ts.append(t)
                    ys.append(y)
                    i += 1
                elif t > t_eval[i]:
                    # Linearly interpolate between the previous and current step.
                    yint = yp + (t_eval[i]-tp)*(y-yp)/(t-tp)
                    ts.append(t_eval[i])
                    ys.append(yint)
                    i += 1
            tp = t
            yp = y
else:
ts.append(t)
ys.append(y)
ts = np.hstack(ts)
ys = np.vstack(ys).T
return OdeResult(t=ts, y=ys)
if __name__ == "__main__":
# stability region for Euler forward for this problem is h<2/50=0.04
@np.vectorize
def func(t,y):
return -50*y
# t_span = (0,1)
# y0 = np.array([1,1])
#
# sol = solve_ivp(func, t_span, y0 )
#
# plt.figure()
# plt.plot(sol.t, sol.y)
t_eval = np.linspace(0,1,10)
y0 = np.array([1])
sol = solve_ivp(func, [t_eval[0], t_eval[-1]], y0, t_eval=t_eval)
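    # Hedged check (not in the original script): for y' = -50*y with y(0) = 1
    # the exact solution is y(t) = exp(-50*t), so the interpolated output can
    # be compared against it directly.
    exact = np.exp(-50 * sol.t)
    print("max abs error vs exact solution:", np.max(np.abs(sol.y[0] - exact)))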
|
py | 1a5063bab8a95246ce4b824f868d12a78e67227f | #!/usr/bin/env python3
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import io
import re
import string
from test.helper import FakeYDL
from yt_dlp.extractor import YoutubeIE
from yt_dlp.compat import compat_str, compat_urlretrieve
_TESTS = [
(
'https://s.ytimg.com/yts/jsbin/html5player-vflHOr_nV.js',
86,
'>=<;:/.-[+*)(\'&%$#"!ZYX0VUTSRQPONMLKJIHGFEDCBA\\yxwvutsrqponmlkjihgfedcba987654321',
),
(
'https://s.ytimg.com/yts/jsbin/html5player-vfldJ8xgI.js',
85,
'3456789a0cdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRS[UVWXYZ!"#$%&\'()*+,-./:;<=>?@',
),
(
'https://s.ytimg.com/yts/jsbin/html5player-vfle-mVwz.js',
90,
']\\[@?>=<;:/.-,+*)(\'&%$#"hZYXWVUTSRQPONMLKJIHGFEDCBAzyxwvutsrqponmlkjiagfedcb39876',
),
(
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vfl0Cbn9e.js',
84,
'O1I3456789abcde0ghijklmnopqrstuvwxyzABCDEFGHfJKLMN2PQRSTUVW@YZ!"#$%&\'()*+,-./:;<=',
),
(
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflXGBaUN.js',
'2ACFC7A61CA478CD21425E5A57EBD73DDC78E22A.2094302436B2D377D14A3BBA23022D023B8BC25AA',
'A52CB8B320D22032ABB3A41D773D2B6342034902.A22E87CDD37DBE75A5E52412DC874AC16A7CFCA2',
),
(
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflBb0OQx.js',
84,
'123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQ0STUVWXYZ!"#$%&\'()*+,@./:;<=>'
),
(
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vfl9FYC6l.js',
83,
'123456789abcdefghijklmnopqr0tuvwxyzABCDETGHIJKLMNOPQRS>UVWXYZ!"#$%&\'()*+,-./:;<=F'
),
(
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflCGk6yw/html5player.js',
'4646B5181C6C3020DF1D9C7FCFEA.AD80ABF70C39BD369CCCAE780AFBB98FA6B6CB42766249D9488C288',
'82C8849D94266724DC6B6AF89BBFA087EACCD963.B93C07FBA084ACAEFCF7C9D1FD0203C6C1815B6B'
),
(
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflKjOTVq/html5player.js',
'312AA52209E3623129A412D56A40F11CB0AF14AE.3EE09501CB14E3BCDC3B2AE808BF3F1D14E7FBF12',
'112AA5220913623229A412D56A40F11CB0AF14AE.3EE0950FCB14EEBCDC3B2AE808BF331D14E7FBF3',
)
]
class TestPlayerInfo(unittest.TestCase):
def test_youtube_extract_player_info(self):
PLAYER_URLS = (
('https://www.youtube.com/s/player/64dddad9/player_ias.vflset/en_US/base.js', '64dddad9'),
('https://www.youtube.com/s/player/64dddad9/player_ias.vflset/fr_FR/base.js', '64dddad9'),
('https://www.youtube.com/s/player/64dddad9/player-plasma-ias-phone-en_US.vflset/base.js', '64dddad9'),
('https://www.youtube.com/s/player/64dddad9/player-plasma-ias-phone-de_DE.vflset/base.js', '64dddad9'),
('https://www.youtube.com/s/player/64dddad9/player-plasma-ias-tablet-en_US.vflset/base.js', '64dddad9'),
# obsolete
('https://www.youtube.com/yts/jsbin/player_ias-vfle4-e03/en_US/base.js', 'vfle4-e03'),
('https://www.youtube.com/yts/jsbin/player_ias-vfl49f_g4/en_US/base.js', 'vfl49f_g4'),
('https://www.youtube.com/yts/jsbin/player_ias-vflCPQUIL/en_US/base.js', 'vflCPQUIL'),
('https://www.youtube.com/yts/jsbin/player-vflzQZbt7/en_US/base.js', 'vflzQZbt7'),
('https://www.youtube.com/yts/jsbin/player-en_US-vflaxXRn1/base.js', 'vflaxXRn1'),
('https://s.ytimg.com/yts/jsbin/html5player-en_US-vflXGBaUN.js', 'vflXGBaUN'),
('https://s.ytimg.com/yts/jsbin/html5player-en_US-vflKjOTVq/html5player.js', 'vflKjOTVq'),
)
for player_url, expected_player_id in PLAYER_URLS:
player_id = YoutubeIE._extract_player_info(player_url)
self.assertEqual(player_id, expected_player_id)
class TestSignature(unittest.TestCase):
def setUp(self):
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
self.TESTDATA_DIR = os.path.join(TEST_DIR, 'testdata')
if not os.path.exists(self.TESTDATA_DIR):
os.mkdir(self.TESTDATA_DIR)
def make_tfunc(url, sig_input, expected_sig):
m = re.match(r'.*-([a-zA-Z0-9_-]+)(?:/watch_as3|/html5player)?\.[a-z]+$', url)
assert m, '%r should follow URL format' % url
test_id = m.group(1)
def test_func(self):
basename = 'player-%s.js' % test_id
fn = os.path.join(self.TESTDATA_DIR, basename)
if not os.path.exists(fn):
compat_urlretrieve(url, fn)
ydl = FakeYDL()
ie = YoutubeIE(ydl)
with io.open(fn, encoding='utf-8') as testf:
jscode = testf.read()
func = ie._parse_sig_js(jscode)
src_sig = (
compat_str(string.printable[:sig_input])
if isinstance(sig_input, int) else sig_input)
got_sig = func(src_sig)
self.assertEqual(got_sig, expected_sig)
test_func.__name__ = str('test_signature_js_' + test_id)
setattr(TestSignature, test_func.__name__, test_func)
for test_spec in _TESTS:
make_tfunc(*test_spec)
if __name__ == '__main__':
unittest.main()
|
py | 1a50644e69a39683c380f6120f5e6d4a42979907 | # -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749
~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for consuming OAuth 2.0 RFC6749.
"""
from __future__ import absolute_import, unicode_literals
import time
import warnings
from oauthlib.common import generate_token
from oauthlib.oauth2.rfc6749 import tokens
from oauthlib.oauth2.rfc6749.errors import (InsecureTransportError,
TokenExpiredError)
from oauthlib.oauth2.rfc6749.parameters import (parse_token_response,
prepare_token_request,
prepare_token_revocation_request)
from oauthlib.oauth2.rfc6749.utils import is_secure_transport
AUTH_HEADER = 'auth_header'
URI_QUERY = 'query'
BODY = 'body'
FORM_ENC_HEADERS = {
'Content-Type': 'application/x-www-form-urlencoded'
}
class Client(object):
"""Base OAuth2 client responsible for access token management.
This class also acts as a generic interface providing methods common to all
client types such as ``prepare_authorization_request`` and
``prepare_token_revocation_request``. The ``prepare_x_request`` methods are
the recommended way of interacting with clients (as opposed to the abstract
prepare uri/body/etc methods). They are recommended over the older set
because they are easier to use (more consistent) and add a few additional
security checks, such as HTTPS and state checking.
Some of these methods require further implementation only provided by the
specific purpose clients such as
:py:class:`oauthlib.oauth2.MobileApplicationClient` and thus you should always
seek to use the client class matching the OAuth workflow you need. For
Python, this is usually :py:class:`oauthlib.oauth2.WebApplicationClient`.
"""
refresh_token_key = 'refresh_token'
def __init__(self, client_id,
default_token_placement=AUTH_HEADER,
token_type='Bearer',
access_token=None,
refresh_token=None,
mac_key=None,
mac_algorithm=None,
token=None,
scope=None,
state=None,
redirect_url=None,
state_generator=generate_token,
**kwargs):
"""Initialize a client with commonly used attributes.
:param client_id: Client identifier given by the OAuth provider upon
registration.
:param default_token_placement: Tokens can be supplied in the Authorization
header (default), the URL query component (``query``) or the request
body (``body``).
:param token_type: OAuth 2 token type. Defaults to Bearer. Change this
if you specify the ``access_token`` parameter and know it is of a
different token type, such as a MAC, JWT or SAML token. Can
also be supplied as ``token_type`` inside the ``token`` dict parameter.
:param access_token: An access token (string) used to authenticate
requests to protected resources. Can also be supplied inside the
``token`` dict parameter.
:param refresh_token: A refresh token (string) used to refresh expired
tokens. Can also be supplied inside the ``token`` dict parameter.
:param mac_key: Encryption key used with MAC tokens.
:param mac_algorithm: Hashing algorithm for MAC tokens.
:param token: A dict of token attributes such as ``access_token``,
``token_type`` and ``expires_at``.
:param scope: A list of default scopes to request authorization for.
:param state: A CSRF protection string used during authorization.
:param redirect_url: The redirection endpoint on the client side to which
the user returns after authorization.
:param state_generator: A no argument state generation callable. Defaults
to :py:meth:`oauthlib.common.generate_token`.
"""
self.client_id = client_id
self.default_token_placement = default_token_placement
self.token_type = token_type
self.access_token = access_token
self.refresh_token = refresh_token
self.mac_key = mac_key
self.mac_algorithm = mac_algorithm
self.token = token or {}
self.scope = scope
self.state_generator = state_generator
self.state = state
self.redirect_url = redirect_url
self.code = None
self.expires_in = None
self._expires_at = None
self.populate_token_attributes(self.token)
@property
def token_types(self):
"""Supported token types and their respective methods
Additional tokens can be supported by extending this dictionary.
The Bearer token spec is stable and safe to use.
The MAC token spec is not yet stable and support for MAC tokens
        is experimental and currently matches version 00 of the spec.
"""
return {
'Bearer': self._add_bearer_token,
'MAC': self._add_mac_token
}
def prepare_request_uri(self, *args, **kwargs):
"""Abstract method used to create request URIs."""
raise NotImplementedError("Must be implemented by inheriting classes.")
def prepare_request_body(self, *args, **kwargs):
"""Abstract method used to create request bodies."""
raise NotImplementedError("Must be implemented by inheriting classes.")
def parse_request_uri_response(self, *args, **kwargs):
"""Abstract method used to parse redirection responses."""
raise NotImplementedError("Must be implemented by inheriting classes.")
def add_token(self, uri, http_method='GET', body=None, headers=None,
token_placement=None, **kwargs):
"""Add token to the request uri, body or authorization header.
The access token type provides the client with the information
required to successfully utilize the access token to make a protected
resource request (along with type-specific attributes). The client
MUST NOT use an access token if it does not understand the token
type.
For example, the "bearer" token type defined in
[`I-D.ietf-oauth-v2-bearer`_] is utilized by simply including the access
token string in the request:
.. code-block:: http
GET /resource/1 HTTP/1.1
Host: example.com
Authorization: Bearer mF_9.B5f-4.1JqM
while the "mac" token type defined in [`I-D.ietf-oauth-v2-http-mac`_] is
utilized by issuing a MAC key together with the access token which is
used to sign certain components of the HTTP requests:
.. code-block:: http
GET /resource/1 HTTP/1.1
Host: example.com
Authorization: MAC id="h480djs93hd8",
nonce="274312:dj83hs9s",
mac="kDZvddkndxvhGRXZhvuDjEWhGeE="
.. _`I-D.ietf-oauth-v2-bearer`: https://tools.ietf.org/html/rfc6749#section-12.2
.. _`I-D.ietf-oauth-v2-http-mac`: https://tools.ietf.org/html/rfc6749#section-12.2
"""
if not is_secure_transport(uri):
raise InsecureTransportError()
token_placement = token_placement or self.default_token_placement
case_insensitive_token_types = dict(
(k.lower(), v) for k, v in self.token_types.items())
if not self.token_type.lower() in case_insensitive_token_types:
raise ValueError("Unsupported token type: %s" % self.token_type)
if not (self.access_token or self.token.get('access_token')):
raise ValueError("Missing access token.")
if self._expires_at and self._expires_at < time.time():
raise TokenExpiredError()
return case_insensitive_token_types[self.token_type.lower()](uri, http_method, body,
headers, token_placement, **kwargs)
def prepare_authorization_request(self, authorization_url, state=None,
redirect_url=None, scope=None, **kwargs):
"""Prepare the authorization request.
This is the first step in many OAuth flows in which the user is
redirected to a certain authorization URL. This method adds
required parameters to the authorization URL.
:param authorization_url: Provider authorization endpoint URL.
:param state: CSRF protection string. Will be automatically created if
not provided. The generated state is available via the ``state``
attribute. Clients should verify that the state is unchanged and
present in the authorization response. This verification is done
automatically if using the ``authorization_response`` parameter
with ``prepare_token_request``.
:param redirect_url: Redirect URL to which the user will be returned
after authorization. Must be provided unless previously setup with
the provider. If provided then it must also be provided in the
token request.
        :param kwargs: Additional parameters to include in the request.
:returns: The prepared request tuple with (url, headers, body).
"""
if not is_secure_transport(authorization_url):
raise InsecureTransportError()
self.state = state or self.state_generator()
self.redirect_url = redirect_url or self.redirect_url
self.scope = scope or self.scope
auth_url = self.prepare_request_uri(
authorization_url, redirect_uri=self.redirect_url,
scope=self.scope, state=self.state, **kwargs)
return auth_url, FORM_ENC_HEADERS, ''
def prepare_token_request(self, token_url, authorization_response=None,
redirect_url=None, state=None, body='', **kwargs):
"""Prepare a token creation request.
Note that these requests usually require client authentication, either
by including client_id or a set of provider specific authentication
credentials.
:param token_url: Provider token creation endpoint URL.
:param authorization_response: The full redirection URL string, i.e.
            the location to which the user was redirected after successful
authorization. Used to mine credentials needed to obtain a token
in this step, such as authorization code.
:param redirect_url: The redirect_url supplied with the authorization
request (if there was one).
:param body: Existing request body (URL encoded string) to embed parameters
            into. This may contain extra parameters. Default ''.
        :param kwargs: Additional parameters to include in the request.
:returns: The prepared request tuple with (url, headers, body).
"""
if not is_secure_transport(token_url):
raise InsecureTransportError()
state = state or self.state
if authorization_response:
self.parse_request_uri_response(
authorization_response, state=state)
self.redirect_url = redirect_url or self.redirect_url
body = self.prepare_request_body(body=body,
redirect_uri=self.redirect_url, **kwargs)
return token_url, FORM_ENC_HEADERS, body
def prepare_refresh_token_request(self, token_url, refresh_token=None,
body='', scope=None, **kwargs):
"""Prepare an access token refresh request.
Expired access tokens can be replaced by new access tokens without
going through the OAuth dance if the client obtained a refresh token.
This refresh token and authentication credentials can be used to
obtain a new access token, and possibly a new refresh token.
:param token_url: Provider token refresh endpoint URL.
:param refresh_token: Refresh token string.
:param body: Existing request body (URL encoded string) to embed parameters
            into. This may contain extra parameters. Default ''.
:param scope: List of scopes to request. Must be equal to
or a subset of the scopes granted when obtaining the refresh
token.
        :param kwargs: Additional parameters to include in the request.
:returns: The prepared request tuple with (url, headers, body).
"""
if not is_secure_transport(token_url):
raise InsecureTransportError()
self.scope = scope or self.scope
body = self.prepare_refresh_body(body=body,
refresh_token=refresh_token, scope=self.scope, **kwargs)
return token_url, FORM_ENC_HEADERS, body
def prepare_token_revocation_request(self, revocation_url, token,
token_type_hint="access_token", body='', callback=None, **kwargs):
"""Prepare a token revocation request.
:param revocation_url: Provider token revocation endpoint URL.
:param token: The access or refresh token to be revoked (string).
:param token_type_hint: ``"access_token"`` (default) or
``"refresh_token"``. This is optional and if you wish to not pass it you
must provide ``token_type_hint=None``.
:param callback: A jsonp callback such as ``package.callback`` to be invoked
            upon receiving the response. Note that it should not include a () suffix.
        :param kwargs: Additional parameters to include in the request.
:returns: The prepared request tuple with (url, headers, body).
        Note that a JSONP request may use a GET request, as the parameters will
be added to the request URL query as opposed to the request body.
An example of a revocation request
        .. code-block:: http
POST /revoke HTTP/1.1
Host: server.example.com
Content-Type: application/x-www-form-urlencoded
Authorization: Basic czZCaGRSa3F0MzpnWDFmQmF0M2JW
token=45ghiukldjahdnhzdauz&token_type_hint=refresh_token
An example of a jsonp revocation request
        .. code-block:: http
GET /revoke?token=agabcdefddddafdd&callback=package.myCallback HTTP/1.1
Host: server.example.com
Content-Type: application/x-www-form-urlencoded
Authorization: Basic czZCaGRSa3F0MzpnWDFmQmF0M2JW
and an error response
        .. code-block:: http
package.myCallback({"error":"unsupported_token_type"});
Note that these requests usually require client credentials, client_id in
        the case of public clients and provider-specific authentication
credentials for confidential clients.
"""
if not is_secure_transport(revocation_url):
raise InsecureTransportError()
return prepare_token_revocation_request(revocation_url, token,
token_type_hint=token_type_hint, body=body, callback=callback,
**kwargs)
def parse_request_body_response(self, body, scope=None, **kwargs):
"""Parse the JSON response body.
If the access token request is valid and authorized, the
authorization server issues an access token as described in
`Section 5.1`_. A refresh token SHOULD NOT be included. If the request
failed client authentication or is invalid, the authorization server
returns an error response as described in `Section 5.2`_.
:param body: The response body from the token request.
:param scope: Scopes originally requested.
:return: Dictionary of token parameters.
:raises: Warning if scope has changed. OAuth2Error if response is invalid.
        These responses are JSON encoded and could easily be parsed without
the assistance of OAuthLib. However, there are a few subtle issues
to be aware of regarding the response which are helpfully addressed
through the raising of various errors.
A successful response should always contain
**access_token**
The access token issued by the authorization server. Often
a random string.
**token_type**
The type of the token issued as described in `Section 7.1`_.
Commonly ``Bearer``.
        While it is not mandated, it is recommended that the provider include
**expires_in**
The lifetime in seconds of the access token. For
example, the value "3600" denotes that the access token will
expire in one hour from the time the response was generated.
If omitted, the authorization server SHOULD provide the
expiration time via other means or document the default value.
**scope**
            Providers may supply this in all responses but are required to do so only
if it has changed since the authorization request.
.. _`Section 5.1`: https://tools.ietf.org/html/rfc6749#section-5.1
.. _`Section 5.2`: https://tools.ietf.org/html/rfc6749#section-5.2
.. _`Section 7.1`: https://tools.ietf.org/html/rfc6749#section-7.1
"""
self.token = parse_token_response(body, scope=scope)
self.populate_token_attributes(self.token)
return self.token
def prepare_refresh_body(self, body='', refresh_token=None, scope=None, **kwargs):
"""Prepare an access token request, using a refresh token.
If the authorization server issued a refresh token to the client, the
client makes a refresh request to the token endpoint by adding the
following parameters using the "application/x-www-form-urlencoded"
format in the HTTP request entity-body:
grant_type
REQUIRED. Value MUST be set to "refresh_token".
refresh_token
REQUIRED. The refresh token issued to the client.
scope
OPTIONAL. The scope of the access request as described by
Section 3.3. The requested scope MUST NOT include any scope
not originally granted by the resource owner, and if omitted is
treated as equal to the scope originally granted by the
resource owner.
"""
refresh_token = refresh_token or self.refresh_token
return prepare_token_request(self.refresh_token_key, body=body, scope=scope,
refresh_token=refresh_token, **kwargs)
def _add_bearer_token(self, uri, http_method='GET', body=None,
headers=None, token_placement=None):
"""Add a bearer token to the request uri, body or authorization header."""
if token_placement == AUTH_HEADER:
headers = tokens.prepare_bearer_headers(self.access_token, headers)
elif token_placement == URI_QUERY:
uri = tokens.prepare_bearer_uri(self.access_token, uri)
elif token_placement == BODY:
body = tokens.prepare_bearer_body(self.access_token, body)
else:
raise ValueError("Invalid token placement.")
return uri, headers, body
def _add_mac_token(self, uri, http_method='GET', body=None,
headers=None, token_placement=AUTH_HEADER, ext=None, **kwargs):
"""Add a MAC token to the request authorization header.
Warning: MAC token support is experimental as the spec is not yet stable.
"""
headers = tokens.prepare_mac_header(self.access_token, uri,
self.mac_key, http_method, headers=headers, body=body, ext=ext,
hash_algorithm=self.mac_algorithm, **kwargs)
return uri, headers, body
def _populate_attributes(self, response):
warnings.warn("Please switch to the public method "
"populate_token_attributes.", DeprecationWarning)
return self.populate_token_attributes(response)
def populate_code_attributes(self, response):
"""Add attributes from an auth code response to self."""
if 'code' in response:
self.code = response.get('code')
def populate_token_attributes(self, response):
"""Add attributes from a token exchange response to self."""
if 'access_token' in response:
self.access_token = response.get('access_token')
if 'refresh_token' in response:
self.refresh_token = response.get('refresh_token')
if 'token_type' in response:
self.token_type = response.get('token_type')
if 'expires_in' in response:
self.expires_in = response.get('expires_in')
self._expires_at = time.time() + int(self.expires_in)
if 'expires_at' in response:
self._expires_at = int(response.get('expires_at'))
if 'mac_key' in response:
self.mac_key = response.get('mac_key')
if 'mac_algorithm' in response:
self.mac_algorithm = response.get('mac_algorithm')
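# Hedged usage sketch (not part of the original module): the recommended
# ``prepare_x_request`` flow from the class docstring, shown with
# WebApplicationClient (defined elsewhere in oauthlib.oauth2) and placeholder
# endpoint URLs:
#
#     from oauthlib.oauth2 import WebApplicationClient
#     client = WebApplicationClient('my_client_id')
#     url, headers, body = client.prepare_authorization_request(
#         'https://provider.example.com/authorize',
#         redirect_url='https://app.example.com/callback',
#         scope=['profile'])
#     # ...redirect the user to `url`; once they return with a code:
#     url, headers, body = client.prepare_token_request(
#         'https://provider.example.com/token',
#         authorization_response='https://app.example.com/callback?code=...&state=...')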
|
py | 1a50646bd81f469e039bc65dbaee2423d2bd8d4f | import numpy as np
Y=np.array([[0,1], [0,1], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [1,0], [1,0], [1,0], [0,1], [1,0], [1,0], [1,0], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [1,0], [0,1], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [0,1], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [1,0], [1,0], [0,1], [1,0], [0,1], [1,0], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [1,0], [0,1], [1,0], [1,0], [0,1], [0,1], [1,0], [1,0], [1,0], [0,1], [1,0], [0,1], [1,0], [1,0], [0,1], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [1,0], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [1,0], [1,0], [1,0], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [0,1], [1,0], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [1,0], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [1,0], [0,1], [1,0], [0,1], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [1,0], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [0,1], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], 
[1,0], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [0,1], [1,0], [0,1], [1,0], [1,0], [0,1], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [1,0], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [1,0], [1,0], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [1,0], [1,0], [1,0], [1,0], [0,1], [1,0], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [0,1], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [1,0], [0,1], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [1,0], [1,0], [0,1], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [1,0], [0,1], [1,0], [1,0], [1,0], [1,0], [0,1], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [1,0], [0,1], [1,0], [1,0], [0,1], [1,0], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [1,0], [1,0], [0,1], [1,0], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [0,1], [1,0], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], 
[1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [1,0], [0,1], [1,0], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [0,1], [1,0], [1,0], [1,0], [0,1], [1,0], [0,1], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [0,1], [1,0], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [1,0], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [1,0], [1,0], [0,1], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [0,1], [1,0], [1,0], [0,1], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [1,0], [0,1], [1,0], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [0,1], [1,0], [0,1], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [1,0], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [1,0], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [1,0], [0,1], [1,0], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [1,0], [1,0], [0,1], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [1,0], [0,1], [1,0], [0,1], [1,0], [1,0], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [1,0], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [1,0], [0,1], [1,0], [1,0], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [1,0], [1,0], [0,1], [1,0], [0,1], 
[1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [1,0], [0,1], [1,0], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [0,1], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [1,0], [1,0], [1,0], [0,1], [1,0], [0,1], [1,0], [0,1], [1,0], [1,0], [0,1], [1,0], [0,1], [1,0], [1,0], [1,0], [1,0], [0,1], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [1,0], [1,0], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [1,0], [1,0], [1,0], [1,0], [0,1], [1,0], [1,0], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [1,0], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [0,1], [1,0], [0,1], [1,0], [1,0], [0,1], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [0,1], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [0,1], [1,0], [0,1], [1,0], [1,0], [1,0], [0,1], [1,0], [0,1], [1,0], [1,0], [1,0], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [0,1], [1,0], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [0,1], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [0,1], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [1,0], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [0,1], [1,0], [0,1], [1,0], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], 
[0,1], [0,1], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [0,1], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [0,1], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [1,0], [1,0], [0,1], [1,0], [0,1], [1,0], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [1,0], [1,0], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [1,0], [0,1], [1,0], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [1,0], [1,0], [0,1], [1,0], [1,0], [1,0], [0,1], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [0,1], [1,0], [1,0], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [1,0], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [1,0], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [1,0], [0,1], [1,0], [0,1], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [1,0], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [1,0], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [1,0], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [1,0], [1,0], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [1,0], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [0,1], [1,0], [0,1], [1,0], [0,1], [1,0], [1,0], [1,0], [1,0], [1,0], [0,1], [1,0], [1,0], [0,1], [1,0], [1,0], [1,0], [0,1], [1,0], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [1,0], [0,1], [0,1], [0,1], [1,0], [1,0], [1,0], [0,1], [1,0], [1,0], [0,1], [1,0], [1,0], [0,1], [1,0], [1,0], [1,0], [0,1], [0,1], 
# ... many more one-hot rows ([1,0] / [0,1]) in the same format ...
[1,0], [0,1]])
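
# A minimal sketch (assumption, not part of the original data dump above): an
# equivalent one-hot label array can be built programmatically with NumPy,
# which is easier to audit than a hard-coded literal. `class_ids` here is a
# hypothetical stand-in for the real 0/1 class labels:
import numpy as np

class_ids = np.random.randint(0, 2, size=1000)   # hypothetical class labels
y_one_hot = np.eye(2, dtype=int)[class_ids]      # rows become [1, 0] or [0, 1]
assert y_one_hot.shape == (1000, 2)
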
X = np.array([[0, 0], [0, 0],
              # ... many more feature rows: a long run of [0, 0] pairs
              # followed by a long run of [1, 0] pairs, continuing below ...
              [1, 0], [1,
0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 
0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 
0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 
0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 
1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 
1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 
1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 
1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 
2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 
2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 
2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 
2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 3]])
|
py | 1a5064e8f07a0ae8af6f1d8fdd42461dbb265210 | from typing import TYPE_CHECKING, List
from django.conf import settings
from saleor.plugins.base_plugin import BasePlugin, ConfigurationTypeField
from . import (
GatewayConfig,
authorize,
capture,
get_client_token,
list_client_sources,
process_payment,
refund,
void,
)
GATEWAY_NAME = "Braintree"
if TYPE_CHECKING:
# flake8: noqa
from . import GatewayResponse, PaymentData, TokenConfig
from ...interface import CustomerSource
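# Decorator for the gateway methods below: when the plugin is inactive, skip the
# Braintree call entirely and return the chained ``previous_value`` unchanged.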
def require_active_plugin(fn):
def wrapped(self, *args, **kwargs):
previous = kwargs.get("previous_value", None)
if not self.active:
return previous
return fn(self, *args, **kwargs)
return wrapped
class BraintreeGatewayPlugin(BasePlugin):
PLUGIN_ID = "mirumee.payments.braintree"
PLUGIN_NAME = GATEWAY_NAME
DEFAULT_ACTIVE = settings.BRAINTREE_PLUGIN_ACTIVE
DEFAULT_CONFIGURATION = [
{"name": "Public API key", "value": settings.BRAINTREE_PUBLIC_KEY},
{"name": "Secret API key", "value": settings.BRAINTREE_PRIVATE_KEY},
{"name": "Use sandbox", "value": settings.BRAINTREE_SANDBOX_MODE},
{"name": "Merchant ID", "value": settings.BRAINTREE_MERCHANT_ID},
{"name": "Store customers card", "value": False},
{"name": "Automatic payment capture", "value": True},
{"name": "Require 3D secure", "value": False},
]
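# CONFIG_STRUCTURE supplies per-field metadata (type, help text, label) for the
# configuration entries declared above.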
CONFIG_STRUCTURE = {
"Public API key": {
"type": ConfigurationTypeField.SECRET,
"help_text": "Provide Braintree public API key",
"label": "Public API key",
},
"Secret API key": {
"type": ConfigurationTypeField.SECRET,
"help_text": "Provide Braintree secret API key",
"label": "Secret API key",
},
"Merchant ID": {
"type": ConfigurationTypeField.SECRET,
"help_text": "Provide Braintree merchant ID",
"label": "Merchant ID",
},
"Use sandbox": {
"type": ConfigurationTypeField.BOOLEAN,
"help_text": "Determines if Saleor should use Braintree sandbox API.",
"label": "Use sandbox",
},
"Store customers card": {
"type": ConfigurationTypeField.BOOLEAN,
"help_text": "Determines if Saleor should store cards on payments"
" in Braintree customer.",
"label": "Store customers card",
},
"Automatic payment capture": {
"type": ConfigurationTypeField.BOOLEAN,
"help_text": "Determines if Saleor should automaticaly capture payments.",
"label": "Automatic payment capture",
},
"Require 3D secure": {
"type": ConfigurationTypeField.BOOLEAN,
"help_text": "Determines if Saleor should enforce 3D secure during payment.",
"label": "Require 3D secure",
},
}
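# The constructor flattens the stored name/value pairs into a dict and builds
# the GatewayConfig consumed by the gateway functions imported at the top.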
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
configuration = {item["name"]: item["value"] for item in self.configuration}
self.config = GatewayConfig(
gateway_name=GATEWAY_NAME,
auto_capture=configuration["Automatic payment capture"],
connection_params={
"sandbox_mode": configuration["Use sandbox"],
"merchant_id": configuration["Merchant ID"],
"public_key": configuration["Public API key"],
"private_key": configuration["Secret API key"],
},
store_customer=configuration["Store customers card"],
require_3d_secure=configuration["Require 3D secure"],
)
def _get_gateway_config(self) -> GatewayConfig:
return self.config
@require_active_plugin
def authorize_payment(
self, payment_information: "PaymentData", previous_value
) -> "GatewayResponse":
return authorize(payment_information, self._get_gateway_config())
@require_active_plugin
def capture_payment(
self, payment_information: "PaymentData", previous_value
) -> "GatewayResponse":
return capture(payment_information, self._get_gateway_config())
@require_active_plugin
def refund_payment(
self, payment_information: "PaymentData", previous_value
) -> "GatewayResponse":
return refund(payment_information, self._get_gateway_config())
@require_active_plugin
def void_payment(
self, payment_information: "PaymentData", previous_value
) -> "GatewayResponse":
return void(payment_information, self._get_gateway_config())
@require_active_plugin
def process_payment(
self, payment_information: "PaymentData", previous_value
) -> "GatewayResponse":
return process_payment(payment_information, self._get_gateway_config())
@require_active_plugin
def list_payment_sources(
self, customer_id: str, previous_value
) -> List["CustomerSource"]:
sources = list_client_sources(self._get_gateway_config(), customer_id)
previous_value.extend(sources)
return previous_value
@require_active_plugin
def get_client_token(self, token_config: "TokenConfig", previous_value):
return get_client_token(self._get_gateway_config(), token_config)
@require_active_plugin
def get_payment_config(self, previous_value):
config = self._get_gateway_config()
return [
{"field": "store_customer_card", "value": config.store_customer},
{"field": "client_token", "value": get_client_token(config=config)},
]
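# Minimal usage sketch (assumption: Saleor's plugin manager normally
# instantiates plugins and threads ``previous_value`` through the chain; the
# keyword arguments below mirror BasePlugin's constructor and are illustrative):
#
#   plugin = BraintreeGatewayPlugin(
#       configuration=BraintreeGatewayPlugin.DEFAULT_CONFIGURATION, active=True
#   )
#   payment_config = plugin.get_payment_config(previous_value=[])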
|
py | 1a5065d3b071def220ee7a6e2bedd7dd45b8a5c4 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# ======================================================================================================================== #
# Project : Natural Language Recommendation #
# Version : 0.1.0 #
# File : \test_logfile.py #
# Language : Python 3.7.11 #
# ------------------------------------------------------------------------------------------------------------------------ #
# Author : John James #
# Company : nov8.ai #
# Email : [email protected] #
# URL : https://github.com/john-james-sf/nlr #
# ------------------------------------------------------------------------------------------------------------------------ #
# Created : Monday, November 8th 2021, 12:47:01 pm #
# Modified : Monday, November 8th 2021, 12:57:22 pm #
# Modifier : John James ([email protected]) #
# ------------------------------------------------------------------------------------------------------------------------ #
# License : BSD 3-clause "New" or "Revised" License #
# Copyright: (c) 2021 nov8.ai #
# ======================================================================================================================== #
# %%
import os
import pytest
import logging
import inspect
from configparser import ConfigParser
from nlr.utils.loggers import LogFile
from nlr.setup import configfile
# ------------------------------------------------------------------------------------------------------------------------ #
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
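# Verifies that LogFile.get_logfile resolves a logfile path and that the
# corresponding key is registered under the LOGGING section of the config.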
class LogFileTests:
def test_get_logfile(self):
logger.info(" Started {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
lf = LogFile()
logname = 'root'
level = 'warning'
key = logname.lower() + '_' + level.lower()
logfilepath_exp = 'logs/root_warning.log'
logfilepath_act = lf.get_logfile(logname, level)
# Confirm the returned logfile path matches the expected path
assert logfilepath_act == logfilepath_exp, "Failure in {}".format(
inspect.stack()[0][3])
# Confirm the path is also registered under the LOGGING section of the config
config = ConfigParser()
config.read(configfile)
assert config['LOGGING'][key], "Failure in {}".format(
inspect.stack()[0][3])
logger.info(" Successfully completed {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
if __name__ == "__main__":
t = LogFileTests()
t.test_get_logfile()
# %%
|
py | 1a50662e99b1bf373709a3ff4cb4845b48d70de2 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import inspect
import sys
from math import trunc
def get_locale(name):
"""Returns an appropriate :class:`Locale <arrow.locales.Locale>`
corresponding to an inpute locale name.
:param name: the name of the locale.
"""
locale_cls = _locales.get(name.lower())
if locale_cls is None:
raise ValueError("Unsupported locale '{}'".format(name))
return locale_cls()
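# Example (illustrative; exact wording depends on the locale tables below):
#   get_locale("en_us").describe("minute", delta=1)   # -> "in a minute"
#   get_locale("en_us").describe("minute", delta=-1)  # -> "a minute ago"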
# base locale type.
class Locale(object):
""" Represents locale-specific data and functionality. """
names = []
timeframes = {
"now": "",
"seconds": "",
"minute": "",
"minutes": "",
"hour": "",
"hours": "",
"day": "",
"days": "",
"week": "",
"weeks": "",
"month": "",
"months": "",
"year": "",
"years": "",
}
meridians = {"am": "", "pm": "", "AM": "", "PM": ""}
past = None
future = None
month_names = []
month_abbreviations = []
day_names = []
day_abbreviations = []
ordinal_day_re = r"(\d+)"
def __init__(self):
self._month_name_to_ordinal = None
def describe(self, timeframe, delta=0, only_distance=False):
""" Describes a delta within a timeframe in plain language.
:param timeframe: a string representing a timeframe.
:param delta: a quantity representing a delta in a timeframe.
        :param only_distance: return only the distance, e.g. "11 seconds", without "in" or "ago" keywords.
"""
humanized = self._format_timeframe(timeframe, delta)
if not only_distance:
humanized = self._format_relative(humanized, timeframe, delta)
return humanized
def day_name(self, day):
""" Returns the day name for a specified day of the week.
:param day: the ``int`` day of the week (1-7).
"""
return self.day_names[day]
def day_abbreviation(self, day):
""" Returns the day abbreviation for a specified day of the week.
:param day: the ``int`` day of the week (1-7).
"""
return self.day_abbreviations[day]
def month_name(self, month):
""" Returns the month name for a specified month of the year.
:param month: the ``int`` month of the year (1-12).
"""
return self.month_names[month]
def month_abbreviation(self, month):
""" Returns the month abbreviation for a specified month of the year.
:param month: the ``int`` month of the year (1-12).
"""
return self.month_abbreviations[month]
def month_number(self, name):
""" Returns the month number for a month specified by name or abbreviation.
:param name: the month name or abbreviation.
"""
if self._month_name_to_ordinal is None:
self._month_name_to_ordinal = self._name_to_ordinal(self.month_names)
self._month_name_to_ordinal.update(
self._name_to_ordinal(self.month_abbreviations)
)
return self._month_name_to_ordinal.get(name)
def year_full(self, year):
""" Returns the year for specific locale if available
:param name: the ``int`` year (4-digit)
"""
return "{:04d}".format(year)
def year_abbreviation(self, year):
""" Returns the year for specific locale if available
:param name: the ``int`` year (4-digit)
"""
return "{:04d}".format(year)[2:]
def meridian(self, hour, token):
""" Returns the meridian indicator for a specified hour and format token.
:param hour: the ``int`` hour of the day.
:param token: the format token.
"""
if token == "a":
return self.meridians["am"] if hour < 12 else self.meridians["pm"]
if token == "A":
return self.meridians["AM"] if hour < 12 else self.meridians["PM"]
def ordinal_number(self, n):
""" Returns the ordinal format of a given integer
:param n: an integer
"""
return self._ordinal_number(n)
def _ordinal_number(self, n):
return "{}".format(n)
def _name_to_ordinal(self, lst):
return dict(map(lambda i: (i[1].lower(), i[0] + 1), enumerate(lst[1:])))
def _format_timeframe(self, timeframe, delta):
return self.timeframes[timeframe].format(trunc(abs(delta)))
def _format_relative(self, humanized, timeframe, delta):
if timeframe == "now":
return humanized
direction = self.past if delta < 0 else self.future
return direction.format(humanized)
# base locale type implementations.
class EnglishLocale(Locale):
names = [
"en",
"en_us",
"en_gb",
"en_au",
"en_be",
"en_jp",
"en_za",
"en_ca",
"en_ph",
]
past = "{0} ago"
future = "in {0}"
timeframes = {
"now": "just now",
"seconds": "seconds",
"minute": "a minute",
"minutes": "{0} minutes",
"hour": "an hour",
"hours": "{0} hours",
"day": "a day",
"days": "{0} days",
"week": "a week",
"weeks": "{0} weeks",
"month": "a month",
"months": "{0} months",
"year": "a year",
"years": "{0} years",
}
meridians = {"am": "am", "pm": "pm", "AM": "AM", "PM": "PM"}
month_names = [
"",
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December",
]
month_abbreviations = [
"",
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
]
day_names = [
"",
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday",
"Sunday",
]
day_abbreviations = ["", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
ordinal_day_re = r"((?P<value>[2-3]?1(?=st)|[2-3]?2(?=nd)|[2-3]?3(?=rd)|[1-3]?[04-9](?=th)|1[1-3](?=th))(st|nd|rd|th))"
def _ordinal_number(self, n):
if n % 100 not in (11, 12, 13):
remainder = abs(n) % 10
if remainder == 1:
return "{}st".format(n)
elif remainder == 2:
return "{}nd".format(n)
elif remainder == 3:
return "{}rd".format(n)
return "{}th".format(n)
def describe(self, timeframe, delta=0, only_distance=False):
""" Describes a delta within a timeframe in plain language.
:param timeframe: a string representing a timeframe.
:param delta: a quantity representing a delta in a timeframe.
        :param only_distance: return only the distance, e.g. "11 seconds", without "in" or "ago" keywords.
"""
humanized = super(EnglishLocale, self).describe(timeframe, delta, only_distance)
if only_distance and timeframe == "now":
humanized = "instantly"
return humanized
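# A quick sketch of the English rules above; each output follows directly
# from the definitions in this class:
#
#     >>> EnglishLocale().ordinal_number(22)
#     '22nd'
#     >>> EnglishLocale().ordinal_number(11)
#     '11th'
#     >>> EnglishLocale().describe("now", only_distance=True)
#     'instantly'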
class ItalianLocale(Locale):
names = ["it", "it_it"]
past = "{0} fa"
future = "tra {0}"
timeframes = {
"now": "adesso",
"seconds": "qualche secondo",
"minute": "un minuto",
"minutes": "{0} minuti",
"hour": "un'ora",
"hours": "{0} ore",
"day": "un giorno",
"days": "{0} giorni",
"month": "un mese",
"months": "{0} mesi",
"year": "un anno",
"years": "{0} anni",
}
month_names = [
"",
"gennaio",
"febbraio",
"marzo",
"aprile",
"maggio",
"giugno",
"luglio",
"agosto",
"settembre",
"ottobre",
"novembre",
"dicembre",
]
month_abbreviations = [
"",
"gen",
"feb",
"mar",
"apr",
"mag",
"giu",
"lug",
"ago",
"set",
"ott",
"nov",
"dic",
]
day_names = [
"",
"lunedì",
"martedì",
"mercoledì",
"giovedì",
"venerdì",
"sabato",
"domenica",
]
day_abbreviations = ["", "lun", "mar", "mer", "gio", "ven", "sab", "dom"]
ordinal_day_re = r"((?P<value>[1-3]?[0-9](?=[ºª]))[ºª])"
def _ordinal_number(self, n):
return "{}º".format(n)
class SpanishLocale(Locale):
names = ["es", "es_es"]
past = "hace {0}"
future = "en {0}"
timeframes = {
"now": "ahora",
"seconds": "segundos",
"minute": "un minuto",
"minutes": "{0} minutos",
"hour": "una hora",
"hours": "{0} horas",
"day": "un día",
"days": "{0} días",
"week": "una semana",
"weeks": "{0} semanas",
"month": "un mes",
"months": "{0} meses",
"year": "un año",
"years": "{0} años",
}
meridians = {"am": "am", "pm": "pm", "AM": "AM", "PM": "PM"}
month_names = [
"",
"enero",
"febrero",
"marzo",
"abril",
"mayo",
"junio",
"julio",
"agosto",
"septiembre",
"octubre",
"noviembre",
"diciembre",
]
month_abbreviations = [
"",
"ene",
"feb",
"mar",
"abr",
"may",
"jun",
"jul",
"ago",
"sep",
"oct",
"nov",
"dic",
]
day_names = [
"",
"lunes",
"martes",
"miércoles",
"jueves",
"viernes",
"sábado",
"domingo",
]
day_abbreviations = ["", "lun", "mar", "mie", "jue", "vie", "sab", "dom"]
ordinal_day_re = r"((?P<value>[1-3]?[0-9](?=[ºª]))[ºª])"
def _ordinal_number(self, n):
return "{}º".format(n)
class FrenchLocale(Locale):
names = ["fr", "fr_fr"]
past = "il y a {0}"
future = "dans {0}"
timeframes = {
"now": "maintenant",
"seconds": "quelques secondes",
"minute": "une minute",
"minutes": "{0} minutes",
"hour": "une heure",
"hours": "{0} heures",
"day": "un jour",
"days": "{0} jours",
"week": "une semaine",
"weeks": "{0} semaines",
"month": "un mois",
"months": "{0} mois",
"year": "un an",
"years": "{0} ans",
}
month_names = [
"",
"janvier",
"février",
"mars",
"avril",
"mai",
"juin",
"juillet",
"août",
"septembre",
"octobre",
"novembre",
"décembre",
]
month_abbreviations = [
"",
"janv",
"févr",
"mars",
"avr",
"mai",
"juin",
"juil",
"août",
"sept",
"oct",
"nov",
"déc",
]
day_names = [
"",
"lundi",
"mardi",
"mercredi",
"jeudi",
"vendredi",
"samedi",
"dimanche",
]
day_abbreviations = ["", "lun", "mar", "mer", "jeu", "ven", "sam", "dim"]
ordinal_day_re = (
r"((?P<value>\b1(?=er\b)|[1-3]?[02-9](?=e\b)|[1-3]1(?=e\b))(er|e)\b)"
)
def _ordinal_number(self, n):
if abs(n) == 1:
return "{}er".format(n)
return "{}e".format(n)
class GreekLocale(Locale):
names = ["el", "el_gr"]
past = "{0} πριν"
future = "σε {0}"
timeframes = {
"now": "τώρα",
"seconds": "δευτερόλεπτα",
"minute": "ένα λεπτό",
"minutes": "{0} λεπτά",
"hour": "μία ώρα",
"hours": "{0} ώρες",
"day": "μία μέρα",
"days": "{0} μέρες",
"month": "ένα μήνα",
"months": "{0} μήνες",
"year": "ένα χρόνο",
"years": "{0} χρόνια",
}
month_names = [
"",
"Ιανουαρίου",
"Φεβρουαρίου",
"Μαρτίου",
"Απριλίου",
"Μαΐου",
"Ιουνίου",
"Ιουλίου",
"Αυγούστου",
"Σεπτεμβρίου",
"Οκτωβρίου",
"Νοεμβρίου",
"Δεκεμβρίου",
]
month_abbreviations = [
"",
"Ιαν",
"Φεβ",
"Μαρ",
"Απρ",
"Μαϊ",
"Ιον",
"Ιολ",
"Αυγ",
"Σεπ",
"Οκτ",
"Νοε",
"Δεκ",
]
day_names = [
"",
"Δευτέρα",
"Τρίτη",
"Τετάρτη",
"Πέμπτη",
"Παρασκευή",
"Σάββατο",
"Κυριακή",
]
day_abbreviations = ["", "Δευ", "Τρι", "Τετ", "Πεμ", "Παρ", "Σαβ", "Κυρ"]
class JapaneseLocale(Locale):
names = ["ja", "ja_jp"]
past = "{0}前"
future = "{0}後"
timeframes = {
"now": "現在",
"seconds": "数秒",
"minute": "1分",
"minutes": "{0}分",
"hour": "1時間",
"hours": "{0}時間",
"day": "1日",
"days": "{0}日",
"week": "1週間",
"weeks": "{0}週間",
"month": "1ヶ月",
"months": "{0}ヶ月",
"year": "1年",
"years": "{0}年",
}
month_names = [
"",
"1月",
"2月",
"3月",
"4月",
"5月",
"6月",
"7月",
"8月",
"9月",
"10月",
"11月",
"12月",
]
month_abbreviations = [
"",
" 1",
" 2",
" 3",
" 4",
" 5",
" 6",
" 7",
" 8",
" 9",
"10",
"11",
"12",
]
day_names = ["", "月曜日", "火曜日", "水曜日", "木曜日", "金曜日", "土曜日", "日曜日"]
day_abbreviations = ["", "月", "火", "水", "木", "金", "土", "日"]
class SwedishLocale(Locale):
names = ["sv", "sv_se"]
past = "för {0} sen"
future = "om {0}"
timeframes = {
"now": "just nu",
"seconds": "några sekunder",
"minute": "en minut",
"minutes": "{0} minuter",
"hour": "en timme",
"hours": "{0} timmar",
"day": "en dag",
"days": "{0} dagar",
"month": "en månad",
"months": "{0} månader",
"year": "ett år",
"years": "{0} år",
}
month_names = [
"",
"januari",
"februari",
"mars",
"april",
"maj",
"juni",
"juli",
"augusti",
"september",
"oktober",
"november",
"december",
]
month_abbreviations = [
"",
"jan",
"feb",
"mar",
"apr",
"maj",
"jun",
"jul",
"aug",
"sep",
"okt",
"nov",
"dec",
]
day_names = [
"",
"måndag",
"tisdag",
"onsdag",
"torsdag",
"fredag",
"lördag",
"söndag",
]
day_abbreviations = ["", "mån", "tis", "ons", "tor", "fre", "lör", "sön"]
class FinnishLocale(Locale):
names = ["fi", "fi_fi"]
# The finnish grammar is very complex, and its hard to convert
# 1-to-1 to something like English.
past = "{0} sitten"
future = "{0} kuluttua"
timeframes = {
"now": ["juuri nyt", "juuri nyt"],
"seconds": ["muutama sekunti", "muutaman sekunnin"],
"minute": ["minuutti", "minuutin"],
"minutes": ["{0} minuuttia", "{0} minuutin"],
"hour": ["tunti", "tunnin"],
"hours": ["{0} tuntia", "{0} tunnin"],
"day": ["päivä", "päivä"],
"days": ["{0} päivää", "{0} päivän"],
"month": ["kuukausi", "kuukauden"],
"months": ["{0} kuukautta", "{0} kuukauden"],
"year": ["vuosi", "vuoden"],
"years": ["{0} vuotta", "{0} vuoden"],
}
# Months and days are lowercase in Finnish
month_names = [
"",
"tammikuu",
"helmikuu",
"maaliskuu",
"huhtikuu",
"toukokuu",
"kesäkuu",
"heinäkuu",
"elokuu",
"syyskuu",
"lokakuu",
"marraskuu",
"joulukuu",
]
month_abbreviations = [
"",
"tammi",
"helmi",
"maalis",
"huhti",
"touko",
"kesä",
"heinä",
"elo",
"syys",
"loka",
"marras",
"joulu",
]
day_names = [
"",
"maanantai",
"tiistai",
"keskiviikko",
"torstai",
"perjantai",
"lauantai",
"sunnuntai",
]
day_abbreviations = ["", "ma", "ti", "ke", "to", "pe", "la", "su"]
    def _format_timeframe(self, timeframe, delta):
        # Return a pair of forms: the plain form used for past expressions
        # ("{0} sitten") and the genitive form used for future expressions
        # ("{0} kuluttua"). _format_relative picks the right one below.
        return (
            self.timeframes[timeframe][0].format(abs(delta)),
            self.timeframes[timeframe][1].format(abs(delta)),
        )
    def _format_relative(self, humanized, timeframe, delta):
        if timeframe == "now":
            return humanized[0]
        direction = self.past if delta < 0 else self.future
        which = 0 if delta < 0 else 1
        return direction.format(humanized[which])
def _ordinal_number(self, n):
return "{}.".format(n)
class ChineseCNLocale(Locale):
names = ["zh", "zh_cn"]
past = "{0}前"
future = "{0}后"
timeframes = {
"now": "刚才",
"seconds": "几秒",
"minute": "1分钟",
"minutes": "{0}分钟",
"hour": "1小时",
"hours": "{0}小时",
"day": "1天",
"days": "{0}天",
"week": "一周",
"weeks": "{0}周",
"month": "1个月",
"months": "{0}个月",
"year": "1年",
"years": "{0}年",
}
month_names = [
"",
"一月",
"二月",
"三月",
"四月",
"五月",
"六月",
"七月",
"八月",
"九月",
"十月",
"十一月",
"十二月",
]
month_abbreviations = [
"",
" 1",
" 2",
" 3",
" 4",
" 5",
" 6",
" 7",
" 8",
" 9",
"10",
"11",
"12",
]
day_names = ["", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六", "星期日"]
day_abbreviations = ["", "一", "二", "三", "四", "五", "六", "日"]
class ChineseTWLocale(Locale):
names = ["zh_tw"]
past = "{0}前"
future = "{0}後"
timeframes = {
"now": "剛才",
"seconds": "幾秒",
"minute": "1分鐘",
"minutes": "{0}分鐘",
"hour": "1小時",
"hours": "{0}小時",
"day": "1天",
"days": "{0}天",
"month": "1個月",
"months": "{0}個月",
"year": "1年",
"years": "{0}年",
}
month_names = [
"",
"1月",
"2月",
"3月",
"4月",
"5月",
"6月",
"7月",
"8月",
"9月",
"10月",
"11月",
"12月",
]
month_abbreviations = [
"",
" 1",
" 2",
" 3",
" 4",
" 5",
" 6",
" 7",
" 8",
" 9",
"10",
"11",
"12",
]
day_names = ["", "周一", "周二", "周三", "周四", "周五", "周六", "周日"]
day_abbreviations = ["", "一", "二", "三", "四", "五", "六", "日"]
class KoreanLocale(Locale):
names = ["ko", "ko_kr"]
past = "{0} 전"
future = "{0} 후"
timeframes = {
"now": "지금",
"seconds": "몇 초",
"minute": "1분",
"minutes": "{0}분",
"hour": "1시간",
"hours": "{0}시간",
"day": "1일",
"days": "{0}일",
"month": "1개월",
"months": "{0}개월",
"year": "1년",
"years": "{0}년",
}
month_names = [
"",
"1월",
"2월",
"3월",
"4월",
"5월",
"6월",
"7월",
"8월",
"9월",
"10월",
"11월",
"12월",
]
month_abbreviations = [
"",
" 1",
" 2",
" 3",
" 4",
" 5",
" 6",
" 7",
" 8",
" 9",
"10",
"11",
"12",
]
day_names = ["", "월요일", "화요일", "수요일", "목요일", "금요일", "토요일", "일요일"]
day_abbreviations = ["", "월", "화", "수", "목", "금", "토", "일"]
# derived locale types & implementations.
class DutchLocale(Locale):
names = ["nl", "nl_nl"]
past = "{0} geleden"
future = "over {0}"
timeframes = {
"now": "nu",
"seconds": "seconden",
"minute": "een minuut",
"minutes": "{0} minuten",
"hour": "een uur",
"hours": "{0} uur",
"day": "een dag",
"days": "{0} dagen",
"month": "een maand",
"months": "{0} maanden",
"year": "een jaar",
"years": "{0} jaar",
}
# In Dutch names of months and days are not starting with a capital letter
# like in the English language.
month_names = [
"",
"januari",
"februari",
"maart",
"april",
"mei",
"juni",
"juli",
"augustus",
"september",
"oktober",
"november",
"december",
]
month_abbreviations = [
"",
"jan",
"feb",
"mrt",
"apr",
"mei",
"jun",
"jul",
"aug",
"sep",
"okt",
"nov",
"dec",
]
day_names = [
"",
"maandag",
"dinsdag",
"woensdag",
"donderdag",
"vrijdag",
"zaterdag",
"zondag",
]
day_abbreviations = ["", "ma", "di", "wo", "do", "vr", "za", "zo"]
class SlavicBaseLocale(Locale):
def _format_timeframe(self, timeframe, delta):
form = self.timeframes[timeframe]
delta = abs(delta)
if isinstance(form, list):
if delta % 10 == 1 and delta % 100 != 11:
form = form[0]
elif 2 <= delta % 10 <= 4 and (delta % 100 < 10 or delta % 100 >= 20):
form = form[1]
else:
form = form[2]
return form.format(delta)
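# The three Slavic plural forms above in action, using the RussianLocale
# defined below (21 behaves like 1, while 11 falls through to the last form):
#
#     >>> ru = RussianLocale()
#     >>> [ru._format_timeframe("minutes", n) for n in (1, 3, 5, 11, 21)]
#     ['1 минуту', '3 минуты', '5 минут', '11 минут', '21 минуту']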
class BelarusianLocale(SlavicBaseLocale):
names = ["be", "be_by"]
past = "{0} таму"
future = "праз {0}"
timeframes = {
"now": "зараз",
"seconds": "некалькі секунд",
"minute": "хвіліну",
"minutes": ["{0} хвіліну", "{0} хвіліны", "{0} хвілін"],
"hour": "гадзіну",
"hours": ["{0} гадзіну", "{0} гадзіны", "{0} гадзін"],
"day": "дзень",
"days": ["{0} дзень", "{0} дні", "{0} дзён"],
"month": "месяц",
"months": ["{0} месяц", "{0} месяцы", "{0} месяцаў"],
"year": "год",
"years": ["{0} год", "{0} гады", "{0} гадоў"],
}
month_names = [
"",
"студзеня",
"лютага",
"сакавіка",
"красавіка",
"траўня",
"чэрвеня",
"ліпеня",
"жніўня",
"верасня",
"кастрычніка",
"лістапада",
"снежня",
]
month_abbreviations = [
"",
"студ",
"лют",
"сак",
"крас",
"трав",
"чэрв",
"ліп",
"жнів",
"вер",
"каст",
"ліст",
"снеж",
]
day_names = [
"",
"панядзелак",
"аўторак",
"серада",
"чацвер",
"пятніца",
"субота",
"нядзеля",
]
day_abbreviations = ["", "пн", "ат", "ср", "чц", "пт", "сб", "нд"]
class PolishLocale(SlavicBaseLocale):
names = ["pl", "pl_pl"]
past = "{0} temu"
future = "za {0}"
timeframes = {
"now": "teraz",
"seconds": "kilka sekund",
"minute": "minutę",
"minutes": ["{0} minut", "{0} minuty", "{0} minut"],
"hour": "godzina",
"hours": ["{0} godzin", "{0} godziny", "{0} godzin"],
"day": "dzień",
"days": ["{0} dzień", "{0} dni", "{0} dni"],
"month": "miesiąc",
"months": ["{0} miesiąc", "{0} miesiące", "{0} miesięcy"],
"year": "rok",
"years": ["{0} rok", "{0} lata", "{0} lat"],
}
month_names = [
"",
"styczeń",
"luty",
"marzec",
"kwiecień",
"maj",
"czerwiec",
"lipiec",
"sierpień",
"wrzesień",
"październik",
"listopad",
"grudzień",
]
month_abbreviations = [
"",
"sty",
"lut",
"mar",
"kwi",
"maj",
"cze",
"lip",
"sie",
"wrz",
"paź",
"lis",
"gru",
]
day_names = [
"",
"poniedziałek",
"wtorek",
"środa",
"czwartek",
"piątek",
"sobota",
"niedziela",
]
day_abbreviations = ["", "Pn", "Wt", "Śr", "Czw", "Pt", "So", "Nd"]
class RussianLocale(SlavicBaseLocale):
names = ["ru", "ru_ru"]
past = "{0} назад"
future = "через {0}"
timeframes = {
"now": "сейчас",
"seconds": "несколько секунд",
"minute": "минуту",
"minutes": ["{0} минуту", "{0} минуты", "{0} минут"],
"hour": "час",
"hours": ["{0} час", "{0} часа", "{0} часов"],
"day": "день",
"days": ["{0} день", "{0} дня", "{0} дней"],
"week": "неделю",
"weeks": ["{0} неделю", "{0} недели", "{0} недель"],
"month": "месяц",
"months": ["{0} месяц", "{0} месяца", "{0} месяцев"],
"year": "год",
"years": ["{0} год", "{0} года", "{0} лет"],
}
month_names = [
"",
"января",
"февраля",
"марта",
"апреля",
"мая",
"июня",
"июля",
"августа",
"сентября",
"октября",
"ноября",
"декабря",
]
month_abbreviations = [
"",
"янв",
"фев",
"мар",
"апр",
"май",
"июн",
"июл",
"авг",
"сен",
"окт",
"ноя",
"дек",
]
day_names = [
"",
"понедельник",
"вторник",
"среда",
"четверг",
"пятница",
"суббота",
"воскресенье",
]
day_abbreviations = ["", "пн", "вт", "ср", "чт", "пт", "сб", "вс"]
class AfrikaansLocale(Locale):
names = ["af", "af_nl"]
past = "{0} gelede"
future = "in {0}"
timeframes = {
"now": "nou",
"seconds": "sekondes",
"minute": "minuut",
"minutes": "{0} minute",
"hour": "uur",
"hours": "{0} ure",
"day": "een dag",
"days": "{0} dae",
"month": "een maand",
"months": "{0} maande",
"year": "een jaar",
"years": "{0} jaar",
}
month_names = [
"",
"Januarie",
"Februarie",
"Maart",
"April",
"Mei",
"Junie",
"Julie",
"Augustus",
"September",
"Oktober",
"November",
"Desember",
]
month_abbreviations = [
"",
"Jan",
"Feb",
"Mrt",
"Apr",
"Mei",
"Jun",
"Jul",
"Aug",
"Sep",
"Okt",
"Nov",
"Des",
]
day_names = [
"",
"Maandag",
"Dinsdag",
"Woensdag",
"Donderdag",
"Vrydag",
"Saterdag",
"Sondag",
]
day_abbreviations = ["", "Ma", "Di", "Wo", "Do", "Vr", "Za", "So"]
class BulgarianLocale(SlavicBaseLocale):
names = ["bg", "bg_BG"]
past = "{0} назад"
future = "напред {0}"
timeframes = {
"now": "сега",
"seconds": "няколко секунди",
"minute": "минута",
"minutes": ["{0} минута", "{0} минути", "{0} минути"],
"hour": "час",
"hours": ["{0} час", "{0} часа", "{0} часа"],
"day": "ден",
"days": ["{0} ден", "{0} дни", "{0} дни"],
"month": "месец",
"months": ["{0} месец", "{0} месеца", "{0} месеца"],
"year": "година",
"years": ["{0} година", "{0} години", "{0} години"],
}
month_names = [
"",
"януари",
"февруари",
"март",
"април",
"май",
"юни",
"юли",
"август",
"септември",
"октомври",
"ноември",
"декември",
]
month_abbreviations = [
"",
"ян",
"февр",
"март",
"апр",
"май",
"юни",
"юли",
"авг",
"септ",
"окт",
"ноем",
"дек",
]
day_names = [
"",
"понеделник",
"вторник",
"сряда",
"четвъртък",
"петък",
"събота",
"неделя",
]
day_abbreviations = ["", "пон", "вт", "ср", "четв", "пет", "съб", "нед"]
class UkrainianLocale(SlavicBaseLocale):
names = ["ua", "uk_ua"]
past = "{0} тому"
future = "за {0}"
timeframes = {
"now": "зараз",
"seconds": "кілька секунд",
"minute": "хвилину",
"minutes": ["{0} хвилину", "{0} хвилини", "{0} хвилин"],
"hour": "годину",
"hours": ["{0} годину", "{0} години", "{0} годин"],
"day": "день",
"days": ["{0} день", "{0} дні", "{0} днів"],
"month": "місяць",
"months": ["{0} місяць", "{0} місяці", "{0} місяців"],
"year": "рік",
"years": ["{0} рік", "{0} роки", "{0} років"],
}
month_names = [
"",
"січня",
"лютого",
"березня",
"квітня",
"травня",
"червня",
"липня",
"серпня",
"вересня",
"жовтня",
"листопада",
"грудня",
]
month_abbreviations = [
"",
"січ",
"лют",
"бер",
"квіт",
"трав",
"черв",
"лип",
"серп",
"вер",
"жовт",
"лист",
"груд",
]
day_names = [
"",
"понеділок",
"вівторок",
"середа",
"четвер",
"п’ятниця",
"субота",
"неділя",
]
day_abbreviations = ["", "пн", "вт", "ср", "чт", "пт", "сб", "нд"]
class MacedonianLocale(SlavicBaseLocale):
names = ["mk", "mk_mk"]
past = "пред {0}"
future = "за {0}"
timeframes = {
"now": "сега",
"seconds": "секунди",
"minute": "една минута",
"minutes": ["{0} минута", "{0} минути", "{0} минути"],
"hour": "еден саат",
"hours": ["{0} саат", "{0} саати", "{0} саати"],
"day": "еден ден",
"days": ["{0} ден", "{0} дена", "{0} дена"],
"month": "еден месец",
"months": ["{0} месец", "{0} месеци", "{0} месеци"],
"year": "една година",
"years": ["{0} година", "{0} години", "{0} години"],
}
meridians = {"am": "дп", "pm": "пп", "AM": "претпладне", "PM": "попладне"}
month_names = [
"",
"Јануари",
"Февруари",
"Март",
"Април",
"Мај",
"Јуни",
"Јули",
"Август",
"Септември",
"Октомври",
"Ноември",
"Декември",
]
    month_abbreviations = [
        "",
        "Јан.",
        "Фев.",
        "Мар.",
        "Апр.",
        "Мај",
        "Јун.",
        "Јул.",
        "Авг.",
        "Септ.",
        "Окт.",
        "Ноем.",
        "Декем.",
    ]
    day_names = [
        "",
        "Понеделник",
        "Вторник",
        "Среда",
        "Четврток",
        "Петок",
        "Сабота",
        "Недела",
    ]
    day_abbreviations = [
        "",
        "Пон.",
        "Вт.",
        "Сре.",
        "Чет.",
        "Пет.",
        "Саб.",
        "Нед.",
    ]
class DeutschBaseLocale(Locale):
past = "vor {0}"
future = "in {0}"
timeframes = {
"now": "gerade eben",
"seconds": "Sekunden",
"minute": "einer Minute",
"minutes": "{0} Minuten",
"hour": "einer Stunde",
"hours": "{0} Stunden",
"day": "einem Tag",
"days": "{0} Tagen",
"month": "einem Monat",
"months": "{0} Monaten",
"year": "einem Jahr",
"years": "{0} Jahren",
}
timeframes_only_distance = timeframes.copy()
timeframes_only_distance["minute"] = "eine Minute"
timeframes_only_distance["hour"] = "eine Stunde"
timeframes_only_distance["day"] = "ein Tag"
timeframes_only_distance["month"] = "ein Monat"
timeframes_only_distance["year"] = "ein Jahr"
month_names = [
"",
"Januar",
"Februar",
"März",
"April",
"Mai",
"Juni",
"Juli",
"August",
"September",
"Oktober",
"November",
"Dezember",
]
month_abbreviations = [
"",
"Jan",
"Feb",
"Mär",
"Apr",
"Mai",
"Jun",
"Jul",
"Aug",
"Sep",
"Okt",
"Nov",
"Dez",
]
day_names = [
"",
"Montag",
"Dienstag",
"Mittwoch",
"Donnerstag",
"Freitag",
"Samstag",
"Sonntag",
]
day_abbreviations = ["", "Mo", "Di", "Mi", "Do", "Fr", "Sa", "So"]
def _ordinal_number(self, n):
return "{}.".format(n)
def describe(self, timeframe, delta=0, only_distance=False):
""" Describes a delta within a timeframe in plain language.
:param timeframe: a string representing a timeframe.
:param delta: a quantity representing a delta in a timeframe.
        :param only_distance: return only the distance, e.g. "11 seconds", without "in" or "ago" keywords.
"""
humanized = self.timeframes_only_distance[timeframe].format(trunc(abs(delta)))
if not only_distance:
humanized = self._format_timeframe(timeframe, delta)
humanized = self._format_relative(humanized, timeframe, delta)
return humanized
class GermanLocale(DeutschBaseLocale, Locale):
names = ["de", "de_de"]
class AustrianLocale(DeutschBaseLocale, Locale):
names = ["de_at"]
month_names = [
"",
"Jänner",
"Februar",
"März",
"April",
"Mai",
"Juni",
"Juli",
"August",
"September",
"Oktober",
"November",
"Dezember",
]
class NorwegianLocale(Locale):
names = ["nb", "nb_no"]
past = "for {0} siden"
future = "om {0}"
timeframes = {
"now": "nå nettopp",
"seconds": "noen sekunder",
"minute": "ett minutt",
"minutes": "{0} minutter",
"hour": "en time",
"hours": "{0} timer",
"day": "en dag",
"days": "{0} dager",
"month": "en måned",
"months": "{0} måneder",
"year": "ett år",
"years": "{0} år",
}
month_names = [
"",
"januar",
"februar",
"mars",
"april",
"mai",
"juni",
"juli",
"august",
"september",
"oktober",
"november",
"desember",
]
month_abbreviations = [
"",
"jan",
"feb",
"mar",
"apr",
"mai",
"jun",
"jul",
"aug",
"sep",
"okt",
"nov",
"des",
]
day_names = [
"",
"mandag",
"tirsdag",
"onsdag",
"torsdag",
"fredag",
"lørdag",
"søndag",
]
day_abbreviations = ["", "ma", "ti", "on", "to", "fr", "lø", "sø"]
class NewNorwegianLocale(Locale):
names = ["nn", "nn_no"]
past = "for {0} sidan"
future = "om {0}"
timeframes = {
"now": "no nettopp",
"seconds": "nokre sekund",
"minute": "ett minutt",
"minutes": "{0} minutt",
"hour": "ein time",
"hours": "{0} timar",
"day": "ein dag",
"days": "{0} dagar",
"month": "en månad",
"months": "{0} månader",
"year": "eit år",
"years": "{0} år",
}
month_names = [
"",
"januar",
"februar",
"mars",
"april",
"mai",
"juni",
"juli",
"august",
"september",
"oktober",
"november",
"desember",
]
month_abbreviations = [
"",
"jan",
"feb",
"mar",
"apr",
"mai",
"jun",
"jul",
"aug",
"sep",
"okt",
"nov",
"des",
]
day_names = [
"",
"måndag",
"tysdag",
"onsdag",
"torsdag",
"fredag",
"laurdag",
"sundag",
]
day_abbreviations = ["", "må", "ty", "on", "to", "fr", "la", "su"]
class PortugueseLocale(Locale):
names = ["pt", "pt_pt"]
past = "há {0}"
future = "em {0}"
timeframes = {
"now": "agora",
"second": "um segundo",
"seconds": "{0} segundos",
"minute": "um minuto",
"minutes": "{0} minutos",
"hour": "uma hora",
"hours": "{0} horas",
"day": "um dia",
"days": "{0} dias",
"week": "uma semana",
"weeks": "{0} semanas",
"month": "um mês",
"months": "{0} meses",
"year": "um ano",
"years": "{0} anos",
}
month_names = [
"",
"janeiro",
"fevereiro",
"março",
"abril",
"maio",
"junho",
"julho",
"agosto",
"setembro",
"outubro",
"novembro",
"dezembro",
]
month_abbreviations = [
"",
"jan",
"fev",
"mar",
"abr",
"maio",
"jun",
"jul",
"ago",
"set",
"out",
"nov",
"dez",
]
day_names = [
"",
"segunda-feira",
"terça-feira",
"quarta-feira",
"quinta-feira",
"sexta-feira",
"sábado",
"domingo",
]
day_abbreviations = ["", "seg", "ter", "qua", "qui", "sex", "sab", "dom"]
class BrazilianPortugueseLocale(PortugueseLocale):
names = ["pt_br"]
past = "faz {0}"
future = "em {0}"
timeframes = {
"now": "agora",
"second": "um segundo",
"seconds": "{0} segundos",
"minute": "um minuto",
"minutes": "{0} minutos",
"hour": "uma hora",
"hours": "{0} horas",
"day": "um dia",
"days": "{0} dias",
"week": "uma semana",
"weeks": "{0} semanas",
"month": "um mês",
"months": "{0} meses",
"year": "um ano",
"years": "{0} anos",
}
month_names = [
"",
"Janeiro",
"Fevereiro",
"Março",
"Abril",
"Maio",
"Junho",
"Julho",
"Agosto",
"Setembro",
"Outubro",
"Novembro",
"Dezembro",
]
month_abbreviations = [
"",
"Jan",
"Fev",
"Mar",
"Abr",
"Mai",
"Jun",
"Jul",
"Ago",
"Set",
"Out",
"Nov",
"Dez",
]
day_names = [
"",
"Segunda-feira",
"Terça-feira",
"Quarta-feira",
"Quinta-feira",
"Sexta-feira",
"Sábado",
"Domingo",
]
day_abbreviations = ["", "Seg", "Ter", "Qua", "Qui", "Sex", "Sab", "Dom"]
class TagalogLocale(Locale):
names = ["tl", "tl_ph"]
past = "nakaraang {0}"
future = "{0} mula ngayon"
timeframes = {
"now": "ngayon lang",
"seconds": "segundo",
"minute": "isang minuto",
"minutes": "{0} minuto",
"hour": "isang oras",
"hours": "{0} oras",
"day": "isang araw",
"days": "{0} araw",
"month": "isang buwan",
"months": "{0} buwan",
"year": "isang taon",
"years": "{0} taon",
}
month_names = [
"",
"Enero",
"Pebrero",
"Marso",
"Abril",
"Mayo",
"Hunyo",
"Hulyo",
"Agosto",
"Setyembre",
"Oktubre",
"Nobyembre",
"Disyembre",
]
month_abbreviations = [
"",
"Ene",
"Peb",
"Mar",
"Abr",
"May",
"Hun",
"Hul",
"Ago",
"Set",
"Okt",
"Nob",
"Dis",
]
day_names = [
"",
"Lunes",
"Martes",
"Miyerkules",
"Huwebes",
"Biyernes",
"Sabado",
"Linggo",
]
day_abbreviations = ["", "Lun", "Mar", "Miy", "Huw", "Biy", "Sab", "Lin"]
def _ordinal_number(self, n):
return "ika-{}".format(n)
class VietnameseLocale(Locale):
names = ["vi", "vi_vn"]
past = "{0} trước"
future = "{0} nữa"
timeframes = {
"now": "hiện tại",
"seconds": "giây",
"minute": "một phút",
"minutes": "{0} phút",
"hour": "một giờ",
"hours": "{0} giờ",
"day": "một ngày",
"days": "{0} ngày",
"week": "một tuần",
"weeks": "{0} tuần",
"month": "một tháng",
"months": "{0} tháng",
"year": "một năm",
"years": "{0} năm",
}
month_names = [
"",
"Tháng Một",
"Tháng Hai",
"Tháng Ba",
"Tháng Tư",
"Tháng Năm",
"Tháng Sáu",
"Tháng Bảy",
"Tháng Tám",
"Tháng Chín",
"Tháng Mười",
"Tháng Mười Một",
"Tháng Mười Hai",
]
month_abbreviations = [
"",
"Tháng 1",
"Tháng 2",
"Tháng 3",
"Tháng 4",
"Tháng 5",
"Tháng 6",
"Tháng 7",
"Tháng 8",
"Tháng 9",
"Tháng 10",
"Tháng 11",
"Tháng 12",
]
day_names = [
"",
"Thứ Hai",
"Thứ Ba",
"Thứ Tư",
"Thứ Năm",
"Thứ Sáu",
"Thứ Bảy",
"Chủ Nhật",
]
day_abbreviations = ["", "Thứ 2", "Thứ 3", "Thứ 4", "Thứ 5", "Thứ 6", "Thứ 7", "CN"]
class TurkishLocale(Locale):
names = ["tr", "tr_tr"]
past = "{0} önce"
future = "{0} sonra"
timeframes = {
"now": "şimdi",
"seconds": "saniye",
"minute": "bir dakika",
"minutes": "{0} dakika",
"hour": "bir saat",
"hours": "{0} saat",
"day": "bir gün",
"days": "{0} gün",
"month": "bir ay",
"months": "{0} ay",
"year": "yıl",
"years": "{0} yıl",
}
month_names = [
"",
"Ocak",
"Şubat",
"Mart",
"Nisan",
"Mayıs",
"Haziran",
"Temmuz",
"Ağustos",
"Eylül",
"Ekim",
"Kasım",
"Aralık",
]
month_abbreviations = [
"",
"Oca",
"Şub",
"Mar",
"Nis",
"May",
"Haz",
"Tem",
"Ağu",
"Eyl",
"Eki",
"Kas",
"Ara",
]
day_names = [
"",
"Pazartesi",
"Salı",
"Çarşamba",
"Perşembe",
"Cuma",
"Cumartesi",
"Pazar",
]
day_abbreviations = ["", "Pzt", "Sal", "Çar", "Per", "Cum", "Cmt", "Paz"]
class AzerbaijaniLocale(Locale):
names = ["az", "az_az"]
past = "{0} əvvəl"
future = "{0} sonra"
timeframes = {
"now": "indi",
"seconds": "saniyə",
"minute": "bir dəqiqə",
"minutes": "{0} dəqiqə",
"hour": "bir saat",
"hours": "{0} saat",
"day": "bir gün",
"days": "{0} gün",
"month": "bir ay",
"months": "{0} ay",
"year": "il",
"years": "{0} il",
}
month_names = [
"",
"Yanvar",
"Fevral",
"Mart",
"Aprel",
"May",
"İyun",
"İyul",
"Avqust",
"Sentyabr",
"Oktyabr",
"Noyabr",
"Dekabr",
]
month_abbreviations = [
"",
"Yan",
"Fev",
"Mar",
"Apr",
"May",
"İyn",
"İyl",
"Avq",
"Sen",
"Okt",
"Noy",
"Dek",
]
day_names = [
"",
"Bazar ertəsi",
"Çərşənbə axşamı",
"Çərşənbə",
"Cümə axşamı",
"Cümə",
"Şənbə",
"Bazar",
]
day_abbreviations = ["", "Ber", "Çax", "Çər", "Cax", "Cüm", "Şnb", "Bzr"]
class ArabicLocale(Locale):
names = [
"ar",
"ar_ae",
"ar_bh",
"ar_dj",
"ar_eg",
"ar_eh",
"ar_er",
"ar_km",
"ar_kw",
"ar_ly",
"ar_om",
"ar_qa",
"ar_sa",
"ar_sd",
"ar_so",
"ar_ss",
"ar_td",
"ar_ye",
]
past = "منذ {0}"
future = "خلال {0}"
timeframes = {
"now": "الآن",
"seconds": {"double": "ثانيتين", "ten": "{0} ثوان", "higher": "{0} ثانية"},
"minute": "دقيقة",
"minutes": {"double": "دقيقتين", "ten": "{0} دقائق", "higher": "{0} دقيقة"},
"hour": "ساعة",
"hours": {"double": "ساعتين", "ten": "{0} ساعات", "higher": "{0} ساعة"},
"day": "يوم",
"days": {"double": "يومين", "ten": "{0} أيام", "higher": "{0} يوم"},
"month": "شهر",
"months": {"double": "شهرين", "ten": "{0} أشهر", "higher": "{0} شهر"},
"year": "سنة",
"years": {"double": "سنتين", "ten": "{0} سنوات", "higher": "{0} سنة"},
}
month_names = [
"",
"يناير",
"فبراير",
"مارس",
"أبريل",
"مايو",
"يونيو",
"يوليو",
"أغسطس",
"سبتمبر",
"أكتوبر",
"نوفمبر",
"ديسمبر",
]
month_abbreviations = [
"",
"يناير",
"فبراير",
"مارس",
"أبريل",
"مايو",
"يونيو",
"يوليو",
"أغسطس",
"سبتمبر",
"أكتوبر",
"نوفمبر",
"ديسمبر",
]
day_names = [
"",
"الإثنين",
"الثلاثاء",
"الأربعاء",
"الخميس",
"الجمعة",
"السبت",
"الأحد",
]
day_abbreviations = ["", "إثنين", "ثلاثاء", "أربعاء", "خميس", "جمعة", "سبت", "أحد"]
def _format_timeframe(self, timeframe, delta):
form = self.timeframes[timeframe]
delta = abs(delta)
if isinstance(form, dict):
if delta == 2:
form = form["double"]
            elif 2 < delta <= 10:
form = form["ten"]
else:
form = form["higher"]
return form.format(delta)
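# A sketch of the Arabic branches above: exactly 2 takes the dual form,
# 3-10 the small plural, everything else the general form:
#
#     >>> ar = ArabicLocale()
#     >>> ar._format_timeframe("days", 2)
#     'يومين'
#     >>> ar._format_timeframe("days", 7)
#     '7 أيام'
#     >>> ar._format_timeframe("days", 15)
#     '15 يوم'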
class LevantArabicLocale(ArabicLocale):
names = ["ar_iq", "ar_jo", "ar_lb", "ar_ps", "ar_sy"]
month_names = [
"",
"كانون الثاني",
"شباط",
"آذار",
"نيسان",
"أيار",
"حزيران",
"تموز",
"آب",
"أيلول",
"تشرين الأول",
"تشرين الثاني",
"كانون الأول",
]
month_abbreviations = [
"",
"كانون الثاني",
"شباط",
"آذار",
"نيسان",
"أيار",
"حزيران",
"تموز",
"آب",
"أيلول",
"تشرين الأول",
"تشرين الثاني",
"كانون الأول",
]
class AlgeriaTunisiaArabicLocale(ArabicLocale):
names = ["ar_tn", "ar_dz"]
month_names = [
"",
"جانفي",
"فيفري",
"مارس",
"أفريل",
"ماي",
"جوان",
"جويلية",
"أوت",
"سبتمبر",
"أكتوبر",
"نوفمبر",
"ديسمبر",
]
month_abbreviations = [
"",
"جانفي",
"فيفري",
"مارس",
"أفريل",
"ماي",
"جوان",
"جويلية",
"أوت",
"سبتمبر",
"أكتوبر",
"نوفمبر",
"ديسمبر",
]
class MauritaniaArabicLocale(ArabicLocale):
names = ["ar_mr"]
month_names = [
"",
"يناير",
"فبراير",
"مارس",
"إبريل",
"مايو",
"يونيو",
"يوليو",
"أغشت",
"شتمبر",
"أكتوبر",
"نوفمبر",
"دجمبر",
]
month_abbreviations = [
"",
"يناير",
"فبراير",
"مارس",
"إبريل",
"مايو",
"يونيو",
"يوليو",
"أغشت",
"شتمبر",
"أكتوبر",
"نوفمبر",
"دجمبر",
]
class MoroccoArabicLocale(ArabicLocale):
names = ["ar_ma"]
month_names = [
"",
"يناير",
"فبراير",
"مارس",
"أبريل",
"ماي",
"يونيو",
"يوليوز",
"غشت",
"شتنبر",
"أكتوبر",
"نونبر",
"دجنبر",
]
month_abbreviations = [
"",
"يناير",
"فبراير",
"مارس",
"أبريل",
"ماي",
"يونيو",
"يوليوز",
"غشت",
"شتنبر",
"أكتوبر",
"نونبر",
"دجنبر",
]
class IcelandicLocale(Locale):
    def _format_timeframe(self, timeframe, delta):
        # Icelandic stores each timeframe as a (past, future) pair of
        # grammatical forms; the sign of the delta picks which one to use.
        timeframe = self.timeframes[timeframe]
        if delta < 0:
            timeframe = timeframe[0]
        elif delta > 0:
            timeframe = timeframe[1]
        return timeframe.format(abs(delta))
names = ["is", "is_is"]
past = "fyrir {0} síðan"
future = "eftir {0}"
timeframes = {
"now": "rétt í þessu",
"seconds": ("nokkrum sekúndum", "nokkrar sekúndur"),
"minute": ("einni mínútu", "eina mínútu"),
"minutes": ("{0} mínútum", "{0} mínútur"),
"hour": ("einum tíma", "einn tíma"),
"hours": ("{0} tímum", "{0} tíma"),
"day": ("einum degi", "einn dag"),
"days": ("{0} dögum", "{0} daga"),
"month": ("einum mánuði", "einn mánuð"),
"months": ("{0} mánuðum", "{0} mánuði"),
"year": ("einu ári", "eitt ár"),
"years": ("{0} árum", "{0} ár"),
}
meridians = {"am": "f.h.", "pm": "e.h.", "AM": "f.h.", "PM": "e.h."}
month_names = [
"",
"janúar",
"febrúar",
"mars",
"apríl",
"maí",
"júní",
"júlí",
"ágúst",
"september",
"október",
"nóvember",
"desember",
]
month_abbreviations = [
"",
"jan",
"feb",
"mar",
"apr",
"maí",
"jún",
"júl",
"ágú",
"sep",
"okt",
"nóv",
"des",
]
day_names = [
"",
"mánudagur",
"þriðjudagur",
"miðvikudagur",
"fimmtudagur",
"föstudagur",
"laugardagur",
"sunnudagur",
]
day_abbreviations = ["", "mán", "þri", "mið", "fim", "fös", "lau", "sun"]
class DanishLocale(Locale):
names = ["da", "da_dk"]
past = "for {0} siden"
future = "efter {0}"
timeframes = {
"now": "lige nu",
"seconds": "et par sekunder",
"minute": "et minut",
"minutes": "{0} minutter",
"hour": "en time",
"hours": "{0} timer",
"day": "en dag",
"days": "{0} dage",
"month": "en måned",
"months": "{0} måneder",
"year": "et år",
"years": "{0} år",
}
month_names = [
"",
"januar",
"februar",
"marts",
"april",
"maj",
"juni",
"juli",
"august",
"september",
"oktober",
"november",
"december",
]
month_abbreviations = [
"",
"jan",
"feb",
"mar",
"apr",
"maj",
"jun",
"jul",
"aug",
"sep",
"okt",
"nov",
"dec",
]
day_names = [
"",
"mandag",
"tirsdag",
"onsdag",
"torsdag",
"fredag",
"lørdag",
"søndag",
]
day_abbreviations = ["", "man", "tir", "ons", "tor", "fre", "lør", "søn"]
class MalayalamLocale(Locale):
names = ["ml"]
past = "{0} മുമ്പ്"
future = "{0} ശേഷം"
timeframes = {
"now": "ഇപ്പോൾ",
"seconds": "സെക്കന്റ്",
"minute": "ഒരു മിനിറ്റ്",
"minutes": "{0} മിനിറ്റ്",
"hour": "ഒരു മണിക്കൂർ",
"hours": "{0} മണിക്കൂർ",
"day": "ഒരു ദിവസം ",
"days": "{0} ദിവസം ",
"month": "ഒരു മാസം ",
"months": "{0} മാസം ",
"year": "ഒരു വർഷം ",
"years": "{0} വർഷം ",
}
meridians = {
"am": "രാവിലെ",
"pm": "ഉച്ചക്ക് ശേഷം",
"AM": "രാവിലെ",
"PM": "ഉച്ചക്ക് ശേഷം",
}
month_names = [
"",
"ജനുവരി",
"ഫെബ്രുവരി",
"മാർച്ച്",
"ഏപ്രിൽ ",
"മെയ് ",
"ജൂണ്",
"ജൂലൈ",
"ഓഗസ്റ്റ്",
"സെപ്റ്റംബർ",
"ഒക്ടോബർ",
"നവംബർ",
"ഡിസംബർ",
]
month_abbreviations = [
"",
"ജനു",
"ഫെബ് ",
"മാർ",
"ഏപ്രിൽ",
"മേയ്",
"ജൂണ്",
"ജൂലൈ",
"ഓഗസ്റ",
"സെപ്റ്റ",
"ഒക്ടോ",
"നവം",
"ഡിസം",
]
day_names = ["", "തിങ്കള്", "ചൊവ്വ", "ബുധന്", "വ്യാഴം", "വെള്ളി", "ശനി", "ഞായര്"]
day_abbreviations = [
"",
"തിങ്കള്",
"ചൊവ്വ",
"ബുധന്",
"വ്യാഴം",
"വെള്ളി",
"ശനി",
"ഞായര്",
]
class HindiLocale(Locale):
names = ["hi"]
past = "{0} पहले"
future = "{0} बाद"
timeframes = {
"now": "अभी",
"seconds": "सेकंड्",
"minute": "एक मिनट ",
"minutes": "{0} मिनट ",
"hour": "एक घंटा",
"hours": "{0} घंटे",
"day": "एक दिन",
"days": "{0} दिन",
"month": "एक माह ",
"months": "{0} महीने ",
"year": "एक वर्ष ",
"years": "{0} साल ",
}
meridians = {"am": "सुबह", "pm": "शाम", "AM": "सुबह", "PM": "शाम"}
month_names = [
"",
"जनवरी",
"फरवरी",
"मार्च",
"अप्रैल ",
"मई",
"जून",
"जुलाई",
"अगस्त",
"सितंबर",
"अक्टूबर",
"नवंबर",
"दिसंबर",
]
month_abbreviations = [
"",
"जन",
"फ़र",
"मार्च",
"अप्रै",
"मई",
"जून",
"जुलाई",
"आग",
"सित",
"अकत",
"नवे",
"दिस",
]
day_names = [
"",
"सोमवार",
"मंगलवार",
"बुधवार",
"गुरुवार",
"शुक्रवार",
"शनिवार",
"रविवार",
]
day_abbreviations = ["", "सोम", "मंगल", "बुध", "गुरुवार", "शुक्र", "शनि", "रवि"]
class CzechLocale(Locale):
names = ["cs", "cs_cz"]
timeframes = {
"now": "Teď",
"seconds": {"past": "{0} sekundami", "future": ["{0} sekundy", "{0} sekund"]},
"minute": {"past": "minutou", "future": "minutu", "zero": "{0} minut"},
"minutes": {"past": "{0} minutami", "future": ["{0} minuty", "{0} minut"]},
"hour": {"past": "hodinou", "future": "hodinu", "zero": "{0} hodin"},
"hours": {"past": "{0} hodinami", "future": ["{0} hodiny", "{0} hodin"]},
"day": {"past": "dnem", "future": "den", "zero": "{0} dnů"},
"days": {"past": "{0} dny", "future": ["{0} dny", "{0} dnů"]},
"month": {"past": "měsícem", "future": "měsíc", "zero": "{0} měsíců"},
"months": {"past": "{0} měsíci", "future": ["{0} měsíce", "{0} měsíců"]},
"year": {"past": "rokem", "future": "rok", "zero": "{0} let"},
"years": {"past": "{0} lety", "future": ["{0} roky", "{0} let"]},
}
past = "Před {0}"
future = "Za {0}"
month_names = [
"",
"leden",
"únor",
"březen",
"duben",
"květen",
"červen",
"červenec",
"srpen",
"září",
"říjen",
"listopad",
"prosinec",
]
month_abbreviations = [
"",
"led",
"úno",
"bře",
"dub",
"kvě",
"čvn",
"čvc",
"srp",
"zář",
"říj",
"lis",
"pro",
]
day_names = [
"",
"pondělí",
"úterý",
"středa",
"čtvrtek",
"pátek",
"sobota",
"neděle",
]
day_abbreviations = ["", "po", "út", "st", "čt", "pá", "so", "ne"]
def _format_timeframe(self, timeframe, delta):
"""Czech aware time frame format function, takes into account
the differences between past and future forms."""
form = self.timeframes[timeframe]
if isinstance(form, dict):
if delta == 0:
form = form["zero"] # And *never* use 0 in the singular!
elif delta > 0:
form = form["future"]
else:
form = form["past"]
delta = abs(delta)
if isinstance(form, list):
if 2 <= delta % 10 <= 4 and (delta % 100 < 10 or delta % 100 >= 20):
form = form[0]
else:
form = form[1]
return form.format(delta)
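# The Czech branches above, sketched: the sign of the delta selects the
# past/future case and the remainder selects the plural form:
#
#     >>> cs = CzechLocale()
#     >>> cs._format_timeframe("hours", 2)
#     '2 hodiny'
#     >>> cs._format_timeframe("hours", 5)
#     '5 hodin'
#     >>> cs._format_timeframe("hours", -2)
#     '2 hodinami'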
class SlovakLocale(Locale):
names = ["sk", "sk_sk"]
timeframes = {
"now": "Teraz",
"seconds": {"past": "pár sekundami", "future": ["{0} sekundy", "{0} sekúnd"]},
"minute": {"past": "minútou", "future": "minútu", "zero": "{0} minút"},
"minutes": {"past": "{0} minútami", "future": ["{0} minúty", "{0} minút"]},
"hour": {"past": "hodinou", "future": "hodinu", "zero": "{0} hodín"},
"hours": {"past": "{0} hodinami", "future": ["{0} hodiny", "{0} hodín"]},
"day": {"past": "dňom", "future": "deň", "zero": "{0} dní"},
"days": {"past": "{0} dňami", "future": ["{0} dni", "{0} dní"]},
"month": {"past": "mesiacom", "future": "mesiac", "zero": "{0} mesiacov"},
"months": {"past": "{0} mesiacmi", "future": ["{0} mesiace", "{0} mesiacov"]},
"year": {"past": "rokom", "future": "rok", "zero": "{0} rokov"},
"years": {"past": "{0} rokmi", "future": ["{0} roky", "{0} rokov"]},
}
past = "Pred {0}"
future = "O {0}"
month_names = [
"",
"január",
"február",
"marec",
"apríl",
"máj",
"jún",
"júl",
"august",
"september",
"október",
"november",
"december",
]
month_abbreviations = [
"",
"jan",
"feb",
"mar",
"apr",
"máj",
"jún",
"júl",
"aug",
"sep",
"okt",
"nov",
"dec",
]
day_names = [
"",
"pondelok",
"utorok",
"streda",
"štvrtok",
"piatok",
"sobota",
"nedeľa",
]
day_abbreviations = ["", "po", "ut", "st", "št", "pi", "so", "ne"]
def _format_timeframe(self, timeframe, delta):
"""Slovak aware time frame format function, takes into account
the differences between past and future forms."""
form = self.timeframes[timeframe]
if isinstance(form, dict):
if delta == 0:
form = form["zero"] # And *never* use 0 in the singular!
elif delta > 0:
form = form["future"]
else:
form = form["past"]
delta = abs(delta)
if isinstance(form, list):
if 2 <= delta % 10 <= 4 and (delta % 100 < 10 or delta % 100 >= 20):
form = form[0]
else:
form = form[1]
return form.format(delta)
class FarsiLocale(Locale):
names = ["fa", "fa_ir"]
past = "{0} قبل"
future = "در {0}"
timeframes = {
"now": "اکنون",
"seconds": "ثانیه",
"minute": "یک دقیقه",
"minutes": "{0} دقیقه",
"hour": "یک ساعت",
"hours": "{0} ساعت",
"day": "یک روز",
"days": "{0} روز",
"month": "یک ماه",
"months": "{0} ماه",
"year": "یک سال",
"years": "{0} سال",
}
meridians = {
"am": "قبل از ظهر",
"pm": "بعد از ظهر",
"AM": "قبل از ظهر",
"PM": "بعد از ظهر",
}
month_names = [
"",
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December",
]
month_abbreviations = [
"",
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
]
day_names = [
"",
"دو شنبه",
"سه شنبه",
"چهارشنبه",
"پنجشنبه",
"جمعه",
"شنبه",
"یکشنبه",
]
day_abbreviations = ["", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
class HebrewLocale(Locale):
names = ["he", "he_IL"]
past = "לפני {0}"
future = "בעוד {0}"
timeframes = {
"now": "הרגע",
"seconds": "שניות",
"minute": "דקה",
"minutes": "{0} דקות",
"hour": "שעה",
"hours": "{0} שעות",
"2-hours": "שעתיים",
"day": "יום",
"days": "{0} ימים",
"2-days": "יומיים",
"month": "חודש",
"months": "{0} חודשים",
"2-months": "חודשיים",
"year": "שנה",
"years": "{0} שנים",
"2-years": "שנתיים",
}
meridians = {
"am": 'לפנ"צ',
"pm": 'אחר"צ',
"AM": "לפני הצהריים",
"PM": "אחרי הצהריים",
}
month_names = [
"",
"ינואר",
"פברואר",
"מרץ",
"אפריל",
"מאי",
"יוני",
"יולי",
"אוגוסט",
"ספטמבר",
"אוקטובר",
"נובמבר",
"דצמבר",
]
month_abbreviations = [
"",
"ינו׳",
"פבר׳",
"מרץ",
"אפר׳",
"מאי",
"יוני",
"יולי",
"אוג׳",
"ספט׳",
"אוק׳",
"נוב׳",
"דצמ׳",
]
day_names = ["", "שני", "שלישי", "רביעי", "חמישי", "שישי", "שבת", "ראשון"]
day_abbreviations = ["", "ב׳", "ג׳", "ד׳", "ה׳", "ו׳", "ש׳", "א׳"]
def _format_timeframe(self, timeframe, delta):
"""Hebrew couple of <timeframe> aware"""
couple = "2-{}".format(timeframe)
if abs(delta) == 2 and couple in self.timeframes:
return self.timeframes[couple].format(abs(delta))
else:
return self.timeframes[timeframe].format(abs(delta))
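# The dual special case above, sketched:
#
#     >>> he = HebrewLocale()
#     >>> he._format_timeframe("hours", 2)
#     'שעתיים'
#     >>> he._format_timeframe("hours", 3)
#     '3 שעות'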
class MarathiLocale(Locale):
names = ["mr"]
past = "{0} आधी"
future = "{0} नंतर"
timeframes = {
"now": "सद्य",
"seconds": "सेकंद",
"minute": "एक मिनिट ",
"minutes": "{0} मिनिट ",
"hour": "एक तास",
"hours": "{0} तास",
"day": "एक दिवस",
"days": "{0} दिवस",
"month": "एक महिना ",
"months": "{0} महिने ",
"year": "एक वर्ष ",
"years": "{0} वर्ष ",
}
meridians = {"am": "सकाळ", "pm": "संध्याकाळ", "AM": "सकाळ", "PM": "संध्याकाळ"}
month_names = [
"",
"जानेवारी",
"फेब्रुवारी",
"मार्च",
"एप्रिल",
"मे",
"जून",
"जुलै",
"अॉगस्ट",
"सप्टेंबर",
"अॉक्टोबर",
"नोव्हेंबर",
"डिसेंबर",
]
month_abbreviations = [
"",
"जान",
"फेब्रु",
"मार्च",
"एप्रि",
"मे",
"जून",
"जुलै",
"अॉग",
"सप्टें",
"अॉक्टो",
"नोव्हें",
"डिसें",
]
day_names = [
"",
"सोमवार",
"मंगळवार",
"बुधवार",
"गुरुवार",
"शुक्रवार",
"शनिवार",
"रविवार",
]
day_abbreviations = ["", "सोम", "मंगळ", "बुध", "गुरु", "शुक्र", "शनि", "रवि"]
def _map_locales():
locales = {}
for _, cls in inspect.getmembers(sys.modules[__name__], inspect.isclass):
if issubclass(cls, Locale): # pragma: no branch
for name in cls.names:
locales[name.lower()] = cls
return locales
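# A sketch of the mapping this builds: every entry in a subclass's ``names``
# becomes a lower-cased key, so ``_map_locales()["sv"]`` is SwedishLocale and
# ``_map_locales()["zh_tw"]`` is ChineseTWLocale. Classes are visited in the
# alphabetical order returned by inspect.getmembers, so when two classes
# claim the same name the alphabetically later one wins.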
class CatalanLocale(Locale):
names = ["ca", "ca_es", "ca_ad", "ca_fr", "ca_it"]
past = "Fa {0}"
future = "En {0}"
timeframes = {
"now": "Ara mateix",
"seconds": "segons",
"minute": "1 minut",
"minutes": "{0} minuts",
"hour": "una hora",
"hours": "{0} hores",
"day": "un dia",
"days": "{0} dies",
"month": "un mes",
"months": "{0} mesos",
"year": "un any",
"years": "{0} anys",
}
month_names = [
"",
"Gener",
"Febrer",
"Març",
"Abril",
"Maig",
"Juny",
"Juliol",
"Agost",
"Setembre",
"Octubre",
"Novembre",
"Desembre",
]
month_abbreviations = [
"",
"Gener",
"Febrer",
"Març",
"Abril",
"Maig",
"Juny",
"Juliol",
"Agost",
"Setembre",
"Octubre",
"Novembre",
"Desembre",
]
day_names = [
"",
"Dilluns",
"Dimarts",
"Dimecres",
"Dijous",
"Divendres",
"Dissabte",
"Diumenge",
]
day_abbreviations = [
"",
"Dilluns",
"Dimarts",
"Dimecres",
"Dijous",
"Divendres",
"Dissabte",
"Diumenge",
]
class BasqueLocale(Locale):
names = ["eu", "eu_eu"]
past = "duela {0}"
future = "{0}" # I don't know what's the right phrase in Basque for the future.
timeframes = {
"now": "Orain",
"seconds": "segundu",
"minute": "minutu bat",
"minutes": "{0} minutu",
"hour": "ordu bat",
"hours": "{0} ordu",
"day": "egun bat",
"days": "{0} egun",
"month": "hilabete bat",
"months": "{0} hilabet",
"year": "urte bat",
"years": "{0} urte",
}
month_names = [
"",
"urtarrilak",
"otsailak",
"martxoak",
"apirilak",
"maiatzak",
"ekainak",
"uztailak",
"abuztuak",
"irailak",
"urriak",
"azaroak",
"abenduak",
]
month_abbreviations = [
"",
"urt",
"ots",
"mar",
"api",
"mai",
"eka",
"uzt",
"abu",
"ira",
"urr",
"aza",
"abe",
]
day_names = [
"",
"astelehena",
"asteartea",
"asteazkena",
"osteguna",
"ostirala",
"larunbata",
"igandea",
]
day_abbreviations = ["", "al", "ar", "az", "og", "ol", "lr", "ig"]
class HungarianLocale(Locale):
names = ["hu", "hu_hu"]
past = "{0} ezelőtt"
future = "{0} múlva"
timeframes = {
"now": "éppen most",
"seconds": {"past": "másodpercekkel", "future": "pár másodperc"},
"minute": {"past": "egy perccel", "future": "egy perc"},
"minutes": {"past": "{0} perccel", "future": "{0} perc"},
"hour": {"past": "egy órával", "future": "egy óra"},
"hours": {"past": "{0} órával", "future": "{0} óra"},
"day": {"past": "egy nappal", "future": "egy nap"},
"days": {"past": "{0} nappal", "future": "{0} nap"},
"month": {"past": "egy hónappal", "future": "egy hónap"},
"months": {"past": "{0} hónappal", "future": "{0} hónap"},
"year": {"past": "egy évvel", "future": "egy év"},
"years": {"past": "{0} évvel", "future": "{0} év"},
}
month_names = [
"",
"január",
"február",
"március",
"április",
"május",
"június",
"július",
"augusztus",
"szeptember",
"október",
"november",
"december",
]
month_abbreviations = [
"",
"jan",
"febr",
"márc",
"ápr",
"máj",
"jún",
"júl",
"aug",
"szept",
"okt",
"nov",
"dec",
]
day_names = [
"",
"hétfő",
"kedd",
"szerda",
"csütörtök",
"péntek",
"szombat",
"vasárnap",
]
day_abbreviations = ["", "hét", "kedd", "szer", "csüt", "pént", "szom", "vas"]
meridians = {"am": "de", "pm": "du", "AM": "DE", "PM": "DU"}
def _format_timeframe(self, timeframe, delta):
form = self.timeframes[timeframe]
if isinstance(form, dict):
if delta > 0:
form = form["future"]
else:
form = form["past"]
return form.format(abs(delta))
class EsperantoLocale(Locale):
names = ["eo", "eo_xx"]
past = "antaŭ {0}"
future = "post {0}"
timeframes = {
"now": "nun",
"seconds": "kelkaj sekundoj",
"minute": "unu minuto",
"minutes": "{0} minutoj",
"hour": "un horo",
"hours": "{0} horoj",
"day": "unu tago",
"days": "{0} tagoj",
"month": "unu monato",
"months": "{0} monatoj",
"year": "unu jaro",
"years": "{0} jaroj",
}
month_names = [
"",
"januaro",
"februaro",
"marto",
"aprilo",
"majo",
"junio",
"julio",
"aŭgusto",
"septembro",
"oktobro",
"novembro",
"decembro",
]
month_abbreviations = [
"",
"jan",
"feb",
"mar",
"apr",
"maj",
"jun",
"jul",
"aŭg",
"sep",
"okt",
"nov",
"dec",
]
day_names = [
"",
"lundo",
"mardo",
"merkredo",
"ĵaŭdo",
"vendredo",
"sabato",
"dimanĉo",
]
day_abbreviations = ["", "lun", "mar", "mer", "ĵaŭ", "ven", "sab", "dim"]
meridians = {"am": "atm", "pm": "ptm", "AM": "ATM", "PM": "PTM"}
ordinal_day_re = r"((?P<value>[1-3]?[0-9](?=a))a)"
def _ordinal_number(self, n):
return "{}a".format(n)
class ThaiLocale(Locale):
names = ["th", "th_th"]
past = "{0}{1}ที่ผ่านมา"
future = "ในอีก{1}{0}"
timeframes = {
"now": "ขณะนี้",
"seconds": "ไม่กี่วินาที",
"minute": "1 นาที",
"minutes": "{0} นาที",
"hour": "1 ชั่วโมง",
"hours": "{0} ชั่วโมง",
"day": "1 วัน",
"days": "{0} วัน",
"month": "1 เดือน",
"months": "{0} เดือน",
"year": "1 ปี",
"years": "{0} ปี",
}
month_names = [
"",
"มกราคม",
"กุมภาพันธ์",
"มีนาคม",
"เมษายน",
"พฤษภาคม",
"มิถุนายน",
"กรกฎาคม",
"สิงหาคม",
"กันยายน",
"ตุลาคม",
"พฤศจิกายน",
"ธันวาคม",
]
month_abbreviations = [
"",
"ม.ค.",
"ก.พ.",
"มี.ค.",
"เม.ย.",
"พ.ค.",
"มิ.ย.",
"ก.ค.",
"ส.ค.",
"ก.ย.",
"ต.ค.",
"พ.ย.",
"ธ.ค.",
]
day_names = ["", "จันทร์", "อังคาร", "พุธ", "พฤหัสบดี", "ศุกร์", "เสาร์", "อาทิตย์"]
day_abbreviations = ["", "จ", "อ", "พ", "พฤ", "ศ", "ส", "อา"]
meridians = {"am": "am", "pm": "pm", "AM": "AM", "PM": "PM"}
BE_OFFSET = 543
def year_full(self, year):
"""Thai always use Buddhist Era (BE) which is CE + 543"""
year += self.BE_OFFSET
return "{:04d}".format(year)
def year_abbreviation(self, year):
"""Thai always use Buddhist Era (BE) which is CE + 543"""
year += self.BE_OFFSET
return "{:04d}".format(year)[2:]
def _format_relative(self, humanized, timeframe, delta):
"""Thai normally doesn't have any space between words"""
if timeframe == "now":
return humanized
space = "" if timeframe == "seconds" else " "
direction = self.past if delta < 0 else self.future
return direction.format(humanized, space)
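# The Buddhist Era conversion above, sketched:
#
#     >>> th = ThaiLocale()
#     >>> th.year_full(2013)
#     '2556'
#     >>> th.year_abbreviation(2013)
#     '56'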
class BengaliLocale(Locale):
names = ["bn", "bn_bd", "bn_in"]
past = "{0} আগে"
future = "{0} পরে"
timeframes = {
"now": "এখন",
"seconds": "সেকেন্ড",
"minute": "এক মিনিট",
"minutes": "{0} মিনিট",
"hour": "এক ঘণ্টা",
"hours": "{0} ঘণ্টা",
"day": "এক দিন",
"days": "{0} দিন",
"month": "এক মাস",
"months": "{0} মাস ",
"year": "এক বছর",
"years": "{0} বছর",
}
meridians = {"am": "সকাল", "pm": "বিকাল", "AM": "সকাল", "PM": "বিকাল"}
month_names = [
"",
"জানুয়ারি",
"ফেব্রুয়ারি",
"মার্চ",
"এপ্রিল",
"মে",
"জুন",
"জুলাই",
"আগস্ট",
"সেপ্টেম্বর",
"অক্টোবর",
"নভেম্বর",
"ডিসেম্বর",
]
month_abbreviations = [
"",
"জানু",
"ফেব",
"মার্চ",
"এপ্রি",
"মে",
"জুন",
"জুল",
"অগা",
"সেপ্ট",
"অক্টো",
"নভে",
"ডিসে",
]
day_names = [
"",
"সোমবার",
"মঙ্গলবার",
"বুধবার",
"বৃহস্পতিবার",
"শুক্রবার",
"শনিবার",
"রবিবার",
]
day_abbreviations = ["", "সোম", "মঙ্গল", "বুধ", "বৃহঃ", "শুক্র", "শনি", "রবি"]
def _ordinal_number(self, n):
if n > 10 or n == 0:
return "{}তম".format(n)
if n in [1, 5, 7, 8, 9, 10]:
return "{}ম".format(n)
if n in [2, 3]:
return "{}য়".format(n)
if n == 4:
return "{}র্থ".format(n)
if n == 6:
return "{}ষ্ঠ".format(n)
class RomanshLocale(Locale):
names = ["rm", "rm_ch"]
past = "avant {0}"
future = "en {0}"
timeframes = {
"now": "en quest mument",
"seconds": "secundas",
"minute": "ina minuta",
"minutes": "{0} minutas",
"hour": "in'ura",
"hours": "{0} ura",
"day": "in di",
"days": "{0} dis",
"month": "in mais",
"months": "{0} mais",
"year": "in onn",
"years": "{0} onns",
}
month_names = [
"",
"schaner",
"favrer",
"mars",
"avrigl",
"matg",
"zercladur",
"fanadur",
"avust",
"settember",
"october",
"november",
"december",
]
month_abbreviations = [
"",
"schan",
"fav",
"mars",
"avr",
"matg",
"zer",
"fan",
"avu",
"set",
"oct",
"nov",
"dec",
]
day_names = [
"",
"glindesdi",
"mardi",
"mesemna",
"gievgia",
"venderdi",
"sonda",
"dumengia",
]
day_abbreviations = ["", "gli", "ma", "me", "gie", "ve", "so", "du"]
class SwissLocale(Locale):
names = ["de", "de_ch"]
past = "vor {0}"
future = "in {0}"
timeframes = {
"now": "gerade eben",
"seconds": "Sekunden",
"minute": "einer Minute",
"minutes": "{0} Minuten",
"hour": "einer Stunde",
"hours": "{0} Stunden",
"day": "einem Tag",
"days": "{0} Tagen",
"week": "einer Woche",
"weeks": "{0} Wochen",
"month": "einem Monat",
"months": "{0} Monaten",
"year": "einem Jahr",
"years": "{0} Jahren",
}
month_names = [
"",
"Januar",
"Februar",
"März",
"April",
"Mai",
"Juni",
"Juli",
"August",
"September",
"Oktober",
"November",
"Dezember",
]
month_abbreviations = [
"",
"Jan",
"Feb",
"Mär",
"Apr",
"Mai",
"Jun",
"Jul",
"Aug",
"Sep",
"Okt",
"Nov",
"Dez",
]
day_names = [
"",
"Montag",
"Dienstag",
"Mittwoch",
"Donnerstag",
"Freitag",
"Samstag",
"Sonntag",
]
day_abbreviations = ["", "Mo", "Di", "Mi", "Do", "Fr", "Sa", "So"]
class RomanianLocale(Locale):
names = ["ro", "ro_ro"]
past = "{0} în urmă"
future = "peste {0}"
timeframes = {
"now": "acum",
"seconds": "câteva secunde",
"minute": "un minut",
"minutes": "{0} minute",
"hour": "o oră",
"hours": "{0} ore",
"day": "o zi",
"days": "{0} zile",
"month": "o lună",
"months": "{0} luni",
"year": "un an",
"years": "{0} ani",
}
month_names = [
"",
"ianuarie",
"februarie",
"martie",
"aprilie",
"mai",
"iunie",
"iulie",
"august",
"septembrie",
"octombrie",
"noiembrie",
"decembrie",
]
month_abbreviations = [
"",
"ian",
"febr",
"mart",
"apr",
"mai",
"iun",
"iul",
"aug",
"sept",
"oct",
"nov",
"dec",
]
day_names = [
"",
"luni",
"marți",
"miercuri",
"joi",
"vineri",
"sâmbătă",
"duminică",
]
day_abbreviations = ["", "Lun", "Mar", "Mie", "Joi", "Vin", "Sâm", "Dum"]
class SlovenianLocale(Locale):
names = ["sl", "sl_si"]
past = "pred {0}"
future = "čez {0}"
timeframes = {
"now": "zdaj",
"seconds": "sekund",
"minute": "minuta",
"minutes": "{0} minutami",
"hour": "uro",
"hours": "{0} ur",
"day": "dan",
"days": "{0} dni",
"month": "mesec",
"months": "{0} mesecev",
"year": "leto",
"years": "{0} let",
}
meridians = {"am": "", "pm": "", "AM": "", "PM": ""}
month_names = [
"",
"Januar",
"Februar",
"Marec",
"April",
"Maj",
"Junij",
"Julij",
"Avgust",
"September",
"Oktober",
"November",
"December",
]
month_abbreviations = [
"",
"Jan",
"Feb",
"Mar",
"Apr",
"Maj",
"Jun",
"Jul",
"Avg",
"Sep",
"Okt",
"Nov",
"Dec",
]
day_names = [
"",
"Ponedeljek",
"Torek",
"Sreda",
"Četrtek",
"Petek",
"Sobota",
"Nedelja",
]
day_abbreviations = ["", "Pon", "Tor", "Sre", "Čet", "Pet", "Sob", "Ned"]
class IndonesianLocale(Locale):
names = ["id", "id_id"]
past = "{0} yang lalu"
future = "dalam {0}"
timeframes = {
"now": "baru saja",
"seconds": "detik",
"minute": "1 menit",
"minutes": "{0} menit",
"hour": "1 jam",
"hours": "{0} jam",
"day": "1 hari",
"days": "{0} hari",
"month": "1 bulan",
"months": "{0} bulan",
"year": "1 tahun",
"years": "{0} tahun",
}
meridians = {"am": "", "pm": "", "AM": "", "PM": ""}
month_names = [
"",
"Januari",
"Februari",
"Maret",
"April",
"Mei",
"Juni",
"Juli",
"Agustus",
"September",
"Oktober",
"November",
"Desember",
]
month_abbreviations = [
"",
"Jan",
"Feb",
"Mar",
"Apr",
"Mei",
"Jun",
"Jul",
"Ags",
"Sept",
"Okt",
"Nov",
"Des",
]
day_names = ["", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu", "Minggu"]
day_abbreviations = [
"",
"Senin",
"Selasa",
"Rabu",
"Kamis",
"Jumat",
"Sabtu",
"Minggu",
]
class NepaliLocale(Locale):
names = ["ne", "ne_np"]
past = "{0} पहिले"
future = "{0} पछी"
timeframes = {
"now": "अहिले",
"seconds": "सेकण्ड",
"minute": "मिनेट",
"minutes": "{0} मिनेट",
"hour": "एक घण्टा",
"hours": "{0} घण्टा",
"day": "एक दिन",
"days": "{0} दिन",
"month": "एक महिना",
"months": "{0} महिना",
"year": "एक बर्ष",
"years": "बर्ष",
}
meridians = {"am": "पूर्वाह्न", "pm": "अपरान्ह", "AM": "पूर्वाह्न", "PM": "अपरान्ह"}
month_names = [
"",
"जनवरी",
"फेब्रुअरी",
"मार्च",
"एप्रील",
"मे",
"जुन",
"जुलाई",
"अगष्ट",
"सेप्टेम्बर",
"अक्टोबर",
"नोवेम्बर",
"डिसेम्बर",
]
month_abbreviations = [
"",
"जन",
"फेब",
"मार्च",
"एप्रील",
"मे",
"जुन",
"जुलाई",
"अग",
"सेप",
"अक्ट",
"नोव",
"डिस",
]
day_names = [
"",
"सोमवार",
"मंगलवार",
"बुधवार",
"बिहिवार",
"शुक्रवार",
"शनिवार",
"आइतवार",
]
day_abbreviations = ["", "सोम", "मंगल", "बुध", "बिहि", "शुक्र", "शनि", "आइत"]
class EstonianLocale(Locale):
names = ["ee", "et"]
past = "{0} tagasi"
future = "{0} pärast"
timeframes = {
"now": {"past": "just nüüd", "future": "just nüüd"},
"second": {"past": "üks sekund", "future": "ühe sekundi"},
"seconds": {"past": "{0} sekundit", "future": "{0} sekundi"},
"minute": {"past": "üks minut", "future": "ühe minuti"},
"minutes": {"past": "{0} minutit", "future": "{0} minuti"},
"hour": {"past": "tund aega", "future": "tunni aja"},
"hours": {"past": "{0} tundi", "future": "{0} tunni"},
"day": {"past": "üks päev", "future": "ühe päeva"},
"days": {"past": "{0} päeva", "future": "{0} päeva"},
"month": {"past": "üks kuu", "future": "ühe kuu"},
"months": {"past": "{0} kuud", "future": "{0} kuu"},
"year": {"past": "üks aasta", "future": "ühe aasta"},
"years": {"past": "{0} aastat", "future": "{0} aasta"},
}
month_names = [
"",
"Jaanuar",
"Veebruar",
"Märts",
"Aprill",
"Mai",
"Juuni",
"Juuli",
"August",
"September",
"Oktoober",
"November",
"Detsember",
]
month_abbreviations = [
"",
"Jan",
"Veb",
"Mär",
"Apr",
"Mai",
"Jun",
"Jul",
"Aug",
"Sep",
"Okt",
"Nov",
"Dets",
]
day_names = [
"",
"Esmaspäev",
"Teisipäev",
"Kolmapäev",
"Neljapäev",
"Reede",
"Laupäev",
"Pühapäev",
]
day_abbreviations = ["", "Esm", "Teis", "Kolm", "Nelj", "Re", "Lau", "Püh"]
def _format_timeframe(self, timeframe, delta):
form = self.timeframes[timeframe]
if delta > 0:
form = form["future"]
else:
form = form["past"]
return form.format(abs(delta))
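# Example of the past/future split above (assuming the standard arrow humanize API;
# outputs follow directly from the timeframes dict):
#     arrow.utcnow().shift(hours=-2).humanize(locale='et')   # -> '2 tundi tagasi'
#     arrow.utcnow().shift(hours=+2).humanize(locale='et')   # -> '2 tunni pärast'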
_locales = _map_locales()
|
py | 1a5068aa8f2841531113ccb78811f79dcbf18fa9 | # Copyright 2020 The MuLT Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from optimization import LightGBMOptimizer, SVMOptimizer, LogisticRegressionOptimizer
from optimization import KNNOptimizer, MLPOptimizer, RFOptimizer
from pipeline import SelectMarker
from lightgbm import LGBMModel
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
import os
class SMLA(SelectMarker):
def __init__(self,
predictor,
optimizer_default_params=None,
model_default_params=None,
verbose=-1,
random_state=None,
use_gpu=False,
test_size=.2,
n_gene_limit=None,
output_path='.',
experiment_number=1,
number_of_experiments=1,
export_metadata=True
):
assert isinstance(optimizer_default_params, dict) or optimizer_default_params is None
assert isinstance(model_default_params, dict) or model_default_params is None
self.output_path = output_path
#
self.predictor = predictor
self.model_default_params = model_default_params
#
self.optimized_params = None
self.optimizer_default_params = optimizer_default_params
#
self.model = None
self.fitted_shape = None
#
self.random_state = random_state
self.verbose = verbose
self.use_gpu = use_gpu
#
self.test_size = test_size
#
self.scaler = MinMaxScaler()
#
self.n_gene_limit = n_gene_limit
self.selected_clinical = None
self.selected_genes = None
#
self.experiment_number = experiment_number
self.number_of_experiments = number_of_experiments
self.export_metadata = export_metadata
#
if self.predictor == 'lightgbm':
self.optimizer = LightGBMOptimizer(**self.optimizer_default_params)
elif self.predictor == 'svm':
self.optimizer = SVMOptimizer(**self.optimizer_default_params)
elif self.predictor == 'knn':
self.optimizer = KNNOptimizer(**self.optimizer_default_params)
elif self.predictor == 'lr':
self.optimizer = LogisticRegressionOptimizer(**self.optimizer_default_params)
elif self.predictor == 'mlp':
self.optimizer = MLPOptimizer(**self.optimizer_default_params)
elif self.predictor == 'rf':
self.optimizer = RFOptimizer(**self.optimizer_default_params)
else:
raise ValueError('predictor should be one of the following: '
'lightgbm, svm, knn, lr, or mlp')
for subdir in ['selected_markers']:
path = os.path.join(self.output_path, subdir)
if not os.path.exists(path):
os.makedirs(path)
def fit(self,
genes,
outcome,
clinical_markers=None, treatments=None,
clinical_marker_selection_threshold=0.1,
genes_marker_selection_threshold=0.1,
early_stopping_rounds=100):
# feature selection
        x = None
        if clinical_markers is not None:
self.selected_clinical = self.select_markers(
clinical_markers, outcome, threshold=clinical_marker_selection_threshold)
x = clinical_markers.loc[:, self.selected_clinical[0]]
if treatments is not None:
x = x.join(treatments) if x is not None else treatments
self.selected_genes = self.select_markers(
genes, outcome, threshold=genes_marker_selection_threshold, random_state=self.random_state)
if self.export_metadata:
if self.selected_clinical is not None:
pd.DataFrame({'clinical_marker': self.selected_clinical[0],
'pvalue': self.selected_clinical[1],
'entropy': self.selected_clinical[2]}).to_csv(
os.path.join(
self.output_path, 'selected_markers',
'clinical_{0:03}_{1:03}.csv'.format(
self.experiment_number, self.number_of_experiments)),
index=False)
pd.DataFrame({'gene': self.selected_genes[0],
'pvalue': self.selected_genes[1],
'entropy': self.selected_genes[2]}).to_csv(
os.path.join(
self.output_path, 'selected_markers',
'genes_{0:03}_{1:03}.csv'.format(
self.experiment_number, self.number_of_experiments)),
index=False)
genes = genes.loc[:, self.selected_genes[0]]
# join data sets
x = x.join(genes, how='inner').fillna(0).values if x is not None else genes.fillna(0).values
y = outcome.values
x = self.scaler.fit_transform(x)
######
self.fitted_shape = x.shape
self.optimized_params = self.optimizer.optimize(x, y)
self.optimized_params['random_state'] = self.random_state
self.optimized_params['n_jobs'] = -1
if self.model_default_params is not None:
self.optimized_params.update(self.model_default_params)
if self.predictor == 'lightgbm':
self.fit_lightgbm(x, y, early_stopping_rounds)
elif self.predictor == 'svm':
self.fit_svm(x, y)
elif self.predictor == 'knn':
self.fit_knn(x, y)
elif self.predictor == 'lr':
self.fit_lr(x, y)
elif self.predictor == 'mlp':
self.fit_mlp(x, y, early_stopping_rounds)
elif self.predictor == 'rf':
self.fit_rf(x, y)
def fit_rf(self, x, y):
self.model = RandomForestClassifier(**self.optimized_params)
self.model.fit(x, y)
def fit_lightgbm(self, x, y, early_stopping_rounds):
self.model = LGBMModel(**self.optimized_params)
if early_stopping_rounds is not None and early_stopping_rounds > 0:
x_train, x_valid, y_train, y_valid = train_test_split(x, y, stratify=y, shuffle=True,
test_size=self.test_size, random_state=self.random_state)
self.model.fit(x_train, y_train, eval_set=[(x_valid, y_valid)], verbose=self.verbose)
else:
self.model.fit(x, y)
def fit_svm(self, x, y):
del self.optimized_params['n_jobs']
self.model = SVC(**self.optimized_params, probability=True)
self.model.fit(x, y)
def fit_lr(self, x, y):
self.model = LogisticRegression(**self.optimized_params)
self.model.fit(x, y)
def fit_mlp(self, x, y, early_stopping_rounds):
esr = early_stopping_rounds is not None and early_stopping_rounds > 0
del self.optimized_params['n_jobs']
self.model = MLPClassifier(**self.optimized_params,
early_stopping=esr,
validation_fraction=self.test_size)
self.model.fit(x, y)
def fit_knn(self, x, y):
del self.optimized_params['random_state']
self.model = KNeighborsClassifier(**self.optimized_params)
self.model.fit(x, y)
def predict(self, genes, clinical_markers=None, treatments=None):
        assert isinstance(genes, pd.DataFrame), 'genes should be a pd.DataFrame'
        x = None
        if clinical_markers is not None:
x = clinical_markers.loc[:, self.selected_clinical[0]]
if treatments is not None:
x = x.join(treatments) if x is not None else treatments
genes = genes.loc[:, self.selected_genes[0]]
        x = x.join(genes, how='inner').fillna(0).values if x is not None else genes.fillna(0).values
x = np.maximum(0, np.minimum(1, self.scaler.transform(x)))
assert x.shape[1] == self.fitted_shape[1], \
'new data should have same number of features used to fit model'
if self.predictor == 'lightgbm':
result = self.model.predict(x)
else:
result = self.model.predict_proba(x)
if len(result.shape) > 1:
result = result[:, -1]
return result
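# Hedged usage sketch (illustrative, not part of the original module). It assumes
# pandas DataFrames indexed by sample id and that the optimizer classes accept an
# empty default-parameter dict:
#
#     smla = SMLA(predictor='lightgbm', optimizer_default_params={},
#                 random_state=42, output_path='./smla_out')
#     smla.fit(genes_df, outcome, clinical_markers=clinical_df)
#     probabilities = smla.predict(genes_df, clinical_markers=clinical_df)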
|
py | 1a506b1b56cac2bacd0a0508a58dc18706262fd5 | """
Program for plotting integral curves of a third-order differential equation
solved for the highest derivative, y''' = f(x, y, y', y'').
Left mouse button: fix an initial condition or freeze the current integral curve.
Right mouse button: change the initial conditions.
"""
import matplotlib
matplotlib.use('TkAgg')
from collections import namedtuple
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from scipy.integrate import ode
def f(x, y):
""" Правая часть дифференциального уравнения y'''=(x, y, y', y'')
Здесь y <--> y[0]; y' <--> y[1]; y'' <--> y[2]
"""
return [y[1], y[2], x + y[0] + y[1]+ y[2]]
def on_move(event):
global x0, y0, x1, y1, dy0
    if not event.xdata or not event.ydata:  # cursor left the plot area
line.set_data([], [])
dot.set_data([], [])
tang.set_data([], [])
circ.set_radius(0)
ax.set_title("")
fig.canvas.draw_idle()
return
    if x0 is None:  # set the 1st initial condition
dot.set_data([event.xdata], [event.ydata])
ax.set_title(f"y({event.xdata:.2f})={event.ydata:.2f}")
if event.button == 1:
x0 = event.xdata
y0 = event.ydata
    elif x1 is None:  # set the 2nd initial condition
        # restore the auxiliary drawings if they were removed when the mouse
        # left the plot area
dot.set_data([x0], [y0])
tang.set_data([2*x0 - event.xdata, event.xdata], [2*y0 - event.ydata, event.ydata])
delta_x = event.xdata - x0
delta_y = event.ydata - y0
        if delta_x == 0:  # avoid division by zero
return
ax.set_title(f"y({x0:.2f})={y0:.2f}, y'({x0:.2f})={delta_y/delta_x:.2f}")
if event.button == 1:
x1 = event.xdata
y1 = event.ydata
dy0 = delta_y / delta_x
    else:  # set the 3rd initial condition and plot the integral curve
x2 = event.xdata
y2 = event.ydata
        # restore the auxiliary drawings if they were removed when the mouse
        # left the plot area
dot.set_data([x0], [y0])
tang.set_data([2*x0 - x1, x1], [2*y0 - y1, y1])
if dy0 == 0:
x_c = x0
y_c = 0.5*(y2+y0+(x2-x0)**2/(y2-y0))
R = abs(y0-y_c)
else:
            # Slope of the line on which the circle center must lie
            k = -1/dy0
            # Coordinates of the circle center
x_c = .5 * (y2**2 - y0**2 + x2**2 - x0**2 + 2 * (y0 - k*x0) * (y0 - y2)) / (k*(y2 - y0) + x2 - x0)
y_c = k * (x_c - x0) + y0
            # Radius of the circle
            R = np.hypot(y0 - y_c, x0 - x_c)
        # Draw the circle
circ.center = (x_c, y_c)
circ.set_radius(R)
        # Initial value of the second derivative
d2y0 = (1+dy0**2)**(3/2)/R*np.sign(y_c-y0)
ax.set_title(f"y({x0:.2f})={y0:.2f}, y'({x0:.2f})={dy0:.2f}, y''({x0:.2f})={d2y0:.2f}")
de = ode(f)
de.set_integrator('dop853')
# de.set_integrator('zvode', method='bdf')
dt = 0.05
sol = []
de.set_initial_value([y0, dy0, d2y0], x0)
while de.successful() and de.t <= xlim.end:
de.integrate(de.t + dt)
sol.append((de.t, de.y[0]))
de.set_initial_value([y0, dy0, d2y0], x0)
while de.successful() and de.t >= xlim.start:
de.integrate(de.t - dt)
sol.append((de.t, de.y[0]))
sol.sort(key=lambda x: x[0])
sol = list(zip(*sol))
        if event.button == 1:  # freeze the integral curve
ax.plot(sol[0], sol[1], 'r')
        elif event.button == 3:  # change the starting point
x0 = event.xdata
y0 = event.ydata
x1 = None
y1 = None
dy0 = None
dot.set_data([x0], [y0])
tang.set_data([], [])
circ.set_radius(0)
line.set_data([], [])
ax.set_title(f"y({x0:.2f})={y0:.2f}")
        else:  # preview the current integral curve
line.set_data(sol[0], sol[1])
print(f"y''({x0:.2f})={d2y0:.2f}")
fig.canvas.draw_idle()
Lim = namedtuple('Lim', ['start', 'end'])
xlim = Lim(-5, 5)
ylim = Lim(-5, 5)
x0 = None
y0 = None
x1 = None
y1 = None
dy0 = None
fig, ax = plt.subplots()
ax.grid()
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_aspect('equal')
ax.hlines(0, *xlim, lw=0.5)
ax.vlines(0, *ylim, lw=0.5)
fig.canvas.mpl_connect('button_press_event', on_move)
fig.canvas.mpl_connect('motion_notify_event', on_move)
line, = ax.plot([], [], 'm')
dot, = ax.plot([], [], '.m')
tang, = ax.plot([], [], 'g', lw=0.5)
circ = Circle((0, 0), 0, color='g', lw=0.5, fill=False)
ax.add_patch(circ)
plt.show()
|
py | 1a506ce907cc18af49447b92f3f28d1737b9d769 | import _x64dbg
def _plugin_logprintf(text='', *args):
_x64dbg._plugin_logprintf(text % args)
def _plugin_logputs(text=''):
_plugin_logprintf('%s\n' % text)
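# Example (only meaningful when imported inside an x64dbgpy plugin, where the
# `_x64dbg` extension module is available):
#     _plugin_logputs('hello from the plugin')
#     _plugin_logprintf('value: %08X\n', 0xDEADBEEF)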
|
py | 1a506d0e6570d18481953003f181d0c3055c467f | from typing import List
from pyrep.objects.dummy import Dummy
from pyrep.objects.joint import Joint
from rlbench.backend.task import Task
from rlbench.backend.conditions import JointCondition
OPTIONS = ['left', 'right']
class TurnTap(Task):
def init_task(self) -> None:
self.left_start = Dummy('waypoint0')
self.left_end = Dummy('waypoint1')
self.right_start = Dummy('waypoint5')
self.right_end = Dummy('waypoint6')
self.left_joint = Joint('left_joint')
self.right_joint = Joint('right_joint')
def init_episode(self, index: int) -> List[str]:
option = OPTIONS[index]
if option == 'right':
self.left_start.set_position(self.right_start.get_position())
self.left_start.set_orientation(self.right_start.get_orientation())
self.left_end.set_position(self.right_end.get_position())
self.left_end.set_orientation(self.right_end.get_orientation())
self.register_success_conditions(
[JointCondition(self.right_joint, 1.57)])
else:
self.register_success_conditions(
[JointCondition(self.left_joint, 1.57)])
return ['turn %s tap' % option,
'rotate the %s tap' % option,
'grasp the %s tap and turn it' % option]
def variation_count(self) -> int:
return 2
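# Hedged usage sketch (module paths follow the RLBench quickstart of the same era
# and may differ between versions):
#
#     from rlbench.environment import Environment
#     from rlbench.action_modes import ActionMode, ArmActionMode
#     env = Environment(ActionMode(ArmActionMode.ABS_JOINT_VELOCITY))
#     env.launch()
#     task = env.get_task(TurnTap)
#     descriptions, obs = task.reset()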
|
py | 1a506d7336bd2be1617b16314b9704f08b53643a | import glob
import os
import ast
import sys
import json
from collections import Counter
sys.setrecursionlimit(1000000)
CODE_DIR = "python_top_code"
OUT_DIR = "stats"
def make_dir_ignore_exists(d):
try:
return os.mkdir(d)
except FileExistsError as E:
pass
def decode_data(data):
try:
data = data.decode("utf8")
return data
except UnicodeDecodeError:
pass
data = data.decode("ISO-8859-1")
return data
def gen_repo_asts(repo):
ok = 0
bad = 0
for file in glob.glob(f"{CODE_DIR}/{repo}/*.py"):
data = open(file, "rb").read()
data = decode_data(data)
if "generated" in data[:1024]:
print(f"skipping file {file}: autogenerated")
continue
try:
yield ast.parse(data)
ok += 1
except Exception:
bad += 1
print(f"ast generation finished: ok {ok}, bad {bad}")
def gen_ast_subnodes(ast_node):
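    # Depth-first traversal: yield each direct child, then recurse into it.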
for child in ast.iter_child_nodes(ast_node):
yield child
yield from gen_ast_subnodes(child)
def save_counter(repo, filename, counter):
make_dir_ignore_exists(OUT_DIR)
make_dir_ignore_exists(f"{OUT_DIR}/{repo}")
with open(f"{OUT_DIR}/{repo}/{filename}", "w") as f:
for k, v in counter.most_common():
print(v, k, file=f)
c = Counter()
class_keywords_c = Counter()
class_bases_c = Counter()
class_decorators_c = Counter()
function_decorators_c = Counter()
async_function_decorators_c = Counter()
exception_handlers_c = Counter()
attributes_c = Counter()
func_names_c = Counter()
async_func_names_c = Counter()
class_names_c = Counter()
module_names_c = Counter()
from_module_names_c = Counter()
repo = sys.argv[1].replace("/", "_")
for cur_ast in gen_repo_asts(repo):
for ast_node in gen_ast_subnodes(cur_ast):
name = type(ast_node).__name__
c[name] += 1
if isinstance(ast_node, ast.For):
if ast_node.orelse:
c["bay_for_with_else"] += 1
elif isinstance(ast_node, ast.While):
if ast_node.orelse:
c["bay_while_with_else"] += 1
elif isinstance(ast_node, ast.ClassDef):
has_metaclass = False
for keyword in ast_node.keywords:
if keyword.arg == "metaclass":
has_metaclass = True
class_keywords_c[f"{keyword.arg}={ast.unparse(keyword.value)}"] += 1
if has_metaclass:
c["bay_class_with_metaclass"] += 1
bases = [ast.unparse(b) for b in ast_node.bases]
if bases and bases != ["object"]:
c["bay_class_with_bases"] += 1
if bases:
class_bases_c.update(bases)
else:
class_bases_c["<no_base_class>"] += 1
decorators = [ast.unparse(b) for b in ast_node.decorator_list]
if decorators:
c["bay_class_with_decorators"] += 1
if decorators:
class_decorators_c.update(decorators)
else:
class_decorators_c["<no_decorators>"] += 1
if ast.get_docstring(ast_node):
c["bay_class_with_docstring"] += 1
class_names_c[ast_node.name] += 1
elif isinstance(ast_node, ast.Try):
has_handlers = ast_node.handlers
has_final = ast_node.finalbody
has_else = ast_node.orelse
except_type = "bay_try"
if has_handlers:
except_type += "_except"
if has_final:
except_type += "_finally"
if has_else:
except_type += "_else"
c[except_type] += 1
elif isinstance(ast_node, ast.FunctionDef):
decorators = [ast.unparse(b) for b in ast_node.decorator_list]
if decorators:
c["bay_functions_with_decorators"] += 1
if decorators:
function_decorators_c.update(decorators)
else:
function_decorators_c["<no_decorators>"] += 1
if ast.get_docstring(ast_node):
c["bay_functions_with_docstring"] += 1
func_names_c[ast_node.name] += 1
if ast_node.returns:
c["bay_functions_annotation_in_returns"] += 1
elif isinstance(ast_node, ast.AsyncFunctionDef):
decorators = [ast.unparse(b) for b in ast_node.decorator_list]
if decorators:
c["bay_async_functions_with_decorators"] += 1
if decorators:
async_function_decorators_c.update(decorators)
else:
async_function_decorators_c["<no_decorators>"] += 1
if ast.get_docstring(ast_node):
c["bay_async_functions_with_docstring"] += 1
async_func_names_c[ast_node.name] += 1
if ast_node.returns:
c["bay_async_functions_annotation_in_returns"] += 1
elif isinstance(ast_node, ast.Assign):
if isinstance(ast_node.value, ast.Yield):
c["bay_assign_yield"] += 1
if isinstance(ast_node.value, ast.YieldFrom):
c["bay_assign_yield_from"] += 1
elif isinstance(ast_node, ast.ExceptHandler):
except_type = ast_node.type
try:
exception_handlers_c[ast.unparse(except_type)] += 1
except Exception:
pass
elif isinstance(ast_node, ast.Attribute):
attributes_c[ast_node.attr] += 1
elif isinstance(ast_node, ast.Import):
modules = [ast.unparse(b) for b in ast_node.names]
module_names_c.update(modules)
elif isinstance(ast_node, ast.ImportFrom):
from_module_names_c[ast_node.module] += 1
elif isinstance(ast_node, ast.arg):
if ast_node.annotation:
c["bay_arg_annotation"] += 1
save_counter(repo, "stat_ast.txt", c)
save_counter(repo, "stat_class_keywords.txt", class_keywords_c)
save_counter(repo, "stat_class_bases.txt", class_bases_c)
save_counter(repo, "stat_class_decorators_c.txt", class_decorators_c)
save_counter(repo, "stat_function_decorators_c.txt", function_decorators_c)
save_counter(repo, "stat_async_function_decorators.txt", async_function_decorators_c)
save_counter(repo, "stat_exception_handlers.txt", exception_handlers_c)
save_counter(repo, "stat_attributes.txt", attributes_c)
save_counter(repo, "stat_func_names.txt", func_names_c)
save_counter(repo, "stat_async_func_names.txt", async_func_names_c)
save_counter(repo, "stat_class_names.txt", class_names_c)
save_counter(repo, "stat_module_names.txt", module_names_c)
save_counter(repo, "stat_from_module_names.txt", from_module_names_c)
|
py | 1a506dd488bece9eb4feb69a5a865dbbef76e646 | # qubit number=2
# total number=10
import cirq
import qiskit
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
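    # For each input bit string where f(x) == "1", conjugate a multi-controlled
    # Toffoli with X gates so the controls fire exactly on that pattern.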
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename='circuit/deutsch-oracle.png')
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n, "qc")
target = QuantumRegister(1, "qt")
prog = QuantumCircuit(input_qubit, target)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(target)
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[1]) # number=1
prog.h(input_qubit[1]) # number=7
prog.cz(input_qubit[0],input_qubit[1]) # number=8
prog.h(input_qubit[1]) # number=9
prog.cx(input_qubit[0],input_qubit[1]) # number=5
prog.h(target)
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [target])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
prog.x(input_qubit[0]) # number=3
prog.y(input_qubit[1]) # number=6
prog.x(input_qubit[0]) # number=4
# circuit end
return prog
if __name__ == '__main__':
n = 2
f = lambda rep: rep[-1]
# f = lambda rep: "1" if rep[0:2] == "01" or rep[0:2] == "10" else "0"
# f = lambda rep: "0"
prog = make_circuit(n, f)
sample_shot =2800
backend = BasicAer.get_backend('statevector_simulator')
circuit1 = transpile(prog,FakeVigo())
circuit1.x(qubit=3)
circuit1.x(qubit=3)
prog = circuit1
info = execute(prog, backend=backend).result().get_statevector()
qubits = round(log2(len(info)))
info = {
np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
for i in range(2 ** qubits)
}
writefile = open("../data/startQiskit_Class150.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
|
py | 1a506ecd619438944adcb4e0d904f0f2200128bb | import logging
import asyncio
from .http_utils import Request, Response
from .exceptions import (
BadRequestException,
NotFoundException,
TimeoutException,
)
TIMEOUT = 5
# An HTTPServer object needs a Router object and an http_parser module and is initialized with them
class HTTPServer(object):
"""
Contains objects that are shared by HTTPConnections and schedules async
connections.
:param router: An object that must expose the 'get_handler' interface.
:param http_parser: An object that must expose the 'parse_into' interface,
which works with a Request object and a bytearray.
:param loop: An object that implements the 'asyncio.BaseEventLoop'
interface.
"""
# HTTPServer(self.router, self.http_parser, self.loop)
def __init__(self, router, http_parser, loop):
self.router = router
self.http_parser = http_parser
self.loop = loop
async def handle_connection(self, reader, writer):
"""
Creates and schedules a HTTPConnection given a set (reader, writer)
objects.
:param reader: An object that implements the 'asyncio.StreamReader'
interface.
:param writer: An object that implements the 'asyncio.StreamWriter'
interface.
"""
connection = HTTPConnection(self, reader, writer)
asyncio.ensure_future(connection.handle_request(), loop=self.loop)
'''
HTTPConnection objects: each instance represents a single client HTTP connection
and handles its request-response cycle:
parse the received byte stream into a Request object with the http_parser module;
use a Router instance to find and call the right function to generate a response;
finally, send that response back to the client.
'''
class HTTPConnection(object):
"""
Takes care of whole life cycle of a single TCP connection with a
HTTP client. First reads incoming data, parses it with
'http_server.parser', generates as Response with 'http_server.router'
and sends data back to client.
:param http_server: An instance of HTTPServer.
:param reader: An object that implements the 'asyncio.StreamReader'
interface.
:param writer: An object that implements the 'asyncio.StreamWriter'
interface.
"""
def __init__(self, http_server, reader, writer):
        self.router = http_server.router  # Router instance that finds and calls the right handler for a response
        self.http_parser = http_server.http_parser  # parses the received byte stream into a Request object
self.loop = http_server.loop
self._reader = reader
self._writer = writer
self._buffer = bytearray()
self._conn_timeout = None
self.request = Request()
async def handle_request(self):
"""
Reads bytes from a connection and attempts to parse them
incrementally until it can issue a Response and close the
connection.
Also handles resetting the timeout counter for a connection.
"""
try:
while not self.request.finished and not self._reader.at_eof():
data = await self._reader.read(1024)
if data:
self._reset_conn_timeout()
await self.process_data(data)
if self.request.finished:
await self.reply()
elif self._reader.at_eof():
raise BadRequestException()
except (NotFoundException,
BadRequestException) as e:
self.error_reply(e.code, body=Response.reason_phrases[e.code])
except Exception as e:
logging.error(e)
logging.error(e.__traceback__)
self.error_reply(500, body=Response.reason_phrases[500])
self.close_connection()
async def process_data(self, data):
"""
Accumulates data inside of _buffer and attempts to
parse the accumulated data.
:param data: A bytearray object.
"""
self._buffer.extend(data)
self._buffer = self.http_parser.parse_into(
self.request, self._buffer)
def close_connection(self):
"""
Cancels the timeout timer and closes the connection.
"""
logging.debug('Closing connection')
self._cancel_conn_timeout()
self._writer.close()
def error_reply(self, code, body=''):
"""
Generates a simple error response.
:param code: Integer signifying the HTTP error.
:param body: A string that contains an error message.
"""
response = Response(code=code, body=body)
self._writer.write(response.to_bytes())
self._writer.drain()
async def reply(self):
"""
Obtains and applies the correct handler from 'self.router'
and write the Response back to the client.
"""
logging.debug('Replying to request')
request = self.request
handler = self.router.get_handler(request.path)
response = await handler.handle(request)
if not isinstance(response, Response):
response = Response(code=200, body=response)
self._writer.write(response.to_bytes())
await self._writer.drain()
def _conn_timeout_close(self):
self.error_reply(500, 'timeout')
self.close_connection()
def _reset_conn_timeout(self, timeout=TIMEOUT):
self._cancel_conn_timeout()
self._conn_timeout = self.loop.call_later(
timeout, self._conn_timeout_close)
def _cancel_conn_timeout(self):
if self._conn_timeout:
self._conn_timeout.cancel()
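# Hedged wiring sketch (not part of this module): `router` must expose
# 'get_handler' and `http_parser` must expose 'parse_into', as documented above.
#
#     loop = asyncio.get_event_loop()
#     server = HTTPServer(router, http_parser, loop)
#     loop.run_until_complete(
#         asyncio.start_server(server.handle_connection, '127.0.0.1', 8080))
#     loop.run_forever()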
|
py | 1a5071834f3db5b1059edd0ae03cec41eab99d5f | # -*- coding: utf-8 -*-
import logging
from pyramid.interfaces import IRequest
from openregistry.assets.core.includeme import IContentConfigurator
from openregistry.assets.core.interfaces import IAssetManager
from openregistry.assets.basic.models import Asset, IBasicAsset
from openregistry.assets.basic.adapters import BasicAssetConfigurator, BasicAssetManagerAdapter
from openregistry.assets.basic.constants import (
DEFAULT_ASSET_BASIC_TYPE,
DEFAULT_LEVEL_OF_ACCREDITATION
)
LOGGER = logging.getLogger(__name__)
def includeme(config, plugin_config=None):
config.scan("openregistry.assets.basic.views")
config.scan("openregistry.assets.basic.subscribers")
config.registry.registerAdapter(BasicAssetConfigurator,
(IBasicAsset, IRequest),
IContentConfigurator)
config.registry.registerAdapter(BasicAssetManagerAdapter,
(IBasicAsset, ),
IAssetManager)
asset_types = plugin_config.get('aliases', [])
if plugin_config.get('use_default', False):
asset_types.append(DEFAULT_ASSET_BASIC_TYPE)
for at in asset_types:
config.add_assetType(Asset, at)
LOGGER.info("Included openregistry.assets.basic plugin", extra={'MESSAGE_ID': 'included_plugin'})
# add accreditation level
if not plugin_config.get('accreditation'):
config.registry.accreditation['asset'][Asset._internal_type] = DEFAULT_LEVEL_OF_ACCREDITATION
else:
config.registry.accreditation['asset'][Asset._internal_type] = plugin_config['accreditation']
|
py | 1a50720ffaa798318a22c8be25c78bd0510c4b63 | import base64
import os
import shutil
import string
import sys
import tempfile
import unittest
from datetime import timedelta
from django.conf import settings
from django.contrib.sessions.backends.cache import SessionStore as CacheSession
from django.contrib.sessions.backends.cached_db import \
SessionStore as CacheDBSession
from django.contrib.sessions.backends.db import SessionStore as DatabaseSession
from django.contrib.sessions.backends.file import SessionStore as FileSession
from django.contrib.sessions.backends.signed_cookies import \
SessionStore as CookieSession
from django.contrib.sessions.exceptions import InvalidSessionKey
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.sessions.models import Session
from django.contrib.sessions.serializers import (
JSONSerializer, PickleSerializer,
)
from django.core import management
from django.core.cache import caches
from django.core.cache.backends.base import InvalidCacheBackendError
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponse
from django.test import (
RequestFactory, TestCase, ignore_warnings, override_settings,
)
from django.test.utils import patch_logger
from django.utils import six, timezone
from django.utils.encoding import force_text
from django.utils.six.moves import http_cookies
from .custom_db_backend import SessionStore as CustomDatabaseSession
class SessionTestsMixin(object):
# This does not inherit from TestCase to avoid any tests being run with this
# class, which wouldn't work, and to allow different TestCase subclasses to
# be used.
backend = None # subclasses must specify
def setUp(self):
self.session = self.backend()
def tearDown(self):
# NB: be careful to delete any sessions created; stale sessions fill up
# the /tmp (with some backends) and eventually overwhelm it after lots
# of runs (think buildbots)
self.session.delete()
def test_new_session(self):
self.assertFalse(self.session.modified)
self.assertFalse(self.session.accessed)
def test_get_empty(self):
self.assertEqual(self.session.get('cat'), None)
def test_store(self):
self.session['cat'] = "dog"
self.assertTrue(self.session.modified)
self.assertEqual(self.session.pop('cat'), 'dog')
def test_pop(self):
self.session['some key'] = 'exists'
# Need to reset these to pretend we haven't accessed it:
self.accessed = False
self.modified = False
self.assertEqual(self.session.pop('some key'), 'exists')
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
self.assertEqual(self.session.get('some key'), None)
def test_pop_default(self):
self.assertEqual(self.session.pop('some key', 'does not exist'),
'does not exist')
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
def test_setdefault(self):
self.assertEqual(self.session.setdefault('foo', 'bar'), 'bar')
self.assertEqual(self.session.setdefault('foo', 'baz'), 'bar')
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
def test_update(self):
self.session.update({'update key': 1})
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
self.assertEqual(self.session.get('update key', None), 1)
def test_has_key(self):
self.session['some key'] = 1
self.session.modified = False
self.session.accessed = False
self.assertIn('some key', self.session)
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
def test_values(self):
self.assertEqual(list(self.session.values()), [])
self.assertTrue(self.session.accessed)
self.session['some key'] = 1
self.assertEqual(list(self.session.values()), [1])
def test_iterkeys(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = six.iterkeys(self.session)
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), ['x'])
def test_itervalues(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = six.itervalues(self.session)
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), [1])
def test_iteritems(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = six.iteritems(self.session)
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), [('x', 1)])
def test_clear(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
self.assertEqual(list(self.session.items()), [('x', 1)])
self.session.clear()
self.assertEqual(list(self.session.items()), [])
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
def test_save(self):
if (hasattr(self.session, '_cache') and 'DummyCache' in
settings.CACHES[settings.SESSION_CACHE_ALIAS]['BACKEND']):
raise unittest.SkipTest("Session saving tests require a real cache backend")
self.session.save()
self.assertTrue(self.session.exists(self.session.session_key))
def test_delete(self):
self.session.save()
self.session.delete(self.session.session_key)
self.assertFalse(self.session.exists(self.session.session_key))
def test_flush(self):
self.session['foo'] = 'bar'
self.session.save()
prev_key = self.session.session_key
self.session.flush()
self.assertFalse(self.session.exists(prev_key))
self.assertNotEqual(self.session.session_key, prev_key)
self.assertIsNone(self.session.session_key)
self.assertTrue(self.session.modified)
self.assertTrue(self.session.accessed)
def test_cycle(self):
self.session['a'], self.session['b'] = 'c', 'd'
self.session.save()
prev_key = self.session.session_key
prev_data = list(self.session.items())
self.session.cycle_key()
self.assertNotEqual(self.session.session_key, prev_key)
self.assertEqual(list(self.session.items()), prev_data)
def test_save_doesnt_clear_data(self):
self.session['a'] = 'b'
self.session.save()
self.assertEqual(self.session['a'], 'b')
def test_invalid_key(self):
# Submitting an invalid session key (either by guessing, or if the db has
# removed the key) results in a new key being generated.
try:
session = self.backend('1')
try:
session.save()
except AttributeError:
self.fail(
"The session object did not save properly. "
"Middleware may be saving cache items without namespaces."
)
self.assertNotEqual(session.session_key, '1')
self.assertEqual(session.get('cat'), None)
session.delete()
finally:
# Some backends leave a stale cache entry for the invalid
# session key; make sure that entry is manually deleted
session.delete('1')
def test_session_key_empty_string_invalid(self):
"""Falsey values (Such as an empty string) are rejected."""
self.session._session_key = ''
self.assertIsNone(self.session.session_key)
def test_session_key_too_short_invalid(self):
"""Strings shorter than 8 characters are rejected."""
self.session._session_key = '1234567'
self.assertIsNone(self.session.session_key)
def test_session_key_valid_string_saved(self):
"""Strings of length 8 and up are accepted and stored."""
self.session._session_key = '12345678'
self.assertEqual(self.session.session_key, '12345678')
def test_session_key_is_read_only(self):
def set_session_key(session):
session.session_key = session._get_new_session_key()
self.assertRaises(AttributeError, set_session_key, self.session)
# Custom session expiry
def test_default_expiry(self):
# A normal session has a max age equal to settings
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
# So does a custom session with an idle expiration time of 0 (but it'll
# expire at browser close)
self.session.set_expiry(0)
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
def test_custom_expiry_seconds(self):
modification = timezone.now()
self.session.set_expiry(10)
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_timedelta(self):
modification = timezone.now()
# Mock timezone.now, because set_expiry calls it on this code path.
original_now = timezone.now
try:
timezone.now = lambda: modification
self.session.set_expiry(timedelta(seconds=10))
finally:
timezone.now = original_now
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_datetime(self):
modification = timezone.now()
self.session.set_expiry(modification + timedelta(seconds=10))
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_reset(self):
self.session.set_expiry(None)
self.session.set_expiry(10)
self.session.set_expiry(None)
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
def test_get_expire_at_browser_close(self):
# Tests get_expire_at_browser_close with different settings and different
# set_expiry calls
with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=False):
self.session.set_expiry(10)
self.assertFalse(self.session.get_expire_at_browser_close())
self.session.set_expiry(0)
self.assertTrue(self.session.get_expire_at_browser_close())
self.session.set_expiry(None)
self.assertFalse(self.session.get_expire_at_browser_close())
with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=True):
self.session.set_expiry(10)
self.assertFalse(self.session.get_expire_at_browser_close())
self.session.set_expiry(0)
self.assertTrue(self.session.get_expire_at_browser_close())
self.session.set_expiry(None)
self.assertTrue(self.session.get_expire_at_browser_close())
def test_decode(self):
# Ensure we can decode what we encode
data = {'a test key': 'a test value'}
encoded = self.session.encode(data)
self.assertEqual(self.session.decode(encoded), data)
def test_decode_failure_logged_to_security(self):
bad_encode = base64.b64encode(b'flaskdj:alkdjf')
with patch_logger('django.security.SuspiciousSession', 'warning') as calls:
self.assertEqual({}, self.session.decode(bad_encode))
# check that the failed decode is logged
self.assertEqual(len(calls), 1)
self.assertIn('corrupted', calls[0])
def test_actual_expiry(self):
# this doesn't work with JSONSerializer (serializing timedelta)
with override_settings(SESSION_SERIALIZER='django.contrib.sessions.serializers.PickleSerializer'):
self.session = self.backend() # reinitialize after overriding settings
# Regression test for #19200
old_session_key = None
new_session_key = None
try:
self.session['foo'] = 'bar'
self.session.set_expiry(-timedelta(seconds=10))
self.session.save()
old_session_key = self.session.session_key
# With an expiry date in the past, the session expires instantly.
new_session = self.backend(self.session.session_key)
new_session_key = new_session.session_key
self.assertNotIn('foo', new_session)
finally:
self.session.delete(old_session_key)
self.session.delete(new_session_key)
def test_session_load_does_not_create_record(self):
"""
Loading an unknown session key does not create a session record.
Creating session records on load is a DOS vulnerability.
"""
if self.backend is CookieSession:
raise unittest.SkipTest("Cookie backend doesn't have an external store to create records in.")
session = self.backend('someunknownkey')
session.load()
self.assertFalse(session.exists(session.session_key))
# provided unknown key was cycled, not reused
self.assertNotEqual(session.session_key, 'someunknownkey')
class DatabaseSessionTests(SessionTestsMixin, TestCase):
backend = DatabaseSession
session_engine = 'django.contrib.sessions.backends.db'
@property
def model(self):
return self.backend.get_model_class()
def test_session_str(self):
"Session repr should be the session key."
self.session['x'] = 1
self.session.save()
session_key = self.session.session_key
s = self.model.objects.get(session_key=session_key)
self.assertEqual(force_text(s), session_key)
def test_session_get_decoded(self):
"""
Test we can use Session.get_decoded to retrieve data stored
in normal way
"""
self.session['x'] = 1
self.session.save()
s = self.model.objects.get(session_key=self.session.session_key)
self.assertEqual(s.get_decoded(), {'x': 1})
def test_sessionmanager_save(self):
"""
Test SessionManager.save method
"""
# Create a session
self.session['y'] = 1
self.session.save()
s = self.model.objects.get(session_key=self.session.session_key)
# Change it
self.model.objects.save(s.session_key, {'y': 2}, s.expire_date)
# Clear cache, so that it will be retrieved from DB
del self.session._session_cache
self.assertEqual(self.session['y'], 2)
def test_clearsessions_command(self):
"""
Test clearsessions command for clearing expired sessions.
"""
self.assertEqual(0, self.model.objects.count())
# One object in the future
self.session['foo'] = 'bar'
self.session.set_expiry(3600)
self.session.save()
# One object in the past
other_session = self.backend()
other_session['foo'] = 'bar'
other_session.set_expiry(-3600)
other_session.save()
# Two sessions are in the database before clearsessions...
self.assertEqual(2, self.model.objects.count())
with override_settings(SESSION_ENGINE=self.session_engine):
management.call_command('clearsessions')
# ... and one is deleted.
self.assertEqual(1, self.model.objects.count())
@override_settings(USE_TZ=True)
class DatabaseSessionWithTimeZoneTests(DatabaseSessionTests):
pass
class CustomDatabaseSessionTests(DatabaseSessionTests):
backend = CustomDatabaseSession
session_engine = 'sessions_tests.custom_db_backend'
def test_extra_session_field(self):
# Set the account ID to be picked up by a custom session storage
# and saved to a custom session model database column.
self.session['_auth_user_id'] = 42
self.session.save()
# Make sure that the customized create_model_instance() was called.
s = self.model.objects.get(session_key=self.session.session_key)
self.assertEqual(s.account_id, 42)
# Make the session "anonymous".
self.session.pop('_auth_user_id')
self.session.save()
# Make sure that save() on an existing session did the right job.
s = self.model.objects.get(session_key=self.session.session_key)
self.assertEqual(s.account_id, None)
class CacheDBSessionTests(SessionTestsMixin, TestCase):
backend = CacheDBSession
@unittest.skipIf('DummyCache' in
settings.CACHES[settings.SESSION_CACHE_ALIAS]['BACKEND'],
"Session saving tests require a real cache backend")
def test_exists_searches_cache_first(self):
self.session.save()
with self.assertNumQueries(0):
self.assertTrue(self.session.exists(self.session.session_key))
# Some backends might issue a warning
@ignore_warnings(module="django.core.cache.backends.base")
def test_load_overlong_key(self):
self.session._session_key = (string.ascii_letters + string.digits) * 20
self.assertEqual(self.session.load(), {})
@override_settings(SESSION_CACHE_ALIAS='sessions')
def test_non_default_cache(self):
# 21000 - CacheDB backend should respect SESSION_CACHE_ALIAS.
self.assertRaises(InvalidCacheBackendError, self.backend)
@override_settings(USE_TZ=True)
class CacheDBSessionWithTimeZoneTests(CacheDBSessionTests):
pass
# Don't need DB flushing for these tests, so can use unittest.TestCase as base class
class FileSessionTests(SessionTestsMixin, unittest.TestCase):
backend = FileSession
def setUp(self):
# Do file session tests in an isolated directory, and kill it after we're done.
self.original_session_file_path = settings.SESSION_FILE_PATH
self.temp_session_store = settings.SESSION_FILE_PATH = tempfile.mkdtemp()
# Reset the file session backend's internal caches
if hasattr(self.backend, '_storage_path'):
del self.backend._storage_path
super(FileSessionTests, self).setUp()
def tearDown(self):
super(FileSessionTests, self).tearDown()
settings.SESSION_FILE_PATH = self.original_session_file_path
shutil.rmtree(self.temp_session_store)
@override_settings(
SESSION_FILE_PATH="/if/this/directory/exists/you/have/a/weird/computer")
def test_configuration_check(self):
del self.backend._storage_path
# Make sure the file backend checks for a good storage dir
self.assertRaises(ImproperlyConfigured, self.backend)
def test_invalid_key_backslash(self):
# Ensure we don't allow directory-traversal.
# This is tested directly on _key_to_file, as load() will swallow
# a SuspiciousOperation in the same way as an IOError - by creating
# a new session, making it unclear whether the slashes were detected.
self.assertRaises(InvalidSessionKey,
self.backend()._key_to_file, "a\\b\\c")
def test_invalid_key_forwardslash(self):
# Ensure we don't allow directory-traversal
self.assertRaises(InvalidSessionKey,
self.backend()._key_to_file, "a/b/c")
@override_settings(SESSION_ENGINE="django.contrib.sessions.backends.file")
def test_clearsessions_command(self):
"""
Test clearsessions command for clearing expired sessions.
"""
storage_path = self.backend._get_storage_path()
file_prefix = settings.SESSION_COOKIE_NAME
def count_sessions():
return len([session_file for session_file in os.listdir(storage_path)
if session_file.startswith(file_prefix)])
self.assertEqual(0, count_sessions())
# One object in the future
self.session['foo'] = 'bar'
self.session.set_expiry(3600)
self.session.save()
# One object in the past
other_session = self.backend()
other_session['foo'] = 'bar'
other_session.set_expiry(-3600)
other_session.save()
# Two sessions are in the filesystem before clearsessions...
self.assertEqual(2, count_sessions())
management.call_command('clearsessions')
# ... and one is deleted.
self.assertEqual(1, count_sessions())
class CacheSessionTests(SessionTestsMixin, unittest.TestCase):
backend = CacheSession
# Some backends might issue a warning
@ignore_warnings(module="django.core.cache.backends.base")
def test_load_overlong_key(self):
self.session._session_key = (string.ascii_letters + string.digits) * 20
self.assertEqual(self.session.load(), {})
def test_default_cache(self):
self.session.save()
self.assertNotEqual(caches['default'].get(self.session.cache_key), None)
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
},
'sessions': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'session',
},
}, SESSION_CACHE_ALIAS='sessions')
def test_non_default_cache(self):
# Re-initialize the session backend to make use of overridden settings.
self.session = self.backend()
self.session.save()
self.assertEqual(caches['default'].get(self.session.cache_key), None)
self.assertNotEqual(caches['sessions'].get(self.session.cache_key), None)
class SessionMiddlewareTests(TestCase):
@override_settings(SESSION_COOKIE_SECURE=True)
def test_secure_session_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
# Simulate a request the modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
self.assertTrue(
response.cookies[settings.SESSION_COOKIE_NAME]['secure'])
@override_settings(SESSION_COOKIE_HTTPONLY=True)
def test_httponly_session_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
# Simulate a request the modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
self.assertTrue(
response.cookies[settings.SESSION_COOKIE_NAME]['httponly'])
self.assertIn(http_cookies.Morsel._reserved['httponly'],
str(response.cookies[settings.SESSION_COOKIE_NAME]))
@override_settings(SESSION_COOKIE_HTTPONLY=False)
def test_no_httponly_session_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
# Simulate a request the modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
self.assertFalse(response.cookies[settings.SESSION_COOKIE_NAME]['httponly'])
self.assertNotIn(http_cookies.Morsel._reserved['httponly'],
str(response.cookies[settings.SESSION_COOKIE_NAME]))
def test_session_save_on_500(self):
request = RequestFactory().get('/')
response = HttpResponse('Horrible error')
response.status_code = 500
middleware = SessionMiddleware()
# Simulate a request the modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
# Check that the value wasn't saved above.
self.assertNotIn('hello', request.session.load())
def test_session_delete_on_end(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
# Before deleting, there has to be an existing cookie
request.COOKIES[settings.SESSION_COOKIE_NAME] = 'abc'
# Simulate a request that ends the session
middleware.process_request(request)
request.session.flush()
# Handle the response through the middleware
response = middleware.process_response(request, response)
# Check that the cookie was deleted, not recreated.
# A deleted cookie header looks like:
# Set-Cookie: sessionid=; expires=Thu, 01-Jan-1970 00:00:00 GMT; Max-Age=0; Path=/
self.assertEqual(
'Set-Cookie: {}={}; expires=Thu, 01-Jan-1970 00:00:00 GMT; '
'Max-Age=0; Path=/'.format(
settings.SESSION_COOKIE_NAME,
'""' if sys.version_info >= (3, 5) else '',
),
str(response.cookies[settings.SESSION_COOKIE_NAME])
)
@override_settings(SESSION_COOKIE_DOMAIN='.example.local')
def test_session_delete_on_end_with_custom_domain(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
# Before deleting, there has to be an existing cookie
request.COOKIES[settings.SESSION_COOKIE_NAME] = 'abc'
# Simulate a request that ends the session
middleware.process_request(request)
request.session.flush()
# Handle the response through the middleware
response = middleware.process_response(request, response)
# Check that the cookie was deleted, not recreated.
# A deleted cookie header with a custom domain looks like:
# Set-Cookie: sessionid=; Domain=.example.local;
# expires=Thu, 01-Jan-1970 00:00:00 GMT; Max-Age=0; Path=/
self.assertEqual(
'Set-Cookie: {}={}; Domain=.example.local; expires=Thu, '
'01-Jan-1970 00:00:00 GMT; Max-Age=0; Path=/'.format(
settings.SESSION_COOKIE_NAME,
'""' if sys.version_info >= (3, 5) else '',
),
str(response.cookies[settings.SESSION_COOKIE_NAME])
)
def test_flush_empty_without_session_cookie_doesnt_set_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
# Simulate a request that ends the session
middleware.process_request(request)
request.session.flush()
# Handle the response through the middleware
response = middleware.process_response(request, response)
# A cookie should not be set.
self.assertEqual(response.cookies, {})
# The session is accessed so "Vary: Cookie" should be set.
self.assertEqual(response['Vary'], 'Cookie')
def test_empty_session_saved(self):
""""
If a session is emptied of data but still has a key, it should still
be updated.
"""
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
# Set a session key and some data.
middleware.process_request(request)
request.session['foo'] = 'bar'
# Handle the response through the middleware.
response = middleware.process_response(request, response)
self.assertEqual(tuple(request.session.items()), (('foo', 'bar'),))
# A cookie should be set, along with Vary: Cookie.
self.assertIn(
'Set-Cookie: sessionid=%s' % request.session.session_key,
str(response.cookies)
)
self.assertEqual(response['Vary'], 'Cookie')
# Empty the session data.
del request.session['foo']
# Handle the response through the middleware.
response = HttpResponse('Session test')
response = middleware.process_response(request, response)
self.assertEqual(dict(request.session.values()), {})
session = Session.objects.get(session_key=request.session.session_key)
self.assertEqual(session.get_decoded(), {})
# While the session is empty, it hasn't been flushed so a cookie should
# still be set, along with Vary: Cookie.
self.assertGreater(len(request.session.session_key), 8)
self.assertIn(
'Set-Cookie: sessionid=%s' % request.session.session_key,
str(response.cookies)
)
self.assertEqual(response['Vary'], 'Cookie')
# Don't need DB flushing for these tests, so can use unittest.TestCase as base class
class CookieSessionTests(SessionTestsMixin, unittest.TestCase):
backend = CookieSession
def test_save(self):
"""
This test tested exists() in the other session backends, but that
doesn't make sense for us.
"""
pass
def test_cycle(self):
"""
This test tested cycle_key() which would create a new session
key for the same session data. But we can't invalidate previously
signed cookies (other than letting them expire naturally) so
testing for this behavior is meaningless.
"""
pass
@unittest.expectedFailure
def test_actual_expiry(self):
# The cookie backend doesn't handle non-default expiry dates, see #19201
super(CookieSessionTests, self).test_actual_expiry()
def test_unpickling_exception(self):
# signed_cookies backend should handle unpickle exceptions gracefully
# by creating a new session
self.assertEqual(self.session.serializer, JSONSerializer)
self.session.save()
self.session.serializer = PickleSerializer
self.session.load()
|
py | 1a507249b464d2ec9598b07830d9ba846c1aaa2a | from django.contrib import admin
from rest_framework.authtoken.models import Token
class TokenAdmin(admin.ModelAdmin):
list_display = ('key', 'user', 'created')
fields = ('user',)
ordering = ('-created',)
admin.site.register(Token, TokenAdmin)
|
py | 1a5072fa3eeab0be2d8b440af4161b250d47d65b | import fdp
def test_fdp():
nstx = fdp.Nstxu()
assert fdp.__version__ is not None |
py | 1a5074ea609d4afb4b9f3590d051ec7b694740d8 | def math():
i_put = int(input())
if 5 < i_put < 2000:
for i in range(1, i_put+1):
if i % 2 == 0:
print(str(i) + '^2 =', i*i)
if __name__ == '__main__':
math()
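# Sample run (input 8) prints:
#   2^2 = 4
#   4^2 = 16
#   6^2 = 36
#   8^2 = 64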
|
py | 1a5074ec86b9dc5481e17eb5e5ba0fdc5fe5792a | #!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import shutil
import sys
def DoMain(argv):
parser = argparse.ArgumentParser(description='Generate forwarding headers.')
parser.add_argument('-i', '--list-inputs', action='store_true',
help='List input files and exit.')
parser.add_argument('-o', '--list-outputs', action='store_true',
help='List output files and exit.')
parser.add_argument('-d', '--dest-dir', type=str,
help=('Output directory for forwarding headers.'))
parser.add_argument('filenames', metavar='filename', type=str, nargs='+',
help='Input filenames.')
args = parser.parse_args(argv)
if args.list_inputs:
return list_inputs(args.filenames)
if not args.dest_dir:
print '--dest-dir is required for this command.'
sys.exit(1)
if args.list_outputs:
return ' '.join(
os.path.join(args.dest_dir, os.path.basename(filename))
for filename in args.filenames)
if not os.path.isdir(args.dest_dir):
os.makedirs(args.dest_dir)
for filename in args.filenames:
target_filename = os.path.join(args.dest_dir, os.path.basename(filename))
if os.path.isfile(target_filename):
os.unlink(target_filename)
try:
os.link(filename, target_filename)
except OSError as e:
# Fallbacks to copy if hardlinking fails.
shutil.copy(filename, target_filename)
if __name__ == '__main__':
results = DoMain(sys.argv[1:])
if results:
print results
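# Example invocation (script and file names are illustrative):
#   python forward_headers.py --dest-dir out/gen/include foo.h bar.h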
|
py | 1a50757045c50eee81e5883e79c8c7e8308e09eb | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
'''
------------------------------------------------------------
Main entry for ImgReSizer.
.. module:: `Main`
:platform: Unix
:synopsis: Takes configuration json with :py:class: imgresizer.CommandLine and run image processing
.. moduleauthor:: Tumurtogtokh Davaakhuu <[email protected]>
------------------------------------------------------------
'''
# IMPORT STANDARD
import sys
import os
# IMPORT Local
from imgresizer import Img
from imgresizer import ImageSizerController
from imgresizer import CommandLine
# =============================================================================
# MAIN
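# The configuration JSON loaded below is expected to carry at least the keys read
# in main(); the values shown here are illustrative:
#   {"targets": [32, 64, 128], "num_threads": 4, "data": "data",
#    "input_dir": "incoming", "output_dir": "outgoing"}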
def main():
cli = CommandLine()
config = cli.load_configuration()
if cli.exit:
sys.exit(0)
IMG_URLS = cli.process_img_url_file()
TARGET = config['targets']
MAX_THREADS = config['num_threads']
DATA = config['data']
INCOMING = config['input_dir']
OUTGOING = config['output_dir']
img_sizer = ImageSizerController(Img(DATA, INCOMING, OUTGOING, MAX_THREADS),
IMG_URLS, TARGET)
# img_sizer.perform_resizing()
img_sizer.make_imgs()
if __name__ == '__main__':
main()
|
py | 1a507776502a8b29a60bb368349e6138e7e92d85 | #!/usr/bin/env python
"""
ROS Code Coverage Class (Python).
This module demonstrates code coverage and code quality for ROS Python applications.
"""
class RosCCClass(object):
"""Example of a Python Class for code coverage purposes."""
def __init__(self, identifier, name='default'):
"""
ROS Code Coverage __init__ method.
Args:
identifier (string): identification parameter
name (string): name parameter
"""
self.identifier = identifier
self.name = name
def set_name(self, name):
"""
Method for changing the name parameter
Args:
name (string): name parameter
"""
self.name = name
def get_name(self):
"""
Method for getting the name parameter
Returns:
name (string): name parameter
"""
return self.name
|
py | 1a5078f614596e83a998507f278a0b9bd0a27b7f | import numpy as np
def projective(coords):
""" Convert 2D cartesian coordinates to homogeneus/projective. """
num = np.shape(coords)[0]
w = np.array([[1], ]*num)
return np.append(coords, w, axis=1)
def cartesian(coords):
""" Convert 2D homogeneus/projective coordinates to cartesian. """
return coords[:, :2]
def translate(x, y):
""" Return translation matrix. """
return np.array([
[1, 0, x],
[0, 1, y],
[0, 0, 1],
])
def rotate(a):
""" Return rotation matrix. """
return np.array([
[np.cos(a), -np.sin(a), 0],
[np.sin(a), np.cos(a), 0],
[0, 0, 1]
])
def transform_list(coords, matrix):
""" Apply transformation to a list of coordinates. """
return matrix.dot(coords.T).T
def transform_apply(coords, transforms):
""" Apply list of transformations to a list of coordinates. """
out = projective(coords)
for transform in transforms:
out = transform_list(out, transform)
return cartesian(out)
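# A minimal self-check of the pipeline above: rotate two points by 90 degrees
# about the origin, then translate them by (1, 0).
if __name__ == '__main__':
    pts = np.array([[1.0, 0.0], [0.0, 1.0]])
    moved = transform_apply(pts, [rotate(np.pi / 2), translate(1, 0)])
    print(moved)  # approximately [[1. 1.] [0. 0.]]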
|
py | 1a507b207a9db803a1966627c3a632700a5910fe | from rest_framework.urlpatterns import format_suffix_patterns
from django.urls import re_path
from api.bookmarks import views as bookmark_views
from api.experiment_groups import views
from constants.urls import GROUP_ID_PATTERN, ID_PATTERN, NAME_PATTERN, USERNAME_PATTERN
groups_urlpatterns = [
re_path(r'^{}/{}/groups/{}/?$'.format(USERNAME_PATTERN, NAME_PATTERN, ID_PATTERN),
views.ExperimentGroupDetailView.as_view()),
re_path(r'^{}/{}/groups/{}/statuses/?$'.format(
USERNAME_PATTERN, NAME_PATTERN, GROUP_ID_PATTERN),
views.ExperimentGroupStatusListView.as_view()),
re_path(r'^{}/{}/groups/{}/stop/?$'.format(USERNAME_PATTERN, NAME_PATTERN, ID_PATTERN),
views.ExperimentGroupStopView.as_view()),
re_path(
r'^{}/{}/groups/{}/bookmark/?$'.format(USERNAME_PATTERN, NAME_PATTERN, ID_PATTERN),
bookmark_views.ExperimentGroupBookmarkCreateView.as_view()),
re_path(
r'^{}/{}/groups/{}/unbookmark/?$'.format(USERNAME_PATTERN, NAME_PATTERN, ID_PATTERN),
bookmark_views.ExperimentGroupBookmarkDeleteView.as_view()),
]
# Order is important, because the patterns could swallow other urls
urlpatterns = format_suffix_patterns(groups_urlpatterns)
|
py | 1a507b6e7bb23accbb6e5a653b600c7c3eacea3c | # coding:utf-8
import collections
import csv
import os
from util.log import logger
logger = logger()
class Template(object):
def __init__(self,
base_dic, cmp_dic,
base_cost, cmp_cost,
base_call_times, cmp_call_times,
base_method_thread, cmp_method_thread,
base_theads_pid, cmp_theads_pid):
self.base = base_dic # base_sorted_dic, cmp_sorted_dic, base_cost, cmp_cost,base_call_times,cmp_call_times
self.cmp = cmp_dic
self.base_cost = base_cost
self.cmp_cost = cmp_cost
self.base_call_times = base_call_times
self.cmp_call_times = cmp_call_times
self.order_base_dic = collections.OrderedDict()
self.order_cmp_dic = collections.OrderedDict()
self.order_base_keys, self.order_base_values = self.initObjDatas(self.base, self.order_base_dic)
self.order_cmp_keys, self.order_cmp_values = self.initObjDatas(self.cmp, self.order_cmp_dic)
self.base_method_thread = base_method_thread
self.cmp_method_thread = cmp_method_thread
self.base_theads_pid = base_theads_pid
self.cmp_theads_pid = cmp_theads_pid
def initObjDatas(self, obj, init_obj):
_keys = []
_values = []
for each in obj:
init_obj[each[0]] = each[1]
for _k, _v in init_obj.items():
_keys.append(_k)
_values.append(_v)
return _keys, _values
def generateTable(self, path, rows, data):
if os.path.isfile(path):
os.remove(path)
        csvfile = open(path, "wb")
writer = csv.writer(csvfile)
# writer.writerow(rows)
writer.writerows(data)
csvfile.close()
def searchDictList(self, orderDict):
keys = []
values = []
for k, v in orderDict.items():
keys.append(k)
values.append(v)
return keys, values
def generateTableData(self, path, rows):
        ''' Columns: ['method', 'owning thread', 'thread PID', 'base-branch rank', 'compare-branch rank',
        'base-branch method cost', 'compare-branch method cost', 'cost difference (compare - base)',
        'cost increase ratio (%)', 'base-branch call count', 'compare-branch call count', 'rank change'] '''
logger.debug("self.cmp_cost:\n" + str(self.cmp_cost))
logger.debug("self.base_cost:\n" + str(self.base_cost))
if self.base_cost != 0:
ratio = format(float(self.cmp_cost - self.base_cost) / float(self.base_cost), '.2%')
else:
ratio = self.cmp_cost
data = []
add_rows = rows
        add_rows[0] = add_rows[0] + "- ratio: " + str(ratio)
add_flag = 0
for cmp_obj in self.order_cmp_keys:
            ''' Methods newly added in cmp_obj '''
if cmp_obj not in self.order_base_keys:
add_flag = 1
method = cmp_obj
base_index = "-"
cmp_index = self.order_cmp_keys.index(cmp_obj)
base_time = 0
cmp_time = self.order_cmp_values[cmp_index]
cmp_call_times = self.cmp_call_times[cmp_obj] if self.cmp_call_times.has_key(cmp_obj) else "-"
if self.cmp_method_thread.has_key(cmp_obj):
cmp_thread = self.cmp_method_thread[cmp_obj]
self.cmp_method_thread.pop(cmp_obj)
else:
cmp_thread = "-"
base_call_times = 0
diff = cmp_time
rate = format(float(1), '.2%')
rank_change = cmp_index
content = (
method, str(cmp_thread), str(base_index), str(cmp_index), str(base_time), str(cmp_time), str(diff),
str(rate), str(base_call_times), str(cmp_call_times), str(rank_change))
data.append(content)
if add_flag == 1:
data.insert(0, add_rows)
        rows[0] = rows[0] + "- ratio: " + str(ratio)
data.append(rows)
for base_obj in self.order_base_keys:
method = base_obj
            base_index = self.order_base_keys.index(base_obj)  # rank of base_obj in the base branch
if base_obj in self.order_cmp_keys:
                cmp_index = self.order_cmp_keys.index(base_obj)  # base_obj method still present in cmp_obj
base_call_times = self.base_call_times[base_obj] if self.base_call_times.has_key(base_obj) else "-"
cmp_call_times = self.cmp_call_times[base_obj] if self.cmp_call_times.has_key(base_obj) else "-"
else:
cmp_index = "-" # 当base_obj方法在cmp_obj已经删减
base_call_times = self.base_call_times[base_obj] if self.base_call_times.has_key(base_obj) else "-"
cmp_call_times = 0
if self.base_method_thread.has_key(base_obj):
base_thread = self.base_method_thread[base_obj]
self.base_method_thread.pop(base_obj)
else:
base_thread = "-"
base_time = self.order_base_values[base_index]
if cmp_index == "-":
cmp_time = 0
rank_change = base_index
else:
cmp_time = self.order_cmp_values[cmp_index]
rank_change = base_index - cmp_index
diff = cmp_time - base_time
try:
                rate = format(float(diff) / float(base_time), '.2%')  # -100% means the base_obj method was removed from cmp_obj
except Exception as e:
rate = "error"
content = (
method, str(base_thread), str(base_index), str(cmp_index), str(base_time), str(cmp_time), str(diff),
str(rate), str(base_call_times), str(cmp_call_times), str(rank_change))
data.append(content)
self.generateTable(path, rows, data)
logger.debug("self.base_cost-self.cmp_cost:\n" + str(self.base_cost - self.cmp_cost))
logger.debug("self.base_method_thread:\n" + str(self.base_method_thread))
logger.debug("self.cmp_method_thread:\n" + str(self.cmp_method_thread))
|
py | 1a507c189770c68c66f6bb4705998f4f2911ca3d | # -*- coding: utf-8 -*-
# Copyright (c) 2021, TeamPRO and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class AMCSubscription(Document):
pass
|
py | 1a507c70bd3e12854b972c0c26d72f6b67d5a0f2 | import re
import os
import nltk
import zlib
import codecs
import shutil
import logging
from unidecode import unidecode
from indra.literature.pmc_client import extract_text
from indra.resources.greek_alphabet import greek_alphabet
logger = logging.getLogger(__name__)
class IsiPreprocessor(object):
"""Preprocess a set of documents, one by one, and add the preprocessed
text to a temporary directory in a format suitable for the ISI reader.
The ISI reader requires plain text with one sentence per line.
Attributes
----------
preprocessed_dir : str
The directory holding the literature text preprocessed and sentence
tokenized in a format suitable for the ISI reader
next_file_id : int
The next file with preprocessed text will be named next_file_id.txt
pmids : dict
A dictionary mapping file ids to the pmid of the text corresponding
to that file, can be None if unknown
extra_annotations : dict
A dictionary mapping file ids to a (possibly empty) dictionary with
additional annotations to include for statements extracted from this
document
"""
def __init__(self, preprocessed_dir):
preprocessed_dir = os.path.abspath(preprocessed_dir)
self.preprocessed_dir = preprocessed_dir
self.next_file_id = 1
self.pmids = {}
self.extra_annotations = {}
# This directory should be empty
contents = os.listdir(preprocessed_dir)
if len(contents) != 0:
logger.warning('IsiPreprocessor should get an empty directory in' +
' which to store preprocessed files.')
def register_preprocessed_file(self, infile, pmid, extra_annotations):
"""Set up already preprocessed text file for reading with ISI reader.
This is essentially a mock function to "register" already preprocessed
files and get an IsiPreprocessor object that can be passed to
the IsiProcessor.
Parameters
----------
infile : str
Path to an already preprocessed text file (i.e. one ready to
be sent for reading to ISI reader).
pmid : str
The PMID corresponding to the file
extra_annotations : dict
Extra annotations to be added to each statement, possibly including
metadata about the source (annotations with the key "interaction"
will be overridden)
"""
infile_base = os.path.basename(infile)
outfile = os.path.join(self.preprocessed_dir, infile_base)
shutil.copyfile(infile, outfile)
infile_key = os.path.splitext(infile_base)[0]
self.pmids[infile_key] = pmid
self.extra_annotations[infile_key] = extra_annotations
def preprocess_plain_text_string(self, text, pmid, extra_annotations):
"""Preprocess plain text string for use by ISI reader.
Preprocessing is done by tokenizing into sentences and writing
each sentence on its own line in a plain text file. All other
preprocessing functions ultimately call this one.
Parameters
----------
text : str
The plain text of the article of abstract
pmid : str
The PMID from which it comes, or None if not specified
extra_annotations : dict
Extra annotations to be added to each statement, possibly including
metadata about the source (annotations with the key "interaction"
will be overridden)
"""
output_file = '%s.txt' % self.next_file_id
output_file = os.path.join(self.preprocessed_dir, output_file)
# Replace greek characters with corresponding strings
for greek_letter, spelled_letter in greek_alphabet.items():
text = text.replace(greek_letter, spelled_letter)
# Replace all other unicode characters with nearest ascii equivalents
text = unidecode(text)
# Tokenize sentence
sentences = nltk.sent_tokenize(text)
# Write sentences to text file
first_sentence = True
with codecs.open(output_file, 'w', encoding='utf-8') as f:
for sentence in sentences:
if not first_sentence:
f.write('\n')
f.write(sentence.rstrip())
first_sentence = False
# Store annotations
self.pmids[str(self.next_file_id)] = pmid
self.extra_annotations[str(self.next_file_id)] = extra_annotations
# Increment file id
self.next_file_id += 1
def preprocess_plain_text_file(self, filename, pmid, extra_annotations):
"""Preprocess a plain text file for use with ISI reder.
Preprocessing results in a new text file with one sentence
per line.
Parameters
----------
filename : str
The name of the plain text file
pmid : str
The PMID from which it comes, or None if not specified
extra_annotations : dict
Extra annotations to be added to each statement, possibly including
metadata about the source (annotations with the key "interaction"
will be overridden)
"""
with codecs.open(filename, 'r', encoding='utf-8') as f:
content = f.read()
self.preprocess_plain_text_string(content, pmid,
extra_annotations)
def preprocess_nxml_file(self, filename, pmid, extra_annotations):
"""Preprocess an NXML file for use with the ISI reader.
Preprocessing is done by extracting plain text from NXML and then
creating a text file with one sentence per line.
Parameters
----------
filename : str
Filename (more specifically the file path) of an nxml file to
process
pmid : str
The PMID from which it comes, or None if not specified
extra_annotations : dict
Extra annotations to be added to each statement, possibly including
metadata about the source (annotations with the key "interaction"
will be overridden)
"""
with open(filename, 'r') as fh:
txt_content = extract_text(fh.read())
# We need to remove some common LaTEX commands from the converted text
# or the reader will get confused
cmd1 = r'[^ \{\}]+\{[^\{\}]+\}\{[^\{\}]+\}'
cmd2 = r'[^ \{\}]+\{[^\{\}]+\}'
txt_content = re.sub(cmd1, '', txt_content)
txt_content = re.sub(cmd2, '', txt_content)
        # Preprocess text extracted from nxml
self.preprocess_plain_text_string(txt_content, pmid, extra_annotations)
def preprocess_abstract_list(self, abstract_list):
"""Preprocess abstracts in database pickle dump format for ISI reader.
For each abstract, creates a plain text file with one sentence per
line, and stores metadata to be included with each statement from
that abstract.
Parameters
----------
abstract_list : list[dict]
            Compressed abstracts with corresponding metadata in INDRA database
pickle dump format.
"""
for abstract_struct in abstract_list:
abs_format = abstract_struct['format']
content_type = abstract_struct['text_type']
content_zipped = abstract_struct['content']
tcid = abstract_struct['tcid']
trid = abstract_struct['trid']
assert(abs_format == 'text')
assert(content_type == 'abstract')
pmid = None # Don't worry about pmid for now
extra_annotations = {'tcid': tcid, 'trid': trid}
# Uncompress content
content = zlib.decompress(content_zipped,
zlib.MAX_WBITS+16).decode('utf-8')
self.preprocess_plain_text_string(content, pmid, extra_annotations)
def iter_outputs(self, output_dir):
"""Iterate over the outputs in a given directory using stored metadata.
For each of the output JSONs, retrieve the extra annotations for that
file, and link the file with its corresponding PMID.
Parameters
----------
output_dir : str
The path to the directory where the JSON outputs were dumped.
"""
for basename, pmid in self.pmids.items():
fname = os.path.join(output_dir, '%s.json' % basename)
extra_annotations = self.extra_annotations.get(fname, {})
yield fname, pmid, extra_annotations
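# A hedged usage sketch (directory, text and PMID below are hypothetical):
# pp = IsiPreprocessor('/tmp/isi_input')
# pp.preprocess_plain_text_string('MEK phosphorylates ERK. ERK is active.',
#                                 '12345', {})
# would write /tmp/isi_input/1.txt with one sentence per line.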
|
py | 1a507cb276f0f6520024a7c275f6935be0c40157 | import numpy as np
import math
from instrument.geometry.pml import weave
from instrument.geometry import shapes, operations
import os, sys
class Clampcell(object):
def __init__(self, total_height=False):
self.sample_height=28.57 #mm
if total_height is True:
self.sample_height=95.758
###### OUTER BODY #############
def outer_body(self):
Al_OutDiameter = 32.05 # mm
Al_OutRadius=Al_OutDiameter/2
Al_Height=self.sample_height #28.57 #mm (total height 95.758 mm)
        Al_InSmallestCone_Dia= 14.59 #mm (inner boundary is a tapered cylinder, bottom diameter)
Al_InSmallestCone_Rad=Al_InSmallestCone_Dia/2
Al_InconeAngle= 2
        Al_InHeight=Al_Height+10 #mm (tapered cylinder height) (this should be the same as Al_Height, but in constructive geometry the inner height has to be larger for correct subtraction)
        Al_InLargestCone_Dia= (2* np.tan(np.deg2rad(Al_InconeAngle/2))*Al_InHeight)+Al_InSmallestCone_Dia #(tapered cylinder top diameter)
Al_InLargestCone_Rad=Al_InLargestCone_Dia/2
Al_InSmallest_ConeHeight=Al_InSmallestCone_Dia/(2*np.tan(np.deg2rad(Al_InconeAngle/2)))
Al_InLargest_ConeHeight=Al_InSmallest_ConeHeight+Al_InHeight
Al_boxHeightToSubtract=Al_InSmallest_ConeHeight*2
Al_boxthisckness= Al_InSmallestCone_Dia+20
Al_HalfHeight=Al_InHeight/2
Al_moving_height=Al_InSmallest_ConeHeight+Al_HalfHeight
        ### Create the string for OUTER BODY ######
Al_OutRadius_str=str(Al_OutRadius)+r'*mm'
Al_Height_str=str(Al_Height)+r'*mm'
Al_InLargestCone_Rad_str=str(Al_InLargestCone_Rad)+r'*mm'
Al_InLargest_ConeHeight_str=str(Al_InLargest_ConeHeight)+r'*mm'
Al_InSmallest_ConeHeight_str=str(Al_InSmallest_ConeHeight)+r'*mm'
Al_boxHeightToSubtract_str=str(Al_boxHeightToSubtract)+r'*mm'
Al_boxthisckness_str=str(Al_boxthisckness)+r'*mm'
Al_moving_height_str=str(-Al_moving_height)+r'*mm'
#create the inner Al largest cone
Al_largest_cone=shapes.cone(radius=Al_InLargestCone_Rad_str, height=Al_InLargest_ConeHeight_str) # upside down
#rotation to make top wider
Al_largest_cone_widertip=operations.rotate(Al_largest_cone, angle="180*deg",vertical="0",transversal="1",beam="0")
#make a tapered cylinder
Al_tapered_cylinder= operations.Difference(Al_largest_cone_widertip,
shapes.block(thickness=Al_boxthisckness_str,height=Al_boxHeightToSubtract_str,width=Al_boxthisckness_str) )
#moving the center of the cylinder to the center of the coordinate
Al_centered_taperedCylinder=operations.translate(Al_tapered_cylinder, vertical=Al_moving_height_str)
#Creating the outer Al body
outer_Al = operations.subtract(
shapes.cylinder(radius=Al_OutRadius_str, height=Al_Height_str),
Al_centered_taperedCylinder,
)
return(outer_Al)
######## INNER SLEEVE ##########
def inner_sleeve(self):
CuBe_InDiameter = 4.74 # mm
CuBe_InRadius=CuBe_InDiameter/2
CuBe_InHeight=self.sample_height+10 #mm (total height 95.758 mm)
CuBe_Height=self.sample_height
        CuBe_OutSmallestCone_Dia=14.63 #(outer boundary is a tapered cylinder, bottom diameter)
CuBe_OutSmallestCone_Rad=CuBe_OutSmallestCone_Dia/2
        CuBe_OutconeAngle= 2 # the tapered angle
        CuBe_OutLargestCone_Dia= (2* np.tan(np.deg2rad(CuBe_OutconeAngle/2))*CuBe_Height)+CuBe_OutSmallestCone_Dia #(tapered cylinder top diameter)
CuBe_OutLargestCone_Rad=CuBe_OutLargestCone_Dia/2
CuBe_OutSmallest_ConeHeight=CuBe_OutSmallestCone_Dia/(2*np.tan(np.deg2rad(CuBe_OutconeAngle/2)))
CuBe_OutLargest_ConeHeight=CuBe_OutSmallest_ConeHeight+CuBe_Height
CuBe_boxHeightToSubtract=CuBe_OutSmallest_ConeHeight*2
CuBe_boxthisckness= CuBe_OutSmallestCone_Dia+20
CuBe_HalfHeight=CuBe_Height/2
CuBe_moving_height=CuBe_OutSmallest_ConeHeight+CuBe_HalfHeight
        ### Create the string for INNER SLEEVE ######
CuBe_InRadius_str=str(CuBe_InRadius)+r'*mm'
CuBe_InHeight_str=str(CuBe_InHeight)+r'*mm'
CuBe_Height_str=str(CuBe_Height)+r'*mm'
CuBe_OutLargestCone_Rad_str=str(CuBe_OutLargestCone_Rad)+r'*mm'
CuBe_OutLargest_ConeHeight_str=str(CuBe_OutLargest_ConeHeight)+r'*mm'
CuBe_boxHeightToSubtract_str=str(CuBe_boxHeightToSubtract)+r'*mm'
CuBe_boxthisckness_str=str(CuBe_boxthisckness)+r'*mm'
CuBe_moving_height_str=str(-CuBe_moving_height)+r'*mm'
#create the outer CuBe largest cone
CuBe_largest_cone=shapes.cone(radius=CuBe_OutLargestCone_Rad_str, height=CuBe_OutLargest_ConeHeight_str) # upside down
#rotation to make top wider
CuBe_largest_cone_widertip=operations.rotate(CuBe_largest_cone, angle="180*deg",vertical="0",transversal="1",beam="0")
#make a tapered cylinder
CuBe_tapered_cylinder= operations.Difference(CuBe_largest_cone_widertip,
shapes.block(thickness=CuBe_boxthisckness_str,height=CuBe_boxHeightToSubtract_str,width=CuBe_boxthisckness_str) )
#moving the center of the cylinder to the center of the coordinate
CuBe_centered_taperedCylinder=operations.translate(CuBe_tapered_cylinder, vertical=CuBe_moving_height_str)
#Creating the InnerSleeve
CuBe_innerSleeve = operations.subtract(
CuBe_centered_taperedCylinder,
shapes.cylinder(radius=CuBe_InRadius_str, height=CuBe_InHeight_str),
)
return(CuBe_innerSleeve)
    ####### SAMPLE ######### (the sample is a cylinder)
def sample(self):
sample_Height=27.3 #mm
sample_Diameter=4.16 #mm
sample_Radius=sample_Diameter/2
##covert to string###
sample_Height_str=str(sample_Height)+r'*mm'
sample_Radius_str=str(sample_Radius)+r'*mm'
##cylindrical sample##
sample= shapes.cylinder(radius=sample_Radius_str, height=sample_Height_str)
return(sample)
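# A minimal usage sketch: build the three constructive-geometry solids.
# cell = Clampcell()
# body, sleeve, sample = cell.outer_body(), cell.inner_sleeve(), cell.sample()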
|
py | 1a507ccb9116aba31011218629b52df56616e510 | # Generated by Django 3.0.2 on 2020-01-23 07:16
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [("auth", "0011_update_proxy_permissions")]
operations = [
migrations.CreateModel(
name="CustomUser",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("password", models.CharField(max_length=128, verbose_name="password")),
(
"last_login",
models.DateTimeField(
blank=True, null=True, verbose_name="last login"
),
),
(
"is_superuser",
models.BooleanField(
default=False,
help_text="Designates that this user has all permissions without explicitly assigning them.",
verbose_name="superuser status",
),
),
(
"username",
models.CharField(
error_messages={
"unique": "A user with that username already exists."
},
help_text="Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.",
max_length=150,
unique=True,
validators=[
django.contrib.auth.validators.UnicodeUsernameValidator()
],
verbose_name="username",
),
),
(
"first_name",
models.CharField(
blank=True, max_length=30, verbose_name="first name", null=True
),
),
(
"last_name",
models.CharField(
blank=True, max_length=150, verbose_name="last name", null=True
),
),
(
"email",
models.EmailField(
blank=True, max_length=254, verbose_name="email address"
),
),
(
"is_staff",
models.BooleanField(
default=False,
help_text="Designates whether the user can log into this admin site.",
verbose_name="staff status",
),
),
(
"is_active",
models.BooleanField(
default=True,
help_text="Designates whether this user should be treated as active. Unselect this instead of deleting accounts.",
verbose_name="active",
),
),
(
"date_joined",
models.DateTimeField(
default=django.utils.timezone.now, verbose_name="date joined"
),
),
(
"groups",
models.ManyToManyField(
blank=True,
help_text="The groups this user belongs to. A user will get all permissions granted to each of their groups.",
related_name="user_set",
related_query_name="user",
to="auth.Group",
verbose_name="groups",
),
),
(
"user_permissions",
models.ManyToManyField(
blank=True,
help_text="Specific permissions for this user.",
related_name="user_set",
related_query_name="user",
to="auth.Permission",
verbose_name="user permissions",
),
),
],
options={"unique_together": {("first_name", "last_name")}},
managers=[("objects", django.contrib.auth.models.UserManager())],
)
]
|
py | 1a507db96ef2e9c6d83ee0cc11d852aaa93bc3ee | # !/usr/bin/python
# -*- coding:utf-8 -*-
# @author: Shengjia Yan
# @date: 2017-11-17 Friday
# @email: [email protected]
import re
import json
import codecs
from collections import Counter
def words(text):
return re.findall(r'\w+', text.lower()) # \w+ matches one or more word characters (i.e., [a-zA-Z0-9_]).
WORDS = Counter(words(open('../data/big.txt').read()))
# probability of 'word'
def P(word, N=sum(WORDS.values())):
return WORDS[word] / float(N)
# most probable spelling correction for word
def correction(word):
return max(candidates(word), key=P)
# generate possible spelling corrections for word
def candidates(word):
return (known([word]) or known(edits1(word)) or known(edits2(word)) or [word])
# the subset of `words` that appear in the dictionary of WORDS
def known(words):
return set(w for w in words if w in WORDS)
# all edits that are one edit away from `word`
def edits1(word):
letters = 'abcdefghijklmnopqrstuvwxyz'
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [L + R[1:] for L, R in splits if R]
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R)>1]
replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
inserts = [L + c + R for L, R in splits for c in letters]
return set(deletes + transposes + replaces + inserts)
# all edits that are two edits away from `word`.
def edits2(word):
return (e2 for e1 in edits1(word) for e2 in edits1(e1))
def saveDict(path):
with codecs.open(path, mode='w', encoding='UTF8') as dict_file:
# big_dict = sorted(WORDS.items(), key=lambda x: x[1], reverse=True)
big_dict = json.dumps(WORDS, ensure_ascii=False)
dict_file.write(big_dict)
def main():
print len(WORDS)
print sum(WORDS.values())
print WORDS.most_common(10)
print max(WORDS, key=P)
print P('the')
print P('outrivaled')
print P('unmentioned')
if __name__ == '__main__':
main()
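# Hedged sanity checks (Norvig's classic examples; the exact corrections
# depend on the word counts loaded from big.txt):
# correction('speling')    # expected 'spelling'  (one insert)
# correction('korrectud')  # expected 'corrected' (two edits)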
|
py | 1a507e559349518aefc0875ed6c87aea2669522b | # Generated by Django 2.1.5 on 2019-06-21 15:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0115_auto_20190621_1556'),
]
operations = [
migrations.AlterField(
model_name='monetization',
name='bank',
field=models.CharField(choices=[('gt', 'GT Bank Plc'), ('zenith', 'Zenith Bank Plc'), ('first', 'First Bank Plc'), ('polaris', 'Polaris Bank Plc'), ('access', 'Access Bank Plc')], default='gt', max_length=20),
),
]
|
py | 1a507faefd6f842a15003eaa146dcb3a1389ad27 | import tensorflow as tf
import numpy as np
import os
import math
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
@tf.function
def one_hot(labels, class_size):
"""
Create one hot label matrix of size (N, C)
Inputs:
- labels: Labels Tensor of shape (N,) representing a ground-truth label
for each MNIST image
- class_size: Scalar representing of target classes our dataset
Returns:
- targets: One-hot label matrix of (N, C), where targets[i, j] = 1 when
the ground truth label for image i is j, and targets[i, :j] &
targets[i, j + 1:] are equal to 0
"""
return tf.one_hot(labels, class_size)
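# For instance, one_hot(tf.constant([1, 3]), 4) evaluates to
# [[0., 1., 0., 0.], [0., 0., 0., 1.]] (float32, the tf.one_hot default).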
def save_model_weights(model, args):
"""
Save trained VAE model weights to model_ckpts/
Inputs:
- model: Trained VAE model.
    - args: All arguments.
"""
model_flag = "cvae" if args.is_cvae else "vae"
output_dir = os.path.join("model_ckpts", model_flag)
output_path = os.path.join(output_dir, model_flag)
os.makedirs("model_ckpts", exist_ok=True)
os.makedirs(output_dir, exist_ok=True)
model.save_weights(output_path)
def show_vae_images(model, latent_size):
"""
Call this only if the model is VAE!
Generate 10 images from random vectors.
Show the generated images from your trained VAE.
Image will be saved to outputs/show_vae_images.pdf
Inputs:
- model: Your trained model.
- latent_size: Latent size of your model.
"""
# Generated images from vectors of random values.
z = tf.random.normal(shape=[10, latent_size])
samples = model.decoder(z).numpy()
# Visualize
fig = plt.figure(figsize=(10, 1))
gspec = gridspec.GridSpec(1, 10)
gspec.update(wspace=0.05, hspace=0.05)
for i, sample in enumerate(samples):
ax = plt.subplot(gspec[i])
plt.axis("off")
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect("equal")
plt.imshow(sample.reshape(28, 28), cmap="Greys_r")
# Save the generated images
os.makedirs("outputs", exist_ok=True)
output_path = os.path.join("outputs", "show_vae_images.pdf")
plt.savefig(output_path, bbox_inches="tight")
plt.close(fig)
def show_vae_interpolation(model, latent_size):
"""
Call this only if the model is VAE!
    Generate an interpolation between two random latent vectors.
Show the generated images from your trained VAE.
Image will be saved to outputs/show_vae_interpolation.pdf
Inputs:
- model: Your trained model.
- latent_size: Latent size of your model.
"""
def show_interpolation(images):
"""
A helper to visualize the interpolation.
"""
images = tf.reshape(images, [images.shape[0], -1]) # images reshape to (batch_size, D)
sqrtn = int(math.ceil(math.sqrt(images.shape[0])))
sqrtimg = int(math.ceil(math.sqrt(images.shape[1])))
fig = plt.figure(figsize=(sqrtn, sqrtn))
gs = gridspec.GridSpec(sqrtn, sqrtn)
gs.update(wspace=0.05, hspace=0.05)
for i, img in enumerate(images):
ax = plt.subplot(gs[i])
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect('equal')
plt.imshow(tf.reshape(img, [sqrtimg, sqrtimg]))
# Save the generated images
os.makedirs("outputs", exist_ok=True)
output_path = os.path.join("outputs", "show_vae_interpolation.pdf")
plt.savefig(output_path, bbox_inches="tight")
plt.close(fig)
S = 12
z0 = tf.random.normal(shape=[S, latent_size], dtype=tf.dtypes.float32) # [S, latent_size]
z1 = tf.random.normal(shape=[S, latent_size], dtype=tf.dtypes.float32)
w = tf.linspace(0, 1, S)
w = tf.cast(tf.reshape(w, (S, 1, 1)), dtype=tf.float32) # [S, 1, 1]
z = tf.transpose(w * z0 + (1 - w) * z1, perm=[1, 0, 2])
z = tf.reshape(z, (S * S, latent_size)) # [S, S, latent_size]
x = model.decoder(z) # [S*S, 1, 28, 28]
show_interpolation(x)
def show_cvae_images(model, latent_size):
"""
Call this only if the model is CVAE!
Conditionally generate 10 images for each digit.
Show the generated images from your trained CVAE.
Image will be saved to outputs/show_cvae_images.pdf
Inputs:
- model: Your trained model.
- latent_size: Latent size of your model.
"""
# Conditionally generated images from vectors of random values.
num_generation = 100
num_classes = 10
num_per_class = num_generation // num_classes
c = tf.eye(num_classes) # [one hot labels for 0-9]
z = []
labels = []
for label in range(num_classes):
curr_c = c[label]
curr_c = tf.broadcast_to(curr_c, [num_per_class, len(curr_c)])
curr_z = tf.random.normal(shape=[num_per_class, latent_size])
curr_z = tf.concat([curr_z, curr_c], axis=-1)
z.append(curr_z)
labels.append([label] * num_per_class)
z = np.concatenate(z)
labels = np.concatenate(labels)
samples = model.decoder(z).numpy()
# Visualize
rows = num_classes
cols = num_generation // rows
fig = plt.figure(figsize=(cols, rows))
gspec = gridspec.GridSpec(rows, cols)
gspec.update(wspace=0.05, hspace=0.05)
for i, sample in enumerate(samples):
ax = plt.subplot(gspec[i])
plt.axis("off")
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect("equal")
plt.imshow(sample.reshape(28, 28), cmap="Greys_r")
# Save the generated images
os.makedirs("outputs", exist_ok=True)
output_path = os.path.join("outputs", "show_cvae_images.pdf")
plt.savefig(output_path, bbox_inches="tight")
plt.close(fig)
def load_weights(model, is_cvae):
"""
Load the trained model's weights.
Inputs:
- model: Your untrained model instance.
Returns:
- model: Trained model.
"""
num_classes = 10
inputs = tf.zeros([1, 1, 28, 28]) # Random data sample
labels = tf.constant([[0]])
if is_cvae:
weights_path = os.path.join("model_ckpts", "cvae", "cvae")
one_hot_vec = one_hot(labels, num_classes)
_ = model(inputs, one_hot_vec)
model.load_weights(weights_path)
else:
weights_path = os.path.join("model_ckpts", "vae", "vae")
_ = model(inputs)
model.load_weights(weights_path)
return model
|
py | 1a5080cfb18962865047b43a5b352ccb65758ac5 | import numpy as np
import torch
from gym import spaces
from torch import nn as nn
from torch.nn import functional as F
def loss_function_factory(loss_function):
if loss_function == "l2":
return F.mse_loss
elif loss_function == "l1":
return F.l1_loss
elif loss_function == "smooth_l1":
return F.smooth_l1_loss
elif loss_function == "bce":
return F.binary_cross_entropy
else:
raise ValueError("Unknown loss function : {}".format(loss_function))
def optimizer_factory(params, optimizer_type="ADAM", **kwargs):
if optimizer_type == "ADAM":
return torch.optim.Adam(params=params, **kwargs)
elif optimizer_type == "RMS_PROP":
return torch.optim.RMSprop(params=params, **kwargs)
else:
raise ValueError("Unknown optimizer type: {}".format(optimizer_type))
def model_factory(type="MultiLayerPerceptron", **kwargs) -> nn.Module:
from rlberry.agents.torch.utils.attention_models import EgoAttentionNetwork
from rlberry.agents.torch.utils.models import (
MultiLayerPerceptron,
DuelingNetwork,
ConvolutionalNetwork,
Table,
)
if type == "MultiLayerPerceptron":
return MultiLayerPerceptron(**kwargs)
elif type == "DuelingNetwork":
return DuelingNetwork(**kwargs)
elif type == "ConvolutionalNetwork":
return ConvolutionalNetwork(**kwargs)
elif type == "EgoAttentionNetwork":
return EgoAttentionNetwork(**kwargs)
elif type == "Table":
return Table(**kwargs)
else:
raise ValueError("Unknown model type")
def model_factory_from_env(env, **kwargs):
kwargs = size_model_config(env, **kwargs)
return model_factory(**kwargs)
def size_model_config(env, **model_config):
"""
Update the configuration of a model depending on the environment
observation/action spaces.
Typically, the input/output sizes.
Parameters
----------
env : gym.Env
An environment.
model_config : dict
A model configuration.
"""
if isinstance(env.observation_space, spaces.Box):
obs_shape = env.observation_space.shape
elif isinstance(env.observation_space, spaces.Tuple):
obs_shape = env.observation_space.spaces[0].shape
elif isinstance(env.observation_space, spaces.Discrete):
return model_config
# Assume CHW observation space
if model_config["type"] == "ConvolutionalNetwork":
model_config["in_channels"] = int(obs_shape[0])
model_config["in_height"] = int(obs_shape[1])
model_config["in_width"] = int(obs_shape[2])
else:
model_config["in_size"] = int(np.prod(obs_shape))
if isinstance(env.action_space, spaces.Discrete):
model_config["out_size"] = env.action_space.n
elif isinstance(env.action_space, spaces.Tuple):
model_config["out_size"] = env.action_space.spaces[0].n
return model_config
def activation_factory(activation_type):
if activation_type == "RELU":
return F.relu
elif activation_type == "TANH":
return torch.tanh
elif activation_type == "ELU":
return nn.ELU()
else:
raise ValueError("Unknown activation_type: {}".format(activation_type))
def trainable_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
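# Quick factory sanity checks (plain framework behavior, nothing
# rlberry-specific):
# loss_function_factory("smooth_l1") is F.smooth_l1_loss  -> True
# activation_factory("TANH") is torch.tanh                -> True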
|
py | 1a5081279ba26079ac484e3a3617605dac12837d | # Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Algorithm specs.
The "spec" of each algorithm is a static set of `(stage, loc, type)`-tuples.
- `stage`: One of either an `input`, `output` or `hint`
- `location`: Each datum is associated with either the `node`, `edge` or `graph`
- `type`: Either a `scalar`, `categorical`, `mask`, `mask_one` or `pointer`
The dataflow for an algorithm is represented by `(stage, loc, type, data)`
"probes" that are valid under that algorithm's spec. It contains a single
snapshot for each `input` and `output` and a time-series of intermediate
algorithmic states (`hint`).
At minimum, each node contains a `pos` probe that serves as a unique index,
e.g. for representing sequential data where appropriate.
"""
import types
from typing import Dict, Tuple
class Stage:
INPUT = 'input'
OUTPUT = 'output'
HINT = 'hint'
class Location:
NODE = 'node'
EDGE = 'edge'
GRAPH = 'graph'
class Type:
SCALAR = 'scalar'
CATEGORICAL = 'categorical'
MASK = 'mask'
MASK_ONE = 'mask_one'
POINTER = 'pointer'
class OutputClass:
POSITIVE = 1
NEGATIVE = 0
MASKED = -1
Spec = Dict[str, Tuple[str, str, str]]
CLRS_21_ALGS = [
'a_star',
'bellman_ford',
'bfs',
'binary_search',
'bubble_sort',
'dag_shortest_paths',
'dfs',
'dijkstra',
'find_maximum_subarray_kadane',
'floyd_warshall',
'heapsort',
'insertion_sort',
'kmp_matcher',
'matrix_chain_order',
'minimum',
'mst_prim',
'naive_string_matcher',
'optimal_bst',
'quickselect',
'quicksort',
'task_scheduling',
'topological_sort',
]
SPECS = types.MappingProxyType({
'insertion_sort': {
'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
'key': (Stage.INPUT, Location.NODE, Type.SCALAR),
'pred': (Stage.OUTPUT, Location.NODE, Type.POINTER),
'pred_h': (Stage.HINT, Location.NODE, Type.POINTER),
'i': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'j': (Stage.HINT, Location.NODE, Type.MASK_ONE)
},
'bubble_sort': {
'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
'key': (Stage.INPUT, Location.NODE, Type.SCALAR),
'pred': (Stage.OUTPUT, Location.NODE, Type.POINTER),
'pred_h': (Stage.HINT, Location.NODE, Type.POINTER),
'i': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'j': (Stage.HINT, Location.NODE, Type.MASK_ONE)
},
'heapsort': {
'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
'key': (Stage.INPUT, Location.NODE, Type.SCALAR),
'pred': (Stage.OUTPUT, Location.NODE, Type.POINTER),
'pred_h': (Stage.HINT, Location.NODE, Type.POINTER),
'parent': (Stage.HINT, Location.NODE, Type.POINTER),
'i': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'j': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'largest': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'heap_size': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'phase': (Stage.HINT, Location.GRAPH, Type.CATEGORICAL)
},
'quicksort': {
'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
'key': (Stage.INPUT, Location.NODE, Type.SCALAR),
'pred': (Stage.OUTPUT, Location.NODE, Type.POINTER),
'pred_h': (Stage.HINT, Location.NODE, Type.POINTER),
'p': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'r': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'i': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'j': (Stage.HINT, Location.NODE, Type.MASK_ONE)
},
'quickselect': {
'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
'key': (Stage.INPUT, Location.NODE, Type.SCALAR),
'median': (Stage.OUTPUT, Location.NODE, Type.MASK_ONE),
'pred_h': (Stage.HINT, Location.NODE, Type.POINTER),
'p': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'r': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'i': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'j': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'i_rank': (Stage.HINT, Location.GRAPH, Type.SCALAR),
'target': (Stage.HINT, Location.GRAPH, Type.SCALAR)
},
'minimum': {
'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
'key': (Stage.INPUT, Location.NODE, Type.SCALAR),
'min': (Stage.OUTPUT, Location.NODE, Type.MASK_ONE),
'pred_h': (Stage.HINT, Location.NODE, Type.POINTER),
'min_h': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'i': (Stage.HINT, Location.NODE, Type.MASK_ONE)
},
'binary_search': {
'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
'key': (Stage.INPUT, Location.NODE, Type.SCALAR),
'target': (Stage.INPUT, Location.GRAPH, Type.SCALAR),
'return': (Stage.OUTPUT, Location.NODE, Type.MASK_ONE),
'pred_h': (Stage.HINT, Location.NODE, Type.POINTER),
'low': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'high': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'mid': (Stage.HINT, Location.NODE, Type.MASK_ONE)
},
'find_maximum_subarray': {
'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
'key': (Stage.INPUT, Location.NODE, Type.SCALAR),
'start': (Stage.OUTPUT, Location.NODE, Type.MASK_ONE),
'end': (Stage.OUTPUT, Location.NODE, Type.MASK_ONE),
'pred_h': (Stage.HINT, Location.NODE, Type.POINTER),
'low': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'high': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'mid': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'left_low': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'left_high': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'left_sum': (Stage.HINT, Location.GRAPH, Type.SCALAR),
'right_low': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'right_high': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'right_sum': (Stage.HINT, Location.GRAPH, Type.SCALAR),
'cross_low': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'cross_high': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'cross_sum': (Stage.HINT, Location.GRAPH, Type.SCALAR),
'ret_low': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'ret_high': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'ret_sum': (Stage.HINT, Location.GRAPH, Type.SCALAR),
'i': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'j': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'sum': (Stage.HINT, Location.GRAPH, Type.SCALAR),
'left_x_sum': (Stage.HINT, Location.GRAPH, Type.SCALAR),
'right_x_sum': (Stage.HINT, Location.GRAPH, Type.SCALAR),
'phase': (Stage.HINT, Location.GRAPH, Type.CATEGORICAL)
},
'find_maximum_subarray_kadane': {
'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
'key': (Stage.INPUT, Location.NODE, Type.SCALAR),
'start': (Stage.OUTPUT, Location.NODE, Type.MASK_ONE),
'end': (Stage.OUTPUT, Location.NODE, Type.MASK_ONE),
'pred_h': (Stage.HINT, Location.NODE, Type.POINTER),
'best_low': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'best_high': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'best_sum': (Stage.HINT, Location.GRAPH, Type.SCALAR),
'i': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'j': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'sum': (Stage.HINT, Location.GRAPH, Type.SCALAR)
},
'matrix_chain_order': {
'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
'p': (Stage.INPUT, Location.NODE, Type.SCALAR),
's': (Stage.OUTPUT, Location.EDGE, Type.POINTER),
'pred_h': (Stage.HINT, Location.NODE, Type.POINTER),
'm': (Stage.HINT, Location.EDGE, Type.SCALAR),
's_h': (Stage.HINT, Location.EDGE, Type.POINTER),
'msk': (Stage.HINT, Location.EDGE, Type.MASK)
},
'lcs_length': {
'string': (Stage.INPUT, Location.NODE, Type.MASK),
'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
'key': (Stage.INPUT, Location.NODE, Type.CATEGORICAL),
'b': (Stage.OUTPUT, Location.EDGE, Type.CATEGORICAL),
'pred_h': (Stage.HINT, Location.NODE, Type.POINTER),
'b_h': (Stage.HINT, Location.EDGE, Type.CATEGORICAL),
'c': (Stage.HINT, Location.EDGE, Type.SCALAR)
},
'optimal_bst': {
'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
'p': (Stage.INPUT, Location.NODE, Type.SCALAR),
'q': (Stage.INPUT, Location.NODE, Type.SCALAR),
'root': (Stage.OUTPUT, Location.EDGE, Type.POINTER),
'pred_h': (Stage.HINT, Location.NODE, Type.POINTER),
'root_h': (Stage.HINT, Location.EDGE, Type.POINTER),
'e': (Stage.HINT, Location.EDGE, Type.SCALAR),
'w': (Stage.HINT, Location.EDGE, Type.SCALAR),
'msk': (Stage.HINT, Location.EDGE, Type.MASK)
},
'activity_selector': {
'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
's': (Stage.INPUT, Location.NODE, Type.SCALAR),
'f': (Stage.INPUT, Location.NODE, Type.SCALAR),
'selected': (Stage.OUTPUT, Location.NODE, Type.MASK),
'pred_h': (Stage.HINT, Location.NODE, Type.POINTER),
'selected_h': (Stage.HINT, Location.NODE, Type.MASK),
'm': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'k': (Stage.HINT, Location.NODE, Type.MASK_ONE)
},
'task_scheduling': {
'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
'd': (Stage.INPUT, Location.NODE, Type.SCALAR),
'w': (Stage.INPUT, Location.NODE, Type.SCALAR),
'selected': (Stage.OUTPUT, Location.NODE, Type.MASK),
'pred_h': (Stage.HINT, Location.NODE, Type.POINTER),
'selected_h': (Stage.HINT, Location.NODE, Type.MASK),
'i': (Stage.HINT, Location.NODE, Type.MASK_ONE),
't': (Stage.HINT, Location.GRAPH, Type.SCALAR)
},
'dfs': {
'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
'A': (Stage.INPUT, Location.EDGE, Type.SCALAR),
'adj': (Stage.INPUT, Location.EDGE, Type.MASK),
'pi': (Stage.OUTPUT, Location.NODE, Type.POINTER),
'pi_h': (Stage.HINT, Location.NODE, Type.POINTER),
'color': (Stage.HINT, Location.NODE, Type.CATEGORICAL),
'd': (Stage.HINT, Location.NODE, Type.SCALAR),
'f': (Stage.HINT, Location.NODE, Type.SCALAR),
's_prev': (Stage.HINT, Location.NODE, Type.POINTER),
's': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'u': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'v': (Stage.HINT, Location.NODE, Type.MASK_ONE),
's_last': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'time': (Stage.HINT, Location.GRAPH, Type.SCALAR)
},
'topological_sort': {
'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
'A': (Stage.INPUT, Location.EDGE, Type.SCALAR),
'adj': (Stage.INPUT, Location.EDGE, Type.MASK),
'topo': (Stage.OUTPUT, Location.NODE, Type.POINTER),
'topo_head': (Stage.OUTPUT, Location.NODE, Type.MASK_ONE),
'topo_h': (Stage.HINT, Location.NODE, Type.POINTER),
'topo_head_h': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'color': (Stage.HINT, Location.NODE, Type.CATEGORICAL),
's_prev': (Stage.HINT, Location.NODE, Type.POINTER),
's': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'u': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'v': (Stage.HINT, Location.NODE, Type.MASK_ONE),
's_last': (Stage.HINT, Location.NODE, Type.MASK_ONE)
},
'strongly_connected_components': {
'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
'A': (Stage.INPUT, Location.EDGE, Type.SCALAR),
'adj': (Stage.INPUT, Location.EDGE, Type.MASK),
'scc_id': (Stage.OUTPUT, Location.NODE, Type.POINTER),
'scc_id_h': (Stage.HINT, Location.NODE, Type.POINTER),
'A_t': (Stage.HINT, Location.EDGE, Type.MASK),
'color': (Stage.HINT, Location.NODE, Type.CATEGORICAL),
'd': (Stage.HINT, Location.NODE, Type.SCALAR),
'f': (Stage.HINT, Location.NODE, Type.SCALAR),
's_prev': (Stage.HINT, Location.NODE, Type.POINTER),
's': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'u': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'v': (Stage.HINT, Location.NODE, Type.MASK_ONE),
's_last': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'time': (Stage.HINT, Location.GRAPH, Type.SCALAR),
'phase': (Stage.HINT, Location.GRAPH, Type.MASK)
},
'a_star': {
'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
's': (Stage.INPUT, Location.NODE, Type.MASK_ONE),
'A': (Stage.INPUT, Location.EDGE, Type.SCALAR),
'adj': (Stage.INPUT, Location.EDGE, Type.MASK),
'pi': (Stage.OUTPUT, Location.NODE, Type.POINTER),
'pi_h': (Stage.HINT, Location.NODE, Type.POINTER),
'd': (Stage.HINT, Location.NODE, Type.SCALAR),
'f': (Stage.HINT, Location.NODE, Type.SCALAR),
'mark': (Stage.HINT, Location.NODE, Type.MASK),
'in_queue': (Stage.HINT, Location.NODE, Type.MASK),
'u': (Stage.HINT, Location.NODE, Type.MASK_ONE)
},
'articulation_points': {
'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
'A': (Stage.INPUT, Location.EDGE, Type.SCALAR),
'adj': (Stage.INPUT, Location.EDGE, Type.MASK),
'is_cut': (Stage.OUTPUT, Location.NODE, Type.MASK),
'is_cut_h': (Stage.HINT, Location.NODE, Type.MASK),
'pi_h': (Stage.HINT, Location.NODE, Type.POINTER),
'color': (Stage.HINT, Location.NODE, Type.CATEGORICAL),
'd': (Stage.HINT, Location.NODE, Type.SCALAR),
'f': (Stage.HINT, Location.NODE, Type.SCALAR),
'low': (Stage.HINT, Location.NODE, Type.SCALAR),
'child_cnt': (Stage.HINT, Location.NODE, Type.SCALAR),
's_prev': (Stage.HINT, Location.NODE, Type.POINTER),
's': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'u': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'v': (Stage.HINT, Location.NODE, Type.MASK_ONE),
's_last': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'time': (Stage.HINT, Location.GRAPH, Type.SCALAR)
},
'bridges': {
'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
'A': (Stage.INPUT, Location.EDGE, Type.SCALAR),
'adj': (Stage.INPUT, Location.EDGE, Type.MASK),
'is_bridge': (Stage.OUTPUT, Location.EDGE, Type.MASK),
'is_bridge_h': (Stage.HINT, Location.EDGE, Type.MASK),
'pi_h': (Stage.HINT, Location.NODE, Type.POINTER),
'color': (Stage.HINT, Location.NODE, Type.CATEGORICAL),
'd': (Stage.HINT, Location.NODE, Type.SCALAR),
'f': (Stage.HINT, Location.NODE, Type.SCALAR),
'low': (Stage.HINT, Location.NODE, Type.SCALAR),
's_prev': (Stage.HINT, Location.NODE, Type.POINTER),
's': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'u': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'v': (Stage.HINT, Location.NODE, Type.MASK_ONE),
's_last': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'time': (Stage.HINT, Location.GRAPH, Type.SCALAR)
},
'bfs': {
'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
's': (Stage.INPUT, Location.NODE, Type.MASK_ONE),
'A': (Stage.INPUT, Location.EDGE, Type.SCALAR),
'adj': (Stage.INPUT, Location.EDGE, Type.MASK),
'pi': (Stage.OUTPUT, Location.NODE, Type.POINTER),
'reach_h': (Stage.HINT, Location.NODE, Type.MASK),
'pi_h': (Stage.HINT, Location.NODE, Type.POINTER)
},
'mst_kruskal': {
'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
'A': (Stage.INPUT, Location.EDGE, Type.SCALAR),
'adj': (Stage.INPUT, Location.EDGE, Type.MASK),
'in_mst': (Stage.OUTPUT, Location.EDGE, Type.MASK),
'in_mst_h': (Stage.HINT, Location.EDGE, Type.MASK),
'pi': (Stage.HINT, Location.NODE, Type.POINTER),
'u': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'v': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'root_u': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'root_v': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'mask_u': (Stage.HINT, Location.NODE, Type.MASK),
'mask_v': (Stage.HINT, Location.NODE, Type.MASK),
'phase': (Stage.HINT, Location.GRAPH, Type.CATEGORICAL)
},
'mst_prim': {
'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
's': (Stage.INPUT, Location.NODE, Type.MASK_ONE),
'A': (Stage.INPUT, Location.EDGE, Type.SCALAR),
'adj': (Stage.INPUT, Location.EDGE, Type.MASK),
'pi': (Stage.OUTPUT, Location.NODE, Type.POINTER),
'pi_h': (Stage.HINT, Location.NODE, Type.POINTER),
'key': (Stage.HINT, Location.NODE, Type.SCALAR),
'mark': (Stage.HINT, Location.NODE, Type.MASK),
'in_queue': (Stage.HINT, Location.NODE, Type.MASK),
'u': (Stage.HINT, Location.NODE, Type.MASK_ONE)
},
'bellman_ford': {
'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
's': (Stage.INPUT, Location.NODE, Type.MASK_ONE),
'A': (Stage.INPUT, Location.EDGE, Type.SCALAR),
'adj': (Stage.INPUT, Location.EDGE, Type.MASK),
'pi': (Stage.OUTPUT, Location.NODE, Type.POINTER),
'pi_h': (Stage.HINT, Location.NODE, Type.POINTER),
'd': (Stage.HINT, Location.NODE, Type.SCALAR),
'msk': (Stage.HINT, Location.NODE, Type.MASK)
},
'dag_shortest_paths': {
'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
's': (Stage.INPUT, Location.NODE, Type.MASK_ONE),
'A': (Stage.INPUT, Location.EDGE, Type.SCALAR),
'adj': (Stage.INPUT, Location.EDGE, Type.MASK),
'pi': (Stage.OUTPUT, Location.NODE, Type.POINTER),
'pi_h': (Stage.HINT, Location.NODE, Type.POINTER),
'd': (Stage.HINT, Location.NODE, Type.SCALAR),
'mark': (Stage.HINT, Location.NODE, Type.MASK),
'topo_h': (Stage.HINT, Location.NODE, Type.POINTER),
'topo_head_h': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'color': (Stage.HINT, Location.NODE, Type.CATEGORICAL),
's_prev': (Stage.HINT, Location.NODE, Type.POINTER),
'u': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'v': (Stage.HINT, Location.NODE, Type.MASK_ONE),
's_last': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'phase': (Stage.HINT, Location.GRAPH, Type.MASK)
},
'dijkstra': {
'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
's': (Stage.INPUT, Location.NODE, Type.MASK_ONE),
'A': (Stage.INPUT, Location.EDGE, Type.SCALAR),
'adj': (Stage.INPUT, Location.EDGE, Type.MASK),
'pi': (Stage.OUTPUT, Location.NODE, Type.POINTER),
'pi_h': (Stage.HINT, Location.NODE, Type.POINTER),
'd': (Stage.HINT, Location.NODE, Type.SCALAR),
'mark': (Stage.HINT, Location.NODE, Type.MASK),
'in_queue': (Stage.HINT, Location.NODE, Type.MASK),
'u': (Stage.HINT, Location.NODE, Type.MASK_ONE)
},
'floyd_warshall': {
'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
'A': (Stage.INPUT, Location.EDGE, Type.SCALAR),
'adj': (Stage.INPUT, Location.EDGE, Type.MASK),
'Pi': (Stage.OUTPUT, Location.EDGE, Type.POINTER),
'Pi_h': (Stage.HINT, Location.EDGE, Type.POINTER),
'D': (Stage.HINT, Location.EDGE, Type.SCALAR),
'msk': (Stage.HINT, Location.EDGE, Type.MASK),
'k': (Stage.HINT, Location.NODE, Type.MASK_ONE)
},
'bipartite_matching': {
'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
'A': (Stage.INPUT, Location.EDGE, Type.SCALAR),
'adj': (Stage.INPUT, Location.EDGE, Type.MASK),
's': (Stage.INPUT, Location.NODE, Type.MASK_ONE),
't': (Stage.INPUT, Location.NODE, Type.MASK_ONE),
'in_matching': (Stage.OUTPUT, Location.EDGE, Type.MASK),
'in_matching_h': (Stage.HINT, Location.EDGE, Type.MASK),
'A_h': (Stage.HINT, Location.EDGE, Type.SCALAR),
'adj_h': (Stage.HINT, Location.EDGE, Type.MASK),
'd': (Stage.HINT, Location.NODE, Type.SCALAR),
'msk': (Stage.HINT, Location.NODE, Type.MASK),
'pi': (Stage.HINT, Location.NODE, Type.POINTER),
'u': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'phase': (Stage.HINT, Location.GRAPH, Type.MASK)
},
'naive_string_matcher': {
'string': (Stage.INPUT, Location.NODE, Type.MASK),
'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
'key': (Stage.INPUT, Location.NODE, Type.CATEGORICAL),
'match': (Stage.OUTPUT, Location.NODE, Type.MASK_ONE),
'pred_h': (Stage.HINT, Location.NODE, Type.POINTER),
's': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'i': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'j': (Stage.HINT, Location.NODE, Type.MASK_ONE)
},
'kmp_matcher': {
'string': (Stage.INPUT, Location.NODE, Type.MASK),
'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
'key': (Stage.INPUT, Location.NODE, Type.CATEGORICAL),
'match': (Stage.OUTPUT, Location.NODE, Type.MASK_ONE),
'pred_h': (Stage.HINT, Location.NODE, Type.POINTER),
'pi': (Stage.HINT, Location.NODE, Type.POINTER),
'k': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'q': (Stage.HINT, Location.NODE, Type.MASK_ONE),
's': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'i': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'phase': (Stage.HINT, Location.GRAPH, Type.MASK)
},
'segments_intersect': {
'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
'x': (Stage.INPUT, Location.NODE, Type.SCALAR),
'y': (Stage.INPUT, Location.NODE, Type.SCALAR),
'intersect': (Stage.OUTPUT, Location.GRAPH, Type.MASK),
'i': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'j': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'k': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'dir': (Stage.HINT, Location.NODE, Type.SCALAR),
'on_seg': (Stage.HINT, Location.NODE, Type.MASK)
},
'graham_scan': {
'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
'x': (Stage.INPUT, Location.NODE, Type.SCALAR),
'y': (Stage.INPUT, Location.NODE, Type.SCALAR),
'in_hull': (Stage.OUTPUT, Location.NODE, Type.MASK),
'best': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'atans': (Stage.HINT, Location.NODE, Type.SCALAR),
'in_hull_h': (Stage.HINT, Location.NODE, Type.MASK),
'stack_prev': (Stage.HINT, Location.NODE, Type.POINTER),
'last_stack': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'i': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'phase': (Stage.HINT, Location.GRAPH, Type.CATEGORICAL)
},
'jarvis_march': {
'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
'x': (Stage.INPUT, Location.NODE, Type.SCALAR),
'y': (Stage.INPUT, Location.NODE, Type.SCALAR),
'in_hull': (Stage.OUTPUT, Location.NODE, Type.MASK),
'pred_h': (Stage.HINT, Location.NODE, Type.POINTER),
'in_hull_h': (Stage.HINT, Location.NODE, Type.MASK),
'best': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'last_point': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'endpoint': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'i': (Stage.HINT, Location.NODE, Type.MASK_ONE),
'phase': (Stage.HINT, Location.GRAPH, Type.CATEGORICAL)
}
})
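# Example lookups, readable straight off the data above: the BFS spec marks
# the source node as a one-hot input and the parent pointers as the output.
# SPECS['bfs']['s']  == (Stage.INPUT, Location.NODE, Type.MASK_ONE)
# SPECS['bfs']['pi'] == (Stage.OUTPUT, Location.NODE, Type.POINTER)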
|
py | 1a508183996d75a1abc83c8ced1800c6ff90f9ca | # Copyright (c) 2016 Matt Davis, <[email protected]>
# Chris Houseknecht, <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
import os
import re
import types
import copy
import inspect
import traceback
from os.path import expanduser
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves import configparser
import ansible.module_utils.six.moves.urllib.parse as urlparse
try:
from ansible.release import __version__ as ANSIBLE_VERSION
except ImportError:
ANSIBLE_VERSION = 'unknown'
AZURE_COMMON_ARGS = dict(
auth_source=dict(
type='str',
choices=['auto', 'cli', 'env', 'credential_file']
),
profile=dict(type='str'),
subscription_id=dict(type='str', no_log=True),
client_id=dict(type='str', no_log=True),
secret=dict(type='str', no_log=True),
tenant=dict(type='str', no_log=True),
ad_user=dict(type='str', no_log=True),
password=dict(type='str', no_log=True),
cloud_environment=dict(type='str'),
cert_validation_mode=dict(type='str', choices=['validate', 'ignore']),
api_profile=dict(type='str', default='latest')
# debug=dict(type='bool', default=False),
)
AZURE_CREDENTIAL_ENV_MAPPING = dict(
profile='AZURE_PROFILE',
subscription_id='AZURE_SUBSCRIPTION_ID',
client_id='AZURE_CLIENT_ID',
secret='AZURE_SECRET',
tenant='AZURE_TENANT',
ad_user='AZURE_AD_USER',
password='AZURE_PASSWORD',
cloud_environment='AZURE_CLOUD_ENVIRONMENT',
cert_validation_mode='AZURE_CERT_VALIDATION_MODE',
)
# FUTURE: this should come from the SDK or an external location.
# For now, we have to copy from azure-cli
AZURE_API_PROFILES = {
'latest': {
'ContainerInstanceManagementClient': '2018-02-01-preview',
'ComputeManagementClient': dict(
default_api_version='2017-12-01',
resource_skus='2017-09-01',
disks='2017-03-30',
snapshots='2017-03-30',
virtual_machine_run_commands='2017-03-30'
),
'NetworkManagementClient': '2017-11-01',
'ResourceManagementClient': '2017-05-10',
'StorageManagementClient': '2017-10-01'
},
'2017-03-09-profile': {
'ComputeManagementClient': '2016-03-30',
'NetworkManagementClient': '2015-06-15',
'ResourceManagementClient': '2016-02-01',
'StorageManagementClient': '2016-01-01'
}
}
AZURE_TAG_ARGS = dict(
tags=dict(type='dict'),
append_tags=dict(type='bool', default=True),
)
AZURE_COMMON_REQUIRED_IF = [
('log_mode', 'file', ['log_path'])
]
ANSIBLE_USER_AGENT = 'Ansible/{0}'.format(ANSIBLE_VERSION)
CLOUDSHELL_USER_AGENT_KEY = 'AZURE_HTTP_USER_AGENT'
VSCODEEXT_USER_AGENT_KEY = 'VSCODEEXT_USER_AGENT'
CIDR_PATTERN = re.compile(r"(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1"
r"[0-9]{2}|2[0-4][0-9]|25[0-5])(/([0-9]|[1-2][0-9]|3[0-2]))")
AZURE_SUCCESS_STATE = "Succeeded"
AZURE_FAILED_STATE = "Failed"
HAS_AZURE = True
HAS_AZURE_EXC = None
HAS_AZURE_CLI_CORE = True
HAS_MSRESTAZURE = True
HAS_MSRESTAZURE_EXC = None
try:
import importlib
except ImportError:
    # This passes the sanity import test, but does not provide a user-friendly error message.
# Doing so would require catching Exception for all imports of Azure dependencies in modules and module_utils.
importlib = None
try:
from packaging.version import Version
HAS_PACKAGING_VERSION = True
HAS_PACKAGING_VERSION_EXC = None
except ImportError as exc:
Version = None
HAS_PACKAGING_VERSION = False
HAS_PACKAGING_VERSION_EXC = exc
# NB: a packaging issue sometimes causes msrestazure not to be installed; check it separately
try:
from msrest.serialization import Serializer
except ImportError as exc:
HAS_MSRESTAZURE_EXC = exc
HAS_MSRESTAZURE = False
try:
from enum import Enum
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import resource_id, is_valid_resource_id
from msrestazure import azure_cloud
from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials
from azure.mgmt.network.version import VERSION as network_client_version
from azure.mgmt.storage.version import VERSION as storage_client_version
from azure.mgmt.compute.version import VERSION as compute_client_version
from azure.mgmt.resource.version import VERSION as resource_client_version
from azure.mgmt.dns.version import VERSION as dns_client_version
from azure.mgmt.web.version import VERSION as web_client_version
from azure.mgmt.network import NetworkManagementClient
from azure.mgmt.resource.resources import ResourceManagementClient
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.compute import ComputeManagementClient
from azure.mgmt.dns import DnsManagementClient
from azure.mgmt.web import WebSiteManagementClient
from azure.mgmt.containerservice import ContainerServiceClient
from azure.storage.cloudstorageaccount import CloudStorageAccount
except ImportError as exc:
HAS_AZURE_EXC = exc
HAS_AZURE = False
try:
from azure.cli.core.util import CLIError
from azure.common.credentials import get_azure_cli_credentials, get_cli_profile
from azure.common.cloud import get_cli_active_cloud
except ImportError:
HAS_AZURE_CLI_CORE = False
CLIError = Exception
def azure_id_to_dict(id):
pieces = re.sub(r'^\/', '', id).split('/')
result = {}
index = 0
while index < len(pieces) - 1:
result[pieces[index]] = pieces[index + 1]
index += 1
return result
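# Illustrative note (added, hypothetical id): each path segment is mapped to
# the segment that follows it, so azure_id_to_dict('/subscriptions/sub1/resourceGroups/rg1')
# includes {'subscriptions': 'sub1', 'resourceGroups': 'rg1'} (plus overlapping
# entries such as {'sub1': 'resourceGroups'}, a quirk of the pairwise walk).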
def format_resource_id(val, subscription_id, namespace, types, resource_group):
return resource_id(name=val,
resource_group=resource_group,
namespace=namespace,
type=types,
subscription=subscription_id) if not is_valid_resource_id(val) else val
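# Illustrative example (added, hypothetical values): a bare name is expanded
# into a full ARM id, while an already-valid resource id passes through, e.g.
#   format_resource_id('vm01', 'sub1', 'Microsoft.Compute', 'virtualMachines', 'rg1')
#   # -> '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.Compute/virtualMachines/vm01'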
# FUTURE: either get this from the requirements file (if we can be sure it's always available at runtime)
# or generate the requirements files from this so we only have one source of truth to maintain...
AZURE_PKG_VERSIONS = {
'StorageManagementClient': {
'package_name': 'storage',
'expected_version': '1.5.0'
},
'ComputeManagementClient': {
'package_name': 'compute',
'expected_version': '2.0.0'
},
'ContainerInstanceManagementClient': {
'package_name': 'containerinstance',
'expected_version': '0.3.1'
},
'NetworkManagementClient': {
'package_name': 'network',
'expected_version': '1.3.0'
},
'ResourceManagementClient': {
'package_name': 'resource',
'expected_version': '1.1.0'
},
'DnsManagementClient': {
'package_name': 'dns',
'expected_version': '1.0.1'
},
'WebSiteManagementClient': {
'package_name': 'web',
'expected_version': '0.32.0'
},
} if HAS_AZURE else {}
AZURE_MIN_RELEASE = '2.0.0'
class AzureRMModuleBase(object):
def __init__(self, derived_arg_spec, bypass_checks=False, no_log=False,
check_invalid_arguments=None, mutually_exclusive=None, required_together=None,
required_one_of=None, add_file_common_args=False, supports_check_mode=False,
required_if=None, supports_tags=True, facts_module=False, skip_exec=False):
merged_arg_spec = dict()
merged_arg_spec.update(AZURE_COMMON_ARGS)
if supports_tags:
merged_arg_spec.update(AZURE_TAG_ARGS)
if derived_arg_spec:
merged_arg_spec.update(derived_arg_spec)
merged_required_if = list(AZURE_COMMON_REQUIRED_IF)
if required_if:
merged_required_if += required_if
self.module = AnsibleModule(argument_spec=merged_arg_spec,
bypass_checks=bypass_checks,
no_log=no_log,
check_invalid_arguments=check_invalid_arguments,
mutually_exclusive=mutually_exclusive,
required_together=required_together,
required_one_of=required_one_of,
add_file_common_args=add_file_common_args,
supports_check_mode=supports_check_mode,
required_if=merged_required_if)
if not HAS_PACKAGING_VERSION:
self.fail("Do you have packaging installed? Try `pip install packaging`"
"- {0}".format(HAS_PACKAGING_VERSION_EXC))
if not HAS_MSRESTAZURE:
self.fail("Do you have msrestazure installed? Try `pip install msrestazure`"
"- {0}".format(HAS_MSRESTAZURE_EXC))
if not HAS_AZURE:
self.fail("Do you have azure>={1} installed? Try `pip install ansible[azure]`"
"- {0}".format(HAS_AZURE_EXC, AZURE_MIN_RELEASE))
self._cloud_environment = None
self._network_client = None
self._storage_client = None
self._resource_client = None
self._compute_client = None
self._dns_client = None
self._web_client = None
self._containerservice_client = None
self.check_mode = self.module.check_mode
self.api_profile = self.module.params.get('api_profile')
self.facts_module = facts_module
# self.debug = self.module.params.get('debug')
# authenticate
self.credentials = self._get_credentials(self.module.params)
if not self.credentials:
if HAS_AZURE_CLI_CORE:
self.fail("Failed to get credentials. Either pass as parameters, set environment variables, "
"define a profile in ~/.azure/credentials, or log in with Azure CLI (`az login`).")
else:
self.fail("Failed to get credentials. Either pass as parameters, set environment variables, "
"define a profile in ~/.azure/credentials, or install Azure CLI and log in (`az login`).")
# cert validation mode precedence: module-arg, credential profile, env, "validate"
self._cert_validation_mode = self.module.params['cert_validation_mode'] or self.credentials.get('cert_validation_mode') or \
os.environ.get('AZURE_CERT_VALIDATION_MODE') or 'validate'
if self._cert_validation_mode not in ['validate', 'ignore']:
self.fail('invalid cert_validation_mode: {0}'.format(self._cert_validation_mode))
# if cloud_environment specified, look up/build Cloud object
raw_cloud_env = self.credentials.get('cloud_environment')
if self.credentials.get('credentials') is not None and raw_cloud_env is not None:
self._cloud_environment = raw_cloud_env
elif not raw_cloud_env:
self._cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD # SDK default
else:
# try to look up "well-known" values via the name attribute on azure_cloud members
all_clouds = [x[1] for x in inspect.getmembers(azure_cloud) if isinstance(x[1], azure_cloud.Cloud)]
matched_clouds = [x for x in all_clouds if x.name == raw_cloud_env]
if len(matched_clouds) == 1:
self._cloud_environment = matched_clouds[0]
elif len(matched_clouds) > 1:
self.fail("Azure SDK failure: more than one cloud matched for cloud_environment name '{0}'".format(raw_cloud_env))
else:
if not urlparse.urlparse(raw_cloud_env).scheme:
self.fail("cloud_environment must be an endpoint discovery URL or one of {0}".format([x.name for x in all_clouds]))
try:
self._cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(raw_cloud_env)
except Exception as e:
self.fail("cloud_environment {0} could not be resolved: {1}".format(raw_cloud_env, e.message), exception=traceback.format_exc(e))
if self.credentials.get('subscription_id', None) is None and self.credentials.get('credentials') is None:
self.fail("Credentials did not include a subscription_id value.")
self.log("setting subscription_id")
self.subscription_id = self.credentials['subscription_id']
if self.credentials.get('credentials') is not None:
# AzureCLI credentials
self.azure_credentials = self.credentials['credentials']
elif self.credentials.get('client_id') is not None and \
self.credentials.get('secret') is not None and \
self.credentials.get('tenant') is not None:
self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'],
secret=self.credentials['secret'],
tenant=self.credentials['tenant'],
cloud_environment=self._cloud_environment,
verify=self._cert_validation_mode == 'validate')
elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None:
tenant = self.credentials.get('tenant')
if not tenant:
tenant = 'common' # SDK default
self.azure_credentials = UserPassCredentials(self.credentials['ad_user'],
self.credentials['password'],
tenant=tenant,
cloud_environment=self._cloud_environment,
verify=self._cert_validation_mode == 'validate')
else:
self.fail("Failed to authenticate with provided credentials. Some attributes were missing. "
"Credentials must include client_id, secret and tenant or ad_user and password or "
"be logged using AzureCLI.")
# common parameter validation
if self.module.params.get('tags'):
self.validate_tags(self.module.params['tags'])
if not skip_exec:
res = self.exec_module(**self.module.params)
self.module.exit_json(**res)
def check_client_version(self, client_type):
# Ensure the installed Azure SDK package meets the minimum version this module supports.
package_version = AZURE_PKG_VERSIONS.get(client_type.__name__, None)
if package_version is not None:
client_name = package_version.get('package_name')
try:
client_module = importlib.import_module(client_type.__module__)
client_version = client_module.VERSION
except Exception:
# can't get at the module version for some reason, just fail silently...
return
expected_version = package_version.get('expected_version')
if Version(client_version) < Version(expected_version):
self.fail("Installed azure-mgmt-{0} client version is {1}. The supported version is {2}. Try "
"`pip install ansible[azure]`".format(client_name, client_version, expected_version))
def exec_module(self, **kwargs):
self.fail("Error: {0} failed to implement exec_module method.".format(self.__class__.__name__))
def fail(self, msg, **kwargs):
'''
Shortcut for calling module.fail()
:param msg: Error message text.
:param kwargs: Any key=value pairs
:return: None
'''
self.module.fail_json(msg=msg, **kwargs)
def deprecate(self, msg, version=None):
self.module.deprecate(msg, version)
def log(self, msg, pretty_print=False):
pass
# Use only during module development
# if self.debug:
# log_file = open('azure_rm.log', 'a')
# if pretty_print:
# log_file.write(json.dumps(msg, indent=4, sort_keys=True))
# else:
# log_file.write(msg + u'\n')
def validate_tags(self, tags):
'''
Check if tags dictionary contains string:string pairs.
:param tags: dictionary of string:string pairs
:return: None
'''
if not self.facts_module:
if not isinstance(tags, dict):
self.fail("Tags must be a dictionary of string:string values.")
for key, value in tags.items():
if not isinstance(value, str):
self.fail("Tags values must be strings. Found {0}:{1}".format(str(key), str(value)))
def update_tags(self, tags):
'''
Call from the module to update metadata tags. Returns tuple
with bool indicating if there was a change and dict of new
tags to assign to the object.
:param tags: metadata tags from the object
:return: bool, dict
'''
new_tags = copy.copy(tags) if isinstance(tags, dict) else dict()
changed = False
if isinstance(self.module.params.get('tags'), dict):
for key, value in self.module.params['tags'].items():
if not new_tags.get(key) or new_tags[key] != value:
changed = True
new_tags[key] = value
if isinstance(tags, dict):
for key, value in tags.items():
if not self.module.params['tags'].get(key):
new_tags.pop(key)
changed = True
return changed, new_tags
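# Illustrative example (added, hypothetical values): with module parameter
# tags={'env': 'prod'} and existing object tags {'env': 'dev', 'owner': 'me'},
# this returns (True, {'env': 'prod'}) -- 'env' is overwritten and 'owner' is
# dropped because it does not appear in the requested tags.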
def has_tags(self, obj_tags, tag_list):
'''
Used in fact modules to compare object tags to list of parameter tags. Return true if list of parameter tags
exists in object tags.
:param obj_tags: dictionary of tags from an Azure object.
:param tag_list: list of tag keys or tag key:value pairs
:return: bool
'''
if not obj_tags and tag_list:
return False
if not tag_list:
return True
matches = 0
result = False
for tag in tag_list:
tag_key = tag
tag_value = None
if ':' in tag:
tag_key, tag_value = tag.split(':')
if tag_value and obj_tags.get(tag_key) == tag_value:
matches += 1
elif not tag_value and obj_tags.get(tag_key):
matches += 1
if matches == len(tag_list):
result = True
return result
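# Illustrative example (added, hypothetical values): with
# obj_tags = {'env': 'prod', 'owner': 'me'}:
#   has_tags(obj_tags, ['env'])      -> True   (key present)
#   has_tags(obj_tags, ['env:prod']) -> True   (key and value match)
#   has_tags(obj_tags, ['env:dev'])  -> False  (value differs)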
def get_resource_group(self, resource_group):
'''
Fetch a resource group.
:param resource_group: name of a resource group
:return: resource group object
'''
try:
return self.rm_client.resource_groups.get(resource_group)
except CloudError as cloud_error:
self.fail("Error retrieving resource group {0} - {1}".format(resource_group, cloud_error.message))
except Exception as exc:
self.fail("Error retrieving resource group {0} - {1}".format(resource_group, str(exc)))
def _get_profile(self, profile="default"):
path = expanduser("~/.azure/credentials")
try:
config = configparser.ConfigParser()
config.read(path)
except Exception as exc:
self.fail("Failed to access {0}. Check that the file exists and you have read "
"access. {1}".format(path, str(exc)))
credentials = dict()
for key in AZURE_CREDENTIAL_ENV_MAPPING:
try:
credentials[key] = config.get(profile, key, raw=True)
except (configparser.NoSectionError, configparser.NoOptionError):
pass
if credentials.get('subscription_id'):
return credentials
return None
def _get_azure_cli_credentials(self):
credentials, subscription_id = get_azure_cli_credentials()
cloud_environment = get_cli_active_cloud()
cli_credentials = {
'credentials': credentials,
'subscription_id': subscription_id,
'cloud_environment': cloud_environment
}
return cli_credentials
def _get_env_credentials(self):
env_credentials = dict()
for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
env_credentials[attribute] = os.environ.get(env_variable, None)
if env_credentials['profile']:
credentials = self._get_profile(env_credentials['profile'])
return credentials
if env_credentials.get('subscription_id') is not None:
return env_credentials
return None
def _get_credentials(self, params):
# Get authentication credentials.
self.log('Getting credentials')
arg_credentials = dict()
for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
arg_credentials[attribute] = params.get(attribute, None)
auth_source = params.get('auth_source', None)
if not auth_source:
auth_source = os.environ.get('ANSIBLE_AZURE_AUTH_SOURCE', 'auto')
if auth_source == 'cli':
if not HAS_AZURE_CLI_CORE:
self.fail("Azure auth_source is `cli`, but azure-cli package is not available. Try `pip install azure-cli --upgrade`")
try:
self.log('Retrieving credentials from Azure CLI profile')
cli_credentials = self._get_azure_cli_credentials()
return cli_credentials
except CLIError as err:
self.fail("Azure CLI profile cannot be loaded - {0}".format(err))
if auth_source == 'env':
self.log('Retrieving credentials from environment')
env_credentials = self._get_env_credentials()
return env_credentials
if auth_source == 'credential_file':
self.log("Retrieving credentials from credential file")
profile = params.get('profile', 'default')
default_credentials = self._get_profile(profile)
return default_credentials
# auto, precedence: module parameters -> environment variables -> default profile in ~/.azure/credentials
# try module params
if arg_credentials['profile'] is not None:
self.log('Retrieving credentials with profile parameter.')
credentials = self._get_profile(arg_credentials['profile'])
return credentials
if arg_credentials['subscription_id']:
self.log('Received credentials from parameters.')
return arg_credentials
# try environment
env_credentials = self._get_env_credentials()
if env_credentials:
self.log('Received credentials from env.')
return env_credentials
# try default profile from ~/.azure/credentials
default_credentials = self._get_profile()
if default_credentials:
self.log('Retrieved default profile credentials from ~/.azure/credentials.')
return default_credentials
try:
if HAS_AZURE_CLI_CORE:
self.log('Retrieving credentials from AzureCLI profile')
cli_credentials = self._get_azure_cli_credentials()
return cli_credentials
except CLIError as ce:
self.log('Error getting AzureCLI profile credentials - {0}'.format(ce))
return None
def serialize_obj(self, obj, class_name, enum_modules=None):
'''
Return a JSON representation of an Azure object.
:param obj: Azure object
:param class_name: Name of the object's class
:param enum_modules: List of module names to build enum dependencies from.
:return: serialized result
'''
enum_modules = [] if enum_modules is None else enum_modules
dependencies = dict()
if enum_modules:
for module_name in enum_modules:
mod = importlib.import_module(module_name)
for mod_class_name, mod_class_obj in inspect.getmembers(mod, predicate=inspect.isclass):
dependencies[mod_class_name] = mod_class_obj
self.log("dependencies: ")
self.log(str(dependencies))
serializer = Serializer(classes=dependencies)
return serializer.body(obj, class_name, keep_readonly=True)
def get_poller_result(self, poller, wait=5):
'''
Consistent method of waiting on and retrieving results from Azure's long poller
:param poller Azure poller object
:return object resulting from the original request
'''
try:
delay = wait
while not poller.done():
self.log("Waiting for {0} sec".format(delay))
poller.wait(timeout=delay)
return poller.result()
except Exception as exc:
self.log(str(exc))
raise
def check_provisioning_state(self, azure_object, requested_state='present'):
'''
Check an Azure object's provisioning state. If something did not complete the provisioning
process, then we cannot operate on it.
:param azure_object An object such as a subnet, storageaccount, etc. Must have provisioning_state
and name attributes.
:return None
'''
if hasattr(azure_object, 'properties') and hasattr(azure_object.properties, 'provisioning_state') and \
hasattr(azure_object, 'name'):
# resource group object fits this model
if isinstance(azure_object.properties.provisioning_state, Enum):
if azure_object.properties.provisioning_state.value != AZURE_SUCCESS_STATE and \
requested_state != 'absent':
self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
azure_object.name, azure_object.properties.provisioning_state, AZURE_SUCCESS_STATE))
return
if azure_object.properties.provisioning_state != AZURE_SUCCESS_STATE and \
requested_state != 'absent':
self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
azure_object.name, azure_object.properties.provisioning_state, AZURE_SUCCESS_STATE))
return
if hasattr(azure_object, 'provisioning_state') or not hasattr(azure_object, 'name'):
if isinstance(azure_object.provisioning_state, Enum):
if azure_object.provisioning_state.value != AZURE_SUCCESS_STATE and requested_state != 'absent':
self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
azure_object.name, azure_object.provisioning_state, AZURE_SUCCESS_STATE))
return
if azure_object.provisioning_state != AZURE_SUCCESS_STATE and requested_state != 'absent':
self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
azure_object.name, azure_object.provisioning_state, AZURE_SUCCESS_STATE))
def get_blob_client(self, resource_group_name, storage_account_name, storage_blob_type='block'):
keys = dict()
try:
# Get keys from the storage account
self.log('Getting keys')
account_keys = self.storage_client.storage_accounts.list_keys(resource_group_name, storage_account_name)
except Exception as exc:
self.fail("Error getting keys for account {0} - {1}".format(storage_account_name, str(exc)))
try:
self.log('Create blob service')
if storage_blob_type == 'page':
return CloudStorageAccount(storage_account_name, account_keys.keys[0].value).create_page_blob_service()
elif storage_blob_type == 'block':
return CloudStorageAccount(storage_account_name, account_keys.keys[0].value).create_block_blob_service()
else:
raise Exception("Invalid storage blob type defined.")
except Exception as exc:
self.fail("Error creating blob service client for storage account {0} - {1}".format(storage_account_name,
str(exc)))
def create_default_pip(self, resource_group, location, public_ip_name, allocation_method='Dynamic'):
'''
Create a default public IP address <public_ip_name> to associate with a network interface.
If a PIP address matching <public_ip_name> exists, return it. Otherwise, create one.
:param resource_group: name of an existing resource group
:param location: a valid azure location
:param public_ip_name: base name to assign the public IP address
:param allocation_method: one of 'Static' or 'Dynamic'
:return: PIP object
'''
pip = None
self.log("Starting create_default_pip {0}".format(public_ip_name))
self.log("Check to see if public IP {0} exists".format(public_ip_name))
try:
pip = self.network_client.public_ip_addresses.get(resource_group, public_ip_name)
except CloudError:
pass
if pip:
self.log("Public ip {0} found.".format(public_ip_name))
self.check_provisioning_state(pip)
return pip
params = self.network_models.PublicIPAddress(
location=location,
public_ip_allocation_method=allocation_method,
)
self.log('Creating default public IP {0}'.format(public_ip_name))
try:
poller = self.network_client.public_ip_addresses.create_or_update(resource_group, public_ip_name, params)
except Exception as exc:
self.fail("Error creating {0} - {1}".format(public_ip_name, str(exc)))
return self.get_poller_result(poller)
def create_default_securitygroup(self, resource_group, location, security_group_name, os_type, open_ports):
'''
Create a default security group <security_group_name> to associate with a network interface. If a security group matching
<security_group_name> exists, return it. Otherwise, create one.
:param resource_group: Resource group name
:param location: azure location name
:param security_group_name: base name to use for the security group
:param os_type: one of 'Windows' or 'Linux'. Determines the default rules added to the security group.
:param open_ports: optional list of ports to open instead of the OS-type defaults (SSH 22 for Linux; RDP 3389 and WinRM 5986 for Windows).
:return: security_group object
'''
group = None
self.log("Create security group {0}".format(security_group_name))
self.log("Check to see if security group {0} exists".format(security_group_name))
try:
group = self.network_client.network_security_groups.get(resource_group, security_group_name)
except CloudError:
pass
if group:
self.log("Security group {0} found.".format(security_group_name))
self.check_provisioning_state(group)
return group
parameters = self.network_models.NetworkSecurityGroup()
parameters.location = location
if not open_ports:
# Open default ports based on OS type
if os_type == 'Linux':
# add an inbound SSH rule
parameters.security_rules = [
self.network_models.SecurityRule('Tcp', '*', '*', 'Allow', 'Inbound', description='Allow SSH Access',
source_port_range='*', destination_port_range='22', priority=100, name='SSH')
]
else:
# for windows add inbound RDP and WinRM rules
parameters.security_rules = [
self.network_models.SecurityRule('Tcp', '*', '*', 'Allow', 'Inbound', description='Allow RDP port 3389',
source_port_range='*', destination_port_range='3389', priority=100, name='RDP01'),
self.network_models.SecurityRule('Tcp', '*', '*', 'Allow', 'Inbound', description='Allow WinRM HTTPS port 5986',
source_port_range='*', destination_port_range='5986', priority=101, name='WinRM01'),
]
else:
# Open custom ports
parameters.security_rules = []
priority = 100
for port in open_ports:
priority += 1
rule_name = "Rule_{0}".format(priority)
parameters.security_rules.append(
self.network_models.SecurityRule(protocol='Tcp',
source_address_prefix='*',
destination_address_prefix='*',
access='Allow',
direction='Inbound',
source_port_range='*',
destination_port_range=str(port),
priority=priority,
name=rule_name)
)
self.log('Creating default security group {0}'.format(security_group_name))
try:
poller = self.network_client.network_security_groups.create_or_update(resource_group,
security_group_name,
parameters)
except Exception as exc:
self.fail("Error creating default security rule {0} - {1}".format(security_group_name, str(exc)))
return self.get_poller_result(poller)
@staticmethod
def _validation_ignore_callback(session, global_config, local_config, **kwargs):
session.verify = False
def get_api_profile(self, client_type_name, api_profile_name):
profile_all_clients = AZURE_API_PROFILES.get(api_profile_name)
if not profile_all_clients:
raise KeyError("unknown Azure API profile: {0}".format(api_profile_name))
profile_raw = profile_all_clients.get(client_type_name, None)
if not profile_raw:
self.module.warn("Azure API profile {0} does not define an entry for {1}".format(api_profile_name, client_type_name))
if isinstance(profile_raw, dict):
if not profile_raw.get('default_api_version'):
raise KeyError("Azure API profile {0} does not define 'default_api_version'".format(api_profile_name))
return profile_raw
# wrap basic strings in a dict that just defines the default
return dict(default_api_version=profile_raw)
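# Illustrative note (added): for a plain-string profile entry such as
# AZURE_API_PROFILES['latest']['NetworkManagementClient'] == '2017-11-01',
# this returns {'default_api_version': '2017-11-01'}; dict entries like the
# ComputeManagementClient one above are returned as-is.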
def get_mgmt_svc_client(self, client_type, base_url=None, api_version=None):
self.log('Getting management service client {0}'.format(client_type.__name__))
self.check_client_version(client_type)
client_argspec = inspect.getargspec(client_type.__init__)
client_kwargs = dict(credentials=self.azure_credentials, subscription_id=self.subscription_id, base_url=base_url)
api_profile_dict = {}
if self.api_profile:
api_profile_dict = self.get_api_profile(client_type.__name__, self.api_profile)
if not base_url:
# most things are resource_manager, don't make everyone specify
base_url = self._cloud_environment.endpoints.resource_manager
# unversioned clients won't accept profile; only send it if necessary
# clients without a version specified in the profile will use the default
if api_profile_dict and 'profile' in client_argspec.args:
client_kwargs['profile'] = api_profile_dict
# If the client doesn't accept api_version, it's unversioned.
# If it does, favor explicitly-specified api_version, fall back to api_profile
if 'api_version' in client_argspec.args:
profile_default_version = api_profile_dict.get('default_api_version', None)
if api_version or profile_default_version:
client_kwargs['api_version'] = api_version or profile_default_version
client = client_type(**client_kwargs)
# FUTURE: remove this once everything exposes models directly (eg, containerinstance)
try:
getattr(client, "models")
except AttributeError:
def _ansible_get_models(self, *arg, **kwarg):
return self._ansible_models
setattr(client, '_ansible_models', importlib.import_module(client_type.__module__).models)
client.models = types.MethodType(_ansible_get_models, client)
# Add user agent for Ansible
client.config.add_user_agent(ANSIBLE_USER_AGENT)
# Add user agent when running from Cloud Shell
if CLOUDSHELL_USER_AGENT_KEY in os.environ:
client.config.add_user_agent(os.environ[CLOUDSHELL_USER_AGENT_KEY])
# Add user agent when running from VSCode extension
if VSCODEEXT_USER_AGENT_KEY in os.environ:
client.config.add_user_agent(os.environ[VSCODEEXT_USER_AGENT_KEY])
if self._cert_validation_mode == 'ignore':
client.config.session_configuration_callback = self._validation_ignore_callback
return client
@property
def storage_client(self):
self.log('Getting storage client...')
if not self._storage_client:
self._storage_client = self.get_mgmt_svc_client(StorageManagementClient,
base_url=self._cloud_environment.endpoints.resource_manager,
api_version='2017-10-01')
return self._storage_client
@property
def storage_models(self):
self.log('Getting storage models...')
return StorageManagementClient.models("2017-10-01")
@property
def network_client(self):
self.log('Getting network client')
if not self._network_client:
self._network_client = self.get_mgmt_svc_client(NetworkManagementClient,
base_url=self._cloud_environment.endpoints.resource_manager,
api_version='2017-06-01')
return self._network_client
@property
def network_models(self):
self.log("Getting network models...")
return NetworkManagementClient.models("2017-06-01")
@property
def rm_client(self):
self.log('Getting resource manager client')
if not self._resource_client:
self._resource_client = self.get_mgmt_svc_client(ResourceManagementClient,
base_url=self._cloud_environment.endpoints.resource_manager,
api_version='2017-05-10')
return self._resource_client
@property
def rm_models(self):
self.log("Getting resource manager models")
return ResourceManagementClient.models("2017-05-10")
@property
def compute_client(self):
self.log('Getting compute client')
if not self._compute_client:
self._compute_client = self.get_mgmt_svc_client(ComputeManagementClient,
base_url=self._cloud_environment.endpoints.resource_manager,
api_version='2017-03-30')
return self._compute_client
@property
def compute_models(self):
self.log("Getting compute models")
return ComputeManagementClient.models("2017-03-30")
@property
def dns_client(self):
self.log('Getting dns client')
if not self._dns_client:
self._dns_client = self.get_mgmt_svc_client(DnsManagementClient,
base_url=self._cloud_environment.endpoints.resource_manager)
return self._dns_client
@property
def web_client(self):
self.log('Getting web client')
if not self._web_client:
self._web_client = self.get_mgmt_svc_client(WebSiteManagementClient,
base_url=self._cloud_environment.endpoints.resource_manager)
return self._web_client
@property
def containerservice_client(self):
self.log('Getting container service client')
if not self._containerservice_client:
self._containerservice_client = self.get_mgmt_svc_client(ContainerServiceClient,
base_url=self._cloud_environment.endpoints.resource_manager)
return self._containerservice_client
|
py | 1a5081c9201ab0d4435a01791d52d4d818d5c93c | import django
from django.conf import settings
from django.core.management import call_command
settings.configure(
DEBUG=True,
INSTALLED_APPS=(
'django.contrib.contenttypes',
'msg',
),
MSG_SETTINGS={
'handlers': []
}
)
django.setup()
call_command('makemigrations', 'msg')
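# Note (added): because settings are configured inline, this script can be run
# directly (e.g. `python make_migrations.py`, assuming that filename) to
# generate migrations for the 'msg' app without a full Django project.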
|
py | 1a508273d369a63133b1fb7039edc365ebe16b40 | from .base import BaseSerializer
from architecture import models
class WtvSerializer(BaseSerializer):
class Meta:
model = models.Wtv
fields = '__all__'
class BImsBootSerializer(BaseSerializer):
class Meta:
model = models.BImsBoot
fields = '__all__'
class BImsPanelSerializer(BaseSerializer):
class Meta:
model = models.BImsPanel
fields = '__all__'
class TmsSerializer(BaseSerializer):
class Meta:
model = models.Tms
fields = '__all__'
class EpgSerializer(BaseSerializer):
class Meta:
model = models.Epg
fields = '__all__'
class SearchSerializer(BaseSerializer):
class Meta:
model = models.Search
fields = '__all__'
class PicSerializer(BaseSerializer):
class Meta:
model = models.Pic
fields = '__all__'
class PplSerializer(BaseSerializer):
class Meta:
model = models.Ppl
fields = '__all__'
class CosEpgSerializer(BaseSerializer):
class Meta:
model = models.CosEpg
fields = '__all__'
class UicSerializer(BaseSerializer):
class Meta:
model = models.Uic
fields = '__all__'
class MScreenSerializer(BaseSerializer):
class Meta:
model = models.MScreen
fields = '__all__'
class DMS2Serializer(BaseSerializer):
class Meta:
model = models.DMS2
fields = '__all__'
class XMppSerializer(BaseSerializer):
class Meta:
model = models.XMpp
fields = '__all__'
class NDmsSerializer(BaseSerializer):
class Meta:
model = models.NDms
fields = '__all__'
class TOSSerializer(BaseSerializer):
class Meta:
model = models.TOS
fields = '__all__'
class UCSSerializer(BaseSerializer):
class Meta:
model = models.UCS
fields = '__all__'
class MGSSerializer(BaseSerializer):
class Meta:
model = models.MGS
fields = '__all__'
class NMCSerializer(BaseSerializer):
class Meta:
model = models.NMC
fields = '__all__'
class UBSSerializer(BaseSerializer):
class Meta:
model = models.UBS
fields = '__all__'
class VASSerializer(BaseSerializer):
class Meta:
model = models.VAS
fields = '__all__'
|
py | 1a5082cc00b544c3d5c5798e2d6c024736d4710d | #!/usr/bin/env python
from functools import partial
import sys
import os
import numpy as np
from vmaf.config import VmafConfig
from vmaf.core.asset import Asset
from vmaf.core.quality_runner import PsnrQualityRunner
from vmaf.tools.misc import get_cmd_option
from vmaf.tools.stats import ListStats
__copyright__ = "Copyright 2016-2018, Netflix, Inc."
__license__ = "Apache, Version 2.0"
FMTS = ['yuv420p', 'yuv422p', 'yuv444p', 'yuv420p10le', 'yuv422p10le', 'yuv444p10le']
OUT_FMTS = ['text (default)', 'xml', 'json']
POOL_METHODS = ['mean', 'harmonic_mean', 'min', 'median', 'perc5', 'perc10', 'perc20']
def print_usage():
print "usage: " + os.path.basename(sys.argv[0]) \
+ " fmt width height ref_path dis_path [--out-fmt out_fmt]\n"
print "fmt:\n\t" + "\n\t".join(FMTS) + "\n"
print "out_fmt:\n\t" + "\n\t".join(OUT_FMTS) + "\n"
def main():
if len(sys.argv) < 6:
print_usage()
return 2
try:
fmt = sys.argv[1]
width = int(sys.argv[2])
height = int(sys.argv[3])
ref_path = sys.argv[4]
dis_path = sys.argv[5]
except ValueError:
print_usage()
return 2
if width < 0 or height < 0:
print "width and height must be non-negative, but are {w} and {h}".format(w=width, h=height)
print_usage()
return 2
if fmt not in FMTS:
print_usage()
return 2
out_fmt = get_cmd_option(sys.argv, 6, len(sys.argv), '--out-fmt')
if not (out_fmt is None
or out_fmt == 'xml'
or out_fmt == 'json'
or out_fmt == 'text'):
print_usage()
return 2
pool_method = get_cmd_option(sys.argv, 6, len(sys.argv), '--pool')
if not (pool_method is None
or pool_method in POOL_METHODS):
print '--pool can only have option among {}'.format(', '.join(POOL_METHODS))
return 2
asset = Asset(dataset="cmd", content_id=0, asset_id=0,
workdir_root=VmafConfig.workdir_path(),
ref_path=ref_path,
dis_path=dis_path,
asset_dict={'width':width, 'height':height, 'yuv_type':fmt}
)
assets = [asset]
runner_class = PsnrQualityRunner
runner = runner_class(
assets, None, fifo_mode=True,
delete_workdir=True,
result_store=None,
optional_dict=None,
optional_dict2=None,
)
# run
runner.run()
result = runner.results[0]
# pooling
if pool_method == 'harmonic_mean':
result.set_score_aggregate_method(ListStats.harmonic_mean)
elif pool_method == 'min':
result.set_score_aggregate_method(np.min)
elif pool_method == 'median':
result.set_score_aggregate_method(np.median)
elif pool_method == 'perc5':
result.set_score_aggregate_method(ListStats.perc5)
elif pool_method == 'perc10':
result.set_score_aggregate_method(ListStats.perc10)
elif pool_method == 'perc20':
result.set_score_aggregate_method(ListStats.perc20)
else: # None or 'mean'
pass
# output
if out_fmt == 'xml':
print result.to_xml()
elif out_fmt == 'json':
print result.to_json()
else: # None or 'text'
print str(result)
return 0
if __name__ == "__main__":
ret = main()
exit(ret)
|
py | 1a508320b82773c126e2598fc03b21c04c4fc9ce | """Example usage of the BayesianDense layer on the MNIST dataset (~1.5% test error)."""
import os
import logging
import logging.config
from sklearn.utils import shuffle
from keras.layers import Dense, Input
from keras.models import Model
from keras.datasets import mnist
from keras.optimizers import Adam
import numpy as np
import pickle
import keras.backend as K
from tqdm import tqdm
from bayesian_dense.bayesian_dense import BayesianDense, VariationalRegularizer
from keras.regularizers import WeightRegularizer
def accuracy(model, x, label_true, batch_size):
"""Calculate accuracy of a model"""
y_pred = model.predict(x, batch_size=batch_size)
label_pred = np.argmax(y_pred,axis=1)
correct = np.count_nonzero(label_true == label_pred)
return 1.0-(float(correct)/float(x.shape[0]))
def one_hot(labels, m):
"""Convert labels to one-hot representations"""
n = labels.shape[0]
y = np.zeros((n,m))
y[np.arange(n),labels.ravel()]=1
return y
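# Illustrative example (added): one_hot(np.array([0, 2]), 3) returns
# [[1, 0, 0], [0, 0, 1]] -- row i has a single 1 at column labels[i].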
def model(hidden_dim=512, input_dim=28*28, sigma_regularization=1e-3, mu_regularization=1e-5, k=10,
activation = lambda x: K.relu(x, 1.0 / 5.5)):
"""Create two layer MLP with softmax output"""
_x = Input(shape=(input_dim,))
layer = lambda output_dim, activation: BayesianDense(output_dim,
activation=activation,
W_sigma_regularizer=VariationalRegularizer(weight=sigma_regularization),
b_sigma_regularizer=VariationalRegularizer(weight=sigma_regularization),
W_regularizer=WeightRegularizer(l1=mu_regularization))
h1 = layer(hidden_dim, activation)
h2 = layer(hidden_dim, activation)
y = layer(k, 'softmax')
_y = y(h2(h1(_x)))
m = Model(_x, _y)
m.compile(Adam(1e-3),loss='categorical_crossentropy')
return m
def mnist_data():
"""Rescale and reshape MNIST data"""
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.astype(np.float32) / 255.
x_test = x_test.astype(np.float32) / 255.
x_train = x_train.reshape((x_train.shape[0], -1))
x_test = x_test.reshape((x_test.shape[0], -1))
return (x_train, y_train, x_test, y_test)
if __name__ == "__main__":
logging.config.fileConfig('logging.conf')
path = "output/bayesian_dense/test"
if not os.path.exists(path):
os.makedirs(path)
x_train, y_train, x_test, y_test = mnist_data()
nb_epoch = 100
batch_size = 128
k = 10
decay = 0.96
lr = 1e-3
m=model()
m.summary()
log = []
for epoch in tqdm(range(nb_epoch)):
acc_train = accuracy(m, x_train, y_train, batch_size=batch_size)
acc_test = accuracy(m, x_test, y_test, batch_size=batch_size)
log.append([acc_train, acc_test])
m.optimizer.lr.set_value(np.float32(lr))
logging.info("Epoch: %i/%i, Train: %f, Test: %f, LR: %f"%(epoch, nb_epoch, acc_train, acc_test, lr))
x_train, y_train = shuffle(x_train, y_train)
m.fit(x_train, one_hot(y_train,k), nb_epoch=1, batch_size=batch_size, shuffle=True,
validation_data=(x_test, one_hot(y_test,k)))
lr *= decay
if epoch%10 == 0:
m.save_weights("%s/checkpoint-%03i.hd5"%(path,epoch))
m.save_weights('%s/model.hd5'%path)
with open("%s/log.pkl"%path, "w") as f:
pickle.dump(log, f)
|
py | 1a50835b8dcb63b23e0f5b2b7a331401ec3c56e8 | __author__ = 'yuxiang'
import datasets
import datasets.kitti_tracking
import os
import PIL
import datasets.imdb
import numpy as np
import scipy.sparse
from utils.cython_bbox import bbox_overlaps
from utils.boxes_grid import get_boxes_grid
import subprocess
import pickle as cPickle
from fast_rcnn.config import cfg
import math
from rpn_msr.generate_anchors import generate_anchors
class kitti_tracking(datasets.imdb):
def __init__(self, image_set, seq_name, kitti_tracking_path=None):
datasets.imdb.__init__(self, 'kitti_tracking_' + image_set + '_' + seq_name)
self._image_set = image_set
self._seq_name = seq_name
self._kitti_tracking_path = self._get_default_path() if kitti_tracking_path is None \
else kitti_tracking_path
self._data_path = os.path.join(self._kitti_tracking_path, image_set, 'image_02')
self._classes = ('__background__', 'Car', 'Pedestrian', 'Cyclist')
self._class_to_ind = dict(zip(self.classes, xrange(self.num_classes)))
self._image_ext = '.png'
self._image_index = self._load_image_set_index()
# Default to roidb handler
if cfg.IS_RPN:
self._roidb_handler = self.gt_roidb
else:
self._roidb_handler = self.region_proposal_roidb
# num of subclasses
if image_set == 'training' and seq_name != 'trainval':
self._num_subclasses = 220 + 1
else:
self._num_subclasses = 472 + 1
# load the mapping for subcalss to class
if image_set == 'training' and seq_name != 'trainval':
filename = os.path.join(self._kitti_tracking_path, 'voxel_exemplars', 'train', 'mapping.txt')
else:
filename = os.path.join(self._kitti_tracking_path, 'voxel_exemplars', 'trainval', 'mapping.txt')
assert os.path.exists(filename), 'Path does not exist: {}'.format(filename)
mapping = np.zeros(self._num_subclasses, dtype=np.int)
with open(filename) as f:
for line in f:
words = line.split()
subcls = int(words[0])
mapping[subcls] = self._class_to_ind[words[1]]
self._subclass_mapping = mapping
self.config = {'top_k': 100000}
# statistics for computing recall
self._num_boxes_all = np.zeros(self.num_classes, dtype=np.int)
self._num_boxes_covered = np.zeros(self.num_classes, dtype=np.int)
self._num_boxes_proposal = 0
assert os.path.exists(self._kitti_tracking_path), \
'kitti_tracking path does not exist: {}'.format(self._kitti_tracking_path)
assert os.path.exists(self._data_path), \
'Path does not exist: {}'.format(self._data_path)
def image_path_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self.image_path_from_index(self.image_index[i])
def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
image_path = os.path.join(self._data_path, index + self._image_ext)
assert os.path.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def _load_image_set_index(self):
"""
Load the indexes listed in this dataset's image set file.
"""
kitti_train_nums = [154, 447, 233, 144, 314, 297, 270, 800, 390, 803, 294, \
373, 78, 340, 106, 376, 209, 145, 339, 1059, 837]
kitti_test_nums = [465, 147, 243, 257, 421, 809, 114, 215, 165, 349, 1176, \
774, 694, 152, 850, 701, 510, 305, 180, 404, 173, 203, \
436, 430, 316, 176, 170, 85, 175]
if self._seq_name == 'train' or self._seq_name == 'trainval':
assert self._image_set == 'training', 'Use train set or trainval set in testing'
if self._seq_name == 'train':
seq_index = [0, 1, 2, 3, 4, 5, 12, 13, 14, 15, 16]
else:
seq_index = range(0, 21)
# for each sequence
image_index = []
for i in xrange(len(seq_index)):
seq_idx = seq_index[i]
num = kitti_train_nums[seq_idx]
for j in xrange(num):
image_index.append('{:04d}/{:06d}'.format(seq_idx, j))
else:
# a single sequence
seq_num = int(self._seq_name)
if self._image_set == 'training':
num = kitti_train_nums[seq_num]
else:
num = kitti_test_nums[seq_num]
image_index = []
for i in xrange(num):
image_index.append('{:04d}/{:06d}'.format(seq_num, i))
return image_index
def _get_default_path(self):
"""
Return the default path where kitti_tracking is expected to be installed.
"""
return os.path.join(datasets.ROOT_DIR, 'data', 'KITTI_Tracking')
def gt_roidb(self):
"""
Return the database of ground-truth regions of interest.
"""
cache_file = os.path.join(self.cache_path, self.name + '_' + cfg.SUBCLS_NAME + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print('{} gt roidb loaded from {}'.format(self.name, cache_file))
return roidb
gt_roidb = [self._load_kitti_voxel_exemplar_annotation(index)
for index in self.image_index]
if cfg.IS_RPN:
# print out recall
for i in xrange(1, self.num_classes):
print('{}: Total number of boxes {:d}'.format(self.classes[i], self._num_boxes_all[i]))
print('{}: Number of boxes covered {:d}'.format(self.classes[i], self._num_boxes_covered[i]))
print('{}: Recall {:f}'.format(self.classes[i], float(self._num_boxes_covered[i]) / float(self._num_boxes_all[i])))
with open(cache_file, 'wb') as fid:
cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
print('wrote gt roidb to {}'.format(cache_file))
return gt_roidb
def _load_kitti_voxel_exemplar_annotation(self, index):
"""
Load image and bounding boxes info from txt file in the KITTI voxel exemplar format.
"""
if self._image_set == 'training' and self._seq_name != 'trainval':
prefix = 'train'
elif self._image_set == 'training':
prefix = 'trainval'
else:
prefix = ''
if prefix == '':
lines = []
lines_flipped = []
else:
filename = os.path.join(self._kitti_tracking_path, cfg.SUBCLS_NAME, prefix, index + '.txt')
if os.path.exists(filename):
print(filename)
# the annotation file contains flipped objects
lines = []
lines_flipped = []
with open(filename) as f:
for line in f:
words = line.split()
subcls = int(words[1])
is_flip = int(words[2])
if subcls != -1:
if is_flip == 0:
lines.append(line)
else:
lines_flipped.append(line)
else:
lines = []
lines_flipped = []
num_objs = len(lines)
# store information of flipped objects
assert (num_objs == len(lines_flipped)), 'The number of flipped objects is not the same!'
gt_subclasses_flipped = np.zeros((num_objs), dtype=np.int32)
for ix, line in enumerate(lines_flipped):
words = line.split()
subcls = int(words[1])
gt_subclasses_flipped[ix] = subcls
boxes = np.zeros((num_objs, 4), dtype=np.float32)
gt_classes = np.zeros((num_objs), dtype=np.int32)
gt_subclasses = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
subindexes = np.zeros((num_objs, self.num_classes), dtype=np.int32)
subindexes_flipped = np.zeros((num_objs, self.num_classes), dtype=np.int32)
for ix, line in enumerate(lines):
words = line.split()
cls = self._class_to_ind[words[0]]
subcls = int(words[1])
boxes[ix, :] = [float(n) for n in words[3:7]]
gt_classes[ix] = cls
gt_subclasses[ix] = subcls
overlaps[ix, cls] = 1.0
subindexes[ix, cls] = subcls
subindexes_flipped[ix, cls] = gt_subclasses_flipped[ix]
overlaps = scipy.sparse.csr_matrix(overlaps)
subindexes = scipy.sparse.csr_matrix(subindexes)
subindexes_flipped = scipy.sparse.csr_matrix(subindexes_flipped)
if cfg.IS_RPN:
if cfg.IS_MULTISCALE:
# compute overlaps between grid boxes and gt boxes in multi-scales
# rescale the gt boxes
boxes_all = np.zeros((0, 4), dtype=np.float32)
for scale in cfg.TRAIN.SCALES:
boxes_all = np.vstack((boxes_all, boxes * scale))
gt_classes_all = np.tile(gt_classes, len(cfg.TRAIN.SCALES))
# compute grid boxes
s = PIL.Image.open(self.image_path_from_index(index)).size
image_height = s[1]
image_width = s[0]
boxes_grid, _, _ = get_boxes_grid(image_height, image_width)
# compute overlap
overlaps_grid = bbox_overlaps(boxes_grid.astype(np.float), boxes_all.astype(np.float))
# check how many gt boxes are covered by grids
if num_objs != 0:
index = np.tile(range(num_objs), len(cfg.TRAIN.SCALES))
max_overlaps = overlaps_grid.max(axis = 0)
fg_inds = []
for k in xrange(1, self.num_classes):
fg_inds.extend(np.where((gt_classes_all == k) & (max_overlaps >= cfg.TRAIN.FG_THRESH[k-1]))[0])
index_covered = np.unique(index[fg_inds])
for i in xrange(self.num_classes):
self._num_boxes_all[i] += len(np.where(gt_classes == i)[0])
self._num_boxes_covered[i] += len(np.where(gt_classes[index_covered] == i)[0])
else:
assert len(cfg.TRAIN.SCALES_BASE) == 1
scale = cfg.TRAIN.SCALES_BASE[0]
feat_stride = 16
# faster rcnn region proposal
base_size = 16
ratios = [3.0, 2.0, 1.5, 1.0, 0.75, 0.5, 0.25]
scales = 2**np.arange(1, 6, 0.5)
anchors = generate_anchors(base_size, ratios, scales)
num_anchors = anchors.shape[0]
# image size
s = PIL.Image.open(self.image_path_from_index(index)).size
image_height = s[1]
image_width = s[0]
# height and width of the heatmap
height = np.round((image_height * scale - 1) / 4.0 + 1)
height = np.floor((height - 1) / 2 + 1 + 0.5)
height = np.floor((height - 1) / 2 + 1 + 0.5)
width = np.round((image_width * scale - 1) / 4.0 + 1)
width = np.floor((width - 1) / 2.0 + 1 + 0.5)
width = np.floor((width - 1) / 2.0 + 1 + 0.5)
# gt boxes
gt_boxes = boxes * scale
# 1. Generate proposals from bbox deltas and shifted anchors
shift_x = np.arange(0, width) * feat_stride
shift_y = np.arange(0, height) * feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
shift_x.ravel(), shift_y.ravel())).transpose()
# add A anchors (1, A, 4) to
# cell K shifts (K, 1, 4) to get
# shift anchors (K, A, 4)
# reshape to (K*A, 4) shifted anchors
A = num_anchors
K = shifts.shape[0]
all_anchors = (anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2)))
all_anchors = all_anchors.reshape((K * A, 4))
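# Clarifying note (added): with A anchors and K shift positions, the
# broadcast above produces a (K, A, 4) array that is flattened to (K*A, 4),
# i.e. every base anchor replicated at every feature-map cell.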
# compute overlap
overlaps_grid = bbox_overlaps(all_anchors.astype(np.float), gt_boxes.astype(np.float))
# check how many gt boxes are covered by anchors
if num_objs != 0:
max_overlaps = overlaps_grid.max(axis = 0)
fg_inds = []
for k in xrange(1, self.num_classes):
fg_inds.extend(np.where((gt_classes == k) & (max_overlaps >= cfg.TRAIN.FG_THRESH[k-1]))[0])
for i in xrange(self.num_classes):
self._num_boxes_all[i] += len(np.where(gt_classes == i)[0])
self._num_boxes_covered[i] += len(np.where(gt_classes[fg_inds] == i)[0])
return {'boxes' : boxes,
'gt_classes': gt_classes,
'gt_subclasses': gt_subclasses,
'gt_subclasses_flipped': gt_subclasses_flipped,
'gt_overlaps': overlaps,
'gt_subindexes': subindexes,
'gt_subindexes_flipped': subindexes_flipped,
'flipped' : False}
def region_proposal_roidb(self):
"""
Return the database of regions of interest.
Ground-truth ROIs are also included.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path,
self.name + '_' + cfg.SUBCLS_NAME + '_' + cfg.REGION_PROPOSAL + '_region_proposal_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print('{} roidb loaded from {}'.format(self.name, cache_file))
return roidb
if self._image_set != 'testing':
gt_roidb = self.gt_roidb()
print('Loading region proposal network boxes...')
if self._seq_name == 'trainval':
model = cfg.REGION_PROPOSAL + '_trainval/'
else:
model = cfg.REGION_PROPOSAL + '_train/'
rpn_roidb = self._load_rpn_roidb(gt_roidb, model)
print('Region proposal network boxes loaded')
roidb = datasets.imdb.merge_roidbs(rpn_roidb, gt_roidb)
else:
print('Loading region proposal network boxes...')
model = cfg.REGION_PROPOSAL + '_trainval/'
roidb = self._load_rpn_roidb(None, model)
print('Region proposal network boxes loaded')
print('{} region proposals per image'.format(self._num_boxes_proposal / len(self.image_index)))
with open(cache_file, 'wb') as fid:
cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
print('wrote roidb to {}'.format(cache_file))
return roidb
def _load_rpn_roidb(self, gt_roidb, model):
# set the prefix
prefix = model
box_list = []
for index in self.image_index:
filename = os.path.join(self._kitti_tracking_path, 'region_proposals', prefix, self._image_set, index + '.txt')
assert os.path.exists(filename), \
'RPN data not found at: {}'.format(filename)
print(filename)
raw_data = np.loadtxt(filename, dtype=float)
if len(raw_data.shape) == 1:
if raw_data.size == 0:
raw_data = raw_data.reshape((0, 5))
else:
raw_data = raw_data.reshape((1, 5))
x1 = raw_data[:, 0]
y1 = raw_data[:, 1]
x2 = raw_data[:, 2]
y2 = raw_data[:, 3]
score = raw_data[:, 4]
inds = np.where((x2 > x1) & (y2 > y1))[0]
raw_data = raw_data[inds,:4]
self._num_boxes_proposal += raw_data.shape[0]
box_list.append(raw_data)
return self.create_roidb_from_box_list(box_list, gt_roidb)
def evaluate_detections(self, all_boxes, output_dir):
# load the mapping for subcalss the alpha (viewpoint)
if self._image_set == 'training' and self._seq_name != 'trainval':
filename = os.path.join(self._kitti_tracking_path, 'voxel_exemplars', 'train', 'mapping.txt')
else:
filename = os.path.join(self._kitti_tracking_path, 'voxel_exemplars', 'trainval', 'mapping.txt')
assert os.path.exists(filename), 'Path does not exist: {}'.format(filename)
mapping = np.zeros(self._num_subclasses, dtype=np.float)
with open(filename) as f:
for line in f:
words = line.split()
subcls = int(words[0])
mapping[subcls] = float(words[3])
# for each image
for im_ind, index in enumerate(self.image_index):
filename = os.path.join(output_dir, index[5:] + '.txt')
print('Writing kitti_tracking results to file ' + filename)
with open(filename, 'wt') as f:
# for each class
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
dets = all_boxes[cls_ind][im_ind]
if len(dets) == 0:
continue
for k in xrange(dets.shape[0]):
subcls = int(dets[k, 5])
cls_name = self.classes[self.subclass_mapping[subcls]]
assert (cls_name == cls), 'subclass not in class'
alpha = mapping[subcls]
f.write('{:s} -1 -1 {:f} {:f} {:f} {:f} {:f} -1 -1 -1 -1 -1 -1 -1 {:.32f}\n'.format(\
cls, alpha, dets[k, 0], dets[k, 1], dets[k, 2], dets[k, 3], dets[k, 4]))
# write detection results into one file
def evaluate_detections_one_file(self, all_boxes, output_dir):
# load the mapping for subcalss the alpha (viewpoint)
if self._image_set == 'training' and self._seq_name != 'trainval':
filename = os.path.join(self._kitti_tracking_path, 'voxel_exemplars', 'train', 'mapping.txt')
else:
filename = os.path.join(self._kitti_tracking_path, 'voxel_exemplars', 'trainval', 'mapping.txt')
assert os.path.exists(filename), 'Path does not exist: {}'.format(filename)
mapping = np.zeros(self._num_subclasses, dtype=np.float)
with open(filename) as f:
for line in f:
words = line.split()
subcls = int(words[0])
mapping[subcls] = float(words[3])
# open results file
filename = os.path.join(output_dir, self._seq_name+'.txt')
print('Writing all kitti_tracking results to file ' + filename)
with open(filename, 'wt') as f:
# for each image
for im_ind, index in enumerate(self.image_index):
# for each class
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
dets = all_boxes[cls_ind][im_ind]
if len(dets) == 0:
continue
for k in xrange(dets.shape[0]):
subcls = int(dets[k, 5])
cls_name = self.classes[self.subclass_mapping[subcls]]
assert (cls_name == cls), 'subclass not in class'
alpha = mapping[subcls]
f.write('{:d} -1 {:s} -1 -1 {:f} {:f} {:f} {:f} {:f} -1 -1 -1 -1000 -1000 -1000 -10 {:f}\n'.format(\
im_ind, cls, alpha, dets[k, 0], dets[k, 1], dets[k, 2], dets[k, 3], dets[k, 4]))
def evaluate_proposals(self, all_boxes, output_dir):
# for each image
for im_ind, index in enumerate(self.image_index):
filename = os.path.join(output_dir, index[5:] + '.txt')
print('Writing kitti_tracking results to file ' + filename)
with open(filename, 'wt') as f:
# for each class
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
dets = all_boxes[cls_ind][im_ind]
if len(dets) == 0:
continue
for k in xrange(dets.shape[0]):
f.write('{:f} {:f} {:f} {:f} {:.32f}\n'.format(\
dets[k, 0], dets[k, 1], dets[k, 2], dets[k, 3], dets[k, 4]))
def evaluate_proposals_msr(self, all_boxes, output_dir):
# for each image
for im_ind, index in enumerate(self.image_index):
filename = os.path.join(output_dir, index + '.txt')
print('Writing kitti_tracking results to file ' + filename)
with open(filename, 'wt') as f:
dets = all_boxes[im_ind]
if len(dets) == 0:
continue
for k in xrange(dets.shape[0]):
f.write('{:f} {:f} {:f} {:f} {:.32f}\n'.format(dets[k, 0], dets[k, 1], dets[k, 2], dets[k, 3], dets[k, 4]))
if __name__ == '__main__':
d = datasets.kitti_tracking('training', '0000')
res = d.roidb
from IPython import embed; embed()
|
py | 1a5083ebbebdd16af155ac7533545ba295527189 | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Abstract base class for tests of `PrivacyAccountant` classes.
Checks that a class derived from `PrivacyAccountant` has the correct behavior
for standard `DpEvent` classes.
"""
from typing import Collection
from absl.testing import absltest
from dp_accounting import dp_event
from dp_accounting import privacy_accountant
class PrivacyAccountantTest(absltest.TestCase):
def _make_test_accountants(
self) -> Collection[privacy_accountant.PrivacyAccountant]:
"""Makes a list of accountants to test.
Subclasses should define this to return a list of accountants to be tested.
Returns:
A list of accountants to test.
"""
return []
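# A minimal sketch (added) of what a subclass might return; the accountant
# class name is hypothetical:
#
#   def _make_test_accountants(self):
#     return [rdp_privacy_accountant.RdpAccountant()]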
def test_make_test_accountants(self):
self.assertNotEmpty(self._make_test_accountants())
def test_unsupported(self):
class UnknownDpEvent(dp_event.DpEvent):
pass
for accountant in self._make_test_accountants():
for unsupported in [dp_event.UnsupportedDpEvent(), UnknownDpEvent()]:
self.assertFalse(accountant.supports(unsupported))
self.assertFalse(
accountant.supports(dp_event.SelfComposedDpEvent(unsupported, 10)))
self.assertFalse(
accountant.supports(dp_event.ComposedDpEvent([unsupported])))
def test_no_events(self):
for accountant in self._make_test_accountants():
self.assertEqual(accountant.get_epsilon(1e-12), 0)
self.assertEqual(accountant.get_epsilon(0), 0)
self.assertEqual(accountant.get_epsilon(1), 0)
try:
self.assertEqual(accountant.get_delta(1e-12), 0)
self.assertEqual(accountant.get_delta(0), 0)
self.assertEqual(accountant.get_delta(float('inf')), 0)
except NotImplementedError:
# Implementing `get_delta` is optional.
pass
def test_no_op(self):
for accountant in self._make_test_accountants():
event = dp_event.NoOpDpEvent()
self.assertTrue(accountant.supports(event))
accountant._compose(event)
self.assertEqual(accountant.get_epsilon(1e-12), 0)
self.assertEqual(accountant.get_epsilon(0), 0)
self.assertEqual(accountant.get_epsilon(1), 0)
try:
self.assertEqual(accountant.get_delta(1e-12), 0)
self.assertEqual(accountant.get_delta(0), 0)
self.assertEqual(accountant.get_delta(float('inf')), 0)
except NotImplementedError:
# Implementing `get_delta` is optional.
pass
def test_non_private(self):
for accountant in self._make_test_accountants():
event = dp_event.NonPrivateDpEvent()
self.assertTrue(accountant.supports(event))
accountant._compose(event)
self.assertEqual(accountant.get_epsilon(0.99), float('inf'))
self.assertEqual(accountant.get_epsilon(0), float('inf'))
self.assertEqual(accountant.get_epsilon(1), float('inf'))
try:
self.assertEqual(accountant.get_delta(100), 1)
self.assertEqual(accountant.get_delta(0), 1)
self.assertEqual(accountant.get_delta(float('inf')), 1)
except NotImplementedError:
# Implementing `get_delta` is optional.
pass
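# Illustrative sketch only (not part of the original file): a hypothetical
# concrete subclass, kept as comments. `MyAccountant` is an assumed placeholder
# for a real accountant implementation from dp_accounting.
#
# class MyAccountantTest(PrivacyAccountantTest):
#
#   def _make_test_accountants(self):
#     return [MyAccountant()]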
|
py | 1a508469425ccd4be1bb12270af993cc485e0323 | import DAO
def show_my_courses(student, course_list):
print('\nMy Courses:')
print('#\tCOURSE NAME\tINSTRUCTOR NAME')
attending_dao = DAO.AttendingDAO()
my_courses = attending_dao.get_student_courses(course_list, student.get_email())
i = 1
for course in my_courses:
print(f'{i}\t{course.get_name()}\t{course.get_instructor()}')
i+=1
def show_all_courses(course_list):
print('\nAll Courses:')
print('ID\tCOURSE NAME\tINSTRUCTOR NAME')
for course in course_list:
print(f'{course.get_id()}\t{course.get_name()}\t{course.get_instructor()}')
def main():
print('Welcome!')
entry=None
while entry!='2':
entry = input('\n1. Current Student\n2. New Student\n3. Quit\nPlease, enter 1, 2 or 3: ')
if entry=='1':
student_dao = DAO.StudentDAO()
email = input('\nEnter Your Email: ')
pw = input('Enter Your Password: ')
if student_dao.validate_user(email, pw):
course_dao = DAO.CourseDAO()
attending_dao = DAO.AttendingDAO()
student = student_dao.get_student_by_email(email)
course_list = course_dao.get_courses()
print(type(student))
show_my_courses(student, course_list)
print('\nWhat Would You Like To Do?')
while entry!='2':
entry = input('\n1. Register To Course\n2. Logout\nPlease, enter 1 or 2: ')
if entry=='1':
show_all_courses(course_list)
course_id = input('\nSelect Course By ID Number: ')
print("\nAttempting to Register...")
if attending_dao.register_student_to_course(email, course_id, course_list):
show_my_courses(student, course_list)
elif entry=='2':
print('\nYou Have Been Logged Out.')
else:
print('\nInvalid Option...')
else:
print('\nWrong Credentials!')
elif entry=='2':
print("Welcome to the school!")
student_dao = DAO.StudentDAO()
email = input('Please provide your email : ')
if not student_dao.get_student_by_email(email):
name = input("What is your full name? : ")
password = input("What would you like your password to be? : ")
student_dao.add_new_student(email, name, password)
entry = '-1'
                continue
else:
print("That email is already taken")
elif entry=='3':
print("Programming is closing, ")
break;
else:
print('Invalid Option...')
print('\nClosing Program. Goodbye.')
if __name__=='__main__':
main()
|
py | 1a5084aba21c8c60d67577e2f0050a295e35ac59 | from django.contrib.auth.models import User
from django.shortcuts import render, render_to_response
from django.http import HttpResponse, HttpResponseRedirect
from django.views.decorators.csrf import csrf_exempt
from suppliers.models import *
from django.db.models import Q
import json
import sys
from django.core.serializers.json import DjangoJSONEncoder
from django.core.exceptions import ValidationError
from datetime import datetime
@csrf_exempt
def getSuppliers(request):
if not request.user.is_authenticated():
return HttpResponseRedirect('/login')
else:
try:
suppliers = suppliers_accounts.objects.all().order_by('supplier_name', 'supplier_service')
if 'pattern' in request.POST.keys():
pattern = request.POST['pattern']
suppliers = suppliers.filter(Q(supplier_name__icontains=pattern) | Q(supplier_type__icontains=pattern) | Q(supplier_service__icontains=pattern) | Q(supplier_location__icontains=pattern) | Q(supplier_tel__icontains=pattern) | Q(supplier_email__icontains=pattern) | Q(supplier_tin__icontains=pattern) | Q(supplier_website__icontains=pattern) | Q(supplier_bank_name__icontains=pattern) | Q(suppler_bank_account_num__icontains=pattern)).order_by('supplier_name', 'supplier_service')
arr = []
for supplier in suppliers:
fields = {}
fields['id'] = supplier.id,
fields['name'] = supplier.supplier_name,
fields['type'] = supplier.supplier_type,
fields['service'] = supplier.supplier_service,
fields['location'] = supplier.supplier_location,
fields['telephone'] = supplier.supplier_tel,
fields['email'] = supplier.supplier_email,
fields['tin'] = supplier.supplier_tin,
fields['website'] = supplier.supplier_website,
fields['bank_name'] = supplier.supplier_bank_name,
fields['bank_account'] = supplier.suppler_bank_account_num,
fields['notes'] = supplier.supp_notes
mapped = {'fields': fields}
print mapped
arr.append(mapped)
response = {'response': arr}
return HttpResponse(json.dumps(response))
        except Exception:
            pass  # swallow errors and return an empty response below
return HttpResponse('')
@csrf_exempt
def addSupplier(request):
if not request.user.is_authenticated():
return HttpResponseRedirect('login')
else:
if request.method == 'POST':
supplier = suppliers_accounts()
supplier.supplier_name = request.POST.get('supp_name')
supplier.supplier_type = request.POST.get('supp_type')
supplier.supplier_service = request.POST.get('supp_service')
supplier.supplier_location = request.POST.get('supp_location')
supplier.supplier_tel = request.POST.get('supp_tele')
supplier.supplier_email = request.POST.get('supp_mail')
supplier.supplier_tin = request.POST.get('supp_tin')
supplier.supplier_website = request.POST.get('supp_website')
supplier.supplier_bank_name = request.POST.get('supp_bank_name')
supplier.suppler_bank_account_num = request.POST.get('supp_bank_account')
supplier.supp_notes = request.POST.get('supp_notes')
supplier.save()
return HttpResponseRedirect('/')
@csrf_exempt
def editSupplier(request):
if not request.user.is_authenticated():
return HttpResponseRedirect('login')
else:
if request.method == 'POST':
supp_prk = request.POST.get('edsupp_prik')
edsupplier = suppliers_accounts.objects.get(id=supp_prk)
edsupplier.supplier_name = request.POST.get('edsupp_name')
edsupplier.supplier_type = request.POST.get('edsupp_type')
edsupplier.supplier_service = request.POST.get('edsupp_service')
edsupplier.supplier_location = request.POST.get('edsupp_location')
edsupplier.supplier_tel = request.POST.get('edsupp_tele')
edsupplier.supplier_email = request.POST.get('edsupp_mail')
edsupplier.supplier_tin = request.POST.get('edsupp_tin')
edsupplier.supplier_website = request.POST.get('edsupp_website')
edsupplier.supplier_bank_name = request.POST.get('edsupp_bank_name')
edsupplier.suppler_bank_account_num = request.POST.get('edsupp_bank_account')
edsupplier.supp_notes = request.POST.get('edsupp_notes')
edsupplier.save()
return HttpResponseRedirect('/')
@csrf_exempt
def removeSupplier(request):
if not request.user.is_authenticated():
return HttpResponseRedirect('/login')
else:
if request.method == 'POST':
supplierprik = request.POST.get('supplierkey')
supplier = suppliers_accounts.objects.get(id=supplierprik)
supplier.delete()
return HttpResponseRedirect('/')
@csrf_exempt
def getSuppliersInvoices(request):
if not request.user.is_authenticated():
return HttpResponseRedirect('/login')
else:
try:
supplier_invoice = suppliersinvoice.objects.all().order_by('invoiceID', 'supplier_name')
if 'pattern' in request.POST.keys():
pattern = request.POST['pattern']
supplier_invoice = supplier_invoice.filter(Q(invoiceID__icontains=pattern) | Q(supplier_name__icontains=pattern) | Q(SDCID__icontains=pattern) | Q(reference__icontains=pattern) | Q(description__icontains=pattern) | Q(status__icontains=pattern) | Q(Package__icontains=pattern) | Q(amount_tobe_paid__icontains=pattern) | Q(amount_paid__icontains=pattern) | Q(amount_remaining__icontains=pattern) | Q(supp_notes__icontains=pattern) | Q(supp_service_id__icontains=pattern)).order_by('invoiceID', 'supplier_name')
arr = []
for invoice in supplier_invoice:
fields = {}
fields['id'] = invoice.id
fields['invoiceID'] = invoice.invoiceID
fields['I_supplier_name'] = invoice.supplier_name
fields['I_SDCID'] = invoice.SDCID
fields['I_reference'] = invoice.reference
fields['I_description'] = invoice.description
fields['I_status'] = invoice.status
fields['I_package'] = invoice.Package
fields['I_amount_tobepaid'] = invoice.amount_tobe_paid
fields['I_amountpaid'] = invoice.amount_paid
fields['I_amountremaining'] = invoice.amount_remaining
fields['I_invoicedate'] = invoice.invoice_date.__format__('%d-%m-%Y')
fields['I_invoiceduedate'] = invoice.invoice_due_date.__format__('%d-%m-%Y')
fields['I_suppnotes'] = invoice.supp_notes
fields['I_supp_service_id'] = invoice.supp_service_id
                fields['I_receivedon'] = invoice.received_on.__format__('%d-%m-%Y')  # match the received_on model field
fields['I_suppliers_id'] = 1
mapped = {'fields': fields}
print mapped
arr.append(mapped)
response = {'response': arr}
return HttpResponse(json.dumps(response))
        except Exception:
            pass  # swallow errors and return an empty response below
# print 'Unexpected error', sys.exc_info()[0]
# raise
return HttpResponse('')
@csrf_exempt
def newSupplierInvoice(request):
if not request.user.is_authenticated():
return HttpResponseRedirect('login')
else:
try:
if request.method == 'POST':
inv = suppliersinvoice()
inv.invoiceID = request.POST.get('supplierInvId')
inv.supplier_name = request.POST.get('supplierName')
inv.SDCID = request.POST.get('supplierSDCID')
inv.invoice_date = datetime.strptime(request.POST.get('supplierInvDate'),'%d/%m/%Y').__format__('%Y-%m-%d')
inv.invoice_due_date = datetime.strptime(request.POST.get('supplierInvDueDate'),'%d/%m/%Y').__format__('%Y-%m-%d')
inv.reference = request.POST.get('supplierReference')
inv.status = request.POST.get('supplierStatus')
inv.amount_tobe_paid = request.POST.get('supplierAmtToPay')
inv.amount_paid = request.POST.get('supplierAmtPaid')
inv.amount_remaining = request.POST.get('supplierRemAmt')
inv.Package = request.POST.get('supplierPackage')
inv.received_on = datetime.strptime(request.POST.get('supplierInvReceivedOn'),'%d/%m/%Y').__format__('%Y-%m-%d')
inv.supp_service_id = request.POST.get('supplierServiceID')
inv.description = request.POST.get('supplierInvDescri')
inv.supp_notes = request.POST.get('supplierNotes')
inv.suppliers_id = 2
inv.save()
                print 'supplier invoice saved successfully'
return HttpResponseRedirect('/')
except:
print "Unexpected error:", sys.exc_info()[0]
raise
return HttpResponse('')
@csrf_exempt
def editSupplierInvoice(request):
if not request.user.is_authenticated():
return HttpResponseRedirect('login')
else:
try:
if request.method == 'POST':
inv_identity = request.POST.get('supp_inv_ID')
inv = suppliersinvoice.objects.get(id=inv_identity)
inv.invoiceID = request.POST.get('supplierInvId')
inv.supplier_name = request.POST.get('supplierName')
inv.SDCID = request.POST.get('supplierSDCID')
inv.invoice_date = datetime.strptime(request.POST.get('supplierInvDate'),'%d-%m-%Y').__format__('%Y-%m-%d')
inv.invoice_due_date = datetime.strptime(request.POST.get('supplierInvDueDate'),'%d-%m-%Y').__format__('%Y-%m-%d')
inv.reference = request.POST.get('supplierReference')
inv.status = request.POST.get('supplierStatus')
inv.amount_tobe_paid = request.POST.get('supplierAmtToPay')
inv.amount_paid = request.POST.get('supplierAmtPaid')
inv.amount_remaining = request.POST.get('supplierRemAmt')
inv.Package = request.POST.get('supplierPackage')
inv.received_on = datetime.strptime(request.POST.get('supplierInvReceivedOn'),'%d-%m-%Y').__format__('%Y-%m-%d')
inv.supp_service_id = request.POST.get('supplierServiceID')
inv.description = request.POST.get('supplierInvDescri')
inv.supp_notes = request.POST.get('supplierNotes')
inv.suppliers_id = 2
inv.save()
                print 'supplier invoice updated successfully'
return HttpResponseRedirect('/')
except:
print "Unexpected error:", sys.exc_info()[0]
raise
return HttpResponse('')
@csrf_exempt
def removeSupplierInvoice(request):
if not request.user.is_authenticated():
return HttpResponseRedirect('/login')
else:
if request.method == 'POST':
supplierinv_prik = request.POST.get('supplierinv_key')
supplier = suppliersinvoice.objects.get(id=supplierinv_prik)
supplier.delete()
return HttpResponseRedirect('/')
|
py | 1a50852ba2b88516b2ac246c27069350de8bfe91 | # -*- coding: utf-8 -*-
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404, redirect
from django.views.generic.list_detail import object_list
from faq.models import Topic, Question
def _fragmentify(model, slug, url=None):
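    """
    Return a permanent redirect to ``url`` (default: the topic list) with
    ``#slug`` appended as a fragment, after checking the object is published.
    """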
get_object_or_404(model.objects.published().filter(slug=slug))
url = url or reverse('faq-topic-list')
fragment = '#%s' % slug
return redirect(url + fragment, permanent=True)
def topic_list(request):
"""
A list view of all published Topics
Templates:
:template:`faq/topic_list.html`
Context:
topic_list
A list of all published :model:`faq.Topic` objects that
relate to the current :model:`sites.Site`.
"""
return object_list(request, queryset=Topic.objects.published(),
template_object_name='topic')
def topic_detail(request, slug):
"""
A detail view of a Topic
Simply redirects to :view:`faq.views.topic_list` with the addition of
a fragment identifier that links to the given :model:`faq.Topic`.
E.g., ``/faq/#topic-slug``.
"""
return _fragmentify(Topic, slug)
def question_detail(request, topic_slug, slug):
"""
A detail view of a Question.
Simply redirects to :view:`faq.views.topic_list` with the addition of
a fragment identifier that links to the given :model:`faq.Question`.
E.g. ``/faq/#question-slug``.
"""
return _fragmentify(Question, slug)
|
py | 1a508687dac6f176e0ecc52dbb9ad6db340f8a82 | '''
Copyright 2017 The Regents of the University of Colorado
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
'''
events_mapping.py
Python Version: 3.6.3
Queries the study by the events_mapping table and populates OHDSI tables Death, visit_occurrence and procedure_occurrence.
This is research code for demonstration purposes only.
croeder 8/2017 [email protected]
'''
import logging
from HeartData import migrate
#import datetime
#import sys
#import re
import psycopg2
from psycopg2.extras import RealDictCursor
from HeartData.person import BasePerson
from ui.models import Concept
logger = logging.getLogger(__name__)
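# sentinel used in events_mapping rows for columns that have no applicable value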
NULL_PLACEHOLDER='no_column'
def _read_event_mappings(con, study_id):
event_mappings={}
cur = con.cursor(cursor_factory=RealDictCursor)
cur.execute( ("SELECT study_id, from_table, from_column, to_table, value_vocabulary_id, value_concept_code, addl_column, addl_value, from_date_column, where_clause"
" FROM events_mapping "
" WHERE study_id = %s"), (study_id,) )
rows = cur.fetchall()
cur.close()
return rows
def populate(con, person_id_list, study):
""" populate the ohdsi person table.
Be wary of the fact that the list of person_ids is a list of ohdsi_ids,
and that when you query study tables those ids need converted.
"""
personObj = BasePerson.factory(study)
id_col = personObj.get_id_field_name()
cur = con.cursor()
event_mappings = _read_event_mappings(con, study.study_id)
procedure_id=0
visit_id=0
for row in event_mappings:
logger.info("XX events_mapping.populate() %s", row)
from_table_name=row['from_table']
prefix = from_table_name.split('_')[0]
for person_id in person_id_list:
query=""
# QUERY FOR THE VALUES, BEST SPECIFIC? TODO
if (row['from_column'] != NULL_PLACEHOLDER):
# a value and a date, like the Death table
if (row['where_clause'] != NULL_PLACEHOLDER) :
query = ("SELECT {0}, {1} from {2} where " + id_col + " = %s and ( {3} )").format(row['from_date_column'], row['from_column'], row['from_table'], row['where_clause'])
#logger.debug("QUERY1:%s %s", query, person_id)
logger.info("QUERY1:%s %s", query, person_id)
cur.execute(query, (personObj.convert_person_id_to_study(person_id),))
else:
query = ("SELECT {0}, {1} from {2} where " + id_col + " = %s").format(row['from_date_column'], row['from_column'], row['from_table'])
#logger.debug("QUERY2: %s, %s", query, row)
logger.info("QUERY2: %s, %s", query, row)
cur.execute(query, (personObj.convert_person_id_to_study(person_id),))
else:
# just a date, like the Occurrence tables:
if (row['where_clause'] != NULL_PLACEHOLDER) :
query = ("SELECT {0} from {1} where " + id_col + " = %s and ( {2} )").format(row['from_date_column'], row['from_table'], row['where_clause'])
#logger.debug("QUERY3: %s %s", query, row)
logger.info("QUERY3: %s %s", query, row)
cur.execute(query, (personObj.convert_person_id_to_study(person_id),))
else:
query = ("SELECT {0} from {1} where " + id_col + " = %s").format(row['from_date_column'], row['from_table'])
#logger.debug("QUERY4: %s %s", query, row)
logger.info("QUERY4: %s %s", query, row)
cur.execute(query, (personObj.convert_person_id_to_study(person_id),))
value_rows = cur.fetchall()
logger.debug("events.populate() from:%s to:%s rows:%d", from_table_name, row['to_table'], len(value_rows))
# LOOKUP the id (vocab, concept) from the mappings row
concept_id = Concept.objects.get(vocabulary_id=row['value_vocabulary_id'], concept_code=row['value_concept_code']).concept_id
# INSERT
if (len(value_rows) == 0):
logger.warn("no rows back from %s person:%s, with %s", query, person_id, row)
elif (concept_id == None) :
logger.error("No concept %s, %s", row['value_vocabulary_id'], row['value_concept_code'])
else:
for value_row in value_rows:
if value_row[0] != None :
logger.debug("VALUE ROWS pid:%s query:%s value:%s num-rows:%d", person_id, query, value_row, len(value_rows))
to_table_name=row['to_table']
# sometimes this is a date, sometimes a string. Use string, the lowest-common denominator, works for all sources
the_date_value=''
try:
date_time_string = str(value_row[0])
(year, month, day) = date_time_string.split(' ')[0].split('-')
the_date_value = "{0}/{1}/{2}".format(month, day, year)
except:
logger.error("populate raised on {}".format(date_time_string))
the_date_value = date_time_string
# INSERT DEATH
if to_table_name == 'Death':
statement = "INSERT into death (person_id, death_date, death_datetime, death_type_concept_id, cause_concept_id)" \
+ " values ( %s, %s, %s, %s, %s)"
logger.debug("death: %s, %s, %s, %s, %s %s %s %s); ",
statement, person_id, the_date_value, row['addl_value'], concept_id,
row['value_vocabulary_id'], row['value_concept_code'], value_row[0] )
cur.execute(statement, (person_id, the_date_value, the_date_value, row['addl_value'], concept_id))
# INSERT VISIT OCCURRENCE
elif to_table_name == 'visit_occurrence':
statement = ("INSERT into visit_occurrence "
"(visit_occurrence_id, person_id, visit_concept_id, visit_start_date, "
" visit_start_datetime, visit_end_date, visit_type_concept_id)"
" values ( %s, %s, %s, %s, %s, %s, %s)")
logger.debug("visit %s %s %s %s %s %s %s %s", statement, visit_id, person_id, concept_id, the_date_value,
row['addl_value'], row['value_vocabulary_id'], row['value_concept_code'])
cur.execute(statement, (visit_id, person_id, concept_id, the_date_value, the_date_value, the_date_value, row['addl_value']))
visit_id += 1
# INSERT PROCEDURE OCCURRENCE
elif to_table_name == 'procedure_occurrence':
statement = ("INSERT into procedure_occurrence"
" (procedure_occurrence_id, person_id, procedure_concept_id, "
" procedure_date, procedure_datetime, procedure_type_concept_id)"\
" values ( %s, %s, %s, %s, %s, %s)")
logger.debug("proc: %s %s %s %s *%s* %s %s %s %s", statement, procedure_id, person_id, concept_id,
the_date_value, row['addl_value'], row['value_vocabulary_id'], row['value_concept_code'], value_row[0] )
cur.execute(statement, (procedure_id, person_id, concept_id, the_date_value, the_date_value, row['addl_value']))
procedure_id += 1
else:
logger.error("unknown table name %s in events.populate() %s", to_table_name, row)
else:
logger.warn("None value in events_mapping.populate() with %s", value_row)
value_rows=None
cur.close()
con.commit()
|
py | 1a50869ff40561255c4e6fd20574bf871ac2c165 | import unittest
import requests
class UnitTestsIbanAPI(unittest.TestCase):
# https://ibanapi.com/get-api
def test_get_get_api(self):
print('test_get_get_api')
params = (
('api_key', 'API_KEY'),
)
iban = "EE471000001020145685"
url = "https://api.ibanapi.com/v1/validate/" + iban
response = requests.get(url, params=params)
print(response.text)
if __name__ == '__main__':
unittest.main()
|
py | 1a50870947659c3f85f6f5686a3d1561b3ed712f | """ formatting.py """
import math
from enum import Enum, unique
from typing import Dict, Iterable, List
from .layer_info import LayerInfo
@unique
class Verbosity(Enum):
""" Contains verbosity levels. """
QUIET, DEFAULT, VERBOSE = 0, 1, 2
class FormattingOptions:
""" Class that holds information about formatting the table output. """
def __init__(
self,
max_depth: int,
verbose: int,
col_names: Iterable[str],
col_width: int,
):
self.max_depth = max_depth
self.verbose = verbose
self.col_names = col_names
self.col_width = col_width
self.layer_name_width = 40
def set_layer_name_width(
self, summary_list: List[LayerInfo], align_val: int = 5
) -> None:
"""
Set layer name width by taking the longest line length and rounding up to
the nearest multiple of align_val.
"""
max_length = 0
for info in summary_list:
depth_indent = info.depth * align_val + 1
max_length = max(max_length, len(str(info)) + depth_indent)
if max_length >= self.layer_name_width:
self.layer_name_width = math.ceil(max_length / align_val) * align_val
def get_total_width(self) -> int:
""" Calculate the total width of all lines in the table. """
return len(tuple(self.col_names)) * self.col_width + self.layer_name_width
def format_row(self, layer_name: str, row_values: Dict[str, str]) -> str:
""" Get the string representation of a single layer of the model. """
info_to_use = [row_values.get(row_type, "") for row_type in self.col_names]
new_line = f"{layer_name:<{self.layer_name_width}} "
for info in info_to_use:
new_line += f"{info:<{self.col_width}} "
return new_line.rstrip() + "\n"
|
py | 1a508738f4b4fe1c3b100c4317d73fb7401f7358 | import time
import torch
import functools
import argparse
import pyaudio
import wave
import torch.nn.functional as F
from utils import data
from ctcdecode import CTCBeamDecoder
from data.utility import add_arguments, print_arguments
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
parser.add_argument("--model_path",
default="save_model/model.pth",
type=str,
help="trained model path. (default: %(default)s)")
parser.add_argument("--lm_path",
default="lm/zh_giga.no_cna_cmn.prune01244.klm",
type=str,
help="language model path. (default: %(default)s)")
parser.add_argument("--record_time",
default=5,
type=int,
help="record time for second. (default: %(default)s)")
args = parser.parse_args()
print_arguments(args)
alpha = 0.8
beta = 0.3
cutoff_top_n = 40
cutoff_prob = 1.0
beam_width = 32
num_processes = 4
blank_index = 0
model = torch.load(args.model_path)
model = model.cuda()
model.eval()
decoder = CTCBeamDecoder(model.vocabulary,
args.lm_path,
alpha,
beta,
cutoff_top_n,
cutoff_prob,
beam_width,
num_processes,
blank_index)
def translate(vocab, out, out_len):
return "".join([vocab[x] for x in out[0:out_len]])
def predict(wav_path):
wav = data.load_audio(wav_path)
spec = data.spectrogram(wav)
spec.unsqueeze_(0)
with torch.no_grad():
spec = spec.cuda()
y = model.cnn(spec)
y = F.softmax(y, 1)
y_len = torch.tensor([y.size(-1)])
y = y.permute(0, 2, 1) # B * T * V
print("decoding...")
out, score, offset, out_len = decoder.decode(y, y_len)
return translate(model.vocabulary, out[0][0], out_len[0][0])
def save_wave_file(filename, data):
wf = wave.open(filename, "wb")
wf.setnchannels(CHANNELS)
wf.setsampwidth(SAMPWIDTH)
wf.setframerate(RATE)
wf.writeframes(b"".join(data))
wf.close()
def record(wav_path, time=5):
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paInt16,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
my_buf = []
print("录音中(%ds)" % time)
for i in range(0, int(RATE / CHUNK * time)):
data = stream.read(CHUNK)
my_buf.append(data)
print(".", end="", flush=True)
save_wave_file(wav_path, my_buf)
stream.close()
if __name__ == '__main__':
    # recording format
RATE = 16000
CHUNK = 1024
CHANNELS = 1
SAMPWIDTH = 2
    # temporary path for saving the recording
save_path = 'dataset/record.wav'
while True:
_ = input("按下回车键开机录音,录音%s秒中:" % args.record_time)
record(save_path, time=args.record_time)
start = time.time()
result_text = predict(save_path)
end = time.time()
print("识别时间:%dms,识别结果:%s" % (round((end - start) * 1000), result_text))
|
py | 1a50885e76ca6297fe2088d710c1efc6e451dc20 | """
Put files into the LeoShadow subfolder.
Usage:
1. convert.py <filename> LeoShadow x
This copy file <filename> into the subfolder leoShadow,
adds the prefix, and creates an empty file at the
current location.
After restarting Leo, <filename> will be re-created without
annotations.
2. convert -all LeoShadow x
Apply 'convert.py <filename> LeoShadow x' to all .py files.
Must be run in the directory with the .py files.
x is the prefix specified the for mod_shadow plugin.
"""
import os, sys, shutil
def convert(filename, leoFolder, prefix):
if not os.path.exists(leoFolder):
os.mkdir(leoFolder)
assert os.path.exists(leoFolder)
else:
assert os.path.isdir(leoFolder)
dir, name = os.path.split(filename)
newname = os.path.join(dir, leoFolder, prefix + name)
if os.path.exists(newname):
return
print("Putting", filename, "into the shadow folder", leoFolder)
os.rename(filename, newname)
f = open(filename, "w")
f.close()
if __name__ == '__main__':
scriptname, filename, leoFolder, prefix = sys.argv
if filename == '-all':
for filename in os.listdir("."):
rest, extension = os.path.splitext(filename)
            if (extension == '.py' and not filename.startswith("convert")
                    and os.path.isfile(filename)):
                convert(filename, leoFolder, prefix)
else:
convert(filename, leoFolder, prefix)
|
py | 1a5089aefef580f0b2ef8c3db74d2a656d7504c4 | """
Helpers for plugin app
"""
import os
import subprocess
import pathlib
import sysconfig
import traceback
import inspect
import pkgutil
from django.conf import settings
from django.core.exceptions import AppRegistryNotReady
# region logging / errors
class IntegrationPluginError(Exception):
"""
Error that encapsulates another error and adds the path / reference of the raising plugin
"""
def __init__(self, path, message):
self.path = path
self.message = message
def __str__(self):
return self.message # pragma: no cover
class MixinImplementationError(ValueError):
"""
    Error if a mixin was implemented incorrectly in a plugin
    Mostly raised if a required constant is missing
"""
pass
class MixinNotImplementedError(NotImplementedError):
"""
    Error if a necessary mixin function was not overridden
"""
pass
def log_error(error, reference: str = 'general'):
"""
    Log a plugin error
"""
from plugin import registry
# make sure the registry is set up
if reference not in registry.errors:
registry.errors[reference] = []
# add error to stack
registry.errors[reference].append(error)
def handle_error(error, do_raise: bool = True, do_log: bool = True, log_name: str = ''):
"""
Handles an error and casts it as an IntegrationPluginError
"""
package_path = traceback.extract_tb(error.__traceback__)[-1].filename
install_path = sysconfig.get_paths()["purelib"]
try:
package_name = pathlib.Path(package_path).relative_to(install_path).parts[0]
except ValueError:
# is file - loaded -> form a name for that
path_obj = pathlib.Path(package_path).relative_to(settings.BASE_DIR)
path_parts = [*path_obj.parts]
path_parts[-1] = path_parts[-1].replace(path_obj.suffix, '') # remove suffix
# remove path prefixes
if path_parts[0] == 'plugin':
path_parts.remove('plugin')
path_parts.pop(0)
else:
path_parts.remove('plugins')
package_name = '.'.join(path_parts)
if do_log:
log_kwargs = {}
if log_name:
log_kwargs['reference'] = log_name
log_error({package_name: str(error)}, **log_kwargs)
if do_raise:
raise IntegrationPluginError(package_name, str(error))
# endregion
# region git-helpers
def get_git_log(path):
"""
Get dict with info of the last commit to file named in path
"""
from plugin import registry
output = None
if registry.git_is_modern:
path = path.replace(os.path.dirname(settings.BASE_DIR), '')[1:]
command = ['git', 'log', '-n', '1', "--pretty=format:'%H%n%aN%n%aE%n%aI%n%f%n%G?%n%GK'", '--follow', '--', path]
try:
output = str(subprocess.check_output(command, cwd=os.path.dirname(settings.BASE_DIR)), 'utf-8')[1:-1]
if output:
output = output.split('\n')
except subprocess.CalledProcessError: # pragma: no cover
pass
if not output:
output = 7 * [''] # pragma: no cover
return {'hash': output[0], 'author': output[1], 'mail': output[2], 'date': output[3], 'message': output[4], 'verified': output[5], 'key': output[6]}
def check_git_version():
"""returns if the current git version supports modern features"""
# get version string
try:
output = str(subprocess.check_output(['git', '--version'], cwd=os.path.dirname(settings.BASE_DIR)), 'utf-8')
except subprocess.CalledProcessError: # pragma: no cover
return False
# process version string
try:
version = output[12:-1].split(".")
if len(version) > 1 and version[0] == '2':
if len(version) > 2 and int(version[1]) >= 22:
return True
except ValueError: # pragma: no cover
pass
return False
class GitStatus:
"""
Class for resolving git gpg singing state
"""
class Definition:
"""
Definition of a git gpg sing state
"""
key: str = 'N'
status: int = 2
msg: str = ''
def __init__(self, key: str = 'N', status: int = 2, msg: str = '') -> None:
self.key = key
self.status = status
self.msg = msg
N = Definition(key='N', status=2, msg='no signature',)
G = Definition(key='G', status=0, msg='valid signature',)
B = Definition(key='B', status=2, msg='bad signature',)
U = Definition(key='U', status=1, msg='good signature, unknown validity',)
X = Definition(key='X', status=1, msg='good signature, expired',)
Y = Definition(key='Y', status=1, msg='good signature, expired key',)
R = Definition(key='R', status=2, msg='good signature, revoked key',)
E = Definition(key='E', status=1, msg='cannot be checked',)
# endregion
# region plugin finders
def get_modules(pkg):
"""get all modules in a package"""
context = {}
for loader, name, ispkg in pkgutil.walk_packages(pkg.__path__):
try:
module = loader.find_module(name).load_module(name)
pkg_names = getattr(module, '__all__', None)
for k, v in vars(module).items():
if not k.startswith('_') and (pkg_names is None or k in pkg_names):
context[k] = v
context[name] = module
except AppRegistryNotReady: # pragma: no cover
pass
except Exception as error:
# this 'protects' against malformed plugin modules by more or less silently failing
# log to stack
log_error({name: str(error)}, 'discovery')
return [v for k, v in context.items()]
def get_classes(module):
"""get all classes in a given module"""
return inspect.getmembers(module, inspect.isclass)
def get_plugins(pkg, baseclass):
"""
Return a list of all modules under a given package.
- Modules must be a subclass of the provided 'baseclass'
- Modules must have a non-empty PLUGIN_NAME parameter
"""
plugins = []
modules = get_modules(pkg)
# Iterate through each module in the package
for mod in modules:
# Iterate through each class in the module
for item in get_classes(mod):
plugin = item[1]
if issubclass(plugin, baseclass) and plugin.PLUGIN_NAME:
plugins.append(plugin)
return plugins
# endregion
|
py | 1a508a241f5a08f04aaed82672c538f941a25712 | import logging
from botocore import exceptions
import json
import sys
from utils.utils import get_region_name, get_price1, get_price2, handle_limit_exceeded_exception
class Pricing:
"""For getting and returning the price of the Elastic IP's."""
#Filter for get_products pricing api call used to fetch EIP price.
eip_filter = '[{{"Field": "location", "Value": "{r}", "Type": "TERM_MATCH"}},' \
' {{"Field": "group", "Value": "ElasticIP:Address", "Type": "TERM_MATCH"}},' \
'{{"Field": "productFamily", "Value": "IP Address", "Type": "TERM_MATCH"}}]'
def __init__(self, pricing_client=None, region=None):
self.pricing_client = pricing_client
self.region = region
self.formatted_region = get_region_name(region)
logging.basicConfig(level=logging.WARNING)
self.logger = logging.getLogger()
def get_eip_price(self):
"""Returns EIP price."""
try:
f = self.eip_filter.format(r=self.formatted_region)
data = self.pricing_client.get_products(ServiceCode='AmazonEC2', Filters=json.loads(f))
if "eu-west-1" in self.region:
price = get_price2(data)
return float(price)
price = get_price1(data)
return float(price)
except exceptions.ClientError as error:
handle_limit_exceeded_exception(error, 'eip pricing.py')
sys.exit(1)
except Exception as e:
print("Error on line {} in eip pricing.py".format(sys.exc_info()[-1].tb_lineno) + " | Message: " + str(e))
sys.exit(1)
|
py | 1a508a278c658bb2025dc0c78357a34533e4d402 | from collections import deque
d = deque()
for _ in range(int(input())):
line = input().split()
if line[0] == 'append':
d.append(line[1])
elif line[0] == 'pop':
d.pop()
elif line[0] == 'popleft':
d.popleft()
elif line[0] == 'appendleft':
d.appendleft(line[1])
print(*d) |
py | 1a508ac5ac09d76e86379adf28ed24a60e95b0ad | from typing import List, Optional
from enum import IntEnum
import numpy as np
import logging
from numpy import random
# The two following classes just make it convenient to select which mutation/recombination/selectoin to use with EA
class Recombination(IntEnum):
NONE = -1 # can be used when only mutation is required
UNIFORM = 0 # uniform crossover (only really makes sense for function dimension > 1)
INTERMEDIATE = 1 # intermediate recombination
class Mutation(IntEnum):
NONE = -1 # Can be used when only recombination is required
UNIFORM = 0 # Uniform mutation
GAUSSIAN = 1 # Gaussian mutation
class ParentSelection(IntEnum):
NEUTRAL = 0
FITNESS = 1
TOURNAMENT = 2
class Member:
"""
Class to simplify member handling.
"""
def __init__(self, initial_x: np.ndarray, target_function: callable, bounds: List[float],
mutation: Mutation, recombination: Recombination,
sigma: Optional[float] = None, recom_prob: Optional[float] = None) -> None:
"""
Init
:param initial_x: Initial coordinate of the member
:param target_function: The target function that determines the fitness value
:param bounds: Allowed bounds. For simplicities sake we assume that all elements in initial_x have the same
bounds -> bounds[0] lower bound && bounds[1] upper bounds
:param mutation: hyperparameter that determines which mutation type use
:param recombination: hyperparameter that determines which recombination type to use
:param sigma: Optional hyperparameter that is only active if mutation is gaussian
:param recom_prob: Optional hyperparameter that is only active if recombination is uniform
"""
self._x = initial_x.astype(float) # astype is crucial here. Otherwise numpy might cast everything to int
self._f = target_function
self.__bounds = bounds
self._age = 0 # basically indicates how many offspring were generated from this member
self._mutation = mutation
self._recombination = recombination
self._x_changed = True
self._fit = None
self._sigma = sigma
self._recom_prob = recom_prob
self.logger = logging.getLogger(self.__class__.__name__)
@property # fitness can only be queried never set
def fitness(self):
if self._x_changed: # Only if the x_coordinate has changed we need to evaluate the fitness.
self._x_changed = False
self._fit = self._f(self._x)
return self._fit # otherwise we can return the cached value
@property # properties let us easily handle getting and setting without exposing our private variables
def x_coordinate(self):
return self._x
@x_coordinate.setter
def x_coordinate(self, value):
assert np.all((self.__bounds[0] <= value) & (value <= self.__bounds[1])), 'Member out of bounds'
self._x_changed = True
self._x = value
def mutate(self):
"""
Mutation which creates a new offspring
:return: new member who is based on this member
"""
new_x = self.x_coordinate.copy()
self.logger.debug('new point before mutation:')
self.logger.debug(new_x)
# modify new_x either through uniform or gaussian mutation
if self._mutation == Mutation.UNIFORM:
new_x = np.random.uniform(self.__bounds[0], self.__bounds[1], new_x.size)
elif self._mutation == Mutation.GAUSSIAN:
assert self._sigma, 'Sigma has to be set when gaussian mutation is used'
new_x = new_x + self._sigma*np.random.randn()
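            # clip mutated coordinates back into the allowed bounds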
new_x[new_x < self.__bounds[0]] = self.__bounds[0]
new_x[new_x > self.__bounds[1]] = self.__bounds[1]
elif self._mutation != Mutation.NONE:
# We won't consider any other mutation types
raise NotImplementedError
self.logger.debug('new point after mutation:')
self.logger.debug(new_x)
child = Member(new_x, self._f, self.__bounds, self._mutation, self._recombination,
self._sigma, self._recom_prob)
self._age += 1
return child
def recombine(self, partner):
"""
Recombination of this member with a partner
:param partner: Member
:return: new offspring based on this member and partner
"""
if self._recombination == Recombination.INTERMEDIATE:
new_x = 0.5*(self.x_coordinate + partner.x_coordinate)
elif self._recombination == Recombination.UNIFORM:
assert self._recom_prob is not None, \
'for this recombination type you have to specify the recombination probability'
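            # uniform crossover: a Bernoulli(recom_prob) mask takes each gene
            # from self where the mask is 1 and from the partner where it is 0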
cross = np.random.binomial(1,self._recom_prob,self.x_coordinate.size)
new_x = self.x_coordinate*cross + partner.x_coordinate*(1-cross)
elif self._recombination == Recombination.NONE:
new_x = self.x_coordinate.copy() # copy is important here to not only get a reference
else:
raise NotImplementedError
self.logger.debug('new point after recombination:')
self.logger.debug(new_x)
child = Member(new_x, self._f, self.__bounds, self._mutation, self._recombination,
self._sigma, self._recom_prob)
self._age += 1
return child
def __str__(self):
"""Makes the class easily printable"""
str = "Population member: Age={}, x={}, f(x)={}".format(self._age, self.x_coordinate, self.fitness)
return str
def __repr__(self):
"""Will also make it printable if it is an entry in a list"""
return self.__str__() + '\n'
class EA:
def __init__(self, target_func: callable, population_size: int = 10, problem_dim: int = 2,
problem_bounds: List = [-30, 30], mutation_type: Mutation = Mutation.UNIFORM,
recombination_type: Recombination = Recombination.INTERMEDIATE,
sigma: float = 1., recom_proba: float = 0.5, selection_type: ParentSelection = ParentSelection.NEUTRAL,
total_number_of_function_evaluations: int = 200, children_per_step: int = 5,
fraction_mutation: float = .5
):
"""
Simple evolutionary algorithm
:param target_func: callable target function we optimize
:param population_size: int
:param problem_dim: int
:param problem_bounds: list[int] used to make sure population members are valid
:param mutation_type: hyperparameter to set mutation strategy
:param recombination_type: hyperparameter to set recombination strategy
:param sigma: conditional hyperparameter dependent on mutation_type GAUSSIAN
:param recom_proba: conditional hyperparameter dependent on recombination_type UNIFORM
:param selection_type: hyperparameter to set selection strategy
:param total_number_of_function_evaluations: maximum allowed function evaluations
:param children_per_step: how many children to produce per step
:param fraction_mutation: balance between sexual and asexual reproduction
"""
assert 0 <= fraction_mutation <= 1
assert 0 < children_per_step
assert 0 < total_number_of_function_evaluations
assert 0 < sigma
assert 0 < problem_dim
assert 0 < population_size
# Step 1: initialize Population
self.population = [
Member(np.random.uniform(*problem_bounds, problem_dim),
target_func, problem_bounds, mutation_type, recombination_type, sigma, recom_proba
) for _ in range(population_size)]
self.population.sort(key=lambda x: x.fitness) # sort population by fitness for easier handling downstream
self.pop_size = population_size
self.selection = selection_type
self.logger = logging.getLogger(self.__class__.__name__)
self.logger.info('Initial average fitness of population: %f', self.get_average_fitness())
self.max_func_evals = total_number_of_function_evaluations
self._func_evals = population_size
self.num_children = children_per_step
self.frac_mutants = fraction_mutation
        # stores the optimization trajectory; lets you easily observe how often
        # a new best member was generated
        self.trajectory = [self.population[0]]
def get_average_fitness(self) -> float:
"""Helper to quickly access average population fitness"""
return np.mean(list(map(lambda x: x.fitness, self.population)))
def select_parents(self):
"""
Method that implements all selection mechanism.
For ease of computation we assume that the population members are sorted according to their fitness
:return: list of ids of selected parents.
"""
parent_ids = []
mu = self.num_children
if self.selection == ParentSelection.NEUTRAL:
for i in range(mu):
id = random.randint(0, self.pop_size)
parent_ids.append(id)
elif self.selection == ParentSelection.FITNESS:
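            # Roulette-wheel sampling over raw fitness. The population is sorted
            # ascending (lower fitness = better on a minimization problem), so
            # the drawn index is mirrored (pop_size - 1 - id) to map the large
            # cumulative-fitness spans of bad members onto well-ranked parents.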
for i in range(mu):
max = sum([c.fitness for c in self.population])
pick = random.uniform(0, max)
current = 0
for id, member in zip(range(self.pop_size), self.population):
current+= member.fitness
if current > pick:
parent_ids.append(self.pop_size-1-id)
break
elif self.selection == ParentSelection.TOURNAMENT:
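            # Tournament selection: a shuffled 0/1 mask samples `tournament_size`
            # distinct members; since the population is sorted by fitness, the
            # smallest selected index (first hit of np.where) is the winner.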
for i in range(mu):
tournament_size = 5
if self.pop_size <= tournament_size:
parent_ids.append(0)
else:
arr = np.array([1]*tournament_size + [0] * (self.pop_size-tournament_size))
np.random.shuffle(arr)
one_idx= np.where(arr==1)
parent_ids.append(one_idx[0][0])
else:
raise NotImplementedError
self.logger.debug('Selected parents:')
self.logger.debug(parent_ids)
return parent_ids
def step(self) -> float:
"""
Performs one step of parent selection -> offspring creation -> survival selection
:return: average population fittness
"""
# Step 2: Parent selection
parent_ids = self.select_parents()
# Step 3: Variation / create offspring
children = []
for id in parent_ids:
# for each parent create exactly one offspring (use the frac_mutants) parameter to determine
# if more recombination or mutation should be performed
parent = self.population[id]
new_pop = parent.mutate()
if np.random.uniform(0., 1., 1) < self.frac_mutants:
new_pop = parent.recombine(new_pop)
children.append(new_pop)
self._func_evals += 1
self.logger.debug('Children:')
self.logger.debug(children)
# Step 4: Survival selection
# (\mu + \lambda)-selection i.e. combine offspring and parents in one sorted list, keep the #pop_size best
self.population.extend(children)
self.population.sort(key=lambda x: x.fitness)
self.population = self.population[:self.pop_size]
self.trajectory.append(self.population[0])
return self.get_average_fitness()
def optimize(self):
"""
Simple optimization loop that stops after a predetermined number of function evaluations
:return:
"""
step = 1
while self._func_evals < self.max_func_evals:
avg_fitness = self.step()
self.logger.info(
'Step {:>3d} | Average fitness {:>10.7f} | Best fitness {:>10.7f} | #Func Evals: {:>4d}'.format(
step, avg_fitness, self.population[0].fitness, self._func_evals))
step += 1
return self.population[0]
if __name__ == '__main__':
"""
Simple main to give an example of how to use the EA
"""
from target_function import ackley
np.random.seed(0) # fix seed for comparisons sake
logging.basicConfig(level=logging.INFO)
dimensionality = 2
max_func_evals = 500 * dimensionality
pop_size = 20
ea = EA(ackley, pop_size, dimensionality, selection_type=ParentSelection.TOURNAMENT,
total_number_of_function_evaluations=max_func_evals)
optimum = ea.optimize()
# print(ea.trajectory)
print(optimum)
print('#' * 120)
ea = EA(ackley, pop_size, dimensionality, selection_type=ParentSelection.FITNESS,
total_number_of_function_evaluations=max_func_evals)
optimum = ea.optimize()
# print(ea.trajectory)
print(optimum)
print('#' * 120)
ea = EA(ackley, pop_size, dimensionality, selection_type=ParentSelection.NEUTRAL,
total_number_of_function_evaluations=max_func_evals)
optimum = ea.optimize()
# print(ea.trajectory)
print(optimum)
print('#' * 120)
|
py | 1a508b7c8857ef2062b0bd1c3deb38363465d49d | # Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may
# not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Integration tests for the SageMaker TrainingJob API.
"""
import botocore
import pytest
import logging
from typing import Dict
from acktest.resources import random_suffix_name
from acktest.k8s import resource as k8s
from e2e import (
service_marker,
create_sagemaker_resource,
wait_for_status,
sagemaker_client,
)
from e2e.replacement_values import REPLACEMENT_VALUES
from e2e.bootstrap_resources import get_bootstrap_resources
from e2e.common import config as cfg
from time import sleep
RESOURCE_PLURAL = "trainingjobs"
@pytest.fixture(scope="function")
def xgboost_training_job_debugger():
resource_name = random_suffix_name("xgboost-trainingjob-debugger", 32)
replacements = REPLACEMENT_VALUES.copy()
replacements["TRAINING_JOB_NAME"] = resource_name
reference, _, resource = create_sagemaker_resource(
resource_plural=RESOURCE_PLURAL,
resource_name=resource_name,
spec_file="xgboost_trainingjob_debugger",
replacements=replacements,
)
assert resource is not None
assert k8s.get_resource_arn(resource) is not None
yield (reference, resource)
if k8s.get_resource_exists(reference):
_, deleted = k8s.delete_custom_resource(reference, 3, 10)
assert deleted
def get_sagemaker_training_job(training_job_name: str):
try:
training_job = sagemaker_client().describe_training_job(
TrainingJobName=training_job_name
)
return training_job
except botocore.exceptions.ClientError as error:
logging.error(
f"SageMaker could not find a training debugger job with the name {training_job_name}. Error {error}"
)
return None
# TODO: Move to __init__.py
def get_training_sagemaker_status(training_job_name: str):
training_sm_desc = get_sagemaker_training_job(training_job_name)
return training_sm_desc["TrainingJobStatus"]
def get_training_resource_status(reference: k8s.CustomResourceReference):
resource = k8s.get_resource(reference)
assert "trainingJobStatus" in resource["status"]
return resource["status"]["trainingJobStatus"]
def get_training_debugger_sagemaker_status(training_job_name: str):
training_sm_desc = get_sagemaker_training_job(training_job_name)
return training_sm_desc["DebugRuleEvaluationStatuses"][0]["RuleEvaluationStatus"]
def get_training_debugger_resource_status(reference: k8s.CustomResourceReference):
resource = k8s.get_resource(reference)
resource_status = resource["status"]["debugRuleEvaluationStatuses"][0][
"ruleEvaluationStatus"
]
assert resource_status is not None
return resource_status
@service_marker
class TestTrainingDebuggerJob:
def _wait_sagemaker_training_status(
self,
training_job_name,
expected_status: str,
wait_periods: int = 30,
period_length: int = 30,
):
return wait_for_status(
expected_status,
wait_periods,
period_length,
get_training_sagemaker_status,
training_job_name,
)
def _wait_resource_training_status(
self,
reference: k8s.CustomResourceReference,
expected_status: str,
wait_periods: int = 30,
period_length: int = 30,
):
return wait_for_status(
expected_status,
wait_periods,
period_length,
get_training_resource_status,
reference,
)
def _assert_training_status_in_sync(
self, training_job_name, reference, expected_status
):
assert (
self._wait_sagemaker_training_status(training_job_name, expected_status)
== self._wait_resource_training_status(reference, expected_status)
== expected_status
)
def _wait_sagemaker_training_debugger_status(
self,
training_job_name,
expected_status: str,
wait_periods: int = 30,
period_length: int = 30,
):
return wait_for_status(
expected_status,
wait_periods,
period_length,
get_training_debugger_sagemaker_status,
training_job_name,
)
def _wait_resource_training_debugger_status(
self,
reference: k8s.CustomResourceReference,
expected_status: str,
wait_periods: int = 30,
period_length: int = 30,
):
return wait_for_status(
expected_status,
wait_periods,
period_length,
get_training_debugger_resource_status,
reference,
)
def _assert_training_debugger_status_in_sync(
self, training_job_name, reference, expected_status
):
assert (
self._wait_sagemaker_training_debugger_status(
training_job_name, expected_status
)
== self._wait_resource_training_debugger_status(reference, expected_status)
== expected_status
)
def test_completed(self, xgboost_training_job_debugger):
(reference, resource) = xgboost_training_job_debugger
assert k8s.get_resource_exists(reference)
training_job_name = resource["spec"].get("trainingJobName", None)
assert training_job_name is not None
training_job_desc = get_sagemaker_training_job(training_job_name)
assert k8s.get_resource_arn(resource) == training_job_desc["TrainingJobArn"]
assert training_job_desc["TrainingJobStatus"] == cfg.JOB_STATUS_INPROGRESS
assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "False")
self._assert_training_status_in_sync(
training_job_name, reference, cfg.JOB_STATUS_COMPLETED
)
# TODO: This test is failing
assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "False")
self._assert_training_debugger_status_in_sync(
training_job_name, reference, cfg.DEBUGGERJOB_STATUS_COMPLETED
)
assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True")
# Check that you can delete a completed resource from k8s
_, deleted = k8s.delete_custom_resource(reference, 3, 10)
assert deleted is True
|
py | 1a508c997d77d0a7166980ec6b504c06e8c7ad4c | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='scrapy-autounit',
version='0.0.22',
author='',
author_email='',
description='Automatic unit test generation for Scrapy.',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/fcanobrash/scrapy-autounit',
packages=setuptools.find_packages(),
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
],
install_requires=[
'pathlib',
'datadiff==2.0.0',
],
entry_points = {
'console_scripts': ['autounit-inspect=scrapy_autounit.inspect:main'],
},
)
|
py | 1a508cb3c4d0fd909c39b31e5183e84ed4e56b9e | n = input("Credit Card No.: ")
s1 = 0  # digits taken as-is (rightmost digit, then every second one leftwards)
s2 = 0  # doubled digits (second-from-right, then every second one leftwards)
# Luhn algorithm: walk from the rightmost digit so that every second digit
# from the right is doubled, regardless of the card number's length.
for i, ch in enumerate(reversed(n)):
    if i % 2 == 0:
        s1 += int(ch)
    else:
        tmp = int(ch) * 2
        if tmp > 9:  # a doubled digit is at most 18, so one subtraction suffices
            tmp -= 9
        s2 += tmp
tot = s1 + s2
if( tot % 10 == 0):
print(n + " passes the test")
else:
print(n + " failed the test")
|
py | 1a508dbac7dfd15a6330d757fa97c1caff0d6bc0 | # find the minimum number of coins needed to make up a given amount
# greedy version, not dynamic programming version
denominations = [1, 2, 5, 10, 20, 50, 100, 1000]
# add the largest coin that does not exceed the target amount to the total
def coins_required(amount):
total = 0
coins = []
for denomination in denominations[::-1]:
while total + denomination <= amount:
total += denomination
coins.append(denomination)
return coins
print(coins_required(2035))
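# A minimal dynamic-programming sketch (not part of the original script) for the
# general case: greedy is optimal for canonical systems like the one above, but
# fails for arbitrary denominations (e.g. [1, 3, 4] and amount 6: greedy picks
# 4+1+1, while the optimum is 3+3).
def min_coins_dp(amount):
    # best[a] = fewest coins summing to a, or inf if a is unreachable
    best = [0] + [float("inf")] * amount
    for a in range(1, amount + 1):
        for d in denominations:
            if d <= a:
                best[a] = min(best[a], best[a - d] + 1)
    return best[amount]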
|
py | 1a508ef3bdac9c9611f6c4ba82a14522651d3743 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads/v6/errors/customer_client_link_error.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads/v6/errors/customer_client_link_error.proto',
package='google.ads.googleads.v6.errors',
syntax='proto3',
serialized_options=b'\n\"com.google.ads.googleads.v6.errorsB\034CustomerClientLinkErrorProtoP\001ZDgoogle.golang.org/genproto/googleapis/ads/googleads/v6/errors;errors\242\002\003GAA\252\002\036Google.Ads.GoogleAds.V6.Errors\312\002\036Google\\Ads\\GoogleAds\\V6\\Errors\352\002\"Google::Ads::GoogleAds::V6::Errors',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n?google/ads/googleads/v6/errors/customer_client_link_error.proto\x12\x1egoogle.ads.googleads.v6.errors\x1a\x1cgoogle/api/annotations.proto\"\x8f\x03\n\x1b\x43ustomerClientLinkErrorEnum\"\xef\x02\n\x17\x43ustomerClientLinkError\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07UNKNOWN\x10\x01\x12*\n&CLIENT_ALREADY_INVITED_BY_THIS_MANAGER\x10\x02\x12\'\n#CLIENT_ALREADY_MANAGED_IN_HIERARCHY\x10\x03\x12\x1b\n\x17\x43YCLIC_LINK_NOT_ALLOWED\x10\x04\x12\"\n\x1e\x43USTOMER_HAS_TOO_MANY_ACCOUNTS\x10\x05\x12#\n\x1f\x43LIENT_HAS_TOO_MANY_INVITATIONS\x10\x06\x12*\n&CANNOT_HIDE_OR_UNHIDE_MANAGER_ACCOUNTS\x10\x07\x12-\n)CUSTOMER_HAS_TOO_MANY_ACCOUNTS_AT_MANAGER\x10\x08\x12 \n\x1c\x43LIENT_HAS_TOO_MANY_MANAGERS\x10\tB\xf7\x01\n\"com.google.ads.googleads.v6.errorsB\x1c\x43ustomerClientLinkErrorProtoP\x01ZDgoogle.golang.org/genproto/googleapis/ads/googleads/v6/errors;errors\xa2\x02\x03GAA\xaa\x02\x1eGoogle.Ads.GoogleAds.V6.Errors\xca\x02\x1eGoogle\\Ads\\GoogleAds\\V6\\Errors\xea\x02\"Google::Ads::GoogleAds::V6::Errorsb\x06proto3'
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_CUSTOMERCLIENTLINKERRORENUM_CUSTOMERCLIENTLINKERROR = _descriptor.EnumDescriptor(
name='CustomerClientLinkError',
full_name='google.ads.googleads.v6.errors.CustomerClientLinkErrorEnum.CustomerClientLinkError',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='CLIENT_ALREADY_INVITED_BY_THIS_MANAGER', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='CLIENT_ALREADY_MANAGED_IN_HIERARCHY', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='CYCLIC_LINK_NOT_ALLOWED', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='CUSTOMER_HAS_TOO_MANY_ACCOUNTS', index=5, number=5,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='CLIENT_HAS_TOO_MANY_INVITATIONS', index=6, number=6,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='CANNOT_HIDE_OR_UNHIDE_MANAGER_ACCOUNTS', index=7, number=7,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='CUSTOMER_HAS_TOO_MANY_ACCOUNTS_AT_MANAGER', index=8, number=8,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='CLIENT_HAS_TOO_MANY_MANAGERS', index=9, number=9,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=162,
serialized_end=529,
)
_sym_db.RegisterEnumDescriptor(_CUSTOMERCLIENTLINKERRORENUM_CUSTOMERCLIENTLINKERROR)
_CUSTOMERCLIENTLINKERRORENUM = _descriptor.Descriptor(
name='CustomerClientLinkErrorEnum',
full_name='google.ads.googleads.v6.errors.CustomerClientLinkErrorEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_CUSTOMERCLIENTLINKERRORENUM_CUSTOMERCLIENTLINKERROR,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=130,
serialized_end=529,
)
_CUSTOMERCLIENTLINKERRORENUM_CUSTOMERCLIENTLINKERROR.containing_type = _CUSTOMERCLIENTLINKERRORENUM
DESCRIPTOR.message_types_by_name['CustomerClientLinkErrorEnum'] = _CUSTOMERCLIENTLINKERRORENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CustomerClientLinkErrorEnum = _reflection.GeneratedProtocolMessageType('CustomerClientLinkErrorEnum', (_message.Message,), {
'DESCRIPTOR' : _CUSTOMERCLIENTLINKERRORENUM,
'__module__' : 'google.ads.googleads.v6.errors.customer_client_link_error_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.errors.CustomerClientLinkErrorEnum)
})
_sym_db.RegisterMessage(CustomerClientLinkErrorEnum)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
py | 1a508f5df0e22854fbed72fbd197ebe002525c17 | from typing import List
from secrets import choice
from discord.ext import commands
from .. import config
keywords = ["dm"]
reply = (
"SCAM ALERT! Never accept any trade on DEVNET, SOL on this network are fake and unlimited.",
"SCAM ALERT! PLEASE ONLY DO BUSINESS ON MAGICEDEN OR SOLANART.",
"SCAM ALERT! TO STAY SAFE, PLEASE TURN OFF YOUR DMS!!.",
)
def check(word, words):
    return word in words
class ScamAlert(commands.Cog):
bot: commands.Bot
allowed_channels: List[str]
def __init__(self, bot):
self.bot = bot
self.allowed_channels = config.allowed_check_scam_channels
@commands.Cog.listener()
async def on_message(self, message):
if message.author.bot:
return
if not message.content:
return
if message.channel.name in self.allowed_channels:
msg_to_list = message.content.split()
msg_to_list = [x.lower() for x in msg_to_list]
for word in keywords:
if check(word.lower(), msg_to_list):
await message.channel.send(choice(reply))
|
py | 1a508fd21943030ee68e5287e73bd1d74433364a | # Copyright 2020 . All Rights Reserved.
# Author : Lei Sha
from Hyperparameters import args
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', '-g')
parser.add_argument('--modelarch', '-m')
parser.add_argument('--aspect', '-a')
parser.add_argument('--choose', '-c')
cmdargs = parser.parse_args()
print(cmdargs)
usegpu = True
if cmdargs.gpu is None:
usegpu = False
args['device'] = 'cpu'
else:
usegpu = True
args['device'] = 'cuda:' + str(cmdargs.gpu)
if cmdargs.modelarch is None:
args['model_arch'] = 'lstm'
else:
args['model_arch'] = cmdargs.modelarch
if cmdargs.aspect is None:
args['aspect'] = 0
else:
args['aspect'] = int(cmdargs.aspect)
if cmdargs.choose is None:
    args['choose'] = 0
else:
    args['choose'] = int(cmdargs.choose)
import functools
print = functools.partial(print, flush=True)
import os
from textdataBeer import TextDataBeer
import time, sys
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
import time, datetime
import math, random
import nltk
import pickle
from nltk.translate.bleu_score import corpus_bleu, SmoothingFunction
# import matplotlib.pyplot as plt
import numpy as np
import copy
from LanguageModel_beer import LanguageModel
import LSTM_IB_GAN_beer
def asMinutes(s):
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def timeSince(since, percent):
    # 'percent' is kept for signature compatibility with callers; only the
    # elapsed time and the current wall-clock timestamp are reported.
    now = time.time()
    s = now - since
    return '%s (%s)' % (asMinutes(s), datetime.datetime.now())
class Runner:
def __init__(self):
self.model_path = args['rootDir'] + '/chargemodel_' + args['model_arch'] + '.mdl'
def main(self):
if args['model_arch'] in ['lstmibgan']:
args['classify_type'] = 'single'
args['batchSize'] = 256
self.textData = TextDataBeer('beer')
# self.start_token = self.textData.word2index['START_TOKEN']
# self.end_token = self.textData.word2index['END_TOKEN']
args['vocabularySize'] = self.textData.getVocabularySize()
args['chargenum'] = 5
args['embeddingSize'] = self.textData.index2vector.shape[1]
print(self.textData.getVocabularySize())
args['model_arch'] = 'lstmibgan'
# args['aspect'] = 0
args['hiddenSize'] = 200
print(args)
if args['model_arch'] == 'lstmibgan':
print('Using LSTM information bottleneck GAN model for Beer.')
LM = torch.load(args['rootDir']+'/LMbeer.pkl', map_location=args['device'])
for param in LM.parameters():
param.requires_grad = False
ppl = self.CalPPL(LM)
print('PPL=',ppl)
# LM=0
LSTM_IB_GAN_beer.train(self.textData, LM, self.textData.index2vector)
def indexesFromSentence(self, sentence):
return [self.textData.word2index[word] if word in self.textData.word2index else self.textData.word2index['UNK']
for word in sentence]
def tensorFromSentence(self, sentence):
indexes = self.indexesFromSentence(sentence)
# indexes.append(self.textData.word2index['END_TOKEN'])
        return torch.tensor(indexes, dtype=torch.long, device=args['device']).view(-1, 1)
def evaluate(self, sentence, correctlabel, max_length=20):
with torch.no_grad():
input_tensor = self.tensorFromSentence(sentence)
input_length = input_tensor.size()[0]
# encoder_hidden = encoder.initHidden()
# encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)
x = {}
# print(input_tensor)
x['enc_input'] = torch.transpose(input_tensor, 0, 1)
x['enc_len'] = [input_length]
x['labels'] = [correctlabel]
# print(x['enc_input'], x['enc_len'])
# print(x['enc_input'].shape)
decoded_words, label, _ = self.model.predict(x, True)
return decoded_words, label
def evaluateRandomly(self, n=10):
for i in range(n):
sample = random.choice(self.textData.datasets['train'])
print('>', sample)
output_words, label = self.evaluate(sample[2], sample[1])
output_sentence = ' '.join(output_words[0]) # batch=1
print('<', output_sentence, label)
print('')
def CalPPL(self, LM):
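        # Dev-set perplexity: sum the LM reconstruction loss over all batches,
        # average over the number of sequences seen, and return exp(mean) below.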
batches = self.textData.getBatches('dev')
total = 0
loss_sum = 0
for index, batch in enumerate(batches):
x = {}
x['dec_input'] = autograd.Variable(torch.LongTensor(batch.decoderSeqs)).to(args['device'])
x['dec_len'] = batch.decoder_lens
x['dec_target'] = autograd.Variable(torch.LongTensor(batch.targetSeqs)).to(args['device'])
total += x['dec_input'].size()[0]
print(x['dec_input'].size())
embedding = nn.Embedding.from_pretrained(torch.FloatTensor(self.textData.index2vector))
decoderTargetsEmbeddings = embedding(x['dec_target'])
_, recon_loss = LM.getloss(x['dec_input'],decoderTargetsEmbeddings, x['dec_target'] )
loss_sum += recon_loss.sum()
loss_mean = loss_sum / total
return torch.exp(loss_mean)
if __name__ == '__main__':
r = Runner()
r.main() |
py | 1a509141414a5ef6470ad8ed7ebecfc818ca601b | # -*- coding: utf-8 -*-
"""RAPIDS_Intro.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/11M0rHM6Q3cao8_tYzp-XTZrOh9e-CAL1
# Accelerating Pandas and Scikit Learn on GPU using RAPIDS
- Note: use only a T4, P100, or P4 GPU, which are compatible with RAPIDS.
- https://github.com/rapidsai
- RAPIDS uses the following components:
    - cuML: CUDA-accelerated machine learning (GPU replacement for scikit-learn)
    - cuDF: CUDA DataFrames (GPU replacement for pandas)
    - cuGraph: CUDA-accelerated graphs (GPU replacement for NetworkX)
    - cuDNN: CUDA Deep Neural Networks
# Installing RAPIDS.
- Follow the procedure.
"""
!nvidia-smi
# Install RAPIDS
!git clone https://github.com/rapidsai/rapidsai-csp-utils.git
!bash rapidsai-csp-utils/colab/rapids-colab.sh
import sys, os
dist_package_index = sys.path.index('/usr/local/lib/python3.6/dist-packages')
sys.path = sys.path[:dist_package_index] + ['/usr/local/lib/python3.6/site-packages'] + sys.path[dist_package_index:]
sys.path
exec(open('rapidsai-csp-utils/colab/update_modules.py').read(), globals())
"""# Now we can use CuML and CuDf"""
import cuml, cudf
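
# Quick sanity sketch (added illustration, not part of the original workflow;
# assumes the RAPIDS install above succeeded): cudf mirrors the pandas API, so
# a tiny frame can be built and reduced directly on the GPU.
_demo = cudf.DataFrame({'a': [1, 2, 3], 'b': [4.0, 5.0, 6.0]})
print(_demo['b'].mean())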
import sys,tempfile, urllib, os
import pandas as pd
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.datasets import fetch_openml
covtyp = fetch_openml(name='covertype', version=4)
# Predicting the forest cover type using 53 variables.
# Predict one categorical class.
covtyp.data.shape
np.unique(covtyp.target)
# We still have not loaded the data onto the GPU
!nvidia-smi
cov_df = pd.DataFrame(data=np.c_[covtyp['data'], covtyp['target']], columns=covtyp['feature_names'] + ['target'])
cov_df.memory_usage().sum()
cov_df.head()
cov_df.target.value_counts()
cov_df.dtypes
"""- Convert all objects into float32 format.
- Keep the categorical target as int32
"""
for cols in cov_df.columns:
cov_df[cols] = cov_df[cols].astype(np.float32)
cov_df.dtypes
cov_df['target'] = cov_df['target'].astype(np.int32)
"""- Keep target variable from 0 - 7 instead of 1 - 8"""
cov_df['target'] = cov_df['target'] - 1
cov_df_x = cov_df.drop(['target'], axis=1)
cov_df_y = cov_df['target']
cov_df_x.head()
cov_df_y = pd.DataFrame(cov_df_y)
cov_df_y['target'] = cov_df_y['target'].astype(np.int32)
cov_df_y['target'].value_counts()
cov_df_y.dtypes
X_train, X_test, y_train, y_test = train_test_split(cov_df_x, cov_df_y, train_size=0.75, stratify=cov_df_y, random_state=31)
"""- This moves data to GPU by making a GPU dataframe"""
X_train_gdf = cudf.DataFrame.from_pandas(X_train)
X_test_gdf = cudf.DataFrame.from_pandas(X_test)
y_train_gdf = cudf.DataFrame.from_pandas(y_train)
y_test_gdf = cudf.DataFrame.from_pandas(y_test)
!nvidia-smi
from cuml import RandomForestClassifier as curf
import time
curf_params = {
'n_estimators' : 250,
'max_depth' : 3,
'n_streams' : 1,
'split_algo' : 0,
'seed' : 1000
}
clf = curf(**curf_params)
start_time = time.time()
clf.fit(X_train_gdf, y_train_gdf)
end_time = time.time()
print("Time taken to train = %s" %(end_time - start_time))
pred = clf.predict(X_test_gdf)
print(pred)
print(pred[0])
# There may be problems with older versions. Tested on v0.12
clf.score(X_test_gdf, y_test_gdf)
# We can take values to local memory
pred_out = pred.copy_to_host()
from sklearn.metrics import confusion_matrix
confusion_matrix(y_test, pred_out) |
py | 1a5092922e3ff08032f24d8039449c8152e45b30 | import skimage.transform as st
import numpy as np
import matplotlib.pyplot as plt
from skimage import data, feature
def ex_1(): # Hough Transform
image = np.zeros((100, 100))
idx = np.arange(25, 75)
image[idx[::-1], idx] = 255
image[idx, idx] = 255
h, theta, d = st.hough_line(image)
fig, (ax0, ax1) = plt.subplots(1, 2)
plt.tight_layout()
ax0.imshow(image, plt.cm.gray)
ax0.set_title('input')
ax0.set_axis_off()
ax1.imshow(np.log(1 + h))
ax1.set_title('Hough')
ax1.set_xlabel('Angles (degrees)')
ax1.set_ylabel('Distance (pixels)')
ax1.axis('image')
plt.show()
def ex_2(): # Hough Transform Line Detection
image = np.zeros((100, 100))
idx = np.arange(25, 75)
image[idx[::-1], idx] = 255
image[idx, idx] = 255
h, theta, d = st.hough_line(image)
fig, (ax0, ax1, ax2) = plt.subplots(1, 3)
plt.tight_layout()
ax0.imshow(image, plt.cm.gray)
ax0.set_title('input image')
ax0.set_axis_off()
ax1.imshow(np.log(1 + h))
ax1.set_title('Hough transform')
ax1.set_xlabel('Angles (degrees)')
ax1.set_ylabel('Distance (pixels)')
ax1.axis('image')
ax2.imshow(image, plt.cm.gray)
row1, col1 = image.shape
for _, angle, dist in zip(*st.hough_line_peaks(h, theta, d)):
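        # Each peak is a line in the Hough parameterization x*cos(angle) + y*sin(angle) = dist;
        # solving for y at x = 0 and x = col1 yields two endpoints for plotting.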
y0 = (dist - 0 * np.cos(angle)) / np.sin(angle)
y1 = (dist - col1 * np.cos(angle)) / np.sin(angle)
ax2.plot((0, col1), (y0, y1), '-r')
ax2.axis((0, col1, row1, 0))
ax2.set_title('Detected')
ax2.set_axis_off()
plt.show()
def ex_3(): # Probabilistic Hough Transform
image = data.camera()
edges = feature.canny(image, sigma=2, low_threshold=1, high_threshold=25)
lines = st.probabilistic_hough_line(edges, threshold=10, line_length=5, line_gap=3)
fig, (ax0, ax1, ax2) = plt.subplots(1, 3)
plt.tight_layout()
ax0.imshow(image, plt.cm.gray)
ax0.set_title('input')
ax0.set_axis_off()
ax1.imshow(edges, plt.cm.gray)
ax1.set_title('canny edges')
ax1.set_axis_off()
ax2.imshow(edges * 0)
for line in lines:
p0, p1 = line
ax2.plot((p0[0], p1[0]), (p0[1], p1[1]))
row2, col2 = image.shape
ax2.axis((0, col2, row2, 0))
ax2.set_title('probabilistic')
ax2.set_axis_off()
plt.show()
if __name__ == '__main__':
ex_3()
|
py | 1a5092a930491418a72a2f8ed8025184ae877684 | import os
import random
from comet_ml import Experiment
import torch
import copy
import colbert.utils.distributed as distributed
from colbert.utils.parser import Arguments
from colbert.utils.runs import Run
from colbert.training.training import train
def main():
parser = Arguments(description='Training ColBERT with <query, positive passage, negative passage> triples.')
parser.add_model_parameters()
parser.add_model_training_parameters()
parser.add_training_input()
args = parser.parse()
assert args.bsize % args.accumsteps == 0, ((args.bsize, args.accumsteps),
"The batch size must be divisible by the number of gradient accumulation steps.")
assert args.query_maxlen <= 512
assert args.doc_maxlen <= 512
args.lazy = args.collection is not None
experiment = Experiment(project_name="ColBERT_Training")
experiment.log_parameters(args)
with Run.context(consider_failed_if_interrupted=False):
train(args)
if __name__ == "__main__":
main()
|
py | 1a5092c32e262621e8919badd17fc25eb17aea3d | # Generated by Django 3.1.4 on 2020-12-16 19:32
from django.db import migrations
def add_questions(apps, schema_editor):
YES_NO_CHOICES = ['Yes', 'No'] # noqa: N806
Question = apps.get_model('studies', 'Question') # noqa: N806
QuestionChoice = apps.get_model('studies', 'QuestionChoice') # noqa: N806
questions = {
'Can this be used for the desired application/study?': YES_NO_CHOICES,
'Does the lesion appear to be benign or malignant?': [
'Benign',
'Malignant',
],
'Does the lesion appear to be benign, malignant, or neither?': [
'Benign',
'Malignant',
'Unsure',
],
'Does the lesion appear with border/corners?': YES_NO_CHOICES,
'Does the lesion contain a network?': YES_NO_CHOICES,
'Does the lesion image contain a ruler?': YES_NO_CHOICES,
'Does the lesion image contain any sensitive content or potential Protected Health Information?': YES_NO_CHOICES, # noqa: E501
'Does the lesion image contain light leak spots?': YES_NO_CHOICES,
'Does the lesion image contain pen markings?': YES_NO_CHOICES,
'Indicate your management decision': ['Biopsy', 'Observation and/or reassurance'],
'Is the lesion area blurry?': YES_NO_CHOICES,
'Is the lesion diagnosis consistent with the current image?': YES_NO_CHOICES,
'Is the lesion a nevus, seborrheic keratosis, or melanoma?': [
'Nevus',
'Seborrheic keratosis',
'Melanoma',
],
'Is the lesion a nevus, melanoma, or other?': ['Melanoma', 'Nevus', 'Other'],
'Is the lesion organized or disorganized?': ['Organized', 'Disorganized'],
'Is there hair obscuring the lesion?': YES_NO_CHOICES,
'What is your level of confidence (1-7)?': [
'Absolutely confident',
'Confident',
'Somewhat confident',
'Neither confident nor unconfident',
'Somewhat unconfident',
'Unconfident',
'Not confident at all',
],
'What is your level of confidence (1-5)?': [
'Very Confident',
'Somewhat Confident',
'Neither Confident / Not Confident',
'Somewhat Not Confident',
'Very Not Confident',
],
'Does the lesion contain the color black?': YES_NO_CHOICES,
'Does the lesion contain the color brown?': YES_NO_CHOICES,
'Does the lesion contain the color grey/blue?': YES_NO_CHOICES,
'Does the lesion contain the color light brown?': YES_NO_CHOICES,
'Does the lesion contain the color red?': YES_NO_CHOICES,
'Does the lesion contain the color white?': YES_NO_CHOICES,
}
for question, choices in questions.items():
q = Question.objects.create(prompt=question, official=True)
for choice in choices:
QuestionChoice.objects.create(question=q, text=choice)
class Migration(migrations.Migration):
dependencies = [
('studies', '0001_initial'),
]
operations = [
migrations.RunPython(add_questions),
]
|
py | 1a50939315de53dbb797c712bdfa7784a3f5e51e | from .api import Api
__all__ = ['Api']
|
py | 1a50940be7c36e428a0aed2a0a801692edfdfcb3 | from django.utils.encoding import force_unicode
from django.forms.forms import BoundField
from django.utils.html import conditional_escape
def as_p(instance=None, cls=None):
"Returns this form rendered as HTML <p>s."
    if not instance and not cls:
        raise TypeError('as_p takes at least 1 argument (0 given)')
    elif instance and cls:
        raise TypeError('as_p takes at most 1 argument (2 given)')
elif cls:
# This might not always work, if your form requires params,
# pass an instance instead of a class!
instance = cls()
return html_output(
instance,
normal_row = u'<p%(html_class_attr)s>%(label)s %(field)s%(help_text)s</p>',
error_row = u'%s',
row_ender = '</p>',
help_text_html = u' %s',
errors_on_separate_row = True)
def as_div(instance=None, cls=None):
"Returns this form rendered as HTML <div>s."
    if not instance and not cls:
        raise TypeError('as_div takes at least 1 argument (0 given)')
    elif instance and cls:
        raise TypeError('as_div takes at most 1 argument (2 given)')
elif cls:
# This might not always work, if your form requires params,
# pass an instance instead of a class!
instance = cls()
return html_output(
instance,
normal_row = u'<div%(html_class_attr)s>%(label)s %(field)s%(help_text)s</div>',
error_row = u'%s',
row_ender = '</div>',
help_text_html = u' %s',
errors_on_separate_row = True)
def as_table(instance=None, cls=None):
"Returns this form rendered as HTML <tr>s -- excluding the <table></table>."
    if not instance and not cls:
        raise TypeError('as_table takes at least 1 argument (0 given)')
    elif instance and cls:
        raise TypeError('as_table takes at most 1 argument (2 given)')
elif cls:
# This might not always work, if your form requires params,
# pass an instance instead of a class!
instance = cls()
return html_output(
instance,
normal_row = u'<tr%(html_class_attr)s><th>%(label)s</th><td>%(errors)s%(field)s%(help_text)s</td></tr>',
error_row = u'<tr><td colspan="2">%s</td></tr>',
row_ender = u'</td></tr>',
help_text_html = u'<br />%s',
errors_on_separate_row = False)
def as_ul(instance=None, cls=None):
"Returns this form rendered as HTML <li>s -- excluding the <ul></ul>."
    if not instance and not cls:
        raise TypeError('as_ul takes at least 1 argument (0 given)')
    elif instance and cls:
        raise TypeError('as_ul takes at most 1 argument (2 given)')
elif cls:
# This might not always work, if your form requires params,
# pass an instance instead of a class!
instance = cls()
return html_output(
instance,
normal_row = u'<li%(html_class_attr)s>%(errors)s%(label)s %(field)s%(help_text)s</li>',
error_row = u'<li>%s</li>',
row_ender = '</li>',
help_text_html = u' %s',
errors_on_separate_row = False)
def html_output(form, normal_row, error_row, row_ender, help_text_html, errors_on_separate_row):
"Helper function for outputting HTML. Used by as_table(), as_ul(), as_p()."
top_errors = form.non_field_errors() # Errors that should be displayed above all fields.
output, hidden_fields = [], []
for name, field in form.fields.items():
html_class_attr = ''
bf = BoundField(form, field, name)
bf_errors = form.error_class([conditional_escape(error) for error in bf.errors]) # Escape and cache in local variable.
if bf.is_hidden:
if bf_errors:
top_errors.extend([u'(Hidden field %s) %s' % (name, force_unicode(e)) for e in bf_errors])
hidden_fields.append(unicode(bf))
else:
# Create a 'class="..."' atribute if the row should have any
# CSS classes applied.
css_classes = bf.css_classes()
if css_classes:
html_class_attr = ' class="%s"' % css_classes
if errors_on_separate_row:
output.append(error_row % \
'{%% if form.%s.errors %%}{%% for error in form.%s.errors %%}{{ error }}{%% endfor %%}{%% endif %%}' \
% (name, name,))
output.append(normal_row % {
'errors': \
'{%% if form.%s.errors %%}{%% for error in form.%s.errors %%}{{ error }}{%% endfor %%}{%% endif %%}' \
% (name, name,),
'label': '{{ form.%s.label_tag }}' % (name,),
'field': '{{ form.%s }}' % (name,),
'help_text': '',
'html_class_attr': html_class_attr
})
    if top_errors:
        output.insert(0,
            r'{% if form.errors %}{% for field, error in form.errors %}(Hidden field {{ field }}) {{ error }}{% endfor %}{% endif %}'
        )
if hidden_fields: # Insert any hidden fields in the last row.
        str_hidden = u'{% for field in form.hidden_fields %}{{ field }}{% endfor %}'
if output:
last_row = output[-1]
# Chop off the trailing row_ender (e.g. '</td></tr>') and
# insert the hidden fields.
if not last_row.endswith(row_ender):
# This can happen in the as_p() case (and possibly others
# that users write): if there are only top errors, we may
# not be able to conscript the last row for our purposes,
# so insert a new, empty row.
last_row = (normal_row % {'errors': '', 'label': '',
'field': '', 'help_text':'',
'html_class_attr': html_class_attr})
output.append(last_row)
output[-1] = last_row[:-len(row_ender)] + str_hidden + row_ender
else:
# If there aren't any rows in the output, just append the
# hidden fields.
output.append(str_hidden)
return u'\n'.join(output)
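
# Example usage (a sketch; ``ContactForm`` is a hypothetical Django form class):
#
#     from myapp.forms import ContactForm
#     fragment = as_p(instance=ContactForm())
#
# Note that the returned string still contains template tags such as
# '{{ form.<name> }}', so it is intended to be rendered later with a template
# context that provides ``form``.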
|
py | 1a5095816ddf423e13d4d9acf5c2d0f530d587d0 | import numpy as np
import statistics as stat
from config import *
def process_info(info):
"""
Process a line of info from data source and extract distance
:param info: a line of info. See below for format sample
:return: directory of {node_id (str): distance}
"""
dist = {}
rough_split = info.split('[')
if len(rough_split) <= 2:
return None
dis_list = rough_split[1].split(']')[0].split(',')
if len(dis_list) < 4:
return None
id_list = rough_split[2].split(']')[0].split(',')
if len(id_list) < 4:
return None
    if len(dis_list) != len(id_list) or len(dis_list) < 4:
return None
for i in range(0, len(dis_list)):
id_list[i] = id_list[i].strip('"')
if id_list[i] in ref_nodes:
dist[id_list[i]] = float(dis_list[i].strip('"'))
if len(dist) < 4:
return None
return dist
pre_process_threshold = 0.5
def pre_process_data(ranges):
"""
    Pre-process a list of ranges from one reference node and drop readings that are more than
    pre_process_threshold away from the median
:param ranges: a list of ranges
:return average range after filtering
"""
median = stat.median(ranges)
s = 0 # sum
c = 0 # count
for r in ranges:
if median - pre_process_threshold < r < median + pre_process_threshold:
s += r
c += 1
if c == 0:
return None
else:
return s / c
def calc_position(dist):
"""
Calculate position based on distances to reference point
    :param dist: dictionary of {node_id (str): distance}
:return: 1D np.array of position [x, y, z]
"""
A = np.array([0, 0, 0])
B = np.array([0])
for i in dist.keys():
if i == base_node:
continue
A = np.vstack((A, ref_nodes[i] - ref_nodes[base_node]))
B = np.vstack(
(B, dist[i] ** 2 - dist[base_node] ** 2 - np.dot(ref_nodes[i] ** 2 - ref_nodes[base_node] ** 2, np.array([1, 1, 1]))))
A = A[1:len(dist)]
B = B[1:len(dist)] * (-0.5)
AT = np.transpose(A)
B = np.dot(AT, B)
rev = np.linalg.inv(np.dot(AT, A))
pos = np.dot(rev, B)
posT = np.transpose(pos)
return posT[0]
if __name__ == '__main__':
dist = process_info(
'{"utime": 2157172559,"survey": {"seq": 26,"mask": 15,"nrngs": [{"mask": 14,"nrng": ["2.495","3.583","2.443"]},{"mask": 13,"nrng": ["1.613","5.014","3.034"]},{"mask": 11,"nrng": ["3.550","4.971","5.377"]},{"mask": 7,"nrng": ["4.971","3.018","5.377"]}]}}')
if dist is not None:
print(dist)
pos = calc_position(dist)
if pos is not None:
print(pos)
else:
print("Fail to calculate position")
else:
print("Fail to process info")
|
py | 1a5095a3d93d4bf47d0f3ddd6bf8a5ddf32d4ed5 | weight = 10
def run():
    # Puck to push: 1300 cm from the edge, 774
    # Puck to push: (200,1000)
    # Goldium: (726,1000)
    r.speed(140) # was 180
'''x,y = coord('gold_setpos')
r.goto(x,y)
r.absrot(-90)
r.goto(x,y-200)
r.speed(60)
def f():
_goto(offset=1, ref='main')
r.conf_set('enable_stuck', 1)
_on('motion:stuck', f)
r.absrot(-90)
r.forward(150)
r.setpos(y=-885)
r.conf_set('enable_stuck', 0)
    r.speed(200) # was 180
r.forward(-100)
r.absrot(0)'''
'''
r.goto(0, 0)
x,y= coord('aktiviranje_akceleratora')
#r.goto(x,y,1)
r.goto(x-100-10,y-25+14+5+7+5+6+3+50-20+150)
@_do
def _():
print("tek sad ocitaj: ")
atoms = cam_read()
if len(atoms) == 1:
a = atoms[0]
r.turn(-90)
r.forward(int(-a[1]) + 120)
r.turn(90)
r.forward(int(a[0]) - 140)
r.turn(90)
r.forward(300)
r.goto(x-100-10,y-25-20+5+14+7+5+6+3+50-20+150, -1)
r.goto(x,y-25+14+7+5+6-20+5+3,-1)
r.absrot(180)
lrucica(1)
#r.forward(120)
#r.goto(x+100,y,1)
r.goto(x+100+20,y-25+14-15+7+5+6+3,-1)
lrucica(0)
    #####
    # After it pushes the puck
    # Points for pushing the blue puck into the accelerator and unlocking the goldenium
addpts(10)
addpts(10)
r.forward(-50)
r.turn(8)
'''
|
py | 1a5095d0e44f12da3bbdfafe0d8b27ecb9d0bf5f | # Copyright (c) 2013, Yanky and contributors
# For license information, please see license.txt
import frappe
def execute(filters=None):
columns, data = [], []
columns = get_columns()
article_data = get_article_data(filters)
for article in article_data:
temp_dict = {
"title":article.get("title"),
"isbn":article.get("isbn"),
"stock":article.get("stock"),
"total_quantity":article.get("total_quantity"),
"issued_count":article.get("total_quantity") - article.get("stock")
}
data.append(temp_dict)
chart = get_chart()
return columns, data, None, chart
def get_columns():
columns = ["" for column in range(5)]
columns[0] = {
"label": ("Title"),
"fieldname": "title",
"fieldtype": "Link",
"options": "Article",
"width": 200
}
columns[1] = {
"label": ("Isbn"),
"fieldname": "isbn",
"width": 200
}
columns[2] = {
"label": ("Stock"),
"fieldname": "stock",
"width": 150
}
columns[3] = {
"label": ("Total Quantity"),
"fieldname": "total_quantity",
"width": 150
}
columns[4] = {
"label": ("Issued Count"),
"fieldname": "issued_count",
"width": 150
}
return columns
def get_article_data(filters) :
if filters:
query = "select title, isbn, stock, total_quantity from tabArticle where title = '" + str(filters.get("title_filter")) + "'"
article_data = frappe.db.sql(query, as_dict=1)
else:
article_data = frappe.db.sql("""select title, isbn, stock, total_quantity from tabArticle """, as_dict=1)
return article_data
def get_chart():
chart_data = {
"labels": frappe.db.get_list('Article', fields=['title'],as_list=True),
"datasets": [
{
'name': "Stock",
'values': frappe.db.get_list('Article',fields=['stock'],as_list=True)
},
{
'name': "Total Quantity",
'values': frappe.db.get_list('Article',fields=['total_quantity'],as_list=True)
}
]
}
chart = {
"title": "Book Avialability",
"data": chart_data,
"type": 'bar',
"height": 250,
"color": ['#4463F0', '#7cd6fd']
}
return chart |
py | 1a5095e4f7d52bf70ff8974af3783f497f20281b | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import atexit
import bisect
import multiprocessing as mp
from collections import deque
import cv2
import torch
from detectron2.data import MetadataCatalog
from detectron2.engine.defaults import DefaultPredictor
from detectron2.utils.video_visualizer import VideoVisualizer
from detectron2.utils.visualizer import ColorMode, Visualizer
class VisualizationDemo(object):
def __init__(self, cfg, instance_mode=ColorMode.IMAGE, parallel=False):
"""
Args:
cfg (CfgNode):
instance_mode (ColorMode):
parallel (bool): whether to run the model in different processes from visualization.
Useful since the visualization logic can be slow.
"""
self.metadata = MetadataCatalog.get(
cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused"
)
self.cpu_device = torch.device("cpu")
self.instance_mode = instance_mode
self.parallel = parallel
if parallel:
num_gpu = torch.cuda.device_count()
self.predictor = AsyncPredictor(cfg, num_gpus=num_gpu)
else:
self.predictor = DefaultPredictor(cfg)
self.metadata.thing_classes.append("object")
def run_on_image(self, image):
"""
Args:
image (np.ndarray): an image of shape (H, W, C) (in BGR order).
This is the format used by OpenCV.
Returns:
predictions (dict): the output of the model.
vis_output (VisImage): the visualized image output.
"""
vis_output = None
predictions = self.predictor(image)
# Convert image from OpenCV BGR format to Matplotlib RGB format.
image = image[:, :, ::-1]
visualizer = Visualizer(image, self.metadata, instance_mode=self.instance_mode)
if "panoptic_seg" in predictions:
panoptic_seg, segments_info = predictions["panoptic_seg"]
vis_output = visualizer.draw_panoptic_seg_predictions(
panoptic_seg.to(self.cpu_device), segments_info
)
else:
if "sem_seg" in predictions:
vis_output = visualizer.draw_sem_seg(
predictions["sem_seg"].argmax(dim=0).to(self.cpu_device)
)
if "instances" in predictions:
instances = predictions["instances"].to(self.cpu_device)
vis_output = visualizer.draw_instance_predictions(predictions=instances)
return predictions, vis_output
def _frame_from_video(self, video):
while video.isOpened():
success, frame = video.read()
if success:
yield frame
else:
break
def run_on_video(self, video):
"""
Visualizes predictions on frames of the input video.
Args:
video (cv2.VideoCapture): a :class:`VideoCapture` object, whose source can be
either a webcam or a video file.
Yields:
ndarray: BGR visualizations of each video frame.
"""
video_visualizer = VideoVisualizer(self.metadata, self.instance_mode)
def process_predictions(frame, predictions):
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
if "panoptic_seg" in predictions:
panoptic_seg, segments_info = predictions["panoptic_seg"]
vis_frame = video_visualizer.draw_panoptic_seg_predictions(
frame, panoptic_seg.to(self.cpu_device), segments_info
)
elif "instances" in predictions:
predictions = predictions["instances"].to(self.cpu_device)
vis_frame = video_visualizer.draw_instance_predictions(frame, predictions)
elif "sem_seg" in predictions:
vis_frame = video_visualizer.draw_sem_seg(
frame, predictions["sem_seg"].argmax(dim=0).to(self.cpu_device)
)
# Converts Matplotlib RGB format to OpenCV BGR format
vis_frame = cv2.cvtColor(vis_frame.get_image(), cv2.COLOR_RGB2BGR)
return vis_frame
frame_gen = self._frame_from_video(video)
if self.parallel:
buffer_size = self.predictor.default_buffer_size
frame_data = deque()
for cnt, frame in enumerate(frame_gen):
frame_data.append(frame)
self.predictor.put(frame)
if cnt >= buffer_size:
frame = frame_data.popleft()
predictions = self.predictor.get()
yield process_predictions(frame, predictions)
while len(frame_data):
frame = frame_data.popleft()
predictions = self.predictor.get()
yield process_predictions(frame, predictions)
else:
for frame in frame_gen:
yield process_predictions(frame, self.predictor(frame))
class AsyncPredictor:
"""
A predictor that runs the model asynchronously, possibly on >1 GPUs.
Because rendering the visualization takes considerably amount of time,
this helps improve throughput a little bit when rendering videos.
"""
class _StopToken:
pass
class _PredictWorker(mp.Process):
def __init__(self, cfg, task_queue, result_queue):
self.cfg = cfg
self.task_queue = task_queue
self.result_queue = result_queue
super().__init__()
def run(self):
predictor = DefaultPredictor(self.cfg)
while True:
task = self.task_queue.get()
if isinstance(task, AsyncPredictor._StopToken):
break
idx, data = task
result = predictor(data)
self.result_queue.put((idx, result))
def __init__(self, cfg, num_gpus: int = 1):
"""
Args:
cfg (CfgNode):
num_gpus (int): if 0, will run on CPU
"""
num_workers = max(num_gpus, 1)
self.task_queue = mp.Queue(maxsize=num_workers * 3)
self.result_queue = mp.Queue(maxsize=num_workers * 3)
self.procs = []
for gpuid in range(max(num_gpus, 1)):
cfg = cfg.clone()
cfg.defrost()
cfg.MODEL.DEVICE = "cuda:{}".format(gpuid) if num_gpus > 0 else "cpu"
self.procs.append(
AsyncPredictor._PredictWorker(cfg, self.task_queue, self.result_queue)
)
self.put_idx = 0
self.get_idx = 0
self.result_rank = []
self.result_data = []
for p in self.procs:
p.start()
atexit.register(self.shutdown)
def put(self, image):
self.put_idx += 1
self.task_queue.put((self.put_idx, image))
def get(self):
self.get_idx += 1 # the index needed for this request
if len(self.result_rank) and self.result_rank[0] == self.get_idx:
res = self.result_data[0]
del self.result_data[0], self.result_rank[0]
return res
while True:
# make sure the results are returned in the correct order
idx, res = self.result_queue.get()
if idx == self.get_idx:
return res
insert = bisect.bisect(self.result_rank, idx)
self.result_rank.insert(insert, idx)
self.result_data.insert(insert, res)
def __len__(self):
return self.put_idx - self.get_idx
def __call__(self, image):
self.put(image)
return self.get()
def shutdown(self):
for _ in self.procs:
self.task_queue.put(AsyncPredictor._StopToken())
@property
def default_buffer_size(self):
return len(self.procs) * 5
|
py | 1a509614e09528d1efc21103efc4e29ec189b7bc | from torch.utils.data import TensorDataset
import numpy as np
import logging
import os
import random
import torch
import time
from tqdm import tqdm
from _utils import *
logger = logging.getLogger(__name__)
def load_and_cache_gen_data(args, filename, pool, tokenizer, split_tag, only_src=False, is_sample=False):
# cache the data into args.cache_path except it is sampled
# only_src: control whether to return only source ids for bleu evaluating (dev/test)
# return: examples (Example object), data (TensorDataset)
data_tag = '_all' if args.data_num == -1 else '_%d' % args.data_num
cache_fn = '{}/{}.pt'.format(args.cache_path, split_tag + ('_src' if only_src else '') + data_tag)
examples = read_examples(filename, args.data_num, args.task)
if is_sample:
examples = random.sample(examples, min(5000, len(examples)))
if split_tag == 'train':
calc_stats(examples, tokenizer, is_tokenize=True)
else:
calc_stats(examples)
if os.path.exists(cache_fn) and not is_sample:
logger.info("Load cache data from %s", cache_fn)
data = torch.load(cache_fn)
else:
if is_sample:
logger.info("Sample 5k data for computing bleu from %s", filename)
else:
logger.info("Create cache data into %s", cache_fn)
tuple_examples = [(example, idx, tokenizer, args, split_tag) for idx, example in enumerate(examples)]
features = pool.map(convert_examples_to_features, tqdm(tuple_examples, total=len(tuple_examples)))
all_source_ids = torch.tensor([f.source_ids for f in features], dtype=torch.long)
if split_tag == 'test' or only_src:
data = TensorDataset(all_source_ids)
else:
all_target_ids = torch.tensor([f.target_ids for f in features], dtype=torch.long)
data = TensorDataset(all_source_ids, all_target_ids)
if args.local_rank in [-1, 0] and not is_sample:
torch.save(data, cache_fn)
return examples, data
def load_and_cache_clone_data(args, filename, pool, tokenizer, split_tag, is_sample=False):
    cache_fn = '{}/{}.pt'.format(args.cache_path, split_tag + ('_all' if args.data_num == -1 else '_%d' % args.data_num))
examples = read_examples(filename, args.data_num, args.task)
if is_sample:
examples = random.sample(examples, int(len(examples) * 0.1))
calc_stats(examples, tokenizer, is_tokenize=True)
if os.path.exists(cache_fn):
logger.info("Load cache data from %s", cache_fn)
data = torch.load(cache_fn)
else:
if is_sample:
logger.info("Sample 10 percent of data from %s", filename)
elif args.data_num == -1:
logger.info("Create cache data into %s", cache_fn)
tuple_examples = [(example, idx, tokenizer, args) for idx, example in enumerate(examples)]
features = pool.map(convert_clone_examples_to_features, tqdm(tuple_examples, total=len(tuple_examples)))
all_source_ids = torch.tensor([f.source_ids for f in features], dtype=torch.long)
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
data = TensorDataset(all_source_ids, all_labels)
if args.local_rank in [-1, 0] and args.data_num == -1:
torch.save(data, cache_fn)
return examples, data
def load_and_cache_defect_data(args, filename, pool, tokenizer, split_tag, is_sample=False):
cache_fn = os.path.join(args.cache_path, split_tag)
examples = read_examples(filename, args.data_num, args.task)
if is_sample:
examples = random.sample(examples, int(len(examples) * 0.1))
calc_stats(examples, tokenizer, is_tokenize=True)
if os.path.exists(cache_fn):
logger.info("Load cache data from %s", cache_fn)
data = torch.load(cache_fn)
else:
if is_sample:
logger.info("Sample 10 percent of data from %s", filename)
elif args.data_num == -1:
logger.info("Create cache data into %s", cache_fn)
tuple_examples = [(example, idx, tokenizer, args) for idx, example in enumerate(examples)]
features = pool.map(convert_defect_examples_to_features, tqdm(tuple_examples, total=len(tuple_examples)))
# features = [convert_clone_examples_to_features(x) for x in tuple_examples]
all_source_ids = torch.tensor([f.source_ids for f in features], dtype=torch.long)
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
data = TensorDataset(all_source_ids, all_labels)
if args.local_rank in [-1, 0] and args.data_num == -1:
torch.save(data, cache_fn)
return examples, data
def load_and_cache_multi_gen_data(args, pool, tokenizer, split_tag, only_src=False, is_sample=False):
cache_fn = os.path.join(args.cache_path, split_tag)
if os.path.exists(cache_fn) and not is_sample:
logger.info("Load cache data from %s", cache_fn)
examples_data_dict = torch.load(cache_fn)
else:
examples_data_dict = {}
task_list = ['summarize', 'translate', 'refine', 'concode', 'defect']
for task in task_list:
if task == 'summarize':
sub_tasks = ['ruby', 'javascript', 'go', 'python', 'java', 'php']
elif task == 'translate':
sub_tasks = ['java-cs', 'cs-java']
elif task == 'refine':
sub_tasks = ['small', 'medium']
else:
sub_tasks = ['none']
args.task = task
for sub_task in sub_tasks:
args.sub_task = sub_task
if task == 'summarize':
args.max_source_length = 256
args.max_target_length = 128
elif task == 'translate':
args.max_source_length = 320
args.max_target_length = 256
elif task == 'refine':
if sub_task == 'small':
args.max_source_length = 130
args.max_target_length = 120
else:
args.max_source_length = 240
args.max_target_length = 240
elif task == 'concode':
args.max_source_length = 320
args.max_target_length = 150
elif task == 'defect':
args.max_source_length = 512
args.max_target_length = 3 # as do not need to add lang ids
filename = get_filenames(args.data_dir, args.task, args.sub_task, split_tag)
examples = read_examples(filename, args.data_num, args.task)
if is_sample:
examples = random.sample(examples, min(5000, len(examples)))
if split_tag == 'train':
calc_stats(examples, tokenizer, is_tokenize=True)
else:
calc_stats(examples)
tuple_examples = [(example, idx, tokenizer, args, split_tag) for idx, example in enumerate(examples)]
if args.data_num == -1:
features = pool.map(convert_examples_to_features, tqdm(tuple_examples, total=len(tuple_examples)))
else:
features = [convert_examples_to_features(x) for x in tuple_examples]
all_source_ids = torch.tensor([f.source_ids for f in features], dtype=torch.long)
if only_src:
data = TensorDataset(all_source_ids)
else:
all_target_ids = torch.tensor([f.target_ids for f in features], dtype=torch.long)
data = TensorDataset(all_source_ids, all_target_ids)
examples_data_dict['{}_{}'.format(task, sub_task) if sub_task != 'none' else task] = (examples, data)
if args.local_rank in [-1, 0] and not is_sample:
torch.save(examples_data_dict, cache_fn)
logger.info("Save data into %s", cache_fn)
return examples_data_dict
def get_filenames(data_root, task, sub_task, split=''):
if task == 'generation':
data_dir = '{}/{}'.format(data_root, task)
train_fn = '{}/train.json'.format(data_dir)
dev_fn = '{}/dev.json'.format(data_dir)
test_fn = '{}/test.json'.format(data_dir)
elif task == 'concode':
data_dir = '{}/{}'.format(data_root, task)
train_fn = '{}/train.json'.format(data_dir)
dev_fn = '{}/dev.json'.format(data_dir)
test_fn = '{}/test.json'.format(data_dir)
elif task == 'summarize':
data_dir = '{}/{}/{}'.format(data_root, task, sub_task)
train_fn = '{}/train.jsonl'.format(data_dir)
dev_fn = '{}/valid.jsonl'.format(data_dir)
test_fn = '{}/test.jsonl'.format(data_dir)
elif task == 'refine':
data_dir = '{}/{}/{}'.format(data_root, task, sub_task)
train_fn = '{}/train.buggy-fixed.buggy,{}/train.buggy-fixed.fixed'.format(data_dir, data_dir)
dev_fn = '{}/valid.buggy-fixed.buggy,{}/valid.buggy-fixed.fixed'.format(data_dir, data_dir)
test_fn = '{}/test.buggy-fixed.buggy,{}/test.buggy-fixed.fixed'.format(data_dir, data_dir)
elif task == 'translate':
data_dir = '{}/{}'.format(data_root, task)
if sub_task == 'cs-java':
train_fn = '{}/train.java-cs.txt.cs,{}/train.java-cs.txt.java'.format(data_dir, data_dir)
dev_fn = '{}/valid.java-cs.txt.cs,{}/valid.java-cs.txt.java'.format(data_dir, data_dir)
test_fn = '{}/test.java-cs.txt.cs,{}/test.java-cs.txt.java'.format(data_dir, data_dir)
else:
train_fn = '{}/train.java-cs.txt.java,{}/train.java-cs.txt.cs'.format(data_dir, data_dir)
dev_fn = '{}/valid.java-cs.txt.java,{}/valid.java-cs.txt.cs'.format(data_dir, data_dir)
test_fn = '{}/test.java-cs.txt.java,{}/test.java-cs.txt.cs'.format(data_dir, data_dir)
elif task == 'clone':
data_dir = '{}/{}'.format(data_root, task)
train_fn = '{}/train.txt'.format(data_dir)
dev_fn = '{}/valid.txt'.format(data_dir)
test_fn = '{}/test.txt'.format(data_dir)
elif task == 'defect':
data_dir = '{}/{}'.format(data_root, task)
train_fn = '{}/train.jsonl'.format(data_dir)
dev_fn = '{}/valid.jsonl'.format(data_dir)
test_fn = '{}/test.jsonl'.format(data_dir)
if split == 'train':
return train_fn
elif split == 'dev':
return dev_fn
elif split == 'test':
return test_fn
else:
return train_fn, dev_fn, test_fn
def read_examples(filename, data_num, task):
read_example_dict = {
'summarize': read_summarize_examples,
'refine': read_refine_examples,
'translate': read_translate_examples,
'generation': read_generation_examples,
'concode': read_concode_examples,
'clone': read_clone_examples,
'defect': read_defect_examples,
}
return read_example_dict[task](filename, data_num)
def calc_stats(examples, tokenizer=None, is_tokenize=False):
avg_src_len = []
avg_trg_len = []
avg_src_len_tokenize = []
avg_trg_len_tokenize = []
for ex in examples:
if is_tokenize:
avg_src_len.append(len(ex.source.split()))
avg_trg_len.append(len(str(ex.target).split()))
avg_src_len_tokenize.append(len(tokenizer.tokenize(ex.source)))
avg_trg_len_tokenize.append(len(tokenizer.tokenize(str(ex.target))))
else:
avg_src_len.append(len(ex.source.split()))
avg_trg_len.append(len(str(ex.target).split()))
if is_tokenize:
logger.info("Read %d examples, avg src len: %d, avg trg len: %d, max src len: %d, max trg len: %d",
len(examples), np.mean(avg_src_len), np.mean(avg_trg_len), max(avg_src_len), max(avg_trg_len))
logger.info("[TOKENIZE] avg src len: %d, avg trg len: %d, max src len: %d, max trg len: %d",
np.mean(avg_src_len_tokenize), np.mean(avg_trg_len_tokenize), max(avg_src_len_tokenize),
max(avg_trg_len_tokenize))
else:
logger.info("Read %d examples, avg src len: %d, avg trg len: %d, max src len: %d, max trg len: %d",
len(examples), np.mean(avg_src_len), np.mean(avg_trg_len), max(avg_src_len), max(avg_trg_len))
def get_elapse_time(t0):
elapse_time = time.time() - t0
if elapse_time > 3600:
hour = int(elapse_time // 3600)
minute = int((elapse_time % 3600) // 60)
return "{}h{}m".format(hour, minute)
else:
minute = int((elapse_time % 3600) // 60)
return "{}m".format(minute)
|
py | 1a509758f72e1e53e0aef95f37d197cfb391bbad | from datasources.acs_population import ACSPopulation
from datasources.cdc_covid_deaths import CDCCovidDeaths
from datasources.cdc_restricted import CDCRestrictedData
from datasources.county_adjacency import CountyAdjacency
from datasources.county_names import CountyNames
from datasources.covid_tracking_project import CovidTrackingProject
from datasources.covid_tracking_project_metadata import CtpMetadata
from datasources.household_income import HouseholdIncome
from datasources.manual_uploads import ManualUploads
from datasources.primary_care_access import PrimaryCareAccess
from datasources.state_names import StateNames
from datasources.urgent_care_facilities import UrgentCareFacilities
from datasources.acs_health_insurance import ACSHealthInsurance
from datasources.acs_poverty import ACSPovertyDataSource
from datasources.acs_household_income import ACSHouseholdIncomeDatasource
# Map of data source ID to the class that implements the ingestion methods for
# that data source.
DATA_SOURCES_DICT = {
ACSPopulation.get_id(): ACSPopulation(),
CDCCovidDeaths.get_id(): CDCCovidDeaths(),
CDCRestrictedData.get_id(): CDCRestrictedData(),
CountyAdjacency.get_id(): CountyAdjacency(),
CountyNames.get_id(): CountyNames(),
CovidTrackingProject.get_id(): CovidTrackingProject(),
CtpMetadata.get_id(): CtpMetadata(),
HouseholdIncome.get_id(): HouseholdIncome(),
ManualUploads.get_id(): ManualUploads(),
PrimaryCareAccess.get_id(): PrimaryCareAccess(),
StateNames.get_id(): StateNames(),
UrgentCareFacilities.get_id(): UrgentCareFacilities(),
ACSHealthInsurance.get_id(): ACSHealthInsurance(),
ACSHouseholdIncomeDatasource.get_id(): ACSHouseholdIncomeDatasource(),
ACSPovertyDataSource.get_id(): ACSPovertyDataSource()
}
|
py | 1a5097bcfd40086c0226231404d6f94a2f9efb82 | import os
from conans import CMake, ConanFile, tools
class QtXlsxWriterConan(ConanFile):
name = "qtxlsxwriter"
license = "MIT"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/dbzhang800/QtXlsxWriter"
description = ".xlsx file reader and writer for Qt5"
topics = ("qtxlsxwriter", "excel", "xlsx", "conan-recipe")
settings = "os", "compiler", "build_type", "arch"
options = {
"shared": [True, False],
"fPIC": [True, False]
}
default_options = {
"shared": False,
"fPIC": True
}
generators = "cmake"
exports_sources = "CMakeLists.txt", "patches/**"
_cmake = None
@property
def _source_subfolder(self):
return "source_subfolder"
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
self._cmake.definitions["QT_ROOT"] = self.deps_cpp_info["qt"].rootpath.replace("\\", "/")
self._cmake.configure()
return self._cmake
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
del self.options.fPIC
def requirements(self):
self.requires("qt/5.15.2")
def source(self):
for source in self.conan_data["sources"][self.version]:
url = source["url"]
filename = url.rsplit("/", 1)[-1]
tools.download(url, filename, sha256=source["sha256"])
tools.unzip(os.path.join(self.source_folder, "v0.3.0.zip"), self._source_subfolder, strip_root=True)
def build(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
cmake = self._configure_cmake()
cmake.build()
def package(self):
cmake = self._configure_cmake()
cmake.install()
self.copy("LICENSE", dst="licenses")
def package_info(self):
if not self.options.shared:
self.cpp_info.defines = ["QTXLSX_STATIC"]
self.cpp_info.libs = tools.collect_libs(self)
|
py | 1a5098360cfab211a640f610d53a3ccbbd774c19 | """Viessmann ViCare climate device."""
import logging
from homeassistant.components.climate import ClimateDevice
from homeassistant.components.climate.const import (
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
PRESET_ECO,
PRESET_COMFORT,
HVAC_MODE_OFF,
HVAC_MODE_HEAT,
HVAC_MODE_AUTO,
)
from homeassistant.const import TEMP_CELSIUS, ATTR_TEMPERATURE, PRECISION_WHOLE
from . import DOMAIN as VICARE_DOMAIN
from . import VICARE_API
from . import VICARE_NAME
_LOGGER = logging.getLogger(__name__)
VICARE_MODE_DHW = "dhw"
VICARE_MODE_DHWANDHEATING = "dhwAndHeating"
VICARE_MODE_FORCEDREDUCED = "forcedReduced"
VICARE_MODE_FORCEDNORMAL = "forcedNormal"
VICARE_MODE_OFF = "standby"
VICARE_PROGRAM_ACTIVE = "active"
VICARE_PROGRAM_COMFORT = "comfort"
VICARE_PROGRAM_ECO = "eco"
VICARE_PROGRAM_EXTERNAL = "external"
VICARE_PROGRAM_HOLIDAY = "holiday"
VICARE_PROGRAM_NORMAL = "normal"
VICARE_PROGRAM_REDUCED = "reduced"
VICARE_PROGRAM_STANDBY = "standby"
VICARE_HOLD_MODE_AWAY = "away"
VICARE_HOLD_MODE_HOME = "home"
VICARE_HOLD_MODE_OFF = "off"
VICARE_TEMP_HEATING_MIN = 3
VICARE_TEMP_HEATING_MAX = 37
SUPPORT_FLAGS_HEATING = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE
VICARE_TO_HA_HVAC_HEATING = {
VICARE_MODE_DHW: HVAC_MODE_OFF,
VICARE_MODE_DHWANDHEATING: HVAC_MODE_AUTO,
VICARE_MODE_FORCEDREDUCED: HVAC_MODE_OFF,
VICARE_MODE_FORCEDNORMAL: HVAC_MODE_HEAT,
VICARE_MODE_OFF: HVAC_MODE_OFF,
}
HA_TO_VICARE_HVAC_HEATING = {
HVAC_MODE_HEAT: VICARE_MODE_FORCEDNORMAL,
HVAC_MODE_OFF: VICARE_MODE_FORCEDREDUCED,
HVAC_MODE_AUTO: VICARE_MODE_DHWANDHEATING,
}
VICARE_TO_HA_PRESET_HEATING = {
VICARE_PROGRAM_COMFORT: PRESET_COMFORT,
VICARE_PROGRAM_ECO: PRESET_ECO,
}
HA_TO_VICARE_PRESET_HEATING = {
PRESET_COMFORT: VICARE_PROGRAM_COMFORT,
PRESET_ECO: VICARE_PROGRAM_ECO,
}
PYVICARE_ERROR = "error"
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Create the ViCare climate devices."""
if discovery_info is None:
return
vicare_api = hass.data[VICARE_DOMAIN][VICARE_API]
add_entities(
[ViCareClimate(f"{hass.data[VICARE_DOMAIN][VICARE_NAME]} Heating", vicare_api)]
)
class ViCareClimate(ClimateDevice):
"""Representation of the ViCare heating climate device."""
def __init__(self, name, api):
"""Initialize the climate device."""
self._name = name
self._state = None
self._api = api
self._target_temperature = None
self._current_mode = None
self._current_temperature = None
self._current_program = None
def update(self):
"""Let HA know there has been an update from the ViCare API."""
_room_temperature = self._api.getRoomTemperature()
_supply_temperature = self._api.getSupplyTemperature()
if _room_temperature is not None and _room_temperature != PYVICARE_ERROR:
self._current_temperature = _room_temperature
elif _supply_temperature != PYVICARE_ERROR:
self._current_temperature = _supply_temperature
else:
self._current_temperature = None
self._current_program = self._api.getActiveProgram()
# The getCurrentDesiredTemperature call can yield 'error' (str) when the system is in standby
desired_temperature = self._api.getCurrentDesiredTemperature()
if desired_temperature == PYVICARE_ERROR:
desired_temperature = None
self._target_temperature = desired_temperature
self._current_mode = self._api.getActiveMode()
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS_HEATING
@property
def name(self):
"""Return the name of the climate device."""
return self._name
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def current_temperature(self):
"""Return the current temperature."""
return self._current_temperature
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._target_temperature
@property
def hvac_mode(self):
"""Return current hvac mode."""
return VICARE_TO_HA_HVAC_HEATING.get(self._current_mode)
def set_hvac_mode(self, hvac_mode):
"""Set a new hvac mode on the ViCare API."""
vicare_mode = HA_TO_VICARE_HVAC_HEATING.get(hvac_mode)
if vicare_mode is None:
_LOGGER.error(
"Cannot set invalid vicare mode: %s / %s", hvac_mode, vicare_mode
)
return
_LOGGER.debug("Setting hvac mode to %s / %s", hvac_mode, vicare_mode)
self._api.setMode(vicare_mode)
@property
def hvac_modes(self):
"""Return the list of available hvac modes."""
return list(HA_TO_VICARE_HVAC_HEATING)
@property
def min_temp(self):
"""Return the minimum temperature."""
return VICARE_TEMP_HEATING_MIN
@property
def max_temp(self):
"""Return the maximum temperature."""
return VICARE_TEMP_HEATING_MAX
@property
def precision(self):
"""Return the precision of the system."""
return PRECISION_WHOLE
    def set_temperature(self, **kwargs):
        """Set new target temperatures."""
        temp = kwargs.get(ATTR_TEMPERATURE)
        if temp is not None:
            self._api.setProgramTemperature(self._current_program, temp)
            self._target_temperature = temp
@property
def preset_mode(self):
"""Return the current preset mode, e.g., home, away, temp."""
return VICARE_TO_HA_PRESET_HEATING.get(self._current_program)
@property
def preset_modes(self):
"""Return the available preset mode."""
return list(VICARE_TO_HA_PRESET_HEATING)
def set_preset_mode(self, preset_mode):
"""Set new preset mode and deactivate any existing programs."""
vicare_program = HA_TO_VICARE_PRESET_HEATING.get(preset_mode)
if vicare_program is None:
_LOGGER.error(
"Cannot set invalid vicare program: %s / %s",
preset_mode,
vicare_program,
)
return
_LOGGER.debug("Setting preset to %s / %s", preset_mode, vicare_program)
self._api.deactivateProgram(self._current_program)
self._api.activateProgram(vicare_program)
|
py | 1a509976aff4ae971d08eda82ebb6b867906789a | #!/usr/bin/env python3
import argparse
import curses
import sys
import threading
import traceback
from .source_handler import CandumpHandler, InvalidFrame, SerialHandler
should_redraw = threading.Event()
stop_reading = threading.Event()
can_messages = {}
can_messages_lock = threading.Lock()
thread_exception = None
def reading_loop(source_handler, blacklist):
"""Background thread for reading."""
try:
while not stop_reading.is_set():
try:
frame_id, data = source_handler.get_message()
except InvalidFrame:
continue
except EOFError:
break
if frame_id in blacklist:
continue
# Add the frame to the can_messages dict and tell the main thread to refresh its content
with can_messages_lock:
can_messages[frame_id] = data
should_redraw.set()
stop_reading.wait()
except:
if not stop_reading.is_set():
# Only log exception if we were not going to stop the thread
# When quitting, the main thread calls close() on the serial device
# and read() may throw an exception. We don't want to display it as
# we're stopping the script anyway
global thread_exception
thread_exception = sys.exc_info()
def init_window(stdscr):
"""Init a window filling the entire screen with a border around it."""
stdscr.clear()
stdscr.refresh()
max_y, max_x = stdscr.getmaxyx()
root_window = stdscr.derwin(max_y, max_x, 0, 0)
root_window.box()
return root_window
def format_data_hex(data):
"""Convert the bytes array to an hex representation."""
# Bytes are separated by spaces.
return ' '.join('%02X' % byte for byte in data)
def format_data_ascii(data):
"""Try to make an ASCII representation of the bytes.
Non printable characters are replaced by '?' except null character which
is replaced by '.'.
"""
msg_str = ''
for byte in data:
char = chr(byte)
if char == '\0':
msg_str = msg_str + '.'
elif ord(char) < 32 or ord(char) > 126:
msg_str = msg_str + '?'
else:
msg_str = msg_str + char
return msg_str
def main(stdscr, reading_thread):
"""Main function displaying the UI."""
# Don't print typed character
curses.noecho()
curses.cbreak()
curses.curs_set(0) # set cursor state to invisible
# Set getch() to non-blocking
stdscr.nodelay(True)
win = init_window(stdscr)
while True:
# should_redraw is set by the serial thread when new data is available
if should_redraw.wait(timeout=0.05): # Timeout needed in order to react to user input
max_y, max_x = win.getmaxyx()
column_width = 100
id_column_start = 2
bytes_column_start = 13
text_column_start = 38
# Compute row/column counts according to the window size and borders
row_start = 3
lines_per_column = max_y - (1 + row_start)
num_columns = (max_x - 2) // column_width
# Setting up column headers
for i in range(0, num_columns):
win.addstr(1, id_column_start + i * column_width, 'ID')
win.addstr(1, 25 + bytes_column_start + i * column_width, 'Bytes')
win.addstr(1, 30 + text_column_start + i * column_width, 'Text')
win.addstr(3, id_column_start, "Press 'q' to quit")
row = row_start + 2 # The first column starts a bit lower to make space for the 'press q to quit message'
current_column = 0
# Make sure we don't read the can_messages dict while it's being written to in the reading thread
with can_messages_lock:
for frame_id in sorted(can_messages.keys()):
msg = can_messages[frame_id]
msg_bytes = format_data_hex(msg)
msg_str = format_data_ascii(msg)
# print frame ID in decimal and hex
win.addstr(row, id_column_start + current_column * column_width, '%s' % str(frame_id).ljust(5))
win.addstr(row, id_column_start + 18 + current_column * column_width, '%X'.ljust(5) % frame_id)
# print frame bytes
win.addstr(row, 25 + bytes_column_start + current_column * column_width, msg_bytes.ljust(23))
# print frame text
win.addstr(row, 30 + text_column_start + current_column * column_width, msg_str.ljust(8))
row = row + 1
if row >= lines_per_column + row_start:
# column full, switch to the next one
row = row_start
current_column = current_column + 1
if current_column >= num_columns:
break
win.refresh()
should_redraw.clear()
c = stdscr.getch()
if c == ord('q') or not reading_thread.is_alive():
break
elif c == curses.KEY_RESIZE:
win = init_window(stdscr)
should_redraw.set()
def parse_ints(string_list):
int_set = set()
for line in string_list:
try:
int_set.add(int(line, 0))
except ValueError:
continue
return int_set
def run():
parser = argparse.ArgumentParser(description='Process CAN data from a serial device or from a file.')
parser.add_argument('serial_device', type=str, nargs='?')
parser.add_argument('baud_rate', type=int, default=115200, nargs='?',
help='Serial baud rate in bps (default: 115200)')
parser.add_argument('-f', '--candump-file', metavar='CANDUMP_FILE', help="File (of 'candump' format) to read from")
parser.add_argument('-s', '--candump-speed', type=float, metavar='CANDUMP_SPEED', help="Speed scale of file read")
parser.add_argument('--blacklist', '-b', nargs='+', metavar='BLACKLIST', help="Ids that must be ignored")
parser.add_argument(
'--blacklist-file',
'-bf',
metavar='BLACKLIST_FILE',
help="File containing ids that must be ignored",
)
args = parser.parse_args()
# checks arguments
if not args.serial_device and not args.candump_file:
print("Please specify serial device or file name")
print()
parser.print_help()
return
if args.serial_device and args.candump_file:
print("You cannot specify a serial device AND a file name")
print()
parser.print_help()
return
# --blacklist-file prevails over --blacklist
if args.blacklist_file:
with open(args.blacklist_file) as f_obj:
blacklist = parse_ints(f_obj)
elif args.blacklist:
blacklist = parse_ints(args.blacklist)
else:
blacklist = set()
if args.serial_device:
source_handler = SerialHandler(args.serial_device, baudrate=args.baud_rate)
elif args.candump_file:
source_handler = CandumpHandler(args.candump_file, args.candump_speed)
reading_thread = None
try:
# If reading from a serial device, it will be opened with timeout=0 (non-blocking read())
source_handler.open()
# Start the reading background thread
reading_thread = threading.Thread(target=reading_loop, args=(source_handler, blacklist,))
reading_thread.start()
# Make sure to draw the UI the first time even if no data has been read
should_redraw.set()
# Start the main loop
curses.wrapper(main, reading_thread)
finally:
# Cleanly stop reading thread before exiting
if reading_thread:
stop_reading.set()
if source_handler:
source_handler.close()
reading_thread.join()
# If the thread returned an exception, print it
if thread_exception:
traceback.print_exception(*thread_exception)
sys.stderr.flush()
if __name__ == '__main__':
run()
|
py | 1a5099940f9b50bcda3a92132b6dee68ddc6ddb1 | # -*- coding: utf-8 -*-
"""
Class definition of YOLO_v3 style detection model on image and video
"""
import os
import time
import logging
import colorsys
import numpy as np
import tensorflow.keras.backend as K
from tensorflow.keras.models import load_model
from tensorflow.keras.layers import Input
from tensorflow.keras.utils import multi_gpu_model
from tensorflow.compat.v1.keras.backend import get_session
from tensorflow.compat.v1 import disable_eager_execution
from .model import yolo_eval, yolo_body_full, yolo_body_tiny
from .utils import letterbox_image, update_path, get_anchors, get_class_names
from .visual import draw_bounding_box
# swap X-Y axis
PREDICT_FIELDS = ('class', 'label', 'confidence', 'ymin', 'xmin', 'ymax', 'xmax')
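# Note (added): boxes come back as (ymin, xmin, ymax, xmax) -- hence the
# "swap X-Y axis" remark above -- and detect_image() zips each detection
# into a dict using exactly these field names.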
class YOLO(object):
"""YOLO detector with tiny alternative
Example
-------
>>> # prepare an EMPTY model, since downloading and converting an existing one is a bit complicated
>>> anchors = get_anchors(YOLO.get_defaults('anchors_path'))
>>> classes = get_class_names(YOLO.get_defaults('classes_path'))
>>> yolo_empty = yolo_body_tiny(Input(shape=(None, None, 3)), len(anchors) // 2, len(classes))
>>> path_model = os.path.join(update_path('model_data'), 'yolo_empty.h5')
>>> yolo_empty.save(path_model)
>>> # use the empty one, so no reasonable detections are expected
>>> from keras_yolo3.utils import image_open
>>> yolo = YOLO(weights_path=path_model,
... anchors_path=YOLO.get_defaults('anchors_path'),
... classes_path=YOLO.get_defaults('classes_path'),
... model_image_size=YOLO.get_defaults('model_image_size'))
>>> img = image_open(os.path.join(update_path('model_data'), 'bike-car-dog.jpg'))
>>> yolo.detect_image(img) # doctest: +ELLIPSIS
(<PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=520x518 at ...>, [...])
"""
_DEFAULT_PARAMS = {
"weights_path": os.path.join(update_path('model_data'), 'tiny-yolo.h5'),
"anchors_path": os.path.join(update_path('model_data'), 'tiny-yolo_anchors.csv'),
"classes_path": os.path.join(update_path('model_data'), 'coco_classes.txt'),
"score": 0.3,
"iou": 0.45,
# "model_image_size": (416, 416),
"nb_gpu": 1,
}
@classmethod
def get_defaults(cls, name):
if name not in cls._DEFAULT_PARAMS:
logging.warning('Unrecognized attribute name "%s"', name)
return cls._DEFAULT_PARAMS.get(name)
def __init__(self, weights_path, anchors_path, classes_path, model_image_size=(None, None),
score=0.3, iou=0.45, nb_gpu=1, **kwargs):
"""
:param str weights_path: path to loaded model weights, e.g. 'model_data/tiny-yolo.h5'
:param str anchors_path: path to loaded model anchors, e.g. 'model_data/tiny-yolo_anchors.csv'
:param str classes_path: path to loaded trained classes, e.g. 'model_data/coco_classes.txt'
:param float score: confidence score
:param float iou:
:param tuple(int,int) model_image_size: e.g. for tiny (416, 416)
:param int nb_gpu:
:param kwargs:
"""
self.__dict__.update(kwargs) # and update with user overrides
self.weights_path = update_path(weights_path)
self.anchors_path = update_path(anchors_path)
self.classes_path = update_path(classes_path)
self.score = score
self.iou = iou
self.nb_gpu = nb_gpu
if not self.nb_gpu:
# disable all GPUs
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
self.class_names = get_class_names(self.classes_path)
self.anchors = get_anchors(self.anchors_path)
self._open_session()
disable_eager_execution()
self.boxes, self.scores, self.classes = self._create_model(model_image_size)
self._generate_class_colors()
def _open_session(self):
logging.warning('Using %s backend.', K.backend())
self.sess = get_session()
def _create_model(self, model_image_size=(None, None)):
# weights_path = update_path(self.weights_path)
logging.debug('loading model from "%s"', self.weights_path)
assert self.weights_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'
# Load model, or construct model and load weights.
num_anchors = len(self.anchors)
num_classes = len(self.class_names)
try:
self.yolo_model = load_model(self.weights_path, compile=False)
except Exception:
logging.warning('Loading weights from "%s"', self.weights_path)
is_tiny_version = (num_anchors == 6) # default setting
cnn_h, cnn_w = model_image_size
input_layer = Input(shape=(cnn_h, cnn_w, 3))
if is_tiny_version:
    self.yolo_model = yolo_body_tiny(input_layer, num_anchors // 2, num_classes)
else:
    self.yolo_model = yolo_body_full(input_layer, num_anchors // 3, num_classes)
# make sure model, anchors and classes match
self.yolo_model.load_weights(self.weights_path, by_name=True, skip_mismatch=True)
else:
out_shape = self.yolo_model.layers[-1].output_shape[-1]
ratio_anchors = num_anchors / len(self.yolo_model.output) * (num_classes + 5)
assert out_shape == ratio_anchors, \
    'Mismatch between model and given anchor %r and class %r sizes' \
    % (ratio_anchors, out_shape)
logging.info('loaded model, anchors (%i), and classes (%i) from %s',
num_anchors, num_classes, self.weights_path)
# Generate output tensor targets for filtered bounding boxes.
self.input_image_shape = K.placeholder(shape=(2,))
if self.nb_gpu >= 2:
self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.nb_gpu)
boxes, scores, classes = yolo_eval(self.yolo_model.output,
self.anchors,
len(self.class_names),
self.input_image_shape,
score_threshold=self.score,
iou_threshold=self.iou)
return boxes, scores, classes
def _generate_class_colors(self):
"""Generate colors for drawing bounding boxes."""
hsv_tuples = [(x / len(self.class_names), 1., 1.)
for x in range(len(self.class_names))]
self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
_fn_color = lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255))
self.colors = list(map(_fn_color, self.colors))
np.random.seed(10101) # Fixed seed for consistent colors across runs.
# Shuffle colors to decorrelate adjacent classes.
np.random.shuffle(self.colors)
np.random.seed(None) # Reset seed to default.
def detect_image(self, image):
start = time.time()
# this should be taken from the model
model_image_size = self.yolo_model._input_layers[0].input_shape[0][1:3]
if all(model_image_size):
for size in model_image_size:
assert size % 32 == 0, 'Multiples of 32 required'
boxed_image = letterbox_image(image, tuple(reversed(model_image_size)))
else:
new_image_size = (image.width - (image.width % 32),
image.height - (image.height % 32))
boxed_image = letterbox_image(image, new_image_size)
image_data = np.array(boxed_image, dtype='float32')
logging.debug('image shape: %s', repr(image_data.shape))
if image_data.max() > 1.5:
image_data /= 255.
image_data = np.expand_dims(image_data, 0) # Add batch dimension.
out_boxes, out_scores, out_classes = self.sess.run(
[self.boxes, self.scores, self.classes],
feed_dict={
self.yolo_model.input: image_data,
self.input_image_shape: [image.size[1], image.size[0]],
K.learning_phase(): 0
})
end = time.time()
logging.debug('Found %i boxes in %f sec.', len(out_boxes), (end - start))
thickness = (image.size[0] + image.size[1]) // 500
predicts = []
for i, c in reversed(list(enumerate(out_classes))):
draw_bounding_box(image, self.class_names[c], out_boxes[i],
out_scores[i], self.colors[c], thickness)
pred = dict(zip(
PREDICT_FIELDS,
(int(c), self.class_names[c], float(out_scores[i]),
*[int(x) for x in out_boxes[i]])
))
predicts.append(pred)
return image, predicts
def _close_session(self):
self.sess.close()
def __del__(self):
self._close_session()
|
py | 1a509996142cc4ccb2bdd0f2debb42cd7c03c1b7 |
# Copyright 2014 Intel Corporation, All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
import horizon
from vsm_dashboard.dashboards.vsm import dashboard
class ClusterImport(horizon.Panel):
name = _("Import Cluster")
slug = 'cluster-import'
dashboard.VizDash.register(ClusterImport)
|
py | 1a5099d86e2325abef29e7709a4a067100cd7289 | import pygame
pygame.init()
def drawGrid(window, cell_width):
for i in range(1,9):
if i%3 == 0:
stroke = 3
else:
stroke = 1
pygame.draw.line(window, (60,113,210), (0, i*cell_width), (WIDTH, i*cell_width), stroke)
pygame.draw.line(window, (60,113,210), (i*cell_width, 0), (i*cell_width, HEIGHT), stroke)
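# Note (added): every third grid line is drawn 3 px thick to outline the
# 3x3 boxes; all other lines are 1 px.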
def displayBoard(board):
number_font = pygame.font.SysFont("Century Gothic", 30)
for i in range(len(board)):
for j in range(len(board[0])):
if board[i][j] == 0:
number = number_font.render(" ", 1, (203,217,243))
else:
number = number_font.render(str(board[i][j]), 1, (203,217,243))
WIN.blit(number, ((j*CELL_WIDTH)+int(CELL_WIDTH/2.5), (i*CELL_WIDTH)+int(CELL_WIDTH/3)))
def findEmpty(board):
for i in range(9):
for j in range(9):
if board[i][j] == 0:
return (i, j)
return False
def valid(board, pos, n):
# Check row
for j in range(9):
if board[pos[0]][j] == n:
return False
# Check column
for i in range(9):
if board[i][pos[1]] == n:
return False
# Check square
row_index = pos[0]//3
col_index = pos[1]//3
for i in range(row_index*3, row_index*3 + 3):
for j in range(col_index*3, col_index*3 + 3):
if board[i][j] == n:
return False
return True
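# Illustrative example (added): for pos = (4, 7), the box check above scans
# rows 3..5 and columns 6..8, since 4 // 3 == 1 and 7 // 3 == 2.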
def solve():
global grid
pos = findEmpty(grid)
if pos == False:
return True
for i in range(1, 10):
if valid(grid, pos, i):
grid[pos[0]][pos[1]] = i
displayBoard(grid)
pygame.display.update()
if solve():
return True
grid[pos[0]][pos[1]] = 0
return False
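# Note (added): solve() is plain recursive backtracking -- place a candidate,
# recurse, and reset the cell to 0 on failure. Redrawing inside the recursion
# visualizes every attempt at the cost of solving speed.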
if __name__ == "__main__":
WIDTH, HEIGHT = 540, 540
WIN = pygame.display.set_mode((WIDTH, HEIGHT))
CELL_WIDTH = WIDTH/9
grid = [
[7,8,0,4,0,0,1,2,0],
[6,0,0,0,7,5,0,0,9],
[0,0,0,6,0,1,0,7,8],
[0,0,7,0,4,0,2,6,0],
[0,0,1,0,5,0,9,3,0],
[9,0,4,0,6,0,0,0,5],
[0,7,0,3,0,0,0,1,2],
[1,2,0,0,0,7,4,0,0],
[0,4,9,2,0,6,0,0,7]
]
running = True
while running:
drawGrid(WIN, CELL_WIDTH)
displayBoard(grid)
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == pygame.MOUSEBUTTONDOWN:
solve()
pygame.display.update()
WIN.fill((50,50,50))
|
py | 1a509a04abf02e90ce4a098046a3d44bbbef7602 | # QR Code Reader
# Author: Johnjimy Som
# Created: June 3, 2021
# Initialising hex string
ini_string = "11109D6B2700A000200000E000000000" #sample
#ini_string = input('Please insert a hexcode: ')#import the hex here
# Printing initial string
# Step 1: read the QR code (hexadecimal)
print("Initial string:", ini_string)
# Code to convert hex to binary
# Step 2: convert the hexadecimal QR code to binary
n = int(ini_string, 16)
binaryStr = ''
while n > 0:
    binaryStr = str(n % 2) + binaryStr
    n = n >> 1
# Pad to the full bit width (4 bits per hex digit) so the leading zeros survive
result = binaryStr.zfill(len(ini_string) * 4)
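# Equivalent one-liner (illustrative alternative, same result for this input):
#   result = format(int(ini_string, 16), '0{}b'.format(len(ini_string) * 4))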
# Print the resultant string
print ("\nResultant string [Binary]:", str(result))
#00010001000100001001110101101011001001110000000010100000000000000010000000000000000000001110000000000000000000000000000000000000
# Print binary characters 9-34 (1-indexed); expected: 00010000100111010110101100
print("\nResultant string [9-34] [Binary]:", result[8:34])
# Step 3: split the converted binary string into per-field data
# import module (third-party: pip install tabulate)
from tabulate import tabulate
# assigned binaryStr data (rows must be lists, not sets, so column order is stable)
mydata = [["Encode Version", "x", "x", "y"],
          ["Print Area", "wololo", "xxx", "yyyu"],
          ["Item Code", "xx", "yy", "zz"]]
# create header
head = [" ", "Item", "Binary", "Value(Decimal)"]
# display table
print(tabulate(mydata, headers=head, tablefmt="pretty"))
py | 1a509a43d1fbe589e64631ca67b2e316572f4c9c | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.Cholesky."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
# Different gradient implementations for benchmark purposes
def SpecializedGrad(l, grad):
return gen_linalg_ops.cholesky_grad(l, grad)
def _GradWithInverseL(l, l_inverse, grad):
middle = math_ops.matmul(l, grad, adjoint_a=True)
middle = array_ops.matrix_set_diag(middle,
0.5 * array_ops.matrix_diag_part(middle))
middle = array_ops.matrix_band_part(middle, -1, 0)
grad_a = math_ops.matmul(
math_ops.matmul(l_inverse, middle, adjoint_a=True), l_inverse)
grad_a += math_ops.conj(array_ops.matrix_transpose(grad_a))
return grad_a * 0.5
def TriAngSolveCompositeGrad(l, grad):
# Gradient is l^{-H} @ ((l^{H} @ grad) * (tril(ones)-1/2*eye)) @ l^{-1}
# Compute ((l^{H} @ grad) * (tril(ones)-1/2*eye)) = middle
middle = math_ops.matmul(l, grad, adjoint_a=True)
middle = array_ops.matrix_set_diag(middle,
0.5 * array_ops.matrix_diag_part(middle))
middle = array_ops.matrix_band_part(middle, -1, 0)
# Compute l^{-H} @ middle = z
l_inverse_middle = linalg_ops.matrix_triangular_solve(l, middle, adjoint=True)
# We need to compute z @ l^{-1}. With matrix_triangular_solve we
# actually compute l^{-H} @ z^{H} = grad. Since we later add grad^{H}
# we can omit the conjugate transpose here.
z_h = math_ops.conj(array_ops.matrix_transpose(l_inverse_middle))
grad_a = linalg_ops.matrix_triangular_solve(l, z_h, adjoint=True)
grad_a += linalg.adjoint(grad_a)
return grad_a * 0.5
def MatrixInverseCompositeGrad(l, grad):
l_inverse = linalg_ops.matrix_inverse(l)
return _GradWithInverseL(l, l_inverse, grad)
def TriAngInvCompositeGrad(l, grad):
num_rows = array_ops.shape(l)[-1]
batch_shape = array_ops.shape(l)[:-2]
l_inverse = linalg_ops.matrix_triangular_solve(l,
linalg_ops.eye(
num_rows,
batch_shape=batch_shape,
dtype=l.dtype))
return _GradWithInverseL(l, l_inverse, grad)
class CholeskyOpTest(test.TestCase):
def _verifyCholeskyBase(self, sess, x, chol, verification):
chol_np, verification_np = self.evaluate([chol, verification])
self.assertAllClose(x, verification_np)
self.assertShapeEqual(x, chol)
# Check that the cholesky is lower triangular, and has positive diagonal
# elements.
if chol_np.shape[-1] > 0:
chol_reshaped = np.reshape(chol_np, (-1, chol_np.shape[-2],
chol_np.shape[-1]))
for chol_matrix in chol_reshaped:
self.assertAllClose(chol_matrix, np.tril(chol_matrix))
self.assertTrue((np.diag(chol_matrix) > 0.0).all())
def _verifyCholesky(self, x):
# Verify that LL^T == x.
with self.cached_session(use_gpu=True) as sess:
chol = linalg_ops.cholesky(x)
verification = math_ops.matmul(chol, chol, adjoint_b=True)
self._verifyCholeskyBase(sess, x, chol, verification)
def testBasic(self):
data = np.array([[4., -1., 2.], [-1., 6., 0], [2., 0., 5.]])
for dtype in (np.float32, np.float64):
self._verifyCholesky(data.astype(dtype))
for dtype in (np.complex64, np.complex128):
complex_data = np.tril(1j * data, -1).astype(dtype)
complex_data += np.triu(-1j * data, 1).astype(dtype)
complex_data += data
self._verifyCholesky(complex_data)
def testBatch(self):
simple_array = np.array([[[1., 0.], [0., 5.]]]) # shape (1, 2, 2)
self._verifyCholesky(simple_array)
self._verifyCholesky(np.vstack((simple_array, simple_array)))
odd_sized_array = np.array([[[4., -1., 2.], [-1., 6., 0], [2., 0., 5.]]])
self._verifyCholesky(np.vstack((odd_sized_array, odd_sized_array)))
# Generate random positive-definite matrices.
matrices = np.random.rand(10, 5, 5)
for i in xrange(10):
matrices[i] = np.dot(matrices[i].T, matrices[i])
self._verifyCholesky(matrices)
# Generate random complex valued positive-definite matrices.
matrices = np.random.rand(10, 5, 5) + 1j * np.random.rand(10, 5, 5)
for i in xrange(10):
matrices[i] = np.dot(matrices[i].T.conj(), matrices[i])
self._verifyCholesky(matrices)
def testNonSquareMatrix(self):
with self.assertRaises(ValueError):
linalg_ops.cholesky(np.array([[1., 2., 3.], [3., 4., 5.]]))
with self.assertRaises(ValueError):
linalg_ops.cholesky(
np.array([[[1., 2., 3.], [3., 4., 5.]], [[1., 2., 3.], [3., 4., 5.]]
]))
def testWrongDimensions(self):
tensor3 = constant_op.constant([1., 2.])
with self.assertRaises(ValueError):
linalg_ops.cholesky(tensor3)
with self.assertRaises(ValueError):
linalg_ops.cholesky(tensor3)
def testNotInvertibleCPU(self):
# The input should be invertible.
with self.session(use_gpu=True):
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Cholesky decomposition was not successful. The"
" input might not be valid."):
# All rows of the matrix below add to zero
self._verifyCholesky(
np.array([[1., -1., 0.], [-1., 1., -1.], [0., -1., 1.]]))
def testEmpty(self):
self._verifyCholesky(np.empty([0, 2, 2]))
self._verifyCholesky(np.empty([2, 0, 0]))
def testConcurrentExecutesWithoutError(self):
with self.session(use_gpu=True) as sess:
matrix1 = random_ops.random_normal([5, 5], seed=42)
matrix2 = random_ops.random_normal([5, 5], seed=42)
matrix1 = math_ops.matmul(matrix1, matrix1, adjoint_a=True)
matrix2 = math_ops.matmul(matrix2, matrix2, adjoint_a=True)
c1 = linalg_ops.cholesky(matrix1)
c2 = linalg_ops.cholesky(matrix2)
c1_val, c2_val = self.evaluate([c1, c2])
self.assertAllClose(c1_val, c2_val)
class CholeskyGradTest(test.TestCase):
_backprop_block_size = 32
def getShapes(self, shapeList):
return ((elem, int(np.floor(1.2 * elem))) for elem in shapeList)
def testSmallMatrices(self):
np.random.seed(0)
shapes = self.getShapes([1, 2, 10])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.float32, dtypes_lib.float64))
def testSmallMatricesComplex(self):
np.random.seed(0)
shapes = self.getShapes([1, 2, 10])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.complex64, dtypes_lib.complex128))
def testOneBlockMatrices(self):
np.random.seed(0)
shapes = self.getShapes([self._backprop_block_size + 1])
self.runFiniteDifferences(
shapes,
dtypes=(dtypes_lib.float32, dtypes_lib.float64),
scalarTest=True)
def testTwoBlockMatrixFloat(self):
np.random.seed(0)
shapes = self.getShapes([2 * self._backprop_block_size + 1])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.float32,), scalarTest=True)
def testTwoBlockMatrixDouble(self):
np.random.seed(0)
shapes = self.getShapes([2 * self._backprop_block_size + 1])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.float64,), scalarTest=True)
def testTwoBlockMatrixComplexFloat(self):
np.random.seed(0)
shapes = self.getShapes([2 * self._backprop_block_size + 1])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.complex64,), scalarTest=True)
def testTwoBlockMatrixComplexDouble(self):
np.random.seed(0)
shapes = self.getShapes([2 * self._backprop_block_size + 1])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.complex128,), scalarTest=True)
def testAgainstSpecialized(self):
np.random.seed(0)
data = np.random.randn(33, 33).astype(np.float32)
data = np.matmul(data, data.T)
grad_data = np.random.randn(*data.shape).astype(np.float32)
with ops.Graph().as_default(), self.session(use_gpu=False) as s:
x = constant_op.constant(data, dtypes_lib.float32)
chol = linalg_ops.cholesky(x)
composite_grad = gradients_impl.gradients(chol, x, grad_data)[0]
specialized_grad = SpecializedGrad(chol, grad_data)
reference, actual = s.run([specialized_grad, composite_grad])
self.assertAllClose(reference, actual)
def runFiniteDifferences(self,
shapes,
dtypes=(dtypes_lib.float32, dtypes_lib.float64,
dtypes_lib.complex64, dtypes_lib.complex128),
scalarTest=False):
with self.session(use_gpu=True):
for shape in shapes:
for batch in False, True:
for dtype in dtypes:
if not scalarTest:
data = np.random.randn(shape[0], shape[1])
if dtype.is_complex:
data = data.astype(np.complex64)
data += 1j * np.random.randn(shape[0], shape[1])
x = constant_op.constant(data, dtype)
tensor = math_ops.matmul(
x, math_ops.conj(array_ops.transpose(x))) / shape[0]
else:
# This is designed to be a faster test for larger matrices.
data = np.random.randn()
if dtype.is_complex:
data = np.complex64(data)
data += 1j * np.random.randn()
x = constant_op.constant(data, dtype)
R = constant_op.constant(
np.random.randn(shape[0], shape[1]), dtype)
e = math_ops.multiply(R, x)
tensor = math_ops.matmul(
e, math_ops.conj(array_ops.transpose(e))) / shape[0]
# Inner-most matrices in tensor are positive definite.
if batch:
tensor = array_ops.tile(
array_ops.expand_dims(tensor, 0), [4, 1, 1])
y = linalg_ops.cholesky(tensor)
if scalarTest:
y = math_ops.reduce_mean(y)
error = gradient_checker.compute_gradient_error(
x, x._shape_as_list(), y, y._shape_as_list())
tf_logging.info("error = %f", error)
if dtype == dtypes_lib.float64:
self.assertLess(error, 1e-5)
elif dtype == dtypes_lib.complex128:
self.assertLess(error, 5e-5)
else:
self.assertLess(error, 5e-3)
class CholeskyBenchmark(test.Benchmark):
shapes = [
(4, 4),
(10, 10),
(16, 16),
(101, 101),
(256, 256),
(1000, 1000),
(1024, 1024),
(2048, 2048),
(513, 2, 2),
(513, 8, 8),
(513, 256, 256),
(4, 513, 2, 2),
]
def _GenerateMatrix(self, shape):
batch_shape = shape[:-2]
shape = shape[-2:]
assert shape[0] == shape[1]
n = shape[0]
matrix = np.ones(shape).astype(np.float32) / (
2.0 * n) + np.diag(np.ones(n).astype(np.float32))
return np.tile(matrix, batch_shape + (1, 1))
def benchmarkCholeskyOp(self):
for shape in self.shapes:
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/cpu:0"):
matrix = variables.Variable(self._GenerateMatrix(shape))
l = linalg_ops.cholesky(matrix)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(
l,),
min_iters=25,
name="cholesky_cpu_{shape}".format(shape=shape))
if test.is_gpu_available(True):
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/device:GPU:0"):
matrix = variables.Variable(self._GenerateMatrix(shape))
l = linalg_ops.cholesky(matrix)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(
l,),
min_iters=25,
name="cholesky_gpu_{shape}".format(shape=shape))
def benchmarkGradVariants(self):
def _BenchmarkGrad(grad_fn, name, device):
for shape in self.shapes:
matrix = self._GenerateMatrix(shape)
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device(device):
l = variables.Variable(np.linalg.cholesky(matrix))
grad_matrix = variables.Variable(
np.random.randn(*matrix.shape).astype(np.float32))
grad = grad_fn(l, grad_matrix)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(
grad,),
min_iters=25,
name="{name}_{dev}_{shape}".format(
name=name, dev=grad.device, shape=shape))
if test.is_gpu_available(True):
_BenchmarkGrad(MatrixInverseCompositeGrad, "composite_matrix_inverse",
"/device:GPU:0")
_BenchmarkGrad(TriAngInvCompositeGrad, "composite_tri_ang_inverse",
"/device:GPU:0")
_BenchmarkGrad(TriAngSolveCompositeGrad, "composite_triangular_solve",
"/device:GPU:0")
_BenchmarkGrad(MatrixInverseCompositeGrad, "composite_matrix_inverse",
"/cpu:0")
_BenchmarkGrad(TriAngInvCompositeGrad, "composite_tri_ang_inverse",
"/cpu:0")
_BenchmarkGrad(TriAngSolveCompositeGrad, "composite_triangular_solve",
"/cpu:0")
_BenchmarkGrad(SpecializedGrad, "specialized", "/cpu:0")
if __name__ == "__main__":
test.main()
|
py | 1a509a608a69b97f0d9b13832334a6b25a649328 | #!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_database_management_managed_database_group_facts
short_description: Fetches details about one or multiple ManagedDatabaseGroup resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple ManagedDatabaseGroup resources in Oracle Cloud Infrastructure
- Gets the Managed Database Group for a specific ID or the list of Managed Database Groups in
a specific compartment. Managed Database Groups can also be filtered based on the name parameter.
Only one of the parameters, ID or name should be provided. If none of these parameters is provided,
all the Managed Database Groups in the compartment are listed.
- If I(managed_database_group_id) is specified, the details of a single ManagedDatabaseGroup will be returned.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
managed_database_group_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the Managed Database Group.
- Required to get a specific managed_database_group.
type: str
aliases: ["id"]
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the compartment.
- Required to list multiple managed_database_groups.
type: str
name:
description:
- A filter to return only resources that match the entire name.
type: str
lifecycle_state:
description:
- The lifecycle state of a resource.
type: str
choices:
- "CREATING"
- "UPDATING"
- "ACTIVE"
- "DELETING"
- "DELETED"
- "FAILED"
sort_by:
description:
- The field to sort information by. Only one sortOrder can be used. The default sort order
for 'TIMECREATED' is descending and the default sort order for 'NAME' is ascending.
The 'NAME' sort order is case-sensitive.
type: str
choices:
- "TIMECREATED"
- "NAME"
sort_order:
description:
- The option to sort information in ascending ('ASC') or descending ('DESC') order. Ascending order is the default order.
type: str
choices:
- "ASC"
- "DESC"
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: List managed_database_groups
oci_database_management_managed_database_group_facts:
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
- name: Get a specific managed_database_group
oci_database_management_managed_database_group_facts:
managed_database_group_id: "ocid1.manageddatabasegroup.oc1..xxxxxxEXAMPLExxxxxx"
"""
RETURN = """
managed_database_groups:
description:
- List of ManagedDatabaseGroup resources
returned: on success
type: complex
contains:
name:
description:
- The name of the Managed Database Group.
returned: on success
type: str
sample: name_example
id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the Managed Database Group.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the compartment.
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
description:
description:
- The information specified by the user about the Managed Database Group.
returned: on success
type: str
sample: description_example
managed_databases:
description:
- A list of Managed Databases in the Managed Database Group.
returned: on success
type: complex
contains:
id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the Managed Database.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
name:
description:
- The name of the Managed Database.
returned: on success
type: str
sample: name_example
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the compartment in which the Managed Database
resides.
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
deployment_type:
description:
- The infrastructure used to deploy the Oracle Database.
returned: on success
type: str
sample: ONPREMISE
database_type:
description:
- The type of Oracle Database installation.
returned: on success
type: str
sample: EXTERNAL_SIDB
database_sub_type:
description:
- The subtype of the Oracle Database. Indicates whether the database is a Container Database, Pluggable Database, or a Non-container
Database.
returned: on success
type: str
sample: CDB
time_added:
description:
- The date and time the Managed Database was added to the group.
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
lifecycle_state:
description:
- The current lifecycle state of the Managed Database Group.
returned: on success
type: str
sample: CREATING
time_created:
description:
- The date and time the Managed Database Group was created.
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
time_updated:
description:
- The date and time the Managed Database Group was last updated.
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
managed_database_count:
description:
- The number of Managed Databases in the Managed Database Group.
returned: on success
type: int
sample: 56
sample: [{
"name": "name_example",
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"description": "description_example",
"managed_databases": [{
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"name": "name_example",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"deployment_type": "ONPREMISE",
"database_type": "EXTERNAL_SIDB",
"database_sub_type": "CDB",
"time_added": "2013-10-20T19:20:30+01:00"
}],
"lifecycle_state": "CREATING",
"time_created": "2013-10-20T19:20:30+01:00",
"time_updated": "2013-10-20T19:20:30+01:00",
"managed_database_count": 56
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.database_management import DbManagementClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class ManagedDatabaseGroupFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: get, list"""
def get_required_params_for_get(self):
return [
"managed_database_group_id",
]
def get_required_params_for_list(self):
return [
"compartment_id",
]
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_managed_database_group,
managed_database_group_id=self.module.params.get(
"managed_database_group_id"
),
)
def list_resources(self):
optional_list_method_params = [
"name",
"lifecycle_state",
"sort_by",
"sort_order",
]
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
)
return oci_common_utils.list_all_resources(
self.client.list_managed_database_groups,
compartment_id=self.module.params.get("compartment_id"),
**optional_kwargs
)
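# Note (added): list_resources() forwards only the optional parameters the user
# actually supplied, so the OCI SDK applies its own defaults for anything omitted.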
ManagedDatabaseGroupFactsHelperCustom = get_custom_class(
"ManagedDatabaseGroupFactsHelperCustom"
)
class ResourceFactsHelper(
ManagedDatabaseGroupFactsHelperCustom, ManagedDatabaseGroupFactsHelperGen
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(
dict(
managed_database_group_id=dict(aliases=["id"], type="str"),
compartment_id=dict(type="str"),
name=dict(type="str"),
lifecycle_state=dict(
type="str",
choices=[
"CREATING",
"UPDATING",
"ACTIVE",
"DELETING",
"DELETED",
"FAILED",
],
),
sort_by=dict(type="str", choices=["TIMECREATED", "NAME"]),
sort_order=dict(type="str", choices=["ASC", "DESC"]),
)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="managed_database_group",
service_client_class=DbManagementClient,
namespace="database_management",
)
result = []
if resource_facts_helper.is_get():
result = [resource_facts_helper.get()]
elif resource_facts_helper.is_list():
result = resource_facts_helper.list()
else:
resource_facts_helper.fail()
module.exit_json(managed_database_groups=result)
if __name__ == "__main__":
main()
|
py | 1a509b2c0f3f925ebe7ebe7dd07cca76e71d10b4 | import os
from bootstrapbase import BootstrapBase
from common.const import Constants
from common.mapr_logger.log import Log
from operations.operationsbase import OperationsBase
from operations.shared import SharedSystem
from operations.csi import CSI
from operations.csinfs import CSINFS
from operations.dataplatform import DataPlatform
from operations.compute import Compute
from operations.drill import Drill
from operations.ldap import LDAP
from operations.kubeflow import Kubeflow
from operations.nodesvc import Nodesvc
from operations.spark import Spark
from operations.autoticket_generator import AutoTicketGenerator
from operations.dataplatform_validator import DataPlatformValidator
from operations.tenant_validator import TenantValidator
from cluster_info import ClusterInfo
class BootstrapUninstall(BootstrapBase):
def __init__(self):
super(BootstrapUninstall, self).__init__(BootstrapBase.UNINSTALL)
self.cloud_instance = None
self.cloud_created = False
self._parse_args()
def run(self):
super(BootstrapUninstall, self).run()
k8s = OperationsBase()
k8s.load_replace_dict()
shared = SharedSystem()
nodesvc = Nodesvc()
csi = CSI()
csinfs = CSINFS()
ldap = LDAP(self._prompts)
dataplatform = DataPlatform()
compute = Compute()
spark = Spark()
autoticket_generator = AutoTicketGenerator()
dataplatform_validator = DataPlatformValidator()
tenant_validator = TenantValidator()
kubeflow = Kubeflow()
# openshift = OpenShift()
drill = Drill()
cluster_info = ClusterInfo()
self.prologue()
self.python_check()
if dataplatform.dataplatform_operation(self.parsed_args, False):
return
self.check_laptop_tools()
self.confirm_delete_installation()
if self.core_install_enabled:
do_storage = True
else:
do_storage = self.parsed_args.core_uninstall
do_compute = True
do_drill = self.parsed_args.drill_uninstall
do_csi = True
uninstall_csi = False
uninstall_compute = False
# uninstall_autoticket_generator = False
uninstall_compute_templates = False
uninstall_storage = False
uninstall_storage_templates = False
do_kubeflow = True
uninstall_kubeflow = True
do_spark = False
uninstall_spark = True
do_external = True
uninstall_external = False
do_secure = True
uninstall_secure = False
do_exampleldap = True
uninstall_exampleldap = False
uninstall_drill = False
str_tolerations = ""
if cluster_info.schedule_pods_on_master:
str_tolerations = "\n - key: node-role.kubernetes.io/master\n operator: Exists\n effect: NoSchedule"
OperationsBase.replace_dict["{tolerate-master-node}"] = str_tolerations
OperationsBase.replace_dict["{operator-repo}"] = Constants.OPERATOR_REPO
OperationsBase.replace_dict["{csi-repo}"] = Constants.CSI_REPO
OperationsBase.replace_dict["{kdf-repo}"] = Constants.KDF_REPO
OperationsBase.replace_dict["{kubeflow-repo}"] = Constants.KUBEFLOW_REPO
OperationsBase.replace_dict["{local-path-provisioner-repo}"] = Constants.LOCAL_PATH_PROVISIONER_REPO
OperationsBase.replace_dict["{kfctl-hcp-istio-repo}"] = Constants.KFCTL_HSP_ISTIO_REPO
OperationsBase.replace_dict["{busybox-repo}"] = Constants.BUSYBOX_REPO
OperationsBase.replace_dict["{fake-labels}"] = "true"
if do_csi:
uninstall_csi = self.check_remove_csi()
if do_storage:
uninstall_storage = self.check_remove_storage()
uninstall_storage_templates = self.check_remove_storage_templates()
if do_external:
uninstall_external = self.check_remove_external()
if do_secure:
uninstall_secure = self.check_remove_secure()
if do_exampleldap:
uninstall_exampleldap = self.check_remove_exampleldap(k8s)
if do_compute:
uninstall_compute = self.check_remove_compute()
# uninstall_autoticket_generator = uninstall_compute
if uninstall_compute:
uninstall_compute_templates = self.check_remove_compute_templates()
if cluster_info.is_spark_installed():
uninstall_spark = self.check_remove_spark()
uninstall_drill = do_drill
if do_kubeflow:
uninstall_kubeflow = self.check_remove_kubeflow()
# Check if the connected k8s environment is Openshift
# if k8s.is_openshift_connected():
# k8s.is_openshift = True
# k8s.switch_to_oc()
if uninstall_external:
shared.uninstall_external_components()
if uninstall_storage:
dataplatform.uninstall_dataplatform(uninstall_templates=uninstall_storage_templates)
dataplatform_validator.run_uninstall()
if uninstall_compute:
compute.uninstall_compute_components(uninstall_templates=uninstall_compute_templates)
autoticket_generator.run_uninstall()
tenant_validator.run_uninstall()
# uninstall_autoticket_generator = uninstall_compute
if uninstall_spark:
spark.uninstall_spark_components()
if uninstall_drill:
drill.uninstall_drill_components()
if uninstall_compute or uninstall_storage:
shared.uninstall_common_components()
nodesvc.uninstall_nodesvc()
elif uninstall_kubeflow:
shared.uninstall_common_components()
if uninstall_secure:
shared.uninstall_secure_components()
if uninstall_exampleldap:
ldap.uninstall_exampleldap()
if uninstall_kubeflow:
kubeflow.uninstall_kubeflow_components()
if uninstall_csi:
csi.uninstall_csi_components()
csinfs.uninstall_csi_components()
self.complete_uninstallation()
def confirm_delete_installation(self):
print(os.linesep)
Log.info("This will uninstall ALL Ezmeral Data Fabric for Kubernetes operators from your Kubernetes environment. This will cause all "
"Tenants to be destroyed. They cannot be recovered!", True)
agree = self._prompts.prompt_boolean("Do you agree?", False, key_name="AGREEMENT")
if not agree:
Log.info("Very wise decision. Exiting uninstall...", True)
BootstrapBase.exit_application(2)
def check_remove_csi(self):
choice = self._prompts.prompt_boolean("Remove the Ezmeral Data Fabric CSI driver?", False, key_name="REMOVE_CSI")
return choice
def check_remove_spark(self):
choice = self._prompts.prompt_boolean("Remove the Spark Operator?", False, key_name="REMOVE_SPARK")
return choice
def check_remove_drill(self):
choice = self._prompts.prompt_boolean("Remove the Drill Operator?", False, key_name="REMOVE_DRILL")
return choice
def check_remove_kubeflow(self):
choice = self._prompts.prompt_boolean("Remove the Kubeflow Operator?", False, key_name="REMOVE_KUBEFLOW")
return choice
def check_remove_compute(self):
choice = self._prompts.prompt_boolean("Remove Compute components?", False, key_name="REMOVE_COMPUTE")
return choice
def check_remove_compute_templates(self):
choice = self._prompts.prompt_boolean("Remove the Compute templates? Note: You will lose your template changes!", False, key_name="REMOVE_COMPUTE_TEMPLATES")
return choice
def check_remove_storage(self):
choice = self._prompts.prompt_boolean("Remove Data Platform?", False, key_name="REMOVE_STORAGE")
return choice
def check_remove_storage_templates(self):
choice = self._prompts.prompt_boolean("Remove the Data Platform Templates? Note: You will lose your template changes!", False, key_name="REMOVE_STORAGE_TEMPLATES")
return choice
def check_remove_external(self):
choice = self._prompts.prompt_boolean("Remove the External Cluster Info? Note: You will lose your imported cluster info!", False, key_name="REMOVE_EXTERNAL_INFO")
return choice
def check_remove_secure(self):
choice = self._prompts.prompt_boolean("Remove the Secure Namespace? Note: You will lose your template changes!", False, key_name="REMOVE_SECURE")
return choice
@staticmethod
def check_remove_exampleldap(k8s):
get_str = "namespace {0}".format(Constants.EXAMPLE_LDAP_NAMESPACE)
response, status = k8s.run_get(get_str, False)
result = (status == 0)
return result
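# Note (added): a zero exit status from the `get namespace` call above means the
# example LDAP namespace exists, so its components are offered for removal.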
def is_cloud_env(self):
print(os.linesep)
is_cloud = self._prompts.prompt_boolean("Is this a cloud env?", True, key_name="CLOUD_ENV")
if is_cloud:
return True
return False
@staticmethod
def complete_uninstallation():
print(os.linesep)
msg = "This Kubernetes environment"
warnings = Log.get_warning_count()
errors = Log.get_error_count()
if errors > 0 and warnings > 0:
msg = "{0} had {1} error(s) and {2} warning(s) during the uninstall process for selected components".format(msg, errors, warnings)
Log.error(msg)
elif errors > 0 and warnings == 0:
msg = "{0} had {1} error(s) during the uninstall process for selected components".format(msg, errors)
Log.error(msg)
elif errors == 0 and warnings > 0:
msg = "{0} had {1} warnings(s) during the uninstall process for selected components".format(msg, warnings)
Log.warning(msg)
else:
msg = "{0} has had selected components successfully uninstalled".format(msg)
Log.info(msg, True)
if errors > 0 or warnings > 0:
msg = "Please check the bootstrap log file for this session here: {0}".format(Log.get_log_filename())
Log.warning(msg)
Log.info("")
if __name__ == '__main__':
bootstrap_uninstall = BootstrapUninstall()
try:
bootstrap_uninstall.run()
except Exception as e:
Log.exception(e)
raise e
BootstrapBase.exit_application(0)
|
py | 1a509b81eaaccae145956ff5aa798834e9d60b6f | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from speech_recognition_msgs.msg import SpeechRecognitionCandidates
from std_msgs.msg import String
import rospy
class SpeechRecognition(object):
def __init__(self):
rospy.Subscriber('/Tablet/voice', SpeechRecognitionCandidates, self.callback)
self.pub_ = rospy.Publisher('/speech', String, queue_size=1)
self.num_dict = {'one':'1','two':'2','three':'3','four':'4','five':'5','six':'6','seven':'7','eight':'8','nine':'9','zero':'0'}
self.num_list = ['1','2','3','4','5','6','7','8','9','0']  # include '0' so digits mapped from "zero" are kept
self.subject_list = ['全部', 'everything']
self.verb_list = ['片付けて', '片付けといて', '直して', '直しといて', 'なおして', 'なおしといて', 'clean']
print(SpeechRecognitionCandidates)
def callback(self, msg):
rospy.loginfo('{} ({})'.format(msg.transcript[0], msg.confidence[0]))
raw_msg = str()
pub_msg = str()
if msg.confidence[0] > 0.5:
raw_msg = msg.transcript[0]
tmp_flg = True
for n_dic_key in self.num_dict.keys():
if n_dic_key in raw_msg:
raw_msg = raw_msg.replace(n_dic_key,self.num_dict[n_dic_key])
rospy.loginfo('%s', raw_msg)
for sbj in self.subject_list:
if sbj in raw_msg:
for verb in self.verb_list:
if verb in raw_msg:
tmp_flg = False
self.pub_.publish('99')
if tmp_flg:
for data in list(raw_msg):
if data in self.num_list:
pub_msg += data
if len(pub_msg) == 2:
self.pub_.publish(pub_msg)
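# Illustrative example (added): the utterance "three five" is mapped via
# num_dict to "3 5", the two digits are collected into pub_msg, and "35" is
# published on /speech; a command like "clean everything" publishes "99".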
if __name__ == '__main__':
rospy.init_node('speech_recognition')
speech_recognition = SpeechRecognition()
rospy.spin()
|