metadata | text |
---|---|
{
"source": "jihobak/shopping-classification",
"score": 2
} |
#### File: jihobak/shopping-classification/evaluate.py
```python
from collections import defaultdict
import fire
import h5py
import numpy as np
import six
from six.moves import zip, cPickle
from tqdm import tqdm
def get_size(data_path, div):
h = h5py.File(data_path)[div]
size = h['img'].shape[0]
return size
def toss_answer(data_path, div):
h = h5py.File(data_path)[div]
size = h['cate'].shape[0]
for i in range(size):
yield np.argmax(h['cate'][i])
def toss_chunk_answer(data_path, div):
h = h5py.File(data_path)[div]
size = h['img'].shape[0]
chunk_sz = 1000000
chunk_ix = [(i, min(i+chunk_sz, size)) for i in range(0, size, chunk_sz)]
for start, end in chunk_ix:
b = h['bindex'][start:end]
m = h['mindex'][start:end]
s = h['sindex'][start:end]
d = h['dindex'][start:end]
answer = list(zip(b, m, s, d))  # (bindex, mindex, sindex, dindex) per sample
yield answer
#yield np.argmax(h['cate'][start:end], axis=1)
def evaluate(predict_path, data_path, div, y_vocab_path, log_path):
"""
python evaluate.py evaluate onlym_khaiii_textimg1024_predict.tsv ./data/train/data.h5py dev ./data/y_vocab.py3.cPickle onlym_khaiii_textimg1024_score.txt
#khaiii textimg 1024
python evaluate.py evaluate valid_khaiii_textimg1024_predict.tsv ./data/train/khaiii_data.h5py dev ./data/y_vocab.py3.cPickle valid_khaiii_textimg1024_score.txt
#khaii2 textimg 1024
python evaluate.py evaluate valid_khaiii2_textimg1024_predict.tsv ./data/train/khaiii2_data.h5py dev ./data/y_vocab.py3.cPickle valid_khaiii2_textimg1024_score.txt
#khaiii2_12_512textimgdrop_relu_cw_1024
python evaluate.py evaluate valid_khaiii2_12_512textimgdrop_relu_cw_1024_predict.tsv ./data/train/khaiii2_data_120000.h5py dev ./data/y_vocab.py3.cPickle valid_khaiii2_12_512textimgdrop_relu_cw_1024_score.txt
"""
#h = h5py.File(data_path, 'r')[div]
y_vocab = cPickle.loads(open(y_vocab_path, 'rb').read())
inv_y_vocab = {v: k for k, v in six.iteritems(y_vocab)}
b_vocab = cPickle.loads(open("./data/b_vocab.cPickle", 'rb').read())
m_vocab = cPickle.loads(open("./data/m_vocab.cPickle", 'rb').read())
s_vocab = cPickle.loads(open("./data/s_vocab.cPickle", 'rb').read())
d_vocab = cPickle.loads(open("./data/d_vocab.cPickle", 'rb').read())
inv_b_vocab = {i: s for s, i in six.iteritems(b_vocab)}
inv_m_vocab = {i: s for s, i in six.iteritems(m_vocab)}
inv_s_vocab = {i: s for s, i in six.iteritems(s_vocab)}
inv_d_vocab = {i: s for s, i in six.iteritems(d_vocab)}
fin = open(predict_path, 'r')
hit, n = defaultdict(lambda: 0), defaultdict(lambda: 0)
print('loading ground-truth...')
#CATE = np.argmax(h['cate'], axis=1)
size = get_size(data_path, div)
#CATE = toss_answer(data_path, div)
bomb = toss_chunk_answer(data_path, div)
for bx in bomb:
for p, y in tqdm(zip(fin, bx), desc='bomb', total=len(bx)):
# format y = (b, m, s, d) this is answer
pid, b, m, s, d = p.split('\t')
b, m, s, d = list(map(int, [b, m, s, d])) # predicted categories
#gt = list(map(int, inv_y_vocab[y].split('>'))) # ground truth
gt_b = inv_b_vocab[y[0]]
gt_m = inv_m_vocab[y[1]]
gt_s = inv_s_vocab[y[2]]
gt_d = inv_d_vocab[y[3]]
gt = [gt_b, gt_m, gt_s, gt_d]
for depth, _p, _g in zip(['b', 'm', 's', 'd'],
[b, m, s, d],
gt):
if _g == -1:
continue
n[depth] = n.get(depth, 0) + 1 # total ground-truth labels at this depth
if _p == _g:
hit[depth] = hit.get(depth, 0) + 1 # count of correct predictions at this depth
with open(log_path, 'w') as f:
for d in ['b', 'm', 's', 'd']:
if n[d] > 0:
print('%s-Accuracy: %.3f(%s/%s)' % (d, hit[d] / float(n[d]), hit[d], n[d]))
f.write('%s-Accuracy: %.3f(%s/%s) \n' % (d, hit[d] / float(n[d]), hit[d], n[d]))
score = sum([hit[d] / float(n[d]) * w
for d, w in zip(['b', 'm', 's', 'd'],
[1.0, 1.2, 1.3, 1.4])]) / 4.0
print('score: %.3f' % score)
f.write('score: %.3f\n' % score)
if __name__ == '__main__':
fire.Fire({'evaluate': evaluate})
```
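The final score above is a weighted mean of the per-depth accuracies, with weights 1.0/1.2/1.3/1.4 for b/m/s/d and a division by 4. A small sketch with made-up hit/total counts (not from any real run) shows the arithmetic:
```python
from collections import defaultdict

# hypothetical counts for illustration only
hit = defaultdict(int, {'b': 90, 'm': 80, 's': 60, 'd': 10})
n = defaultdict(int, {'b': 100, 'm': 100, 's': 100, 'd': 20})

score = sum(hit[d] / float(n[d]) * w
            for d, w in zip(['b', 'm', 's', 'd'], [1.0, 1.2, 1.3, 1.4])) / 4.0
print('score: %.3f' % score)  # (0.9*1.0 + 0.8*1.2 + 0.6*1.3 + 0.5*1.4) / 4 = 0.835
```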
#### File: jihobak/shopping-classification/make_vocab.py
```python
import os, json
from six.moves import cPickle
def make_vocab(load_path='cate1.json', write_path='./data'):
file_format = '{cate}_vocab.cPickle'
cate1 = json.loads(open(load_path, 'rb').read().decode('utf-8'))
for c in cate1.keys():
file_name = file_format.format(cate=c)
vocab = {value:label for label, value in enumerate(cate1[c].values())}
cPickle.dump(vocab, open(os.path.join(write_path, file_name), 'wb'), 2)
if __name__ == '__main__':
make_vocab(load_path='./cate1.json', write_path='./data')
```
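make_vocab turns each top-level key of cate1.json (b, m, s, d) into a pickled dict from the original category id to a contiguous class index. A sketch with a made-up category dict in that shape (the real names and ids come from the competition data):
```python
cate1 = {'b': {'fashion': 14, 'beauty': 3, 'food': 27}}  # made-up example
vocab = {value: label for label, value in enumerate(cate1['b'].values())}
print(vocab)  # {14: 0, 3: 1, 27: 2} -- original id -> contiguous class index
```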
#### File: jihobak/shopping-classification/train.py
```python
import os
import json
import threading
import fire
import h5py
import tqdm
import numpy as np
import six
from keras.models import load_model
from keras.callbacks import ModelCheckpoint, EarlyStopping, CSVLogger
from six.moves import zip, cPickle
from misc import get_logger, Option
from network import TextImg
opt = Option('./config.json')
if six.PY2:
cate1 = json.loads(open('../cate1.json').read())
else:
cate1 = json.loads(open('../cate1.json', 'rb').read().decode('utf-8'))
DEV_DATA_LIST = opt.dev_data_list
TEST_DATA_LIST = opt.test_data_list
class Classifier():
def __init__(self):
self.logger = get_logger('Classifier')
self.num_classes = 0
def get_textimg_generator(self, ds, batch_size, cate, size, raise_stop_event=False):
left, limit = 0, size
if cate == 'b':
cate_index = 'bindex'
elif cate == 'm':
cate_index = 'mindex'
elif cate == 's':
cate_index = 'sindex'
else:
cate_index = 'dindex'
while True:
right = min(left + batch_size, limit)
X = [ds[t][left:right, :] for t in ['uni', 'w_uni', 'img']]
Y = [ds[t][left:right] for t in [cate_index]] ###
yield X, Y
left = right
if right == limit:
left = 0
if raise_stop_event:
return  # raising StopIteration inside a generator is a RuntimeError under PEP 479 (Python 3.7+); return ends the generator
def train_textimg(self, data_root, data_file_name, out_dir, cate, fc_hidden):
data_path = os.path.join(data_root, data_file_name)
data = h5py.File(data_path, 'r')
output_dir_base = "only"+cate+"_khaiii2_textimg_"+str(fc_hidden)
self.weight_fname = os.path.join(out_dir, output_dir_base+".weights.{epoch:02d}-{val_sparse_categorical_accuracy:.2f}.hdf5")
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
train = data['train']
dev = data['dev']
callbacks_list = [
EarlyStopping(
monitor='val_sparse_categorical_accuracy',
patience=5,
mode='max'
),
ModelCheckpoint(
self.weight_fname,
monitor='val_sparse_categorical_accuracy',
save_best_only=True,
mode='max',
period=10
),
CSVLogger(output_dir_base+'_log.csv',
append=True,
separator=',')
]
# b or m
num_size = 6503449
num_size_valid = 1625910
# s
num_ssize = 5012525
num_ssize_valid = 1252930
# d
num_dsize = 605398
num_dsize_valid = 151714
# Train
if cate == 'b':
total_train_samples = num_size
total_dev_samples = num_size_valid
elif cate == 'm':
total_train_samples = num_size
total_dev_samples = num_size_valid
elif cate == 's':
total_train_samples = num_ssize
total_dev_samples = num_ssize_valid
else:
total_train_samples = num_dsize
total_dev_samples = num_dsize_valid
textimg = TextImg(output_dir_base, cate, fc_hidden)
textimg_model = textimg.get_model()
train_gen = self.get_textimg_generator(
train,
opt.batch_size,
cate,
total_train_samples
)##
self.steps_per_epoch = int(np.ceil(total_train_samples / float(opt.batch_size)))
dev_gen = self.get_textimg_generator(
dev,
opt.batch_size,
cate,
total_dev_samples
)##
self.validation_steps = int(np.ceil(total_dev_samples / float(opt.batch_size)))
textimg_model.fit_generator(generator=train_gen,
steps_per_epoch=self.steps_per_epoch,
epochs=opt.num_epochs,
validation_data=dev_gen,
validation_steps=self.validation_steps,
shuffle=True,
callbacks=callbacks_list)
if __name__ == '__main__':
clsf = Classifier()
fire.Fire({
'train_textimg': clsf.train_textimg
})
``` |
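get_textimg_generator above walks one HDF5 split in fixed-size windows and wraps back to the start, which is what lets fit_generator keep drawing batches across epochs. A standalone sketch of the same windowing with small in-memory NumPy arrays (hypothetical shapes, not the real dataset):
```python
import numpy as np

# stand-in for one HDF5 split: arrays sharing the same first dimension (10 samples)
ds = {
    'uni': np.arange(10).reshape(10, 1),
    'w_uni': np.ones((10, 1)),
    'img': np.zeros((10, 2)),
    'bindex': np.arange(10),
}

def window_batches(ds, batch_size, size):
    left = 0
    while True:  # wrap around indefinitely, like the generator above
        right = min(left + batch_size, size)
        X = [ds[t][left:right] for t in ('uni', 'w_uni', 'img')]
        Y = ds['bindex'][left:right]
        yield X, Y
        left = 0 if right == size else right

gen = window_batches(ds, 4, 10)
for _ in range(4):
    _, Y = next(gen)
    print(Y)  # [0 1 2 3], [4 5 6 7], [8 9], then back to [0 1 2 3]
```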
{
"source": "JihoChoi/BD18F-JihoChoi",
"score": 2
} |
#### File: hw2_parallax/hw2_rnn/rnn_parallax.py
```python
import os
import time
import tensorflow as tf
import parallax
# from model import lenet
from tensorflow.examples.tutorials.mnist import input_data
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('resource_info_file',
os.path.abspath(os.path.join(
os.path.dirname(__file__),
'.',
'resource_info')),
'Filename containing cluster information')
tf.app.flags.DEFINE_integer('max_steps', 1000000,
"""Number of iterations to run for each workers.""")
tf.app.flags.DEFINE_integer('log_frequency', 50,
"""How many steps between two runop logs.""")
tf.app.flags.DEFINE_integer('batch_size', 32,
"""Batch size""")
tf.app.flags.DEFINE_boolean('sync', True, '')
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
learning_rate = 1e-3
weight_decay = 1e-4
num_classes = 10
dropout_keep_prob = 0.5
time_step = 28
def rnn(only_logits=False):
tf.set_random_seed(1234)
# images_ph = tf.placeholder(tf.float32, shape=[None, 784])
images_ph = tf.placeholder(tf.float32, [None, time_step * 28])
# labels_ph = tf.placeholder(tf.int64, shape=[None, num_classes])
labels_ph = tf.placeholder(tf.int64, [None, num_classes])
is_training_ph = tf.placeholder(tf.bool, shape=())
global_step = tf.train.get_or_create_global_step()
# images = tf.reshape(images_ph, [-1, 28, 28, 1])
image = tf.reshape(images_ph, [-1, time_step, 28])
# RNN
rnn_cell = tf.nn.rnn_cell.LSTMCell(num_units=64)
outputs, (h_c, h_n) = tf.nn.dynamic_rnn(rnn_cell, image,
initial_state=None, dtype=tf.float32, time_major=False)
logits = tf.layers.dense(outputs[:, -1, :], num_classes)
loss = tf.losses.softmax_cross_entropy(onehot_labels=labels_ph, logits=logits)
loss += weight_decay * tf.losses.get_regularization_loss()
acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(logits, axis=1), tf.argmax(labels_ph, axis=1)), tf.float32))
train_op = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(loss, global_step=global_step)
return {'train_op': train_op,
'logits': logits,
'loss': loss,
'acc': acc,
'images': images_ph,
'labels': labels_ph,
'is_training': is_training_ph,
'global_step': global_step}
# Build single-GPU lenet model
single_gpu_graph = tf.Graph()
with single_gpu_graph.as_default():
# ops = lenet()
ops = rnn()
train_op = ops['train_op']
loss = ops['loss']
acc = ops['acc']
x = ops['images']
y = ops['labels']
is_training = ops['is_training']
parallax_config = parallax.Config()
ckpt_config = parallax.CheckPointConfig(ckpt_dir='parallax_ckpt',
save_ckpt_steps=1)
parallax_config.ckpt_config = ckpt_config
sess, num_workers, worker_id, num_replicas_per_worker = parallax.parallel_run(
single_gpu_graph,
FLAGS.resource_info_file,
sync=FLAGS.sync,
parallax_config=parallax_config)
start = time.time()
for i in range(FLAGS.max_steps):
batch = mnist.train.next_batch(FLAGS.batch_size, shuffle=False)
_, loss_ = sess.run([train_op, loss], feed_dict={x: [batch[0]],
y: [batch[1]],
is_training: [True]})
if i % FLAGS.log_frequency == 0:
end = time.time()
throughput = float(FLAGS.log_frequency) / float(end - start)
acc_ = sess.run(acc, feed_dict={x: [mnist.test.images],
y: [mnist.test.labels],
is_training: [False]})[0]
parallax.log.info("step: %d, test accuracy: %lf, throughput: %f steps/sec"
% (i, acc_, throughput))
start = time.time()
``` |
{
"source": "JihoChoi/BOJ",
"score": 3
} |
#### File: BOJ/scripts/01021.py
```python
import sys
# REVIEW
# References
# https://parkssiss.tistory.com/42
# https://roseline124.github.io/algorithm/2019/04/06/Altorithm-baekjoon-1021.html
N, M = map(int, sys.stdin.readline().split(' '))
pos = list(map(int, input().split(' '))) # positions, locations
count = 0 # step count
deque = list(range(1, N+1))
"""
def shift_left(deque): # (2) front to back
global count
count += 1
deque.append(deque.pop(0))
def shift_right(deque): # (3) back to front
global count
count += 1
# deque = [deque.pop(-1)] + deque # requires return
deque.insert(0, deque.pop(-1))
"""
while pos:
if pos[0] == deque[0]:
pos.pop(0)
deque.pop(0)
else:
if deque.index(pos[0]) <= len(deque) // 2:
while deque[0] != pos[0]:
# shift_left(deque)
deque.append(deque.pop(0))
count += 1
else:
while deque[0] != pos[0]:
# shift_right(deque)
deque.insert(0, deque.pop(-1))
count += 1
print(count)
```
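The list-based rotations above cost O(N) per pop(0)/insert(0, ...). collections.deque.rotate expresses the same two operations directly; a sketch of the same counting logic (not the submitted solution), checked against the BOJ 1021 sample:
```python
from collections import deque

def min_rotations(n, targets):
    dq = deque(range(1, n + 1))
    count = 0
    for t in targets:
        idx = dq.index(t)
        if idx <= len(dq) // 2:
            dq.rotate(-idx)            # operation 2: front -> back, idx times
            count += idx
        else:
            dq.rotate(len(dq) - idx)   # operation 3: back -> front
            count += len(dq) - idx
        dq.popleft()                   # operation 1: extract the target
    return count

print(min_rotations(10, [2, 9, 5]))  # 8
```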
#### File: BOJ/scripts/01850.py
```python
import sys
def gcd(a, b):
while b != 0:
a, b = b, a % b
return a
a, b = map(int, sys.stdin.readline().split())
ans = ['1'] * gcd(a, b)
print("".join(ans))
```
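The one-liner works because the number written with k ones (a repunit) satisfies gcd(A(a), A(b)) = A(gcd(a, b)), so only gcd(a, b) ones need to be printed. Quick sanity check:
```python
from math import gcd

print(gcd(111111, 1111))   # 11
print('1' * gcd(6, 4))     # '11' -- the same repunit
```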
#### File: BOJ/scripts/01929.py
```python
def prime_list(min_num, max_num):
sieve = [True] * (max_num + 1) # Sieve of Eratosthenes
m = int(max_num ** 0.5) # sqrt(n)
for i in range(2, m + 1):
if sieve[i] == True:
for j in range(i+i, max_num+1, i):
sieve[j] = False
sieve[1] = False
return [i for i in range(min_num, max_num+1) if sieve[i] == True]
M, N = map(int, input().split(" "))
primes = prime_list(M, N)
for prime in primes:
print(prime)
```
#### File: BOJ/scripts/02309.py
```python
import sys
def find_index(heights):
height_sum = sum(heights)
for i in range(9):
for j in range(i+1, 9):
if height_sum - heights[i] - heights[j] == 100:
return heights[i], heights[j]
heights = [int(sys.stdin.readline()) for _ in range(9)]
heights.sort()
n1, n2 = find_index(heights)
for height in heights:
if height != n1 and height != n2:
print(height)
```
#### File: BOJ/scripts/02448.py
```python
from math import log
# N = 3×2^k, k = 0, 1, 2, ..., 10
N = int(input())
k = int(log(N // 3, 2))
stars = ['  *   ', ' * *  ', '***** ']  # base triangle, padded to width 6 so the halves stay separated when doubled
def append_stars(size):
for i in range(len(stars)):
stars.append(stars[i] + stars[i])
stars[i] = ' '*size + stars[i] + ' '*size
# print(stars[i], i)
for i in range(k):
# print(i)
append_stars(3*2**i)
# Print
for i in range(len(stars)):
print(stars[i])
```
#### File: BOJ/scripts/02609.py
```python
import sys
def gcd(a, b):
while b != 0:
a, b = b, a % b
return a
def lcd(a, b):
if a > b:
a, b = b, a
ans = a
while ans % b != 0:
ans += a
return ans
a, b = map(int, sys.stdin.readline().split())
print(gcd(a, b))
print(lcd(a, b))
```
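The lcd helper computes the least common multiple by repeated addition; the identity lcm(a, b) = a * b // gcd(a, b) gives the same result directly. A minimal alternative sketch:
```python
from math import gcd

def lcm(a, b):
    return a * b // gcd(a, b)

print(lcm(24, 18))  # 72 (and gcd(24, 18) == 6), matching the BOJ 2609 sample
```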
#### File: BOJ/scripts/06064.py
```python
def get_year(m, n, x, y):
while x <= m * n:
if (x-y) % n == 0:
return x
x += m
return -1
T = int(input())
for _ in range(T):
m, n, x, y = map(int, input().split())
print(get_year(m,n,x,y))
```
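get_year steps through the years congruent to x modulo m and returns the first one that is also congruent to y modulo n, giving up after m * n (an upper bound on lcm(m, n)). A hand-worked case:
```python
def get_year(m, n, x, y):  # same search as above
    while x <= m * n:
        if (x - y) % n == 0:
            return x
        x += m
    return -1

# x ≡ 3 (mod 10): candidates 3, 13, 23, 33, ...; 33 is the first with 33 ≡ 9 (mod 12)
print(get_year(10, 12, 3, 9))  # 33
```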
#### File: BOJ/scripts/12865.py
```python
N, W = map(int, input().split()) # number of items, capacity
weights = []
values = []
for i in range(N):
w, v = map(int, input().split())
weights.append(w)
values.append(v)
def knapsack(W, weights, values, n):
dp = [[0 for x in range(W+1)] for x in range(n+1)]
for i in range(n+1):
for w in range(W+1):
if i == 0 or w == 0:
dp[i][w] = 0
elif weights[i-1] <= w:
dp[i][w] = max(values[i-1] + dp[i-1][w - weights[i-1]], dp[i-1][w])
else:
dp[i][w] = dp[i-1][w]
return dp[n][W]
print(knapsack(W, weights, values, N))
# Naive
"""
def knapsack(W, weights, values, n):
if n == 0 or W == 0: # base
return 0
if (weights[n-1] > W):
return knapsack(W, weights, values, n-1)
else:
return max(
values[n-1] + knapsack(W - weights[n-1], weights, values, n-1),
knapsack(W, weights, values, n-1)
)
"""
``` |
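For a standalone check, the same 0/1-knapsack DP on a tiny instance (capacity 7, four items) picks the 4kg and 3kg items for a value of 14:
```python
def knapsack(W, weights, values, n):  # same DP as above, with the loops starting at 1
    dp = [[0] * (W + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w in range(1, W + 1):
            if weights[i - 1] <= w:
                dp[i][w] = max(values[i - 1] + dp[i - 1][w - weights[i - 1]], dp[i - 1][w])
            else:
                dp[i][w] = dp[i - 1][w]
    return dp[n][W]

# items (weight, value): (6, 13), (4, 8), (3, 6), (5, 12)
print(knapsack(7, [6, 4, 3, 5], [13, 8, 6, 12], 4))  # 14
```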
{
"source": "JihoChoi/dynamic-gcn-deprecated-TBU",
"score": 2
} |
#### File: model/Twitter/BiGCN_Twitter-ORG-Self-Attn-0401.py
```python
import sys,os
sys.path.append(os.getcwd())
# from Process.process import *
from Process.process import *
import torch as th
from torch_scatter import scatter_mean
import torch.nn.functional as F
import numpy as np
from tools.earlystopping import EarlyStopping
from torch_geometric.data import DataLoader
from tqdm import tqdm
from Process.rand5fold import *
from tools.evaluate import *
from torch_geometric.nn import GCNConv
import copy
class TDrumorGCN(th.nn.Module):
def __init__(self, in_feats, hid_feats, out_feats):
super(TDrumorGCN, self).__init__()
self.conv1 = GCNConv(in_feats, hid_feats)
self.conv2 = GCNConv(hid_feats+in_feats, out_feats)
def forward(self, data):
x, edge_index = data.x, data.edge_index
x1 = copy.copy(x.float())
x = self.conv1(x, edge_index)
x2 = copy.copy(x)
rootindex = data.rootindex
root_extend = th.zeros(len(data.batch), x1.size(1)).to(device)
batch_size = max(data.batch) + 1
for num_batch in range(batch_size):
index = (th.eq(data.batch, num_batch))
root_extend[index] = x1[rootindex[num_batch]]
x = th.cat((x, root_extend), 1)
x = F.relu(x)
x = F.dropout(x, training=self.training)
x = self.conv2(x, edge_index)
x = F.relu(x)
root_extend = th.zeros(len(data.batch), x2.size(1)).to(device)
for num_batch in range(batch_size):
index = (th.eq(data.batch, num_batch))
root_extend[index] = x2[rootindex[num_batch]]
x = th.cat((x, root_extend), 1)
x = scatter_mean(x, data.batch, dim=0)
return x
class BUrumorGCN(th.nn.Module):
def __init__(self, in_feats, hid_feats, out_feats):
super(BUrumorGCN, self).__init__()
self.conv1 = GCNConv(in_feats, hid_feats)
self.conv2 = GCNConv(hid_feats+in_feats, out_feats)
def forward(self, data):
x, edge_index = data.x, data.BU_edge_index
x1 = copy.copy(x.float())
x = self.conv1(x, edge_index)
x2 = copy.copy(x)
rootindex = data.rootindex
root_extend = th.zeros(len(data.batch), x1.size(1)).to(device)
batch_size = max(data.batch) + 1
for num_batch in range(batch_size):
index = (th.eq(data.batch, num_batch))
root_extend[index] = x1[rootindex[num_batch]]
x = th.cat((x, root_extend), 1)
x = F.relu(x)
x = F.dropout(x, training=self.training)
x = self.conv2(x, edge_index)
x = F.relu(x)
root_extend = th.zeros(len(data.batch), x2.size(1)).to(device)
for num_batch in range(batch_size):
index = (th.eq(data.batch, num_batch))
root_extend[index] = x2[rootindex[num_batch]]
x = th.cat((x, root_extend), 1)
x = scatter_mean(x, data.batch, dim=0)
return x
class Network(th.nn.Module):
def __init__(self, in_feats, hid_feats, out_feats):
super(Network, self).__init__()
self.TDrumorGCN = TDrumorGCN(in_feats, hid_feats, out_feats)
self.BUrumorGCN = BUrumorGCN(in_feats, hid_feats, out_feats)
# Attention Module (Additive Attention)
"""
self.W_s1 = th.nn.Linear(out_feats * 2 * 2, 64)
self.W_s2 = th.nn.Linear(64, 1)
"""
self.fc = th.nn.Linear((out_feats+hid_feats)*2, 4)
# self.fc = th.nn.Linear((out_feats+hid_feats) * 2 * 5, 4)
"""
def attention_module(self, x0, x1, x2, x3, x4): # (Additive Attention)
attn_w_1 = self.W_s2(F.relu(self.W_s1(x0)))
attn_w_2 = self.W_s2(F.relu(self.W_s1(x1)))
attn_w_3 = self.W_s2(F.relu(self.W_s1(x2)))
attn_w_4 = self.W_s2(F.relu(self.W_s1(x3)))
attn_w_5 = self.W_s2(F.relu(self.W_s1(x4)))
attn_weights = th.cat((attn_w_1, attn_w_2, attn_w_3, attn_w_4, attn_w_5), 1) # B x 5
attn_weights = F.softmax(attn_weights, dim=1) # TODO: confirmed
x0 = th.bmm(x0.unsqueeze(2), attn_weights[:, 0].unsqueeze(1).unsqueeze(2))
x1 = th.bmm(x1.unsqueeze(2), attn_weights[:, 1].unsqueeze(1).unsqueeze(2))
x2 = th.bmm(x2.unsqueeze(2), attn_weights[:, 2].unsqueeze(1).unsqueeze(2))
x3 = th.bmm(x3.unsqueeze(2), attn_weights[:, 3].unsqueeze(1).unsqueeze(2))
x4 = th.bmm(x4.unsqueeze(2), attn_weights[:, 4].unsqueeze(1).unsqueeze(2))
return x0, x1, x2, x3, x4
"""
def self_attention_module(self, x0, x1, x2, x3, x4):
# Scaled dot-product self-attention over the snapshot embeddings
# Attention(Q, K, V) = softmax((QK^T)/sqrt(d_k))V, with Q = K = V = stacked snapshots
x_stack = th.stack([x0, x1, x2, x3, x4], 1)  # B x 5 x D
d_k = x_stack.size(-1)
scores = th.bmm(x_stack, x_stack.transpose(1, 2)) / (d_k ** 0.5)  # B x 5 x 5
attn_weights = F.softmax(scores, dim=-1)
attended = th.bmm(attn_weights, x_stack)  # B x 5 x D
# keep each snapshot as B x D x 1 so the shapes match forward() below
x0, x1, x2, x3, x4 = [attended[:, i, :].unsqueeze(2) for i in range(5)]
return x0, x1, x2, x3, x4
def forward(self, s0, s1, s2, s3, s4):
# TD_x = self.TDrumorGCN(data)
# BU_x = self.BUrumorGCN(data)
# TODO:
# 1) share gcn weights for the snapshots (current)
# 2) separate gcn weights for the snapshots
# 3) temporal + sequence
TD_x0 = self.TDrumorGCN(s0)
BU_x0 = self.BUrumorGCN(s0)
TD_x1 = self.TDrumorGCN(s1)
BU_x1 = self.BUrumorGCN(s1)
TD_x2 = self.TDrumorGCN(s2)
BU_x2 = self.BUrumorGCN(s2)
TD_x3 = self.TDrumorGCN(s3)
BU_x3 = self.BUrumorGCN(s3)
TD_x4 = self.TDrumorGCN(s4)
BU_x4 = self.BUrumorGCN(s4)
x0 = th.cat((TD_x0, BU_x0), 1)
x1 = th.cat((TD_x1, BU_x1), 1)
x2 = th.cat((TD_x2, BU_x2), 1)
x3 = th.cat((TD_x3, BU_x3), 1)
x4 = th.cat((TD_x4, BU_x4), 1)
# x = th.cat((BU_x,TD_x), 1)
# x0, x1, x2, x3, x4 = self.attention_module(x0, x1, x2, x3, x4)
x0, x1, x2, x3, x4 = self.self_attention_module(x0, x1, x2, x3, x4)
# print("x0", x0.shape, x0[0][0])
# print("x1", x1.shape, x1[0][0])
# print("x2", x2.shape, x2[0][0])
# print("x3", x3.shape, x3[0][0])
# print("x4", x4.shape, x4[0][0])
x = th.stack([x0, x1, x2, x3, x4], 1).squeeze(2)  # B x 5 x D x 1
# print("x", x.shape, x[0])
x = x.sum(dim=1).squeeze(2)
# print("x", x.shape, x[0][0])
x = self.fc(x)
x = F.log_softmax(x, dim=1)
return x
def train_GCN(treeDic, x_test, x_train, TDdroprate, BUdroprate, lr, weight_decay, patience, n_epochs, batchsize, dataname, iter):
model = Network(5000, 64, 64).to(device)
# BU_params = list(map(id, model.BUrumorGCN.conv1.parameters()))
# BU_params += list(map(id, model.BUrumorGCN.conv2.parameters()))
# print(filter(lambda p: id(p) not in BU_params, model.parameters()))
# BU_params += list(map(id, model.BUrumorGCN.conv2.parameters()))
# base_params = filter(lambda p: id(p) not in BU_params, model.parameters())
# optimizer = th.optim.Adam([
# {'params': base_params},
# {'params': model.BUrumorGCN.conv1.parameters(), 'lr': lr/5},
# {'params': model.BUrumorGCN.conv2.parameters(), 'lr': lr/5}
# ], lr=lr, weight_decay=weight_decay)
optimizer = th.optim.Adam([
{'params': model.parameters()},
], lr=lr, weight_decay=weight_decay)
model.train()
train_losses = []
val_losses = []
train_accs = []
val_accs = []
early_stopping = EarlyStopping(patience=patience, verbose=True)
for epoch in range(n_epochs):
# traindata_list, testdata_list = loadBiData(dataname, treeDic, x_train, x_test, TDdroprate,BUdroprate)
traindata_list, testdata_list = loadSnapshotData(
dataname, treeDic, x_train, x_test, TDdroprate, BUdroprate)
train_loader = DataLoader(
traindata_list, batch_size=batchsize, shuffle=True, num_workers=5)
test_loader = DataLoader(
testdata_list, batch_size=batchsize, shuffle=True, num_workers=5)
avg_loss = []
avg_acc = []
batch_idx = 0
# tqdm_train_loader = tqdm(train_loader) # JIHO
tqdm_train_loader = train_loader
for Batch_data in tqdm_train_loader:
# Batch_data.to(device)
# out_labels= model(Batch_data)
s0 = Batch_data[0].to(device)
s1 = Batch_data[1].to(device)
s2 = Batch_data[2].to(device)
s3 = Batch_data[3].to(device)
s4 = Batch_data[4].to(device)
out_labels = model(s0, s1, s2, s3, s4)
# finalloss = F.nll_loss(out_labels, Batch_data.y)
finalloss = F.nll_loss(out_labels, Batch_data[0].y)
loss = finalloss
optimizer.zero_grad()
loss.backward()
avg_loss.append(loss.item())
optimizer.step()
_, pred = out_labels.max(dim=-1)
# correct = pred.eq(Batch_data.y).sum().item()
# train_acc = correct / len(Batch_data.y)
correct = pred.eq(Batch_data[0].y).sum().item()
train_acc = correct / len(Batch_data[0].y)
avg_acc.append(train_acc)
print("Iter {:03d} | Epoch {:05d} | Batch{:02d} | Train_Loss {:.4f}| Train_Accuracy {:.4f}".format(
iter, epoch, batch_idx, loss.item(), train_acc))
batch_idx = batch_idx + 1
train_losses.append(np.mean(avg_loss))
train_accs.append(np.mean(avg_acc))
temp_val_losses = []
temp_val_accs = []
temp_val_Acc_all, temp_val_Acc1, temp_val_Prec1, temp_val_Recll1, temp_val_F1, \
temp_val_Acc2, temp_val_Prec2, temp_val_Recll2, temp_val_F2, \
temp_val_Acc3, temp_val_Prec3, temp_val_Recll3, temp_val_F3, \
temp_val_Acc4, temp_val_Prec4, temp_val_Recll4, temp_val_F4 = ([] for _ in range(17))
model.eval()
# tqdm_test_loader = tqdm(test_loader) # JIHO
tqdm_test_loader = test_loader
for Batch_data in tqdm_test_loader:
# Batch_data.to(device)
# val_out = model(Batch_data)
s0 = Batch_data[0].to(device)
s1 = Batch_data[1].to(device)
s2 = Batch_data[2].to(device)
s3 = Batch_data[3].to(device)
s4 = Batch_data[4].to(device)
val_out = model(s0, s1, s2, s3, s4)
val_loss = F.nll_loss(val_out, Batch_data[0].y)
temp_val_losses.append(val_loss.item())
_, val_pred = val_out.max(dim=1)
correct = val_pred.eq(Batch_data[0].y).sum().item()
val_acc = correct / len(Batch_data[0].y)
Acc_all, Acc1, Prec1, Recll1, F1, Acc2, Prec2, Recll2, F2, Acc3, Prec3, Recll3, F3, Acc4, Prec4, Recll4, F4 = evaluation4class(
val_pred, Batch_data[0].y)
temp_val_Acc_all.append(Acc_all), temp_val_Acc1.append(Acc1), temp_val_Prec1.append(
Prec1), temp_val_Recll1.append(Recll1), temp_val_F1.append(F1), \
temp_val_Acc2.append(Acc2), temp_val_Prec2.append(Prec2), temp_val_Recll2.append(
Recll2), temp_val_F2.append(F2), \
temp_val_Acc3.append(Acc3), temp_val_Prec3.append(Prec3), temp_val_Recll3.append(
Recll3), temp_val_F3.append(F3), \
temp_val_Acc4.append(Acc4), temp_val_Prec4.append(Prec4), temp_val_Recll4.append(
Recll4), temp_val_F4.append(F4)
temp_val_accs.append(val_acc)
val_losses.append(np.mean(temp_val_losses))
val_accs.append(np.mean(temp_val_accs))
print("Epoch {:05d} | Val_Loss {:.4f}| Val_Accuracy {:.4f}".format(epoch, np.mean(temp_val_losses),
np.mean(temp_val_accs)))
res = ['acc:{:.4f}'.format(np.mean(temp_val_Acc_all)),
'C1:{:.4f},{:.4f},{:.4f},{:.4f}'.format(np.mean(temp_val_Acc1), np.mean(temp_val_Prec1),
np.mean(temp_val_Recll1), np.mean(temp_val_F1)),
'C2:{:.4f},{:.4f},{:.4f},{:.4f}'.format(np.mean(temp_val_Acc2), np.mean(temp_val_Prec2),
np.mean(temp_val_Recll2), np.mean(temp_val_F2)),
'C3:{:.4f},{:.4f},{:.4f},{:.4f}'.format(np.mean(temp_val_Acc3), np.mean(temp_val_Prec3),
np.mean(temp_val_Recll3), np.mean(temp_val_F3)),
'C4:{:.4f},{:.4f},{:.4f},{:.4f}'.format(np.mean(temp_val_Acc4), np.mean(temp_val_Prec4),
np.mean(temp_val_Recll4), np.mean(temp_val_F4))]
print('results:', res)
early_stopping(np.mean(temp_val_losses), np.mean(temp_val_accs), np.mean(temp_val_F1), np.mean(temp_val_F2),
np.mean(temp_val_F3), np.mean(temp_val_F4), model, 'BiGCN', dataname)
accs = np.mean(temp_val_accs)
F1 = np.mean(temp_val_F1)
F2 = np.mean(temp_val_F2)
F3 = np.mean(temp_val_F3)
F4 = np.mean(temp_val_F4)
if early_stopping.early_stop:
print("Early stopping")
accs = early_stopping.accs
F1 = early_stopping.F1
F2 = early_stopping.F2
F3 = early_stopping.F3
F4 = early_stopping.F4
break
return train_losses, val_losses, train_accs, val_accs, accs, F1, F2, F3, F4
# =========================
# MAIN
# =========================
lr = 0.0005
weight_decay = 1e-4
patience = 10
# n_epochs=200 # JIHO
n_epochs = 100
batchsize = 128
TDdroprate = 0.2
BUdroprate = 0.2
datasetname = sys.argv[1] # "Twitter15", "Twitter16"
iterations = int(sys.argv[2])
model = "GCN"
device = th.device('cuda:2' if th.cuda.is_available() else 'cpu')
test_accs = []
NR_F1 = []
FR_F1 = []
TR_F1 = []
UR_F1 = []
for iter in range(iterations):
fold0_x_test, fold0_x_train, fold1_x_test, fold1_x_train, fold2_x_test, fold2_x_train, fold3_x_test, fold3_x_train, fold4_x_test, fold4_x_train = load5foldData(
datasetname)
treeDic = loadTree(datasetname)
train_losses, val_losses, train_accs, val_accs0, accs0, F1_0, F2_0, F3_0, F4_0 = train_GCN(
treeDic, fold0_x_test, fold0_x_train, TDdroprate, BUdroprate, lr, weight_decay, patience, n_epochs, batchsize, datasetname, iter)
train_losses, val_losses, train_accs, val_accs1, accs1, F1_1, F2_1, F3_1, F4_1 = train_GCN(
treeDic, fold1_x_test, fold1_x_train, TDdroprate, BUdroprate, lr, weight_decay, patience, n_epochs, batchsize, datasetname, iter)
train_losses, val_losses, train_accs, val_accs2, accs2, F1_2, F2_2, F3_2, F4_2 = train_GCN(
treeDic, fold2_x_test, fold2_x_train, TDdroprate, BUdroprate, lr, weight_decay, patience, n_epochs, batchsize, datasetname, iter)
train_losses, val_losses, train_accs, val_accs3, accs3, F1_3, F2_3, F3_3, F4_3 = train_GCN(
treeDic, fold3_x_test, fold3_x_train, TDdroprate, BUdroprate, lr, weight_decay, patience, n_epochs, batchsize, datasetname, iter)
train_losses, val_losses, train_accs, val_accs4, accs4, F1_4, F2_4, F3_4, F4_4 = train_GCN(
treeDic, fold4_x_test, fold4_x_train, TDdroprate, BUdroprate, lr, weight_decay, patience, n_epochs, batchsize, datasetname, iter)
test_accs.append((accs0+accs1+accs2+accs3+accs4)/5)
NR_F1.append((F1_0+F1_1+F1_2+F1_3+F1_4)/5)
FR_F1.append((F2_0 + F2_1 + F2_2 + F2_3 + F2_4) / 5)
TR_F1.append((F3_0 + F3_1 + F3_2 + F3_3 + F3_4) / 5)
UR_F1.append((F4_0 + F4_1 + F4_2 + F4_3 + F4_4) / 5)
print("Total_Test_Accuracy: {:.4f}|NR F1: {:.4f}|FR F1: {:.4f}|TR F1: {:.4f}|UR F1: {:.4f}".format(
sum(test_accs) / iterations, sum(NR_F1) / iterations, sum(FR_F1) / iterations, sum(TR_F1) / iterations, sum(UR_F1) / iterations))
```
#### File: model/Weibo/BiGCN_Weibo.py
```python
import sys,os
sys.path.append(os.getcwd())
from Process.process import *
import torch as th
from torch_scatter import scatter_mean
import torch.nn.functional as F
import numpy as np
from tools.earlystopping2class import EarlyStopping
from torch_geometric.data import DataLoader
from tqdm import tqdm
from Process.rand5fold import *
from tools.evaluate import *
from torch_geometric.nn import GCNConv
import copy
class TDrumorGCN(th.nn.Module):
def __init__(self,in_feats,hid_feats,out_feats):
super(TDrumorGCN, self).__init__()
self.conv1 = GCNConv(in_feats, hid_feats)
self.conv2 = GCNConv(hid_feats+in_feats, out_feats)
def forward(self, data):
x, edge_index = data.x, data.edge_index
x1=copy.copy(x.float())
x = self.conv1(x, edge_index)
x2=copy.copy(x)
rootindex = data.rootindex
root_extend = th.zeros(len(data.batch), x1.size(1)).to(device)
batch_size = max(data.batch) + 1
for num_batch in range(batch_size):
index = (th.eq(data.batch, num_batch))
root_extend[index] = x1[rootindex[num_batch]]
x = th.cat((x,root_extend), 1)
x = F.relu(x)
x = F.dropout(x, training=self.training)
x = self.conv2(x, edge_index)
x=F.relu(x)
root_extend = th.zeros(len(data.batch), x2.size(1)).to(device)
for num_batch in range(batch_size):
index = (th.eq(data.batch, num_batch))
root_extend[index] = x2[rootindex[num_batch]]
x = th.cat((x,root_extend), 1)
x= scatter_mean(x, data.batch, dim=0)
return x
class BUrumorGCN(th.nn.Module):
def __init__(self,in_feats,hid_feats,out_feats):
super(BUrumorGCN, self).__init__()
self.conv1 = GCNConv(in_feats, hid_feats)
self.conv2 = GCNConv(hid_feats+in_feats, out_feats)
def forward(self, data):
x, edge_index = data.x, data.BU_edge_index
x1 = copy.copy(x.float())
x = self.conv1(x, edge_index)
x2 = copy.copy(x)
rootindex = data.rootindex
root_extend = th.zeros(len(data.batch), x1.size(1)).to(device)
batch_size = max(data.batch) + 1
for num_batch in range(batch_size):
index = (th.eq(data.batch, num_batch))
root_extend[index] = x1[rootindex[num_batch]]
x = th.cat((x,root_extend), 1)
x = F.relu(x)
x = F.dropout(x, training=self.training)
x = self.conv2(x, edge_index)
x = F.relu(x)
root_extend = th.zeros(len(data.batch), x2.size(1)).to(device)
for num_batch in range(batch_size):
index = (th.eq(data.batch, num_batch))
root_extend[index] = x2[rootindex[num_batch]]
x = th.cat((x,root_extend), 1)
x= scatter_mean(x, data.batch, dim=0)
return x
class Net(th.nn.Module):
def __init__(self,in_feats,hid_feats,out_feats):
super(Net, self).__init__()
self.TDrumorGCN = TDrumorGCN(in_feats, hid_feats, out_feats)
self.BUrumorGCN = BUrumorGCN(in_feats, hid_feats, out_feats)
self.fc=th.nn.Linear((out_feats+hid_feats)*2,2)
def forward(self, data):
TD_x = self.TDrumorGCN(data)
BU_x = self.BUrumorGCN(data)
x = th.cat((BU_x,TD_x), 1)
x=self.fc(x)
x = F.log_softmax(x, dim=1)
return x
def train_GCN(treeDic, x_test, x_train,TDdroprate,BUdroprate,lr, weight_decay,patience,n_epochs,batchsize,dataname,iter):
model = Net(5000,64,64).to(device)
BU_params=list(map(id,model.BUrumorGCN.conv1.parameters()))
BU_params += list(map(id, model.BUrumorGCN.conv2.parameters()))
base_params=filter(lambda p:id(p) not in BU_params,model.parameters())
optimizer = th.optim.Adam([
{'params':base_params},
{'params':model.BUrumorGCN.conv1.parameters(),'lr':lr/5},
{'params': model.BUrumorGCN.conv2.parameters(), 'lr': lr/5}
], lr=lr, weight_decay=weight_decay)
model.train()
train_losses,val_losses,train_accs,val_accs = [],[],[],[]
early_stopping = EarlyStopping(patience=patience, verbose=True)
for epoch in range(n_epochs):
traindata_list, testdata_list = loadBiData(dataname, treeDic, x_train, x_test, TDdroprate,BUdroprate)
train_loader = DataLoader(traindata_list, batch_size=batchsize,
shuffle=False, num_workers=10)
test_loader = DataLoader(testdata_list, batch_size=batchsize,
shuffle=True, num_workers=10)
avg_loss,avg_acc = [],[]
batch_idx = 0
tqdm_train_loader = tqdm(train_loader)
for Batch_data in tqdm_train_loader:
Batch_data.to(device)
out_labels = model(Batch_data)
loss = F.nll_loss(out_labels, Batch_data.y)
optimizer.zero_grad()
loss.backward()
avg_loss.append(loss.item())
optimizer.step()
_, pred = out_labels.max(dim=-1)
correct = pred.eq(Batch_data.y).sum().item()
train_acc = correct / len(Batch_data.y)
avg_acc.append(train_acc)
postfix = "Iter {:03d} | Epoch {:05d} | Batch{:02d} | Train_Loss {:.4f}| Train_Accuracy {:.4f}".format(iter,
epoch,
batch_idx,
loss.item(),
train_acc)
tqdm_train_loader.set_postfix_str(postfix)
batch_idx = batch_idx + 1
train_losses.append(np.mean(avg_loss))
train_accs.append(np.mean(avg_acc))
temp_val_losses,temp_val_accs,temp_val_Acc_all, temp_val_Acc1, temp_val_Prec1, temp_val_Recll1, temp_val_F1, \
temp_val_Acc2, temp_val_Prec2, temp_val_Recll2, temp_val_F2 = [],[],[], [], [], [], [], [], [], [], []
model.eval()
tqdm_test_loader = tqdm(test_loader)
for Batch_data in tqdm_test_loader:
Batch_data.to(device)
val_out = model(Batch_data)
val_loss = F.nll_loss(val_out, Batch_data.y)
temp_val_losses.append(val_loss.item())
_, val_pred = val_out.max(dim=1)
correct = val_pred.eq(Batch_data.y).sum().item()
val_acc = correct / len(Batch_data.y)
Acc_all, Acc1, Prec1, Recll1, F1, Acc2, Prec2, Recll2, F2 = evaluationclass(
val_pred, Batch_data.y)
temp_val_Acc_all.append(Acc_all), temp_val_Acc1.append(Acc1), temp_val_Prec1.append(
Prec1), temp_val_Recll1.append(Recll1), temp_val_F1.append(F1), \
temp_val_Acc2.append(Acc2), temp_val_Prec2.append(Prec2), temp_val_Recll2.append(
Recll2), temp_val_F2.append(F2)
temp_val_accs.append(val_acc)
val_losses.append(np.mean(temp_val_losses))
val_accs.append(np.mean(temp_val_accs))
print("Epoch {:05d} | Val_Loss {:.4f}| Val_Accuracy {:.4f}".format(epoch, np.mean(temp_val_losses),
np.mean(temp_val_accs)))
res = ['acc:{:.4f}'.format(np.mean(temp_val_Acc_all)),
'C1:{:.4f},{:.4f},{:.4f},{:.4f}'.format(np.mean(temp_val_Acc1), np.mean(temp_val_Prec1),
np.mean(temp_val_Recll1), np.mean(temp_val_F1)),
'C2:{:.4f},{:.4f},{:.4f},{:.4f}'.format(np.mean(temp_val_Acc2), np.mean(temp_val_Prec2),
np.mean(temp_val_Recll2), np.mean(temp_val_F2))]
print('results:', res)
early_stopping(np.mean(temp_val_losses), np.mean(temp_val_Acc_all), np.mean(temp_val_Acc1),
np.mean(temp_val_Acc2), np.mean(temp_val_Prec1),
np.mean(temp_val_Prec2), np.mean(temp_val_Recll1), np.mean(temp_val_Recll2),
np.mean(temp_val_F1),
np.mean(temp_val_F2), model, 'BiGCN', "weibo")
accs = np.mean(temp_val_Acc_all)
acc1 = np.mean(temp_val_Acc1)
acc2 = np.mean(temp_val_Acc2)
pre1 = np.mean(temp_val_Prec1)
pre2 = np.mean(temp_val_Prec2)
rec1 = np.mean(temp_val_Recll1)
rec2 = np.mean(temp_val_Recll2)
F1 = np.mean(temp_val_F1)
F2 = np.mean(temp_val_F2)
if early_stopping.early_stop:
print("Early stopping")
accs = early_stopping.accs
acc1 = early_stopping.acc1
acc2 = early_stopping.acc2
pre1 = early_stopping.pre1
pre2 = early_stopping.pre2
rec1 = early_stopping.rec1
rec2 = early_stopping.rec2
F1 = early_stopping.F1
F2 = early_stopping.F2
break
return train_losses, val_losses, train_accs, val_accs, accs, acc1, pre1, rec1, F1, acc2, pre2, rec2, F2
lr=0.0005
weight_decay=1e-4
patience=10
n_epochs=200
batchsize=16
tddroprate=0
budroprate=0
datasetname="Weibo"
iterations=int(sys.argv[1])
model="BiGCN"
device = th.device('cuda:1' if th.cuda.is_available() else 'cpu')
test_accs,ACC1,ACC2,PRE1,PRE2,REC1,REC2,F1,F2 = [],[],[],[],[],[],[],[],[]
for iter in range(iterations):
fold0_x_test, fold0_x_train,\
fold1_x_test, fold1_x_train, \
fold2_x_test, fold2_x_train, \
fold3_x_test, fold3_x_train, \
fold4_x_test, fold4_x_train = load5foldData(datasetname)
treeDic=loadTree(datasetname)
train_losses, val_losses, train_accs, val_accs, accs_0, acc1_0, pre1_0, rec1_0, F1_0, acc2_0, pre2_0, rec2_0, F2_0 = train_GCN(treeDic,
fold0_x_test,
fold0_x_train,
tddroprate,budroprate,
lr, weight_decay,
patience,
n_epochs,
batchsize,
datasetname,
iter)
train_losses, val_losses, train_accs, val_accs,accs_1, acc1_1, pre1_1, rec1_1, F1_1, acc2_1, pre2_1, rec2_1, F2_1 = train_GCN(treeDic,
fold1_x_test,
fold1_x_train,
tddroprate,budroprate, lr,
weight_decay,
patience,
n_epochs,
batchsize,
datasetname,
iter)
train_losses, val_losses, train_accs, val_accs, accs_2, acc1_2, pre1_2, rec1_2, F1_2, acc2_2, pre2_2, rec2_2, F2_2 = train_GCN(treeDic,
fold2_x_test,
fold2_x_train,
tddroprate,budroprate, lr,
weight_decay,
patience,
n_epochs,
batchsize,
datasetname,
iter)
train_losses, val_losses, train_accs, val_accs, accs_3, acc1_3, pre1_3, rec1_3, F1_3, acc2_3, pre2_3, rec2_3, F2_3 = train_GCN(treeDic,
fold3_x_test,
fold3_x_train,
tddroprate,budroprate, lr,
weight_decay,
patience,
n_epochs,
batchsize,
datasetname,
iter)
train_losses, val_losses, train_accs, val_accs, accs_4, acc1_4, pre1_4, rec1_4, F1_4, acc2_4, pre2_4, rec2_4, F2_4 = train_GCN(treeDic,
fold4_x_test,
fold4_x_train,
tddroprate,budroprate, lr,
weight_decay,
patience,
n_epochs,
batchsize,
datasetname,
iter)
test_accs.append((accs_0+accs_1+accs_2+accs_3+accs_4)/5)
ACC1.append((acc1_0 + acc1_1 + acc1_2 + acc1_3 + acc1_4) / 5)
ACC2.append((acc2_0 + acc2_1 +acc2_2 + acc2_3 +acc2_4) / 5)
PRE1.append((pre1_0 + pre1_1 + pre1_2 + pre1_3 + pre1_4) / 5)
PRE2.append((pre2_0 + pre2_1 + pre2_2 + pre2_3 + pre2_4) / 5)
REC1.append((rec1_0 + rec1_1 + rec1_2 + rec1_3 + rec1_4) / 5)
REC2.append((rec2_0 + rec2_1 + rec2_2 + rec2_3 + rec2_4) / 5)
F1.append((F1_0+F1_1+F1_2+F1_3+F1_4)/5)
F2.append((F2_0 + F2_1 + F2_2 + F2_3 + F2_4) / 5)
print("weibo:|Total_Test_ Accuracy: {:.4f}|acc1: {:.4f}|acc2: {:.4f}|pre1: {:.4f}|pre2: {:.4f}"
"|rec1: {:.4f}|rec2: {:.4f}|F1: {:.4f}|F2: {:.4f}".format(sum(test_accs) / iterations, sum(ACC1) / iterations,
sum(ACC2) / iterations, sum(PRE1) / iterations, sum(PRE2) /iterations,
sum(REC1) / iterations, sum(REC2) / iterations, sum(F1) / iterations, sum(F2) / iterations))
```
#### File: bigcn/Process-10-snapshots/getWeibograph.py
```python
import numpy as np
from joblib import Parallel, delayed
from tqdm import tqdm
import os
cwd=os.getcwd()
class Node_tweet(object):
def __init__(self, idx=None):
self.children = []
self.idx = idx
self.word = []
self.index = []
self.parent = None
def str2matrix(Str): # str = index:wordfreq index:wordfreq
wordFreq, wordIndex = [], []
for pair in Str.split(' '):
freq=float(pair.split(':')[1])
index=int(pair.split(':')[0])
if index<=5000:
wordFreq.append(freq)
wordIndex.append(index-1)
return wordFreq, wordIndex
def constructMat(tree):
index2node = {}
for i in tree:
node = Node_tweet(idx=i)
index2node[i] = node
for j in tree:
indexC = j
indexP = tree[j]['parent']
nodeC = index2node[indexC]
wordFreq, wordIndex = str2matrix(tree[j]['vec'])
nodeC.index = wordIndex
nodeC.word = wordFreq
## not root node ##
if not indexP == 'None':
nodeP = index2node[int(indexP)]
nodeC.parent = nodeP
nodeP.children.append(nodeC)
## root node ##
else:
root = nodeC
rootindex=indexC-1
root_index=nodeC.index
root_word=nodeC.word
rootfeat = np.zeros([1, 5000])
if len(root_index)>0:
rootfeat[0, np.array(root_index)] = np.array(root_word)
## 3. convert tree to matrix and edgematrix
matrix=np.zeros([len(index2node),len(index2node)])
raw=[]
col=[]
x_word=[]
x_index=[]
edgematrix=[]
for index_i in range(len(index2node)):
for index_j in range(len(index2node)):
if index2node[index_i+1].children != None and index2node[index_j+1] in index2node[index_i+1].children:
matrix[index_i][index_j]=1
raw.append(index_i)
col.append(index_j)
x_word.append(index2node[index_i+1].word)
x_index.append(index2node[index_i+1].index)
edgematrix.append(raw)
edgematrix.append(col)
return x_word, x_index, edgematrix,rootfeat,rootindex
def getfeature(x_word,x_index):
x = np.zeros([len(x_index), 5000])
for i in range(len(x_index)):
if len(x_index[i])>0:
x[i, np.array(x_index[i])] = np.array(x_word[i])
return x
def main():
treePath = os.path.join(cwd, 'data/Weibo/weibotree.txt')
print("reading Weibo tree")
treeDic = {}
for line in open(treePath):
line = line.rstrip()
eid, indexP, indexC,Vec = line.split('\t')[0], line.split('\t')[1], int(line.split('\t')[2]), line.split('\t')[3]
if not treeDic.__contains__(eid):
treeDic[eid] = {}
treeDic[eid][indexC] = {'parent': indexP, 'vec': Vec}
print('tree no:', len(treeDic))
labelPath = os.path.join(cwd, "data/Weibo/weibo_id_label.txt")
print("loading weibo label:")
event,y= [],[]
l1 = l2 = 0
labelDic = {}
for line in open(labelPath):
line = line.rstrip()
eid,label = line.split(' ')[0], line.split(' ')[1]
labelDic[eid] = int(label)
y.append(labelDic[eid])
event.append(eid)
if labelDic[eid]==0:
l1 += 1
if labelDic[eid]==1:
l2 += 1
print(len(labelDic),len(event),len(y))
print(l1, l2)
def loadEid(event,id,y):
if event is None:
return None
if len(event) < 2:
return None
if len(event)>1:
x_word, x_index, tree, rootfeat, rootindex = constructMat(event)
x_x = getfeature(x_word, x_index)
rootfeat, tree, x_x, rootindex, y = np.array(rootfeat), np.array(tree), np.array(x_x), np.array(
rootindex), np.array(y)
np.savez(os.path.join(cwd,'data/Weibograph/'+id+'.npz'), x=x_x,root=rootfeat,edgeindex=tree,rootindex=rootindex,y=y)
return None
x_word, x_index, tree, rootfeat, rootindex = constructMat(event)
x_x = getfeature(x_word, x_index)
return rootfeat, tree, x_x, [rootindex]
print("loading dataset", )
results = Parallel(n_jobs=30, backend='threading')(delayed(loadEid)(treeDic[eid] if eid in treeDic else None,eid,labelDic[eid]) for eid in tqdm(event))
return
if __name__ == '__main__':
main()
```
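str2matrix parses the 'index:frequency' tokens of each tree line into parallel lists, shifting indices to zero-based and dropping anything beyond the 5000-word vocabulary. A quick self-contained check with a made-up token string:
```python
def str2matrix(Str):  # same parsing as above
    wordFreq, wordIndex = [], []
    for pair in Str.split(' '):
        freq = float(pair.split(':')[1])
        index = int(pair.split(':')[0])
        if index <= 5000:
            wordFreq.append(freq)
            wordIndex.append(index - 1)
    return wordFreq, wordIndex

print(str2matrix('1:2.0 3:1.0 6001:4.0'))  # ([2.0, 1.0], [0, 2]) -- 6001 exceeds the vocab
```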
#### File: dynamic-gcn-deprecated-TBU/dynamic-gcn/model.py
```python
import sys
import os
import copy
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch_scatter import scatter_mean
from torch_scatter import scatter_max
from torch_geometric.data import DataLoader
from torch_geometric.nn import GCNConv
# from torch_geometric.nn import SAGEConv
# from torch_geometric.nn import GINConv
from utils import save_json_file # Attetnion Weights
# References
# RvNN - https://github.com/majingCUHK/Rumor_RvNN
# BiGCN - https://github.com/TianBian95/BiGCN/
# Self-Attention - https://github.com/CyberZHG/torch-multi-head-attention/blob/master/torch_multi_head_attention/multi_head_attention.py
class TDRumorGCN(nn.Module):
def __init__(self, in_feats, hid_feats, out_feats):
super(TDRumorGCN, self).__init__()
self.conv1 = GCNConv(in_feats, hid_feats)
self.conv2 = GCNConv(hid_feats + in_feats, out_feats)
def forward(self, data):
x, edge_index = data.x, data.edge_index
x1 = copy.copy(x.float())
x = self.conv1(x, edge_index)
x2 = copy.copy(x)
root_index = data.root_index # skip connection (residual connection)
root_extend = torch.zeros(len(data.batch), x1.size(1))
root_extend = root_extend.to(Network.device)
batch_size = max(data.batch) + 1
for num_batch in range(batch_size):
index = (torch.eq(data.batch, num_batch))
root_extend[index] = x1[root_index[num_batch]]
x = torch.cat((x, root_extend), 1)
x = F.relu(x)
x = F.dropout(x, training=self.training)
x = self.conv2(x, edge_index)
x = F.relu(x)
root_extend = torch.zeros(
len(data.batch), x2.size(1)).to(Network.device)
for num_batch in range(batch_size):
index = (torch.eq(data.batch, num_batch))
root_extend[index] = x2[root_index[num_batch]]
x = torch.cat((x, root_extend), 1)
# READOUT LAYER: mean, max pooling (nodes -> graph)
"""
x_mean = scatter_mean(x, data.batch, dim=0) # B x 64
x_max = scatter_max(x, data.batch, dim=0)[0] # B x 64
x = torch.cat((x_mean, x_max), 1) # CONCAT(mean, max)
return x # B x 128
"""
x_mean = scatter_mean(x, data.batch, dim=0) # B x 64
return x_mean
class BURumorGCN(nn.Module):
def __init__(self, in_feats, hid_feats, out_feats):
super(BURumorGCN, self).__init__()
self.conv1 = GCNConv(in_feats, hid_feats)
self.conv2 = GCNConv(hid_feats + in_feats, out_feats)
def forward(self, data):
x, edge_index = data.x, data.BU_edge_index
x1 = copy.copy(x.float())
x = self.conv1(x, edge_index)
x2 = copy.copy(x)
root_index = data.root_index
root_extend = torch.zeros(len(data.batch), x1.size(1))
root_extend = root_extend.to(Network.device)
batch_size = max(data.batch) + 1
for num_batch in range(batch_size):
index = (torch.eq(data.batch, num_batch))
root_extend[index] = x1[root_index[num_batch]]
x = torch.cat((x, root_extend), 1)
x = F.relu(x)
x = F.dropout(x, training=self.training)
x = self.conv2(x, edge_index)
x = F.relu(x)
root_extend = torch.zeros(
len(data.batch), x2.size(1)).to(Network.device)
for num_batch in range(batch_size):
index = (torch.eq(data.batch, num_batch))
root_extend[index] = x2[root_index[num_batch]]
x = torch.cat((x, root_extend), 1)
# READOUT LAYER: mean, max pooling (nodes -> graph)
"""
x_mean = scatter_mean(x, data.batch, dim=0) # B x 64
x_max = scatter_max(x, data.batch, dim=0)[0] # B x 64
x = torch.cat((x_mean, x_max), 1) # CONCAT(mean, max)
return x # B x 128
"""
x_mean = scatter_mean(x, data.batch, dim=0) # B x 64
return x_mean
class BiGCN(nn.Module):
def __init__(self, in_feats, hid_feats, out_feats):
super(BiGCN, self).__init__()
self.TDRumorGCN = TDRumorGCN(in_feats, hid_feats, out_feats)
self.BURumorGCN = BURumorGCN(in_feats, hid_feats, out_feats)
def forward(self, data):
TD_x = self.TDRumorGCN(data)
BU_x = self.BURumorGCN(data)
x = torch.cat((TD_x, BU_x), 1)
return x
class Network(nn.Module):
# def __init__(self, in_feats, hid_feats, out_feats, snapshot_num, device):
def __init__(self, in_feats, hid_feats, out_feats, settings):
super(Network, self).__init__()
Network.snapshot_num = settings['snapshot_num']
Network.device = settings['cuda']
Network.learning_sequence = settings['learning_sequence']
self.rumor_GCN_0 = BiGCN(in_feats, hid_feats, out_feats)
self.W_s1 = nn.Linear(out_feats * 2 * 4, 1) # additive attention
self.fc = nn.Linear((out_feats + hid_feats) * 2, 4)
# self.fc = nn.Linear((out_feats + hid_feats) * 2 * 2, 4)
# self.fc = nn.Linear((out_feats + hid_feats) * 2 * 4, 4)
self.init_weights()
def init_weights(self): # Xavier Init
init.xavier_normal_(self.rumor_GCN_0.TDRumorGCN.conv1.weight)
init.xavier_normal_(self.rumor_GCN_0.TDRumorGCN.conv2.weight)
init.xavier_normal_(self.rumor_GCN_0.BURumorGCN.conv1.weight)
init.xavier_normal_(self.rumor_GCN_0.BURumorGCN.conv2.weight)
init.xavier_normal_(self.W_s1.weight)
init.xavier_normal_(self.fc.weight)
def append_results(self, string): # TODO:
with open("./attention.txt", 'a') as out_file:
out_file.write(str(string) + '\n')
def additive_attention(self, x): # TODO:
x_stack = torch.stack(x, 1) # B * S * 256
x_context = x_stack.mean(dim=1) # B * S
attn_w = []
for current_x in x: # TODO: BATCH PARALLEL
attn_w.append(self.W_s1(torch.cat((current_x, x_context), 1)))
attn_weights = torch.cat((attn_w), 1) # B * S
attn_weights = F.softmax(attn_weights, dim=1) # B * S
updated_x = []
# print(attn_weights) # attention # TODO:
for index, current_x in enumerate(x):
weighted_x = torch.bmm(
current_x.unsqueeze(2), # B * 256 * 1
attn_weights[:, index].unsqueeze(1).unsqueeze(2) # B * 1 * 1
)
updated_x.append(weighted_x.squeeze(2))
updated_x = torch.stack(updated_x, 1)
return updated_x
def dot_product_attention(self, query, key, value, mask=None): # self-attention
dk = query.size()[-1] # 256
scores = query.matmul(key.transpose(-2, -1)) / math.sqrt(dk)
# if mask is not None:
# scores = scores.masked_fill(mask == 0, -1e9)
attention = F.softmax(scores, dim=-1)
# self.append_results(attention.data) # batch average # TODO:
return attention.matmul(value)
def attention_module(self, x): # TODO: REFACTORING
# x: Batch x Seq x Embedding - E.g.: (20, 5, 256)
if Network.learning_sequence == "mean":
x_stack = torch.stack(x, 1) # B x S x D - E.g.: (20, 5, 256)
x = x_stack.mean(dim=1)
elif Network.learning_sequence == "mean_max":
x_stack = torch.stack(x, 1) # B x S x D - E.g.: (20, 5, 256)
x_mean = x_stack.mean(dim=1)
x_max = torch.max(x_stack, dim=1)[0]
x = torch.cat((x_mean, x_max), 1) # CONCAT(mean, max)
elif Network.learning_sequence == "additive":
x_stack = self.additive_attention(x)
x = x_stack.mean(dim=1)
elif Network.learning_sequence == "dot_product":
x_stack = torch.stack(x, 1) # B x S x D - E.g.: (20, 5, 256)
x_stack = self.dot_product_attention(x_stack, x_stack, x_stack)
x = x_stack.mean(dim=1)
elif Network.learning_sequence == "LSTM":
pass
elif Network.learning_sequence == "GRU":
pass
else:
pass
return x
def forward(self, snapshots):
# 2) GCN LAYERS + 3) READOUT LAYER
x = []
for s in snapshots:
x.append(self.rumor_GCN_0(s))
# 4) ATTENTION LAYER
x = self.attention_module(x)
x = self.fc(x)
x = F.log_softmax(x, dim=1)
return x
```
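dot_product_attention only touches the query/key/value tensors, so its shape behaviour can be checked in isolation; a minimal sketch with random snapshot embeddings (batch 20, 5 snapshots, 256 dims, the shapes mentioned in the comments above):
```python
import math
import torch
import torch.nn.functional as F

def dot_product_attention(query, key, value):
    dk = query.size()[-1]
    scores = query.matmul(key.transpose(-2, -1)) / math.sqrt(dk)  # B x S x S
    return F.softmax(scores, dim=-1).matmul(value)                # B x S x D

x = torch.randn(20, 5, 256)           # B x S x D
out = dot_product_attention(x, x, x)  # self-attention: Q = K = V
print(out.shape)                      # torch.Size([20, 5, 256])
```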
#### File: dynamic-gcn-deprecated-TBU/dynamic-gcn/utils.py
```python
import os
import json
"""
Author: <NAME> (<EMAIL>)
Usage:
- sys.path.insert(0, './[parent-directory]/')
"""
def ensure_directory(path):
path = os.path.split(path)
if not os.path.exists(path[0]):
os.makedirs(path[0])
def save_json_file(path, data):
ensure_directory(path)
with open(path, "w") as json_file:
json_file.write(json.dumps(data))
def append_json_file(path, data):
ensure_directory(path)
with open(path, 'a') as json_file:
json_file.write(json.dumps(data))
def load_json_file(path):
with open(path, "r") as json_file:
data = json.loads(json_file.read())
return data
def print_dict(dict_file):
for key in dict_file.keys():
print("\t {0}: {1}".format(key, dict_file[key]))
print()
``` |
{
"source": "JihoChoi/rumor-diffusion-network-analysis",
"score": 3
} |
#### File: scripts/classification/aggregation.py
```python
import sys
sys.path.append('..')
from utils import *
def aggregate_feature_sets():
# ------------------------
# Load latest feature sets
# ------------------------
latest_feature_set_timestamp = {'structural': time.strftime('0'),
'temporal': time.strftime('0'),
'social': time.strftime('0'),
'struct_temp': time.strftime('0')}
for index, file in enumerate(os.listdir(OUT_PATH)):
if file.endswith(".csv"):
file_name = os.path.splitext(file)[0].split('_')
if file.startswith("structural_analysis_"):
timestamp = file_name[2] + "_" + file_name[3]
if timestamp > latest_feature_set_timestamp['structural']:
latest_feature_set_timestamp['structural'] = timestamp
elif file.startswith("temporal_analysis_"):
timestamp = file_name[2] + "_" + file_name[3]
if timestamp > latest_feature_set_timestamp['temporal']:
latest_feature_set_timestamp['temporal'] = timestamp
elif file.startswith("social_analysis_"):
timestamp = file_name[2] + "_" + file_name[3]
if timestamp > latest_feature_set_timestamp['social']:
latest_feature_set_timestamp['social'] = timestamp
elif file.startswith("struct-temp_analysis_"):
timestamp = file_name[2] + "_" + file_name[3]
if timestamp > latest_feature_set_timestamp['struct_temp']:
latest_feature_set_timestamp['struct_temp'] = timestamp
print(latest_feature_set_timestamp)
# -----------------
# Load Feature Sets
# -----------------
# TODO Error Handling
structural_features_path = OUT_PATH + 'structural_analysis_' + latest_feature_set_timestamp['structural'] + ".csv"
structural_pd = pd.read_csv(structural_features_path)
temporal_features_path = OUT_PATH + 'temporal_analysis_' + latest_feature_set_timestamp['temporal'] + ".csv"
temporal_pd = pd.read_csv(temporal_features_path)
social_features_path = OUT_PATH + 'social_analysis_' + latest_feature_set_timestamp['social'] + ".csv"
social_pd = pd.read_csv(social_features_path)
struct_temp_features_path = OUT_PATH + 'struct-temp_analysis_' + latest_feature_set_timestamp['struct_temp'] + ".csv"
struct_temp_pd = pd.read_csv(struct_temp_features_path)
# struct-temp_features_path = OUT_PATH + "struct-temp_analysis_20190605_143358.csv"
# struct-temp_pd = pd.read_csv(struct-temp_features_path)
# ------------------
# Aggregate Features
# ------------------
combined_pd = pd.merge(structural_pd, temporal_pd, on=['tweet_id', 'label'])
combined_pd = pd.merge(combined_pd, social_pd, on=['tweet_id', 'label'])
combined_pd = pd.merge(combined_pd, struct_temp_pd, on=['tweet_id', 'label'])
print(combined_pd.shape)
# comb_pd = pd.merge(combined_pd, struct-temp_pd, on=['tweet_id', 'label'])
# print(comb_pd.shape)
#
out_file = OUT_PATH + 'comb_dataset_' + time.strftime("%Y%m%d_%H%M%S") + ".csv"
combined_pd.to_csv(out_file, sep=',', index=False)
def main():
aggregate_feature_sets()
print("=============================")
print(" Feature Aggregation ")
print("=============================")
if __name__ == '__main__':
start_time = time.time() # Timer Start
main()
print("Elapsed Time: {0} seconds".format(round(time.time() - start_time, 3))) # Execution time
```
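Because the timestamps embedded in the file names are zero-padded %Y%m%d_%H%M%S, the latest-file scan can also be written with glob plus a lexicographic max. An alternative sketch (it assumes the same OUT_PATH and file-naming convention as above):
```python
import os
from glob import glob

def latest_csv(prefix, out_path):
    """Return the newest '<prefix>_<YYYYmmdd_HHMMSS>.csv' in out_path, or None."""
    candidates = glob(os.path.join(out_path, prefix + '_*.csv'))
    return max(candidates, default=None)  # zero-padded timestamps sort chronologically

# e.g. latest_csv('structural_analysis', OUT_PATH)
```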
#### File: scripts/feature-extraction/structural_feature_extraction.py
```python
import sys
sys.path.append('..')
from utils import *
class Cascade:
# --------------------------
# Initiate Cascade
# --------------------------
def __init__(self, root_tweet_id, cascade_path, label=None):
self.file_id = root_tweet_id # For label.txt
self.root_tweet_id = root_tweet_id # Tweet ID with ROOT Keyword (May updated)
self.root_user_id = 0
self.cascade_path = cascade_path
self.label = label
# ------------
# Load Cascade
# ------------
self.trace_count = None
self.src_users = set()
self.dst_users = set()
self.retweet_users = set()
self.reply_users = set()
self.retweet_count = 0
self.reply_count = 0
self.network = nx.DiGraph()
self.network_features = {}
self.load_cascade()
# -----------------
# Calculate Cascade
# -----------------
self.src_user_count = None
self.dst_user_count = None
self.avg_depth = 0
self.max_depth = 0
def load_cascade(self):
with open(self.cascade_path, 'r') as file:
# ---- -----------------
# Set Root: User, Tweet
# ---------------------
for index, line in enumerate(file):
elem_list = [x.strip() for x in re.split(r"[\'\,\->\[\]]", line.strip()) if x.strip()]
if elem_list[0] == 'ROOT' and elem_list[1] == 'ROOT':
self.root_user_id = elem_list[3]
if index != 0:
print('ROOT TWEET {} by {} @ line # {}'.format(elem_list[4], self.root_user_id, index))
break
if self.root_tweet_id != elem_list[4]: # Assert file_id == root_tweet_id
print('\t file_id:{1} -> root_tweet_id:{2} ({0}) '.format(self.label, self.root_tweet_id, elem_list[4]))
self.root_tweet_id = elem_list[4]
# ------------
# Load Cascade
# ------------
# file.read().count('\n') # line count
for index, line in enumerate(file): # Trace
elem_list = re.split(r"[\'\,\->\[\]]", line.strip())
elem_list = [x.strip() for x in elem_list if x.strip()] # Remove empty elements
# Error data handling
if float(elem_list[2]) >= float(elem_list[5]):
continue
src_user_id, src_tweet_id, src_tweet_time, dst_user_id, dst_tweet_id, dst_tweet_time = elem_list
self.src_users.add(src_user_id)
self.dst_users.add(dst_user_id)
# Different types of Tweets - https://help.twitter.com/en/using-twitter/types-of-tweets
if src_tweet_id == dst_tweet_id:
self.retweet_count += 1
self.retweet_users.add(dst_user_id)
else:
self.reply_count += 1
self.reply_users.add(dst_user_id)
# NetworkX Graph
self.network.add_weighted_edges_from(
[(src_user_id, dst_user_id, float(dst_tweet_time) - float(src_tweet_time))])
# Store computed cascade information
# self.trace_count = index
self.trace_count = self.retweet_count + self.reply_count
# =============================
# Structural Analysis
# =============================
def calc_structural_features(self):
G = self.network
# root_user_id = self.root_user_id
self.src_user_count = len(self.src_users)
self.dst_user_count = len(self.dst_users)
hops = []
max_hop_count = 10
for i in range(max_hop_count):
hops.append(len(nx.single_source_shortest_path_length(G, self.root_user_id, cutoff=i)))
# print(self.retweet_count, self.response_count)
# print("leaf:", nx.dag_to_branching(G))
# print('\t root_to_all_depth_length: ', len(nx.single_source_shortest_path_length(G, self.root_user_id)))
# print('\t user_count:', len(G.nodes())) # root + dst_user_count
print('\t depth: ', nx.dag_longest_path(G)) # weight - temporal feature
print('\t src_user_count: ', self.src_user_count)
print('\t dst_user_count: ', self.dst_user_count)
print('\t root_to_all_depth_sum: ', sum(nx.single_source_shortest_path_length(G, self.root_user_id).values()))
print('\t root_to_all_depth_max: ', max(nx.single_source_shortest_path_length(G, self.root_user_id).values()))
print('\t one_hop_neighbors:', len(list(G.neighbors(self.root_user_id))))
print(hops)
print('\t', "user count by hop(s): ", hops[1] - hops[0], hops[2] - hops[1], hops[3] - hops[2],
hops[4] - hops[3], hops[5] - hops[4], hops[6] - hops[5],
hops[7] - hops[6], hops[8] - hops[7], hops[9] - hops[8])
# df.loc[df['tweet_id'] == root_tweet_id, 'src_user_count'] = len(src_users)
shortest_path_dict = nx.single_source_shortest_path_length(G, self.root_user_id)
self.avg_depth = sum(shortest_path_dict.values()) / len(shortest_path_dict)
self.max_depth = max(shortest_path_dict.values())
for i in range(max_hop_count - 1):
self.network_features[str(i+1) + "_hop_neighbor_count"] = hops[i + 1] - hops[i]
# features to data frame
CascadeAnalyzer.feature_df = CascadeAnalyzer.feature_df.append({
'tweet_id': self.root_tweet_id, 'label': self.label,
'structural_trace_count': self.trace_count,
'structural_retweet_count': self.retweet_count,
'structural_reply_count': self.reply_count,
'structural_reply_retweet_ratio': self.reply_count / self.retweet_count,
'structural_src_user_count': self.src_user_count,
'structural_dst_user_count': self.dst_user_count,
'structural_dst_user_src_user_ratio': self.dst_user_count / self.src_user_count,
'structural_retweet_users_count': len(self.retweet_users),
'structural_reply_users_count': len(self.reply_users),
'structural_reply_retweet_users_ratio': len(self.reply_users) / len(self.retweet_users),
'structural_1_hop_neighbor_count': self.network_features['1_hop_neighbor_count'],
'structural_2_hop_neighbor_count': self.network_features['2_hop_neighbor_count'],
'structural_3_hop_neighbor_count': self.network_features['3_hop_neighbor_count'],
'structural_4_hop_neighbor_count': self.network_features['4_hop_neighbor_count'],
'structural_5_hop_neighbor_count': self.network_features['5_hop_neighbor_count'],
'structural_6_hop_neighbor_count': self.network_features['6_hop_neighbor_count'],
'structural_7_hop_neighbor_count': self.network_features['7_hop_neighbor_count'],
'structural_8_hop_neighbor_count': self.network_features['8_hop_neighbor_count'],
'structural_root_to_all_depth_sum': sum(nx.single_source_shortest_path_length(G, self.root_user_id).values()),
'structural_root_to_all_depth_avg': sum(nx.single_source_shortest_path_length(G, self.root_user_id).values()) / self.dst_user_count,
'structural_root_to_all_depth_max': max(nx.single_source_shortest_path_length(G, self.root_user_id).values()),
# 'structural_avg_depth': self.avg_depth, # duplicate
# 'structural_max_depth': self.max_depth, # duplicate
'structural_network_density': nx.density(G), # duplicate
'structural_network_avg_betweenness_centrality': np.mean(list(nx.betweenness_centrality(G).values())),
}, ignore_index=True)
# TODO: Class Inheritance
class CascadeAnalyzer(object):
feature_df = pd.DataFrame() # output
def __init__(self):
self.meta_df = pd.DataFrame() # labels / key: root_tweet_id
self.cascades_dict = {} # key: root_tweet_id, value: Cascade()
self.retrieve_cascade_labels()
self.load_cascades()
def retrieve_cascade_labels(self):
column_names = ['label', 'tweet_id']
self.meta_df = pd.read_csv(DATA_PATH + "label.txt", sep=':', names=column_names, converters={'tweet_id': str})
print("-------------------------------------")
print(self.meta_df.info())
print("-------------------------------------" * 2)
print(self.meta_df.shape, self.meta_df['label'].value_counts().to_dict())
print("-------------------------------------" * 2)
print(self.meta_df.head())
print("-------------------------------------\n")
def load_cascades(self):
# TODO: handle pickle data
# iterate tweet trees
for index, file in enumerate(os.listdir(DATA_PATH + 'tree_u')):
if not file.endswith('.txt'):
print("Unexpected Input File:", file)
continue
root_tweet_id = file.replace('.txt', '') # file_id
cascade_path = os.path.join(DATA_PATH + 'tree_u', file)
label = self.meta_df.loc[self.meta_df['tweet_id'] == root_tweet_id, 'label'].item() # label
self.cascades_dict[root_tweet_id] = Cascade(root_tweet_id, cascade_path, label)
print(self.cascades_dict[root_tweet_id])
# Main Outer loop
def iterate_cascades(self):
for index, row in self.meta_df.iterrows():
tweet_id = row['tweet_id']
cascade = self.cascades_dict[tweet_id]
print('#', index, row['tweet_id'], row['label'])
cascade.calc_structural_features()
# break
    def cascade_to_csv(self):  # CascadeAnalyzer
        ensure_directory(OUT_PATH)
        out_file_name = OUT_PATH + 'structural_analysis_' + time.strftime("%Y%m%d_%H%M%S") + ".csv"
        # use a context manager so the output file is always closed
        with open(out_file_name, 'w', encoding='utf-8', newline='') as out_file:
            self.feature_df.to_csv(out_file, sep=',', index=False)
def main():
# CascadeAnalyzer -> Overall / Cascade -> Individual
analyzer = CascadeAnalyzer()
analyzer.iterate_cascades()
analyzer.cascade_to_csv()
# Rumor Diffusion Analysis Project
# https://developer.twitter.com/en/docs/accounts-and-users/follow-search-get-users/api-reference/get-friendships-no_retweets-ids
print("=======================================")
print(" Structural Feature Extraction ")
print("=======================================\n\n")
if __name__ == '__main__':
start_time = time.time() # Timer Start
main()
print("\nElapsed Time: {0} seconds".format(round(time.time() - start_time, 3))) # Execution time
``` |
{
"source": "JihoChoi/wsdm-2022-challenge",
"score": 3
} |
#### File: wsdm-2022-challenge/scripts_dataset_a/parse_args.py
```python
import sys
import os
import platform
import time
import datetime
import argparse
import torch
def parse_arguments():
def str2bool(v):
if isinstance(v, bool):
return v
elif v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
parser = argparse.ArgumentParser(description='')
parser.add_argument("--train-flag", required=True, type=str2bool)
args = vars(parser.parse_args())
return args
# -------------------------------------------------------
# CONFIG / ARGS -> PARAMS
# -------------------------------------------------------
args = {
# 'RETRIEVE_INPUT_FLAG': True,
}
configs = {
'device': torch.device("cuda:1" if torch.cuda.is_available() else "cpu"),
}
# args = parse_arguments()
params = {**configs, **args}
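# Minimal usage sketch (illustrative, not part of the original script): other modules
# import the merged `params` dict (e.g. `from parse_args import params`) and read the
# device from it. Only keys defined above are used here.
if __name__ == '__main__':
    print("device:", params['device'])            # torch.device chosen above
    print("all params:", sorted(params.keys()))   # merged configs + args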
```
#### File: wsdm-2022-challenge/scripts_sigmoid/models.py
```python
import os
import sys
import datetime
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch.nn import Parameter
from torch.autograd import Variable
from torch_geometric.loader import DataLoader
from torch_geometric.data import Data
from dataset import LargeGraphDataset
from parse_args import params
from utils import correct_count, load_pickle_file, multi_acc, save_pickle_file
import torch_geometric.transforms as T
from torch_geometric.datasets import OGB_MAG
from torch_geometric.nn import GCNConv
from torch_geometric.nn import SAGEConv, to_hetero
from torch_geometric.nn import RGCNConv
# from torch_geometric.nn import BatchNorm, BatchNorm1d
from torch.nn import BatchNorm1d
from torch_geometric.utils import negative_sampling
from torch_geometric.utils import batched_negative_sampling
# HEATConv
# https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.conv.RGCNConv
# https://github.com/pyg-team/pytorch_geometric/blob/master/examples/rgcn_link_pred.py
# https://github.com/pyg-team/pytorch_geometric/blob/master/examples/rgcn.py
class TemporalGNN(torch.nn.Module):
def __init__(self, num_nodes, num_relations):
super().__init__()
# -----------------
# Dataset B
# -----------------
# num_classes = 2
# self.node_emb = Parameter(torch.Tensor(num_nodes, hidden_channels))
# self.entity_embedding = nn.Embedding(num_entities, 100)
# self.relation_embedding = nn.Parameter(torch.Tensor(num_relations, 100))
self.node_embedding = nn.Embedding(num_nodes, 32) # Lookup
self.bn32 = BatchNorm1d(32)
self.conv1 = RGCNConv(
32, 64, num_relations, # num_bases=30
)
self.bn64 = BatchNorm1d(64)
self.bn64_2 = BatchNorm1d(64)
self.conv2 = RGCNConv(
64, 64, num_relations, # num_bases=30
)
self.emb_rel = Parameter(torch.Tensor(num_relations, 64))
# self.emb_rel = nn.Linear(num_relations, 64)
# self.bn1 = nn.BatchNorm1d(1) # continuous time
# self.timestamp_emb = Parameter(torch.Tensor())
# self. emb_ts = nn.Embedding(1, 100)
self.emb_ts = nn.Linear(1, 2)
self.bn2 = BatchNorm1d(2)
self.bn1 = BatchNorm1d(1)
self.fc_1 = nn.Linear(1, 12)
self.emb_triplets = nn.Linear(64 * 3, 64) # src, link, tgt -> tri
# self.emb_link = nn.Linear(64 + 2, 16) # tri + ts -> prob
self.emb_link = nn.Linear(64 + 12, 16) # tri + ts -> prob
self.emb_link2 = nn.Linear(16, 1) # tri + ts -> prob
nn.init.xavier_uniform_(self.conv1.weight)
nn.init.xavier_uniform_(self.conv2.weight)
        nn.init.xavier_uniform_(self.emb_rel)  # emb_rel is a Parameter (no .weight), so initialize it directly
nn.init.xavier_uniform_(self.emb_ts.weight)
nn.init.xavier_uniform_(self.emb_triplets.weight)
nn.init.xavier_uniform_(self.emb_link.weight)
nn.init.xavier_uniform_(self.emb_link2.weight)
# print("self.emb_rel:", self.emb_rel.shape)
def forward(self, data):
x = np.unique(data.edge_index)
# print("data.node_idx:",data.node_idx)
# print("data.x:", x)
# print("data.x:", x.shape)
x = self.node_embedding(data.n_id)
x = self.bn32(x)
edge_index = data.edge_index
# edge_attrs = data.edge_attrs
edge_types = data.edge_types
# edge_timestamps = data.edge_timestamps
edge_types = edge_types[0:edge_index.size(1)]
x = F.relu(self.conv1(x, edge_index, edge_types)) # [2, 41] -> [869069, 32]
x = self.bn64(x)
x = F.dropout(x, p=0.5, training=self.training)
x = self.conv2(x, edge_index, edge_types) # [869069, 32] -> [869069, 64]
x = self.bn64_2(x)
# x = F.log_softmax(x, dim=1)
return x
def link_embedding(self, node_embeddings, edge_index, edge_type):
z = node_embeddings
z_src, z_dst = z[edge_index[0]], z[edge_index[1]]
rel = self.emb_rel[edge_type]
# print("z_src :", z_src.shape)
# print("z_dst :", z_dst.shape)
# print("rel :", rel.shape)
# print("torch.sum(z_src * rel * z_dst, dim=1):", torch.sum(z_src * rel * z_dst, dim=1).shape)
# print(torch.sum(z_src * rel * z_dst, dim=1)[0:4])
z_tri = self.emb_triplets(torch.cat((z_src, rel, z_dst), 1))
return z_tri
# source, target, edge_type, timestamp
def temporal_link_prediction(self, z_tri, edge_timestamps):
# edge_timestamps.apply_(
# lambda x: int(datetime.datetime.fromtimestamp(x).strftime("%Y%m%d%H%M"))
# )
# print("edge_timestamps :", edge_timestamps.shape)
# edge_timestamps = self.bn1(edge_timestamps)
# print(edge_timestamps)
edge_timestamps = edge_timestamps.float().unsqueeze(1) # [41] -> [41, 1]
edge_timestamps = self.bn1(edge_timestamps) # TODO: TODO:
edge_timestamps = self.fc_1(edge_timestamps)
edge_timestamps = F.relu(edge_timestamps)
"""
edge_timestamps = self.emb_ts(edge_timestamps)
edge_timestamps = self.bn2(edge_timestamps)
edge_timestamps = F.relu(edge_timestamps) # TODO:
# print("edge_ts :", edge_timestamps.shape)
# edge_timestamps = edge_timestamps.unsqueeze(1)
"""
# print("z_tri:", z_tri.shape)
link_prob = self.emb_link(torch.cat((z_tri, edge_timestamps), 1))
link_prob = F.relu(link_prob)
link_prob = F.dropout(link_prob, p=0.5, training=self.training)
link_prob = self.emb_link2(link_prob)
# link_likelihood = F.relu(link_likelihood) # TODO:
# print("z_tri:", link_likelihood.shape)
# torch.sum(z_src * rel * z_dst, dim=1)
# torch.sum(z_src * rel * z_dst, dim=1) # element-wise product
link_prob = torch.sigmoid(link_prob)
return link_prob
def calc_loss(self, node_embeddings, samples, target):
pass
# source, target, edge_type, timestamp -> y
# model = GNN(hidden_channels=32, out_channels=dataset.num_classes)
# model = to_hetero(model, data.metadata(), aggr='sum')
if __name__ == '__main__':
"""
USAGE: (env) python3 ./scripts/models.py
"""
print("--------------------")
print(" MODELS (DEV) ")
print("--------------------")
dataset = LargeGraphDataset(dataset_name='B')
dataloader = DataLoader(dataset, batch_size=2, shuffle=True)
data = next(iter(dataloader))
model = TemporalGNN(
# num_nodes: 869068, num_relations: 14
num_nodes=869068 + 1, # B) sample_dataset.num_nodes
num_relations=14 + 1, # B) sample_dataset.num_relations
)
# model = model #.to(params['device'])
y_pred = model(data)
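    # Illustrative check (assumes the dev run above succeeded): the forward pass returns one
    # 64-dimensional embedding per sampled node, so y_pred.shape should be [data.n_id.size(0), 64].
    # print("node embeddings:", y_pred.shape)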
``` |
{
"source": "jihokwak/bigwing",
"score": 3
} |
#### File: lib/bigwing/api.py
```python
import requests as req
import json
import pandas as pd
import warnings
from IPython.display import clear_output
from time import sleep
from abc import *
warnings.filterwarnings("ignore")
class BigwingAPIProcessor(metaclass=ABCMeta) :
    ''' BigWing API processor abstract base class '''
def run(self, limit=True):
pass
def __fetch(self, address) :
pass
    def insert(self, data, col) :
        '''
        Inserts the dataset to be searched
        :param data: dataset (type: pandas DataFrame)
        :param col: name of the column holding the search keyword (type: str)
        :return: self
        '''
        self._check("url")  # check API key validity
        # validate and insert the data
        if data.__class__ != pd.DataFrame().__class__ :
            print("FAILED : the input data is not a pandas DataFrame.")
        else :
            if col not in data.columns :
                print("FAILED : the given column does not exist in the input data.")
            else :
                self.data = data
                self.col = col
                print("SUCCEEDED : data inserted.")
        return self
    def takeout(self) :
        '''
        Getter that returns the processed data
        :return: pandas DataFrame
        '''
        try:
            self.data
        except AttributeError:  # a missing attribute raises AttributeError
            raise RuntimeError("FAILED : no processed data.")
        return self.data
    def get_param(self) :
        '''
        Getter that returns the API parameter information
        :return: dict_items object
        '''
        try:
            self.params
        except AttributeError:
            raise RuntimeError("FAILED : parameters have not been set.")
        return self.params.items()
    def _set_param(self) :
        '''
        Setter that builds the request URL from the API parameters
        :return: None
        '''
        param_str = ""
        for param_nm, param_val in self.params.items() :
            param_str = param_str + "&" + param_nm + "=" + param_val
        self.url = self.base_url + param_str
    def summary(self) :
        '''
        Prints a summary of the processing results
        :return: None
        '''
        try:
            self.data
        except AttributeError:
            raise RuntimeError("FAILED : no processed data.")
        print("- records processed : ", self.data.shape[0])
        print("- succeeded : ", sum(self.data.์ฒ๋ฆฌ์ํ == "OK"))
        print("- failed : ", sum(self.data.์ฒ๋ฆฌ์ํ != "OK"))
        print("- success rate : {}%".format(round(sum(self.data.์ฒ๋ฆฌ์ํ == "OK")/self.data.shape[0]*100, 1)))
    def _check(self, attr) :
        '''
        Checks whether the given class attribute exists (internal use)
        :param attr: attribute name
        :return: None
        '''
        try:
            getattr(self, attr)
        except AttributeError:
            raise RuntimeError("FAILED : please check {}.".format(attr))
class Vwolrd_Geocoder(BigwingAPIProcessor) :
    '''V-World geocoder'''
    def __init__(self, key, crs="EPSG:5181", type_="ROAD") :
        '''
        V-World geocoder constructor
        :param key: V-World API key
        :param crs: coordinate reference system (Default : EPSG:5181)
        :param type_: road-name or parcel address option (Default : ROAD)  # ROAD (road-name) or PARCEL (lot number)
        '''
        # set parameters
        self.base_url = "http://api.vworld.kr/req/address?service=address&request=getCoord"
        self.params = {}
        self.params["key"] = key        # API key
        self.params['crs'] = crs        # coordinate system
        self.params['type'] = type_     # road-name (ROAD) or parcel (PARCEL)
        self.params['simple'] = "true"  # simplified response
        self._set_param()
        # check API key validity
        status = self.__fetch("์์ธํน๋ณ์ ์ข๋ก๊ตฌ ์ธ์ข๋ก 1")[0]
        if status != "OK" :
            del self.params['key'], self.url
            print("KEY " + status + " : please check the API key again.")
        else :
            print("KEY " + status + " : API key verified!")
    def __fetch(self, address) :
        '''
        Fetches a single geocoding record for the given search address
        :param address: search keyword (address)
        :return: tuple of search status and coordinate info
        '''
        values = {}
        fetch_url = self.url + "&address=" + address
        for cnt in range(10) :
            try :
                resp = req.get(fetch_url).text
            except :
                print("fetch attempt #{}".format(cnt+2))
                sleep(3)
                continue
            break
        resp = json.loads(resp)
        status = resp['response']['status']  # response status code
        if status == 'OK' :
            # store the returned data
            values = resp['response']['result']['point']
            return tuple([status] + [value for value in values.items()])
        else :
            return tuple(["NOT_FOUND"])
    def run(self, limit=True) :
        '''
        Runs the API calls in batch.
        The limit argument takes a Boolean (Default : True).
        When limit is True, rows whose "์ฒ๋ฆฌ์ํ" (processing status) is already "OK" are skipped.
        :return: None
        '''
        self._check("data")  # check that data has been inserted
        self._check("url")   # check API key validity
        data = self.data.copy()
        if (limit == True) & ("์ฒ๋ฆฌ์ํ" in data.columns) :
            data = data[data["์ฒ๋ฆฌ์ํ"] != "OK"]
        data_size = len(data)
        succeed_cnt = 0
        if data_size != 0 :
            for idx, keyword in enumerate(data[self.col]) :
                # fetch and store
                values = self.__fetch(keyword)
                print("debug : ", values)
                if values[0] == "OK" :
                    succeed_cnt += 1
                    for value in values[1:] :
                        self.data.loc[self.data[self.col]==keyword, value[0]] = value[1]
                self.data.loc[self.data[self.col]==keyword, "์ฒ๋ฆฌ์ํ"] = values[0]
                # print progress
                print("{} / {} ... {}%".format(idx+1, data_size, round((idx+1)/data_size*100, 1)))
                print("{} --> {}".format(keyword, values))
                clear_output(wait=True)
        print("Processing complete!")
        print("Newly succeeded records : ", succeed_cnt)
        self.summary()
#### Google Geocoder ####
class Google_Geocoder(BigwingAPIProcessor) :
    def __init__(self, key) :
        '''
        Google geocoder constructor
        :param key: Google API key
        '''
        self.base_url = "https://maps.googleapis.com/maps/api/geocode/json?"
        self.params = {}
        self.params["key"] = key  # API key
        self._set_param()
        # check API key validity
        status = self.__fetch("์์ธํน๋ณ์ ์ข๋ก๊ตฌ ์ธ์ข๋ก 1")[0]
        if status != "OK" :
            del self.params['key'], self.url
            print("KEY " + status + " : please check the API key again.")
        else :
            print("KEY " + status + " : API key verified!")
    def __fetch(self, keyword) :
        '''
        Fetches a single geocoding record for the given search address
        :param keyword: search keyword (address)
        :return: tuple of search status and coordinate info
        '''
        values = {}
        fetch_url = self.url + "&address=" + keyword
        for cnt in range(10) :
            try :
                resp = req.get(fetch_url).text
            except :
                print("fetch attempt #{}".format(cnt+2))
                sleep(3)
                continue
            break
        resp = json.loads(resp)
        status = resp['status']  # response status code
        if status == 'OK' :
            values = resp['results'][0]['geometry']['location']
            return tuple([status] + [value for value in values.items()])
        else :
            return tuple(["NOT_FOUND"])
    def run(self, limit=True) :
        '''
        Runs the API calls in batch.
        The limit argument takes a Boolean (Default : True).
        When limit is True, rows whose "์ฒ๋ฆฌ์ํ" (processing status) is already "OK" are skipped.
        :return: None
        '''
        self._check("data")  # check that data has been inserted
        self._check("url")   # check API key validity
        data = self.data.copy()
        if (limit == True) & ("์ฒ๋ฆฌ์ํ" in data.columns) :
            data = data[data["์ฒ๋ฆฌ์ํ"] != "OK"]
        data_size = len(data)
        succeed_cnt = 0
        if data_size != 0 :
            for idx, keyword in enumerate(data[self.col]) :
                # fetch and store
                values = self.__fetch(keyword)
                print("debug : ", values)
                if values[0] == "OK" :
                    succeed_cnt += 1
                    for value in values[1:] :
                        self.data.loc[self.data[self.col]==keyword, value[0]] = value[1]
                self.data.loc[self.data[self.col]==keyword, "์ฒ๋ฆฌ์ํ"] = values[0]
                # print progress
                print("{} / {} ... {}%".format(idx+1, data_size, round((idx+1)/data_size*100, 1)))
                print("{} --> {}".format(keyword, values))
                clear_output(wait=True)
        print("Processing complete!")
        print("Newly succeeded records : ", succeed_cnt)
        self.summary()
#### Ministry of the Interior and Safety (MOIS) road-name address converter ####
class AddressConverter(BigwingAPIProcessor) :
    def __init__(self, key) :
        '''
        Road-name address converter constructor
        :param key: API key for the MOIS address search service (juso.go.kr)
        '''
        self.base_url = "http://www.juso.go.kr/addrlink/addrLinkApi.do?"
        self.params = {}
        self.params["confmKey"] = key  # API key
        self.params['currentPage'] = "1"
        self.params['countPerPage'] = "10"
        self.params['resultType'] = "json"
        self._set_param()
        # check API key validity
        status = self.__fetch("์์ธํน๋ณ์ ์ข๋ก๊ตฌ ์ธ์ข๋ก 1")[0]
        if status != "OK" :
            del self.params['confmKey'], self.url
            print("KEY " + status + " : please check the API key again.")
        else :
            print("KEY " + status + " : API key verified!")
    def __fetch(self, keyword) :
        '''
        Fetches a single address-conversion record for the given search address
        :param keyword: search keyword (address)
        :return: tuple of search status and converted address info
        '''
        values = {}
        fetch_url = self.url + "&keyword=" + keyword
        for cnt in range(10) :
            try :
                resp = req.get(fetch_url).text
            except :
                print("fetch attempt #{}".format(cnt+2))
                sleep(3)
                continue
            break
        resp = json.loads(resp)
        status = "OK" if "juso" in resp["results"].keys() else "NOT_FOUND"  # response status
        if status == 'OK':
            if resp["results"]["juso"]:
                values = resp['results']['juso'][0]
                return tuple([status] + [value for value in values.items()])
            else:
                return tuple(["NOT_FOUND"])
        else :
            return tuple(["NOT_FOUND"])
    def run(self, limit=True) :
        '''
        Runs the API calls in batch.
        The limit argument takes a Boolean (Default : True).
        When limit is True, rows whose "์ฒ๋ฆฌ์ํ" (processing status) is already "OK" are skipped.
        :return: None
        '''
        self._check("data")  # check that data has been inserted
        self._check("url")   # check API key validity
        data = self.data.copy()
        if (limit == True) & ("์ฒ๋ฆฌ์ํ" in data.columns) :
            data = data[data["์ฒ๋ฆฌ์ํ"] != "OK"]
        data_size = len(data)
        succeed_cnt = 0
        if data_size != 0 :
            for idx, keyword in enumerate(data[self.col]) :
                # fetch and store
                values = self.__fetch(keyword)
                print("debug : ", values)
                if values[0] == "OK" :
                    succeed_cnt += 1
                    for value in values[1:] :
                        self.data.loc[self.data[self.col]==keyword, value[0]] = value[1]
                self.data.loc[self.data[self.col]==keyword, "์ฒ๋ฆฌ์ํ"] = values[0]
                # print progress
                print("{} / {} ... {}%".format(idx+1, data_size, round((idx+1)/data_size*100, 1)))
                print("{} --> {}".format(keyword, values))
                clear_output(wait=True)
        print("Processing complete!")
        print("Newly succeeded records : ", succeed_cnt)
        self.summary()
class SuperAPICaller(BigwingAPIProcessor) :
    '''Generic API request class'''
    def __init__(self, base_url, **params) :
        '''
        Generic API caller constructor
        :param base_url: base URL
        :param params: API request parameters
        '''
        self.base_url = base_url
        self.params = params
        self._set_param()
    def set_tagname(self, name) :
        '''
        Setter for the query-string parameter name of the search keyword
        :return: None
        '''
        self.tagname = name
    def set_status(self, status_loc, OK) :
        '''
        Sets the location of the status code and the "success" code value.
        :param status_loc: locates the status code inside the JSON response (abstracted as a dict tree),
            e.g. resp['results']['juso'] in
            self.status = "OK" if resp['results']['juso'] != [] else "NOT_FOUND"
        :param OK: the code value the API returns together with a normal response
        :return: None
        '''
        self.status_loc = status_loc
        self.OK = OK
    def set_values(self, values) :
        '''
        Sets the location of the data inside the JSON dict returned by the API request
        :param values: location inside the dict-abstracted JSON document,
            e.g. values = resp['results']['juso'][0]
        :return: None
        '''
        self.values = values
    def __fetch(self, keyword) :
        '''
        Fetches a single record for the given search keyword
        :param keyword: search keyword
        :return: tuple of search status and data
        '''
        values = {}
        fetch_url = self.url + "&" + self.tagname + "=" + keyword
        for cnt in range(10) :
            try :
                resp = req.get(fetch_url).text
            except :
                print("fetch attempt #{}".format(cnt+2))
                sleep(3)
                continue
            break
        resp = json.loads(resp)
        status = "OK" if self.status_loc != self.OK else "NOT_FOUND"  # response status
        if status == 'OK' :
            return tuple([status] + [value for value in self.values.items()])
        else :
            return tuple(["NOT_FOUND"])
    def run(self, limit=True) :
        '''
        Runs the API calls in batch.
        The limit argument takes a Boolean (Default : True).
        When limit is True, rows whose "์ฒ๋ฆฌ์ํ" (processing status) is already "OK" are skipped.
        :return: None
        '''
        self._check("data")  # check that data has been inserted
        self._check("url")   # check API key validity
        data = self.data.copy()
        if (limit == True) & ("์ฒ๋ฆฌ์ํ" in data.columns) :
            data = data[data["์ฒ๋ฆฌ์ํ"] != "OK"]
        data_size = len(data)
        succeed_cnt = 0
        if data_size != 0 :
            for idx, keyword in enumerate(data[self.col]) :
                # fetch and store
                values = self.__fetch(keyword)
                print("debug : ", values)
                if values[0] == "OK" :
                    succeed_cnt += 1
                    for value in values[1:] :
                        self.data.loc[self.data[self.col]==keyword, value[0]] = value[1]
                self.data.loc[self.data[self.col]==keyword, "์ฒ๋ฆฌ์ํ"] = values[0]
                # print progress
                print("{} / {} ... {}%".format(idx+1, data_size, round((idx+1)/data_size*100, 1)))
                print("{} --> {}".format(keyword, values))
                clear_output(wait=True)
        print("Processing complete!")
        print("Newly succeeded records : ", succeed_cnt)
        self.summary()
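# Hypothetical usage sketch (not part of the original module): the generic caller is configured
# with a base URL, request parameters, the query-string tag name, a status rule, and a value
# locator before insert()/run() are called. The URL, key, and column name below are placeholders.
# caller = SuperAPICaller("http://example.com/api?service=search", key="YOUR_KEY")
# caller.set_tagname("keyword")
# caller.insert(df, "address")   # df: pandas DataFrame with an "address" column
# caller.run()
# result = caller.takeout()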
```
#### File: lib/bigwing/crawler.py
```python
from bs4 import BeautifulSoup
import warnings; warnings.filterwarnings("ignore")
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from IPython.display import clear_output
import re, os, time, pickle, errno
import pandas as pd
import numpy as np
import threading
class BigwingCrawler():
def __init__(self, url='about:blank', page_range=None, page_type=None, browser='Chrome', headless=True, n_jobs=1, verbose=True):
        '''
        Crawler class constructor
        :param url:
        :param browser: headless browser to use, Chrome (Default) or PhantomJS
        :param headless: headless mode, True (Default) or False
        '''
try :
self.url = url
self.page_type = page_type
self.browser = browser
self.headless = headless
self.n_jobs = n_jobs
self.data = None
self.thread = []
self.verbose = verbose
if page_range != None:
self.partitioner(page_range[0], page_range[1], n_jobs)
self.start_page = page_range[0]
self.end_page = page_range[1]
self.error_page_list = self.load("error_pages")
self.success_page_list = self.load("success_pages")
except Exception as e:
print(e)
self.close()
    def partitioner(self, start, end, divide):
        partition_sp = np.linspace(start - 1, end, divide + 1).astype(int)
        # create per-partition storage
        self.partitions = {}     # scraped data per partition
        self.error_pages = {}    # error pages per partition
        self.success_pages = {}  # successful pages per partition
        self.status = {}         # progress status per partition
        self.successes = {}      # success count per partition
        self.processeds = {}     # processed count per partition
        self.errors = {}         # error count per partition
        self.run_flags = {}      # run flag per partition
        self.stop_flags = {}     # stop flag per partition
        self.zip_flag = 0        # whether partitions have been merged
        self.drivers = {}        # browser driver per partition
        self.htmls = {}          # html document per partition
        self.soups = {}          # BeautifulSoup object per partition
        self.processes = {}      # process (thread) per partition
        # initialize the storage for each partition
        for i in range(len(partition_sp) - 1):
            # the partition key holds the page range of the partition
            partition_key = (partition_sp[i] + 1, partition_sp[i + 1])
            self.open(partition_key)  # launch the browser
            self.partitions[partition_key] = pd.DataFrame()
            self.error_pages[partition_key] = []
            self.success_pages[partition_key] = []
            self.status[partition_key] = "READY"
            self.successes[partition_key] = 0
            self.processeds[partition_key] = 0
            self.errors[partition_key] = 0
            self.processes[partition_key] = None
            self.run_flags[partition_key] = False
            self.stop_flags[partition_key] = True
    def start(self):
        if self.verbose == True: print("Running with {} crawler process(es).".format(len(self.partitions.keys())))
        for partition_key in self.partitions:
            self.status[partition_key] = "RUNNING"
            self.processes[partition_key] = threading.Thread(target=self.crawl, args=(partition_key,))
            self.run_flags[partition_key] = True
            self.stop_flags[partition_key] = False
        for process in self.processes.values() :
            process.start()
        # for process in self.processes.values() :
        #     process.join()
    def restart(self, part_nm=None):
        keys = list(self.partitions.keys())
        if part_nm != None :
            if part_nm > len(keys) : print("Process {} does not exist.".format(part_nm)); return;
            partition_key = keys[part_nm + 1]
            self.run_flags[partition_key] = True
            self.status[partition_key] = "RUNNING"
            print("Restarted process {}".format(partition_key))
        else :
            for partition_key in keys :
                self.run_flags[partition_key] = True
                self.status[partition_key] = "RUNNING"
                print("Restarted process {}".format(partition_key))
    def pause(self, part_nm=None):
        keys = list(self.partitions.keys())
        if part_nm != None :
            if part_nm > len(keys) : print("Process {} does not exist.".format(part_nm)); return;
            partition_key = keys[part_nm + 1]
            self.run_flags[partition_key] = False
            self.status[partition_key] = "PAUSED"
            print("Paused process {}".format(partition_key))
        else :
            for partition_key in keys :
                self.run_flags[partition_key] = False
                self.status[partition_key] = "PAUSED"
                print("Paused process {}".format(partition_key))
    def stop(self, part_nm=None):
        keys = list(self.partitions.keys())
        if part_nm != None:
            if part_nm > len(keys): print("Process {} does not exist.".format(part_nm)); return;
            partition_key = keys[part_nm + 1]
            self.stop_flags[partition_key] = True
            self.status[partition_key] = "STOPPED"
            print("Stopped process {}".format(partition_key))
        else:
            for partition_key in keys:
                self.stop_flags[partition_key] = True
                self.status[partition_key] = "STOPPED"
                print("Stopped process {}".format(partition_key))
        time.sleep(2)
        self.close()
def set_verbose(self, verbose):
self.verbose = verbose
def open(self, partition_key):
self.drivers[partition_key] = self.set_driver(self.url)
self.htmls[partition_key] = self.set_html(partition_key)
self.soups[partition_key] = self.set_soup(partition_key)
print("{} ํ์ด์ง ๋ธ๋ผ์ฐ์ ๋ฅผ ์คํํ์ต๋๋ค.".format(partition_key))
    def clear(self):
        import shutil
        try :
            shutil.rmtree("tmpdata/{}".format(self.page_type))
            print("Deleted data.")
        except FileNotFoundError as e :
            print("No records found.")
def backup(self):
import shutil
from datetime import datetime
timestamp = datetime.strftime(datetime.now(), "%m%d_%H%M")
tmpdir = os.path.join(os.path.abspath(os.path.curdir), "tmpdata")
backupdir = os.path.join(os.path.abspath(os.path.curdir), "backup")
dstdir = os.path.join(backupdir, timestamp)
if not os.path.isdir(backupdir):
os.makedirs(backupdir)
        try :
            shutil.move(tmpdir, dstdir)
            print("Backed up data to {}.".format(
                os.path.join(dstdir, self.page_type)))
        except :
            pass
def refresh(self, partition_key):
for i in range(self.n_jobs) :
self.htmls[partition_key] = self.set_html(partition_key)
self.soups[partition_key] = self.set_soup(partition_key)
def picker(self, partition_key, parant_tag, child_tag=None):
        '''
        Selects the tags that hold the target information and collects the matching data from the whole page
        :param parant_tag: parent tag selector
        :param child_tag: child tag selector (Default : None)
        :return: list of lists
        '''
tags = self.soups[partition_key].select(parant_tag)
results = []
for tag in tags :
if child_tag != None :
tag = tag.select(child_tag)
tag = [data.text.strip() for data in tag]
if tag == [] :
continue
results.append(tag)
return results
def fetch(self, partition_key, keyword):
        '''
        Abstract method: crawls a single record
        :param keyword: search keyword
        :return: None
        '''
pass
def insert(self, input_data, col):
pass
def takeout(self):
        '''
        Returns the crawled dataset
        :return: data (type: DataFrame or dict of DataFrames)
        '''
        if self.n_jobs == 1:
            # a dict needs a key for pop(); return the single partition directly
            return list(self.partitions.values())[0]
        else:
            if self.zip_flag == 0:
                return self.partitions
            else:
                return self.data
    def save(self):
        self.data = pd.DataFrame()
        for partition in self.partitions.values():
            self.data = self.data.append(partition)
        self.data = self.data.reset_index(drop=True)
        print("Merged partition data.")
        self.record()
        print("Writing scraping logs.")
        self.log()
        self.zip_flag = 1
    def monitor(self, second=2):
        self.set_verbose(False)
        while True:
            try:
                self.summary()
                clear_output(wait=True)
                time.sleep(second)
            except KeyboardInterrupt:
                break;
        self.set_verbose(True)
        print("Monitoring stopped.")
    def summary(self):
        print("-" * 108)
        for partition_key in self.partitions:
            line = "{:>15} scraping process | {:>5}% {} | total {:>6} | success {:>6} | failed {:>6}".format(
                str(partition_key),
                ("%.1f" % (self.processeds[partition_key] / (partition_key[1] - partition_key[0] + 1) * 100)),
                self.status[partition_key],
                partition_key[1] - partition_key[0] + 1,
                self.successes[partition_key],
                self.errors[partition_key],
            )
            print("|{:>82} |".format(line))
        print("-" * 108)
        total_processeds = 0
        for i in self.processeds.values() : total_processeds += i
        total_successes = 0
        for i in self.successes.values(): total_successes += i
        total_errors = 0
        for i in self.errors.values(): total_errors += i
        total_status = "READY"
        for status in self.status.values() :
            if "RUNNING" in status : total_status = "RUNNING"
        cnt = 0
        for status in self.status.values() :
            if "FINISHED" in status : cnt += 1
        if cnt == len(self.status.values()) :
            total_status = "FINISHED"
        percentage = (total_processeds / (self.end_page - self.start_page + 1)) * 100
        line = "{:>12} scraping process | {:>5}% {} | total {:>6} | success {:>6} | failed {:>6}".format(
            "TOTAL",
            "%.1f" % percentage,
            total_status,
            self.end_page - self.start_page + 1,
            total_successes,
            total_errors,
        )
        print("|{:>80} |".format(line))
        print("-" * 108)
def record(self):
filename = "total_{}_{}_{}".format(self.page_type, self.start_page, self.end_page)
try:
if not (os.path.isdir(os.path.join("tmpdata", self.page_type))):
os.makedirs(os.path.join("tmpdata", self.page_type))
if not (os.path.isdir(os.path.join("tmpdata", self.page_type, "data"))):
os.makedirs(os.path.join("tmpdata", self.page_type, "data"))
except OSError as e:
if e.errno != errno.EEXIST:
print("๋๋ ํ ๋ฆฌ ์์ฑ ์คํจ.")
raise
try :
with open("tmpdata/{}/data/{}.pkl".format(self.page_type, filename), "rb") as f:
dump_data = pickle.load(f)
except:
dump_data = pd.DataFrame()
dump_data = dump_data.append(self.data).reset_index(drop=True)
with open("tmpdata/{}/data/{}.pkl".format(self.page_type, filename), "wb") as f:
pickle.dump(dump_data, f)
        # merge with the existing data
try :
file_data = pd.read_csv("tmpdata/{}/data/{}.csv".format(self.page_type, filename), encoding="utf8", index_col=False)
except FileNotFoundError :
file_data = pd.DataFrame()
file_data = file_data.append(self.data).reset_index(drop=True)
file_data.to_csv("tmpdata/{}/data/{}.csv".format(self.page_type, filename), encoding="utf8", index=False)
print("{} ๋ก ๋ฐ์ดํฐ๋ฅผ ์ ์ฅํ์ต๋๋ค.".format(os.path.join(os.path.abspath(os.path.curdir),"tmpdata",self.page_type, "data", filename + ".csv")))
def load(self, filename):
import pickle
try :
with open("tmpdata/{}/log/{}.pkl".format(self.page_type, filename), "rb") as f:
data = pickle.load(f)
return data
except :
return []
def crawl(self, partition_key):
pass
def scrap(self, partition_key):
pass
def set_page(self, partition_key, page_nm):
pass
def _check(self, attr) :
        '''
        Checks whether the given class attribute exists (internal use)
        :param attr: attribute name
        :return: None
        '''
        try:
            getattr(self, attr)
        except AttributeError:
            raise RuntimeError("FAILED : please check {}.".format(attr))
def set_soup(self, partition_key):
        '''
        Creates a BeautifulSoup object for the given partition's html document
        :param partition_key: partition key of the target browser
        :return: BeautifulSoup object
        '''
return BeautifulSoup(self.htmls[partition_key], 'html.parser')
def set_html(self, partition_key):
        '''
        Returns the current page source of the given partition's browser as an html string
        :param partition_key: partition key of the target browser
        :return: html string
        '''
return self.drivers[partition_key].page_source
def set_driver(self, url):
        '''
        Sets up the selenium browser driver
        :param url: url string to open
        :param browser: taken from self.browser (Default : Chrome); PhantomJS is also supported
        :return: webdriver instance
        '''
driver = None
option = Options()
option.add_argument('headless')
option.add_argument('window-size=1920x1080')
option.add_argument("disable-gpu")
        # headless-detection evasion: spoof the user agent and language
option.add_argument(
"user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36")
option.add_argument("lang=ko_KR")
cur_dir = os.path.abspath(os.path.dirname(__file__))
browser_dir = os.path.join(cur_dir, "browser")
if self.browser == "Chrome":
browser_file = browser_dir + "/chromedriver.exe"
if self.headless == True :
driver = webdriver.Chrome(browser_file, chrome_options=option)
else :
driver = webdriver.Chrome(browser_file)
driver.get('about:blank')
driver.execute_script("Object.defineProperty(navigator, 'plugins', {get: function() {return[1, 2, 3, 4, 5]}})")
driver.execute_script("const getParameter = WebGLRenderingContext.getParameter;WebGLRenderingContext.prototype.getParameter = function(parameter) {if (parameter === 37445) {return 'NVIDIA Corporation'} if (parameter === 37446) {return 'NVIDIA GeForce GTX 980 Ti OpenGL Engine';}return getParameter(parameter);};")
else:
browser_file = browser_dir + "/PhantomJS.exe"
driver = webdriver.PhantomJS(browser_file)
driver.execute_script("Object.defineProperty(navigator, 'languages', {get: function() {return ['ko-KR', 'ko']}})")
driver.implicitly_wait(3)
driver.get(url)
return driver
def get_text(self, partition_key):
        '''
        Extracts the text content from the instance's html document
        :return: text string
        '''
text = ""
p = re.compile(r'(<.{1,5}/?>)(?P<content>[^<\n]+)(</.{1,5}>)', re.M)
m = p.finditer(self.htmls[partition_key])
lines = [line.group("content").strip() for line in m]
for line in lines :
text = text + "\n" + line
return text
def get_tags(self, partition_key):
        '''
        Returns the list of tag names used in the instance's html document
        :return: list of strings
        '''
alltags = self.soups[partition_key].find_all(True)
alltags = [tag.name for tag in alltags]
alltags = list(set(alltags))
return alltags
def get_attrs(self, partition_key):
        '''
        Returns the attribute names used in the instance's html document
        :return: list of strings
        '''
tags = self.soups[partition_key].find_all(True)
attrs_list = [[attr for attr in tag.attrs.keys()] for tag in tags]
attrs = []
for attr in attrs_list:
attrs.extend(attr)
attrs = list(set(attrs))
return attrs
def log(self):
try:
if not (os.path.isdir(os.path.join("tmpdata", self.page_type))):
os.makedirs(os.path.join("tmpdata", self.page_type))
if not (os.path.isdir(os.path.join("tmpdata", self.page_type, "log"))):
os.makedirs(os.path.join("tmpdata", self.page_type, "log"))
except OSError as e:
if e.errno != errno.EEXIST:
print("๋๋ ํ ๋ฆฌ ์์ฑ ์คํจ.")
raise
        # record error pages
        error_page_list = []
        for partition in self.error_pages.values() :
            error_page_list.extend(partition)
        pd.DataFrame(error_page_list).to_csv("tmpdata/{}/log/{}_pages.csv".format(self.page_type, "error"), encoding="utf8")
        with open("tmpdata/{}/log/{}_pages.pkl".format(self.page_type, "error"), "wb") as f:
            pickle.dump(error_page_list, f)
        print("Saved data to {}.".format(
            os.path.join(os.path.abspath(os.path.curdir), "tmpdata", self.page_type, "log", "error_pages.csv")))
        # record successful pages
        success_page_list = []
        for partition in self.success_pages.values():
            success_page_list.extend(partition)
        pd.DataFrame(success_page_list).to_csv("tmpdata/{}/log/{}_pages.csv".format(self.page_type, "success"), encoding="utf8")
        with open("tmpdata/{}/log/{}_pages.pkl".format(self.page_type, "success"), "wb") as f:
            pickle.dump(success_page_list, f)
        print("Saved data to {}.".format(
            os.path.join(os.path.abspath(os.path.curdir), "tmpdata", self.page_type, "log", "success_pages.csv")))
    def __del__(self) :
        self.close()
        print("Crawler terminated.")
class EPLCrawler(BigwingCrawler):
def __init__(self, url='about:blank', page_range=None, page_type="Lineup", browser='Chrome', headless=True, n_jobs=1, verbose=True):
super().__init__(url, page_range, page_type, browser, headless, n_jobs, verbose)
if page_type=="Lineup" or page_type=="Matchs" :
self.url = "https://www.premierleague.com/match/"
else : pass;
time.sleep(2)
    def crawl(self, partition_key):
        # set the page cursor
        cur_page = first_page = partition_key[0]; last_page = partition_key[1]
        error_flag = False
        # choose the dataset container by page type
        if self.page_type == "Lineup" : dataset = pd.DataFrame()
        elif self.page_type == "Matchs" : dataset = pd.DataFrame()
        else : pass
        # scraping process
        while cur_page < (last_page + 1) :
            if cur_page in self.success_page_list :  # skip pages that were already crawled successfully
                if cur_page < (last_page + 1) :
                    self.success_pages[partition_key].extend([cur_page])
                    self.processeds[partition_key] += 1
                    self.successes[partition_key] += 1
                    cur_page += 1
                    continue
                else : break;
            self.status[partition_key] = "scraping page {}".format(cur_page)
            while self.run_flags[partition_key] == False : time.sleep(0.5)  # pause
            if self.stop_flags[partition_key] == True : break;  # stop
            try:
                self.set_page(partition_key, cur_page)
                # scrape
                if self.page_type == "Lineup":  # line-up page crawler
                    data = self.scrap_lineup(partition_key)
                elif self.page_type == "Matchs":  # match page crawler
                    data = self.scrap_matchstats(partition_key)
                else: pass;
                data.insert(0, "Match_ID", cur_page)  # stamp the page number
                # flag an error when too much match information is missing
                if data.shape[1] < 10 :
                    error_flag = True
                    if self.verbose == True: print("Failed to scrape page {}.".format(cur_page))
                else:
                    error_flag = False
                    if self.verbose == True: print("Scraped page {} successfully.".format(cur_page))
                # append to the existing records
                dataset = dataset.append(data).fillna("")
                self.partitions[partition_key] = dataset.reset_index(drop=True)
            except Exception as e:
                if self.verbose == True : print("{} : failed to scrape page {}".format(e, cur_page))
                error_flag = True
            # record the scraping result of the current page
            self.processeds[partition_key] += 1
            if error_flag == False :
                self.successes[partition_key] += 1                    # record the success count
                self.success_pages[partition_key].extend([cur_page])  # record the successful page
                self.success_page_list.extend([cur_page])
            else :
                self.errors[partition_key] += 1                       # record the failure count
                self.error_pages[partition_key].extend([cur_page])    # record the error page
                self.error_page_list.extend([cur_page])
            cur_page += 1
        # save scraping status & report
        if self.verbose == True: print("Finished scraping pages ({}, {}).".format(first_page, last_page))
        self.status[partition_key] = "DONE" if self.stop_flags[partition_key] == True else "FINISHED"
    def close(self):
        for partition_key in self.partitions:
            try :
                self.drivers[partition_key].close()
            except : pass
            try :
                self.drivers[partition_key].quit()
            except : pass
            print("Closed the browser for pages {}.".format(partition_key))
def scrap_matchstats(self, partition_key):
        # basic match information
matchInfo = self.drivers[partition_key].find_element_by_class_name("matchInfo").text.split("\n")
        # match club names
home_nm = self.drivers[partition_key].find_element_by_xpath(
"//*[@id='mainContent']/div/section/div[2]/section/div[3]/div/div/div[1]/div[1]/a[2]/span[1]").text
away_nm = self.drivers[partition_key].find_element_by_xpath(
"//*[@id='mainContent']/div/section/div[2]/section/div[3]/div/div/div[1]/div[3]/a[2]/span[1]").text
        # match score
score = self.drivers[partition_key].find_element_by_xpath(
"//*[@id='mainContent']/div/section/div[2]/section/div[3]/div/div/div[1]/div[2]/div").text
dataset = self.picker(partition_key, "tr", "td")
cols = ["matchinfo_"+str(i+1) for i in range(len(matchInfo))] + ["home_team", "score", "away_team"] + ["home_" + data[1] for data in dataset] + ["away_" + data[1] for data in dataset]
vals = matchInfo + [home_nm, score, away_nm] + [data[0] for data in dataset] + [data[2] for data in dataset]
matchstats = pd.DataFrame(columns=cols)
matchstats.loc[0] = vals
return matchstats
def scrap_lineup(self, partition_key):
lineup = pd.DataFrame(
columns=["Team", "Number", "Name", "Goal", "Sub_On_Off", "Sub_Time", "Card", "Playing", "Position",
"Nationality"])
for team in range(2):
            # position list
position_list = [position.text for position in self.soups[partition_key].find_all("div", "matchLineupTeamContainer")[team].select("h3")]
groups = self.soups[partition_key].find_all("div", "matchLineupTeamContainer")[team].select("ul")
            # iterate over the groups
for group_idx, group in enumerate(groups):
players = groups[group_idx].find_all("li", "player")
                # iterate over the players
for player in players:
player_info = []
team_nm = self.soups[partition_key].select("header.squadHeader > div.position")[team].find(text=True).strip()
player_info.append(team_nm) # ํ์ด๋ฆ
number = player.find("div", "number").get_text().replace("Shirt number ", "");
player_info.append(number) # ์ ์ ๋๋ฒ
info_tag = player.select("div.info")
for tag in info_tag:
nametag = tag.select(".name")[0]
name = nametag.find(text=True).strip();
player_info.append(name) # ์ ์์ด๋ฆ
try: # ๊ณจ์
p = re.compile(r'icn ball')
m = p.findall(str(nametag))
player_info.append(len(m))
except:
player_info.append(0)
try: # ๊ฒฝ๊ธฐ ์ธ์์
p = re.compile(r'sub-on|sub-off')
m = p.search(str(nametag))
if m.group(0) == "sub-on":
player_info.append("On")
elif m.group(0) == "sub-off":
player_info.append("Off")
except:
player_info.append("")
try: # ๊ต์ฒด ์๊ฐ
player_info.append(nametag.select("span.sub")[0].text)
except:
player_info.append("")
try: # ์นด๋ ์ฌ๋ถ
p = re.compile(r'yellow|red')
m = p.search(str(nametag))
if m.group(0) == "yellow":
player_info.append("Yellow")
elif m.group(0) == "red":
player_info.append("Red")
except:
player_info.append("")
try: # ์ฃผ์ /ํ๋ณด ์ฌ๋ถ
player_info.append("starter" if position_list[group_idx] != "Substitutes" or group_idx >= 4 else "substitutes")
except:
player_info.append("substitutes")
try: # ํฌ์ง์
player_info.append(tag.select(".position")[0].text.strip())
except:
player_info.append(position_list[group_idx])
try: # ๊ตญ๊ฐ
player_info.append(tag.select(".nationality")[0].text.strip())
except:
player_info.append("")
lineup.loc[lineup.shape[0]] = player_info
        # match information
try:
matchinfo = [""] * 4
matchinfo_tmp = [info.text.replace("Att: ", "") for info in self.soups[partition_key].select("div.matchInfo > div")]
for idx, info in enumerate(matchinfo_tmp):
matchinfo[idx] = info
except :
matchinfo = [""] * 4
lineup.insert(0, "Match_Date", matchinfo[0])
lineup.insert(1, "Referee", matchinfo[1])
lineup.insert(2, "Stadium", matchinfo[2])
lineup.insert(3, "Attendence", matchinfo[3])
try:
score = self.soups[partition_key].select("div.score")[0].text
except:
score = ""
lineup.insert(4, "Score", score)
return lineup
def set_page(self, partition_key, page_nm) :
dst_url = self.url + str(page_nm)
self.drivers[partition_key].get(dst_url)
try:
if not (os.path.isdir(os.path.join("tmpdata", self.page_type))):
os.makedirs(os.path.join("tmpdata", self.page_type))
except OSError as e:
if e.errno != errno.EEXIST:
print("๋๋ ํ ๋ฆฌ ์์ฑ ์คํจ.")
raise
time.sleep(0.3)
if self.page_type == "Lineup" :
if self.drivers[partition_key].find_element_by_class_name("matchCentreSquadLabelContainer").text.strip() == 'Line-ups' :
self.drivers[partition_key].find_element_by_class_name("matchCentreSquadLabelContainer").click()
else : raise NameError('NoLineups')
elif self.page_type == "Matchs" :
self.drivers[partition_key].find_element_by_xpath(
"//*[@id='mainContent']/div/section/div[2]/div[2]/div[1]/div/div/ul/li[3]").click()
time.sleep(0.2)
self.refresh(partition_key)
```
#### File: lib/bigwing/search.py
```python
import re, os
def search_ext(dirname, ext) :
results = []
p = re.compile(".+[.]" + ext + "$")
for (path, dir, files) in os.walk(dirname):
for filename in files :
m = p.search(filename)
if m : results.append("%s/%s" % (path, m.group(0)))
return results
def search_file(dirname, keyword) :
results = []
p = re.compile(".*" + keyword + ".*", re.I)
for (path, dir, files) in os.walk(dirname):
for filename in files :
m = p.search(filename)
if m : results.append("%s/%s" % (path, m.group(0)))
return results
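# Illustrative usage (hypothetical paths): both helpers walk a directory tree and
# return matching file paths as "path/filename" strings.
if __name__ == "__main__":
    print(search_ext(".", "csv"))       # every *.csv under the current directory
    print(search_file(".", "report"))   # every file whose name contains "report"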
``` |
{
"source": "jihoon3327/bitcoin_autotrade",
"score": 3
} |
#### File: jihoon3327/bitcoin_autotrade/bitcoin_autotrade.py
```python
import time
import pyupbit
import datetime
import schedule
from fbprophet import Prophet
import json
access = "----------------------"
secret = "----------------------"
def sell_all(coin) :
balance = upbit.get_balance(coin)
price = pyupbit.get_current_price(coin)
if price * balance > 5000 :
upbit.sell_market_order(coin, balance)
def get_balance(ticker):
"""์๊ณ ์กฐํ"""
balances = upbit.get_balances()
for b in balances:
if b['currency'] == ticker:
if b['balance'] is not None:
return float(b['balance'])
else:
return 0
return 0
def get_current_price(ticker):
"""ํ์ฌ๊ฐ ์กฐํ"""
return pyupbit.get_orderbook(tickers=ticker)[0]["orderbook_units"][0]["ask_price"]
def searching_coin():
searching_list = []
coins = pyupbit.get_tickers(fiat="KRW")
while searching_list == []:
try:
for coin in coins:
k = pyupbit.get_ohlcv(coin, interval="minutes1", count=30)
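                # Heuristic as written below: flag a coin when the mean 1-minute volume of the most
                # recent candles (k[27:]) is more than 100x the mean of candles 7-10, i.e. a sudden volume spike.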
if k[27:].describe()["volume"]["mean"]/k[7:10].describe()["volume"]["mean"] > 100:
searching_list.append(coin)
except TypeError as e:
print(e)
return searching_list[-1]
# log in
upbit = pyupbit.Upbit(access, secret)
print("autotrade start")
# start auto-trading
while True:
try:
krw = get_balance("KRW")
        # find a coin & buy
if krw > 5000:
            # look up the coin to trade
best_coin = searching_coin()
t_coin = best_coin
print(t_coin)
upbit.buy_market_order(t_coin, krw*0.9995)
print("buy")
buy_price = pyupbit.get_current_price(t_coin)
buy_time = datetime.datetime.now()
else:
while True:
                # sell condition 1: take profit (the check below uses buy_price*1.02, i.e. +2%)
if get_current_price(t_coin) > buy_price*1.02:
sell_all(t_coin)
time.sleep(1)
print("sell")
break
                # sell condition 2: stop loss (the check below uses buy_price*0.99, i.e. -1%)
elif get_current_price(t_coin) < buy_price*0.99 :
sell_all(t_coin)
time.sleep(1)
print("sell")
break
                # sell condition 3: sell after the holding time limit (20 minutes in the check below)
elif datetime.datetime.now() > buy_time + datetime.timedelta(minutes=20):
sell_all(t_coin)
time.sleep(1)
print("sell")
break
else:
continue
except Exception as e:
print(e)
time.sleep(1)
``` |
{
"source": "jihoonerd/1985_Auto_Imports_Database",
"score": 3
} |
#### File: 1985_Auto_Imports_Database/code/data_loader.py
```python
import numpy as np
import pandas as pd
import config
features = ['symboling',
'normalized_losses',
'make',
'fuel_type',
'aspiration',
'num_of_doors',
'body_style',
'drive_wheels',
'engine_location',
'wheel_base',
'length',
'width',
'height',
'curb_weight',
'engine_type',
'num_of_cylinders',
'engine_size',
'fuel_system',
'bore',
'stroke',
'compression_ratio',
'horsepower',
'peak_rpm',
'city_mpg',
'highway_mpg',
'price']
def load_data():
"""Returns dataset replaced with null values('?') to np.NaN"""
data = pd.read_csv(config.DATA_DIR+'imports-85.data', names=features)
data = data.replace('?', np.NaN)
return data
def split_X_y(data):
y = pd.to_numeric(data["normalized_losses"])
X = data.drop("normalized_losses", axis=1)
return X, y
def apply_task_condition(data):
"""
Missing values: denoted by quotation marks (โ?โ). Skip data samples with missing values in the target.
Features to ignore: โsymbolingโ
"""
not_null = data[data.normalized_losses.notnull()]
conditioned = not_null.drop("symboling", axis=1)
return conditioned
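# Minimal usage sketch (illustrative): chain the helpers above to obtain a modelling dataset.
# This mirrors how the functions are defined above, not a snippet taken from the repository.
if __name__ == "__main__":
    raw = load_data()                        # read imports-85.data with '?' replaced by NaN
    conditioned = apply_task_condition(raw)  # drop rows without a target, drop 'symboling'
    X, y = split_X_y(conditioned)            # features / normalized_losses target
    print(X.shape, y.shape)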
```
#### File: 1985_Auto_Imports_Database/code/eda.py
```python
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
def display_basic_info(data):
print("Number of Instances: ", data.shape[0])
print("Number of Features : ", data.shape[1])
print(data.dtypes)
return None
def display_descriptive_statistics(data):
print(data.describe())
return None
def plot_target_feature(data):
plt.figure()
sns.distplot(pd.to_numeric(data.normalized_losses), rug=True, kde=True)
plt.title("Normalized Losses")
plt.show()
return None
def plot_makers(data):
plt.figure()
plt.title("Makers")
sns.countplot(x="make", data=data)
plt.xticks(rotation=90)
plt.tight_layout()
plt.show()
return None
def plot_fuel_types(data):
plt.figure()
plt.title("Fuel Types")
sns.countplot(x="fuel_type", data=data)
plt.tight_layout()
plt.show()
return None
def plot_aspiration(data):
plt.figure()
plt.title("Aspiration")
sns.countplot(x="aspiration", data=data)
plt.tight_layout()
plt.show()
return None
def plot_num_of_doors(data):
plt.figure()
plt.title("Num of Doors")
sns.countplot(x="num_of_doors", data=data)
plt.tight_layout()
plt.show()
return None
def plot_body_style(data):
plt.figure()
plt.title("Body Style")
sns.countplot(x="body_style", data=data)
plt.tight_layout()
plt.show()
return None
def plot_drive_wheels(data):
plt.figure()
plt.title("Drive Wheels")
sns.countplot(x="drive_wheels", data=data)
plt.tight_layout()
plt.show()
return None
def plot_engine_location(data):
plt.figure()
plt.title("Engine Location")
sns.countplot(x="engine_location", data=data)
plt.tight_layout()
plt.show()
return None
def plot_wheel_base(data):
plt.figure()
sns.distplot(data.wheel_base, rug=True, kde=True)
plt.title("Wheel Base")
plt.show()
return None
def plot_sizes(data):
f, (ax1, ax2, ax3) = plt.subplots(3, 1)
plt.suptitle("Size")
sns.distplot(data.length, rug=True, kde=True, ax=ax1)
sns.distplot(data.width, rug=True, kde=True, color='g', ax=ax2)
sns.distplot(data.height, rug=True, kde=True, color='m', ax=ax3)
plt.tight_layout()
plt.show()
return None
def plot_curb_weight(data):
plt.figure()
sns.distplot(data.curb_weight, rug=True, kde=True)
plt.title("Curb Weight")
plt.show()
return None
def plot_engine_properties(data):
f, ax = plt.subplots(3, 3, figsize=(12,10))
plt.suptitle("Engine Properties")
sns.countplot(x="engine_type", data=data, ax=ax[0, 0])
sns.countplot(x="num_of_cylinders", data=data, ax=ax[0, 1])
sns.distplot(data.engine_size, rug=True, kde=True, ax=ax[0, 2])
sns.countplot(x="fuel_system", data=data, ax=ax[1, 0])
sns.countplot(x="bore", data=data, ax=ax[1, 1])
plt.setp(ax[1, 1].get_xticklabels(), rotation=90, fontsize=8)
sns.countplot(x="stroke", data=data, ax=ax[1, 2])
plt.setp(ax[1, 2].get_xticklabels(), rotation=90, fontsize=8)
sns.distplot(data.compression_ratio, rug=True, kde=True, ax=ax[2, 0])
sns.countplot(x="horsepower", data=data, ax=ax[2, 1])
plt.setp(ax[2, 1].get_xticklabels(), rotation=90, fontsize=8)
sns.countplot(x="peak_rpm", data=data, ax=ax[2, 2])
plt.setp(ax[2, 2].get_xticklabels(), rotation=90, fontsize=8)
plt.tight_layout()
plt.show()
return None
def plot_mpg_properties(data):
f, ax = plt.subplots(2, 1)
plt.suptitle("MPG")
sns.distplot(data.city_mpg, rug=True, kde=True, ax=ax[0])
sns.distplot(data.highway_mpg, rug=True, kde=True, ax=ax[1])
plt.tight_layout()
plt.show()
return None
def plot_price(data):
plt.figure()
sns.distplot(pd.to_numeric(data.price), rug=True, kde=True)
plt.title("Price")
plt.show()
return None
``` |
{
"source": "jihoonerd/deep-reinforcement-learning-with-double-q-learning",
"score": 2
} |
#### File: ddqn/environment/atari_env.py
```python
import gym
import numpy as np
from ddqn.utils.atari_wrappers import make_atari, wrap_deepmind
class Environment:
def __init__(self, env_id, train=True):
clip_rewards = True if train else False
self.env = wrap_deepmind(make_atari(env_id), clip_rewards=clip_rewards, frame_stack=True)
def reset(self):
reset_state = self.env.reset()
return np.array(reset_state)
def render(self):
return self.env.render(mode='rgb_array')
def step(self, action):
next_state, reward, done, info = self.env.step(action)
return np.array(next_state), reward, done, info
def get_action_space_size(self):
return self.env.action_space.n
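    # Illustrative random-action rollout (assumes a valid Atari env id such as
    # "BreakoutNoFrameskip-v4" is available); not part of the original module.
    # env = Environment("BreakoutNoFrameskip-v4", train=True)
    # state = env.reset()
    # for _ in range(10):
    #     state, reward, done, info = env.step(env.env.action_space.sample())
    #     if done:
    #         state = env.reset()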
``` |
{
"source": "jihoonerd/Proximal-Policy-Optimization",
"score": 3
} |
#### File: Proximal-Policy-Optimization/ppo/memory.py
```python
import numpy as np
class Memory:
def __init__(self, batch_size: int):
self.batch_size = batch_size
self.states = []
self.actions = []
self.probs = []
self.values = []
self.rewards = []
self.dones = []
def store_memory(self, state, action, prob, value, reward, done):
self.states.append(state)
self.actions.append(action)
self.probs.append(prob)
self.values.append(value)
self.rewards.append(reward)
self.dones.append(done)
def clear_memory(self):
self.states.clear()
self.actions.clear()
self.probs.clear()
self.values.clear()
self.rewards.clear()
self.dones.clear()
def generate_batch_index(self):
n_states = len(self.states)
# This is for sampling a part of trajectory. (t_0, t_1, ..., t_{k+1})
start_idx = np.arange(0, n_states, self.batch_size)
idxs = np.arange(n_states, dtype=np.int32)
np.random.shuffle(idxs) # To mitigate correlation
        # slice the shuffled indices into mini-batches; a list comprehension also handles a
        # final batch smaller than batch_size, which np.split would reject
        batches = [idxs[i:i + self.batch_size] for i in start_idx]
return batches
def get_memory(self):
return np.array(self.states), np.array(self.actions), np.array(self.probs), np.array(self.values), np.array(self.rewards), np.array(self.dones),
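# Illustrative usage (hypothetical values): transitions are stored step by step and later
# read back as whole-trajectory arrays plus shuffled mini-batch index groups.
# memory = Memory(batch_size=32)
# memory.store_memory(state, action, prob, value, reward, done)  # once per env step
# states, actions, probs, values, rewards, dones = memory.get_memory()
# for batch in memory.generate_batch_index():
#     pass  # index into the arrays above with `batch`
# memory.clear_memory()  # after each PPO update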
``` |
{
"source": "jihoonerd/Robot-Kinematics",
"score": 2
} |
#### File: src/viz/viz_manager.py
```python
import rospy
from geometry_msgs.msg import Point, Pose, Quaternion, Vector3
from interactive_markers.interactive_marker_server import (
InteractiveMarker, InteractiveMarkerServer)
from robot_kinematics.msg import IKMarker
from std_msgs.msg import ColorRGBA, Header
from visualization_msgs.msg import InteractiveMarkerControl, Marker
from viz import FRAME_ID
class VizManager:
def __init__(self):
self.im_server = None
self.ik_target_pub = None
self.init_im_server()
self.init_ik_target_pub()
self.im = dict()
def init_im_server(self):
self.im_server = InteractiveMarkerServer('im_server')
def init_ik_target_pub(self):
self.ik_target_pub = rospy.Publisher('rk_api/ik_target', IKMarker, queue_size=10)
def add_ik_target(self, name: str):
self.im[name] = InteractiveMarker(header=Header(frame_id=FRAME_ID), name=name, scale=0.25)
box_marker = Marker(
header=Header(frame_id=FRAME_ID, stamp=rospy.Time.now()),
type=Marker.CUBE,
action=Marker.ADD,
pose=Pose(Point(0, 0, 0), Quaternion(0, 0, 0, 1)),
scale=Vector3(0.1, 0.1, 0.1),
color=ColorRGBA(1, 1, 0, 0.5),
lifetime=rospy.Duration()
)
box_control = InteractiveMarkerControl(always_visible=True)
box_control.markers.append(box_marker)
self.im[name].controls.append(box_control)
move_x_control = InteractiveMarkerControl(
orientation=Quaternion(1, 0, 0, 1)
)
move_x_control.name = 'move_x'
move_x_control.interaction_mode = InteractiveMarkerControl.MOVE_AXIS
self.im[name].controls.append(move_x_control)
move_y_control = InteractiveMarkerControl(
orientation=Quaternion(0, 0, 1, 1)
)
move_y_control.name = 'move_y'
move_y_control.interaction_mode = InteractiveMarkerControl.MOVE_AXIS
self.im[name].controls.append(move_y_control)
move_z_control = InteractiveMarkerControl(
orientation=Quaternion(0, 1, 0, 1)
)
move_z_control.name = 'move_z'
move_z_control.interaction_mode = InteractiveMarkerControl.MOVE_AXIS
self.im[name].controls.append(move_z_control)
self.im_server.insert(self.im[name], self.process_feedback)
self.im_server.applyChanges()
def process_feedback(self, feedback):
link_id = feedback.marker_name
x = feedback.pose.position.x
y = feedback.pose.position.y
z = feedback.pose.position.z
target_pos = Vector3(x, y, z)
self.ik_target_pub.publish(IKMarker(link_id, target_pos))
``` |
{
"source": "jihoonerd/rviz-python-tutorial",
"score": 3
} |
#### File: using_markers/src/basic_shapes.py
```python
import rospy
import numpy as np
from std_msgs.msg import Header, ColorRGBA
from geometry_msgs.msg import Quaternion, Pose, Point, Vector3
from visualization_msgs.msg import Marker
def marker():
pub = rospy.Publisher('visualization_marker', Marker, queue_size=10)
rospy.init_node('basic_shapes', anonymous=True)
rate = rospy.Rate(1) # 1Hz
shape = Marker.CUBE
while not rospy.is_shutdown():
marker = Marker(
header=Header(frame_id="my_frame", stamp=rospy.Time.now()),
ns="basic_shapes",
id=0,
type=shape,
action=Marker.ADD,
pose=Pose(Point(np.random.uniform(0, 3), np.random.uniform(0, 3), np.random.uniform(0, 3)),
Quaternion(0, 0, 0, 1)),
scale=Vector3(np.random.uniform(0.5, 3), np.random.uniform(
0.5, 3), np.random.uniform(0.5, 3)),
color=ColorRGBA(np.random.uniform(0, 1), np.random.uniform(
0, 1), np.random.uniform(0, 1), 1),
lifetime=rospy.Duration()
)
while pub.get_num_connections() < 1:
rospy.logwarn("Please create a subscriber to the marker")
rospy.sleep(1)
pub.publish(marker)
if shape is Marker.CUBE:
shape = Marker.SPHERE
elif shape is Marker.SPHERE:
shape = Marker.ARROW
elif shape is Marker.ARROW:
shape = Marker.CYLINDER
else:
shape = Marker.CUBE
rate.sleep()
if __name__ == '__main__':
try:
marker()
except rospy.ROSInterruptException:
pass
``` |
{
"source": "jihoonkang0829/Discrimination_Prediction",
"score": 3
} |
#### File: Discrimination_Prediction/data/data_organizer.py
```python
import os
import pandas as pd
import csv
from xlrd import XLRDError
def quarter_sum(column):
    # Sum integer-like cells; guard the .isdigit() call so non-string, non-int
    # values (e.g. floats or NaN) are skipped instead of raising.
    return sum(int(i) for i in column if isinstance(i, int) or (isinstance(i, str) and i.isdigit()))
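# Illustrative sanity check (not from the original script): integer cells and
# digit strings are summed, everything else is ignored.
assert quarter_sum([3, '7', 'Total']) == 10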
years = []
for entry in os.listdir('.'):
if os.path.isdir(os.path.join('.', entry)):
years.append(entry)
years.sort()
corrupted_files = []
with open('data_format.csv', 'w') as file:
fieldnames = ['Quarter_start_date', 'State', 'Occurrence']
writer = csv.DictWriter(file, fieldnames=fieldnames)
writer.writeheader()
for year in years:
for entry in os.listdir('.'):
if (entry == year):
reports = []
for state_report in os.listdir(entry):
reports.append(state_report)
reports.sort()
for state in reports:
file_path = year + '/' + state
try:
current_report = pd.read_excel(file_path)
except XLRDError:
corrupted_files.append(file_path + "\n")
continue
first_quarter = (quarter_sum(current_report['Unnamed: 8'].dropna().tolist()))
second_quarter = (quarter_sum(current_report['Unnamed: 9'].dropna().tolist()))
third_quarter = (quarter_sum(current_report['Unnamed: 10'].dropna().tolist()))
fourth_quarter = (quarter_sum(current_report['Unnamed: 11'].dropna().tolist()))
state = state[:len(state) - 4].capitalize()
writer.writerow({'Quarter_start_date': year + '.01.01', 'State': state, 'Occurrence': first_quarter})
writer.writerow({'Quarter_start_date': year + '.04.01', 'State': state, 'Occurrence': second_quarter})
writer.writerow({'Quarter_start_date': year + '.07.01', 'State': state, 'Occurrence': third_quarter})
writer.writerow({'Quarter_start_date': year + '.10.01', 'State': state, 'Occurrence': fourth_quarter})
# Open in write mode so the file is created if missing and stale entries are cleared.
with open("corrupted_files_list.txt", 'w') as corrupted_list:
    corrupted_list.writelines(corrupted_files)
```
#### File: Discrimination_Prediction/data/helpers.py
```python
import os, sys
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
from constants import *
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
import torch
from torch.utils.data import TensorDataset, DataLoader, Dataset
from sklearn.model_selection import train_test_split
from typing import Tuple
from newspaper import Article
# Convert Training Data to PyTorch DataLoader
def get_dataloader(x_data,y_data,batch_size=32):
# Convert to Torch Tensors
x = torch.from_numpy(x_data).float()
# y = torch.from_numpy(y_data).long()
y = torch.from_numpy(y_data).float()
# TensorDataset & Loader
dataset = TensorDataset(x,y)
loader = DataLoader(dataset,batch_size=batch_size,shuffle=True,drop_last=True)
return loader
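# Illustrative sketch (not from the original module): wrap random arrays in a
# DataLoader and check that each batch has the requested size. Shapes are
# hypothetical.
def _demo_get_dataloader():
    x = np.random.rand(64, 10).astype(np.float32)
    y = np.random.rand(64, 1).astype(np.float32)
    loader = get_dataloader(x, y, batch_size=16)
    xb, yb = next(iter(loader))
    assert xb.shape == (16, 10) and yb.shape == (16, 1)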
# Data processing/parsing helpers
def get_state_code(data: str) -> int:
"""
Convert web-scraped state data to state code.
Params
------
data: str
state name (i.e. 'Illinois', 'IL') to be converted.
Returns
-------
int:
converted state code
"""
data_lower = data.lower()
if not data_lower in US_STATE_CODE_DICT:
return -1
return US_STATE_CODE_DICT[data_lower]
def preprocess(data: pd.DataFrame) -> pd.DataFrame:
"""
Converts Quarter_start_date to datetime
Remove underscore from state and converts to lower case state
Params
------
data: pd.dataFrame
dataframe to be preprocessed
Returns
-------
data: pd.dataFrame
processed dataframe
"""
# Remove period
    data['Quarter_start_date'] = data['Quarter_start_date'].str.replace('.', '', regex=False)
# Convert to date_time
data['Quarter_start_date'] = pd.to_datetime(data['Quarter_start_date'], format='%Y%m%d')
# Replace underscore with a space
data['State'] = data['State'].str.replace('_', ' ')
    # Convert to lowercase
data['State'] = data['State'].str.lower()
return data
# Model train helpers
def convert_data_to_matrix(data: pd.DataFrame, weight_casualty: bool = False, **kwargs) -> Tuple[np.array, np.array]:
"""
Convert csv data to matrix of shape(D, US_NUM_STATES),
where D = latest date - earliest date + 1 in days.
Params
------
data: pd.DataFrame
input data from csv file
weight_casualty: bool
boolean option to weight matrix by casualty of the event or not. Default is False.
**kwargs: keyword arguments
group_data: bool
boolean option to group the data by certain period of days or not
group_days: int
if group_data, number of days to group the data by
Returns
-------
np.array:
matrix of shape (D, US_NUM_STATES)
np.array:
1D array of shape (D, ). Contains date that each rows of ret_matrix represents.
"""
# data = preprocess(data)
data['Date'] = pd.to_datetime(data['Date'])
data = data.sort_values('Date')
# If data type is pd.Timestamp, convert to datetime.datetime
if isinstance(data.iloc[0, 0], pd.Timestamp):
data['Date'] = data['Date'].dt.to_pydatetime()
start_date, end_date = data.iloc[0, 0], data.iloc[-1, 0]
D = (end_date - start_date).days + 1
days_delta = 1
# If kwargs group_data and group_days are True and valid
if 'group_data' in kwargs.keys() and kwargs['group_data'] and \
'group_days' in kwargs.keys() and kwargs['group_days'] > 0:
days_delta = kwargs['group_days']
D = int(np.ceil(D / days_delta))
ret_matrix = np.zeros((D, US_NUM_STATES), dtype = int)
date_array = np.array([start_date + timedelta(days = i * days_delta) for i in range(D)])
for i in range(len(data)):
idx = (data.iloc[i, 0] - start_date).days // days_delta
state = get_state_code(data.iloc[i, 1])
weight = 1
if weight_casualty:
weight = data.iloc[i, 2] if data.iloc[i, 2] > 0 else 0
ret_matrix[idx, state] += weight
return ret_matrix, date_array
def get_x_y(matrix: np.array, lookback: int = 10) -> Tuple[np.array, np.array]:
'''
Generate features and label sets from data matrix
Params
------
matrix: np.array
        Data matrix containing daily report of occurrence
lookback: Integer
Number of previous days to consider in deciding output
Returns
-------
(x,y): Tuple(np.array, np.array)
Feature and label set given lookback date
'''
x,y=[],[]
N = len(matrix)
for i in range(N-lookback):
x.append(matrix[i:i+lookback])
y.append(matrix[i+lookback])
return np.array(x),np.array(y)
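# Illustrative sketch (not from the original module): with a 5-step matrix and
# lookback=2 there are three supervised samples; each x stacks two consecutive
# rows and y is the row that follows them. Sizes are hypothetical.
def _demo_get_x_y():
    m = np.arange(10).reshape(5, 2)
    x, y = get_x_y(m, lookback=2)
    assert x.shape == (3, 2, 2) and y.shape == (3, 2)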
def split_data(x: np.array, y: np.array, test_size=0.25, random_state=32, batch=32):
range_ = range(len(y))
train_idx,_,_,_ = train_test_split(range_, y,test_size=test_size,random_state=random_state)
train_idx,test_idx,_,_ = train_test_split(train_idx, y[train_idx],test_size=test_size,random_state=random_state)
train_data = get_dataloader(x[train_idx],y[train_idx], batch)
test_data = get_dataloader(x[test_idx],y[test_idx], batch)
return train_data, test_data
# Data collecting helpers
def get_article_location(text: str) -> str:
"""
    From the input article text, return the name of the most frequently mentioned US state.
Params
------
text: str
article text to find the state
Returns
-------
str:
state name. Returns None if no state was detected.
"""
text = text.lower()
state_freq_array = np.zeros(US_NUM_STATES, dtype = int)
for i in range(50):
count = text.count(US_STATE_NAMES[i])
state_freq_array[i] = count
if np.max(state_freq_array) == 0:
return None
return US_STATE_NAMES[np.argmax(state_freq_array)]
def get_article_text(url: str) -> str:
"""
    From the article URL, download and parse the page, returning the article text.
Params
------
url: str
article url string to retrieve the text
Returns
-------
str:
article text
"""
article = Article(url)
article.download()
article.parse()
return article.text
def is_valid_article(date : datetime, state : str, date_start : datetime, date_end : datetime) -> bool:
"""
    Determines if the metadata retrieved from the article is valid.
Params
------
date: datetime.datetime
Published datetime of the article
state: str
detected state of the incident in the article
date_start: datetime.datetime
article search beginning timeframe
date_end: datetime.datetime
article search ending timeframe
Returns
-------
bool:
boolean value determining whether the article is valid or not
"""
return isinstance(state, str) and date >= date_start and date <= date_end
def format_datetime(dt: datetime) -> datetime:
"""
Helper function to format datetime to truncate time.
Params
------
dt: datetime.datetime
datetime object to truncate time
Returns
-------
datetime.datetime:
time-truncated datetime object
"""
return datetime(dt.year, dt.month, dt.day)
# Model train helpers
def get_pred(outs):
return torch.round(outs)
def calc_accuracy(pred, y):
    # Relative aggregate accuracy: 1 - |sum(pred - y)| / |sum(y)|.
    # Signed per-element errors can cancel, so this is a coarse measure.
    y_sum = np.abs(torch.sum(y))
    diff_sum = np.abs(torch.sum(torch.sub(pred, y)))
    return 1 if y_sum == 0 else 1 - diff_sum / y_sum
def calc_strict_accuracy(pred, y):
total = y.flatten().size(0)
correct = (pred.flatten() == y.flatten()).sum().item()
return correct / total
if __name__ == '__main__':
    df = pd.read_csv('./data_format.csv')
    # convert_data_to_matrix returns (matrix, date_array); unpack before windowing.
    matrix, dates = convert_data_to_matrix(df, True)
    x, y = get_x_y(matrix, 40)
    train, test = split_data(x, y)
``` |
{
"source": "jihoonog/co_sim_platform",
"score": 3
} |
#### File: opendssdirect3.7/opendssdirect/Isource.py
```python
from __future__ import absolute_import
from ._utils import lib, get_string, get_string_array
from ._utils import codec
def AllNames():
"""(read-only) Array of strings containing names of all ISOURCE elements."""
return get_string_array(lib.ISources_Get_AllNames)
def Amps(*args):
"""Magnitude of the ISOURCE in amps"""
# Getter
if len(args) == 0:
return lib.ISources_Get_Amps()
# Setter
Value, = args
lib.ISources_Set_Amps(Value)
def AngleDeg(*args):
"""Phase angle for ISOURCE, degrees"""
# Getter
if len(args) == 0:
return lib.ISources_Get_AngleDeg()
# Setter
Value, = args
lib.ISources_Set_AngleDeg(Value)
def Count():
"""(read-only) Count: Number of ISOURCE elements."""
return lib.ISources_Get_Count()
def First():
"""(read-only) Set the First ISOURCE to be active; returns Zero if none."""
return lib.ISources_Get_First()
def Frequency(*args):
"""The present frequency of the ISOURCE, Hz"""
# Getter
if len(args) == 0:
return lib.ISources_Get_Frequency()
# Setter
Value, = args
lib.ISources_Set_Frequency(Value)
def Name(*args):
"""
(read) Get name of active ISOURCE
(write) Set Active ISOURCE by name
"""
# Getter
if len(args) == 0:
return get_string(lib.ISources_Get_Name())
# Setter
Value, = args
if type(Value) is not bytes:
Value = Value.encode(codec)
lib.ISources_Set_Name(Value)
def Next():
"""(read-only) Sets the next ISOURCE element to be the active one. Returns Zero if no more."""
return lib.ISources_Get_Next()
_columns = ["Amps", "AngleDeg", "Frequency", "Name"]
__all__ = [
"AllNames",
"Amps",
"AngleDeg",
"Count",
"First",
"Frequency",
"Name",
"Next",
]
```
#### File: opendssdirect3.7/opendssdirect/LoadShape.py
```python
from __future__ import absolute_import
from ._utils import (
lib,
get_string,
get_string_array,
get_float64_array,
prepare_float64_array,
)
from ._utils import codec
def New(Name):
if type(Name) is not bytes:
Name = Name.encode(codec)
return lib.LoadShapes_New(Name)
def Normalize():
lib.LoadShapes_Normalize()
def AllNames():
"""(read-only) Array of strings containing names of all Loadshape objects currently defined."""
return get_string_array(lib.LoadShapes_Get_AllNames)
def Count():
"""(read-only) Number of Loadshape objects currently defined in Loadshape collection"""
return lib.LoadShapes_Get_Count()
def First():
"""(read-only) Set the first loadshape active and return integer index of the loadshape. Returns 0 if none."""
return lib.LoadShapes_Get_First()
def HrInterval(*args):
"""Fixed interval time value, hours."""
# Getter
if len(args) == 0:
return lib.LoadShapes_Get_HrInterval()
# Setter
Value, = args
lib.LoadShapes_Set_HrInterval(Value)
def MinInterval(*args):
"""Fixed Interval time value, in minutes"""
# Getter
if len(args) == 0:
return lib.LoadShapes_Get_MinInterval()
# Setter
Value, = args
lib.LoadShapes_Set_MinInterval(Value)
def Name(*args):
"""
(read) Get the Name of the active Loadshape
(write) Set the active Loadshape by name
"""
# Getter
if len(args) == 0:
return get_string(lib.LoadShapes_Get_Name())
# Setter
Value, = args
if type(Value) is not bytes:
Value = Value.encode(codec)
lib.LoadShapes_Set_Name(Value)
def Next():
"""(read-only) Advance active Loadshape to the next on in the collection. Returns 0 if no more loadshapes."""
return lib.LoadShapes_Get_Next()
def Npts(*args):
"""
(read) Get Number of points in active Loadshape.
(write) Set number of points to allocate for active Loadshape.
"""
# Getter
if len(args) == 0:
return lib.LoadShapes_Get_Npts()
# Setter
Value, = args
lib.LoadShapes_Set_Npts(Value)
def PBase(*args):
# Getter
if len(args) == 0:
return lib.LoadShapes_Get_PBase()
# Setter
Value, = args
lib.LoadShapes_Set_PBase(Value)
def PMult(*args):
"""
(read) Array of Doubles for the P multiplier in the Loadshape.
(write) Array of doubles containing the P array for the Loadshape.
"""
# Getter
if len(args) == 0:
return get_float64_array(lib.LoadShapes_Get_Pmult)
# Setter
Value, = args
Value, ValuePtr, ValueCount = prepare_float64_array(Value)
lib.LoadShapes_Set_Pmult(ValuePtr, ValueCount)
def QBase(*args):
"""Base for normalizing Q curve. If left at zero, the peak value is used."""
# Getter
if len(args) == 0:
return lib.LoadShapes_Get_Qbase()
# Setter
Value, = args
lib.LoadShapes_Set_Qbase(Value)
def QMult(*args):
"""Array of doubles containing the Q multipliers."""
# Getter
if len(args) == 0:
return get_float64_array(lib.LoadShapes_Get_Qmult)
# Setter
Value, = args
Value, ValuePtr, ValueCount = prepare_float64_array(Value)
lib.LoadShapes_Set_Qmult(ValuePtr, ValueCount)
def TimeArray(*args):
"""Time array in hours correscponding to P and Q multipliers when the Interval=0."""
# Getter
if len(args) == 0:
return get_float64_array(lib.LoadShapes_Get_TimeArray)
# Setter
Value, = args
Value, ValuePtr, ValueCount = prepare_float64_array(Value)
lib.LoadShapes_Set_TimeArray(ValuePtr, ValueCount)
def UseActual(*args):
"""T/F flag to let Loads know to use the actual value in the curve rather than use the value as a multiplier."""
# Getter
if len(args) == 0:
return lib.LoadShapes_Get_UseActual() != 0
# Setter
Value, = args
lib.LoadShapes_Set_UseActual(Value)
def SInterval(*args):
# Getter
if len(args) == 0:
return lib.LoadShapes_Get_sInterval()
# Setter
Value, = args
lib.LoadShapes_Set_Sinterval(Value)
_columns = [
"HrInterval",
"MinInterval",
"Name",
"Npts",
"PBase",
"PMult",
"QBase",
"QMult",
"TimeArray",
"UseActual",
"SInterval",
]
__all__ = [
"New",
"Normalize",
"AllNames",
"Count",
"First",
"HrInterval",
"MinInterval",
"Name",
"Next",
"Npts",
"PBase",
"PMult",
"QBase",
"QMult",
"TimeArray",
"UseActual",
"SInterval",
]
```
#### File: opendssdirect3.7/opendssdirect/PDElements.py
```python
from __future__ import absolute_import
from ._utils import lib, get_string
from ._utils import codec
def AccumulatedL():
"""(read-only) accummulated failure rate for this branch on downline"""
return lib.PDElements_Get_AccumulatedL()
def Count():
"""(read-only) Number of PD elements (including disabled elements)"""
return lib.PDElements_Get_Count()
def FaultRate(*args):
"""Get/Set Number of failures per year. For LINE elements: Number of failures per unit length per year. """
# Getter
if len(args) == 0:
return lib.PDElements_Get_FaultRate()
# Setter
Value, = args
lib.PDElements_Set_FaultRate(Value)
def First():
"""(read-only) Set the first enabled PD element to be the active element. Returns 0 if none found."""
return lib.PDElements_Get_First()
def FromTerminal():
"""(read-only) Number of the terminal of active PD element that is on the "from" side. This is set after the meter zone is determined."""
return lib.PDElements_Get_FromTerminal()
def IsShunt():
"""(read-only) Variant boolean indicating of PD element should be treated as a shunt element rather than a series element. Applies to Capacitor and Reactor elements in particular."""
return lib.PDElements_Get_IsShunt() != 0
def Lambda():
"""(read-only) Failure rate for this branch. Faults per year including length of line."""
return lib.PDElements_Get_Lambda()
def Name(*args):
"""Get/Set name of active PD Element. Returns null string if active element is not PDElement type."""
# Getter
if len(args) == 0:
return get_string(lib.PDElements_Get_Name())
# Setter
Value, = args
if type(Value) is not bytes:
Value = Value.encode(codec)
lib.PDElements_Set_Name(Value)
def Next():
"""(read-only) Advance to the next PD element in the circuit. Enabled elements only. Returns 0 when no more elements."""
return lib.PDElements_Get_Next()
def NumCustomers():
"""(read-only) Number of customers, this branch"""
return lib.PDElements_Get_Numcustomers()
def ParentPDElement():
"""(read-only) Sets the parent PD element to be the active circuit element. Returns 0 if no more elements upline."""
return lib.PDElements_Get_ParentPDElement()
def RepairTime(*args):
"""Average repair time for this element in hours"""
# Getter
if len(args) == 0:
return lib.PDElements_Get_RepairTime()
# Setter
Value, = args
lib.PDElements_Set_RepairTime(Value)
def SectionID():
"""(read-only) Integer ID of the feeder section that this PDElement branch is part of"""
return lib.PDElements_Get_SectionID()
def TotalMiles():
"""(read-only) Total miles of line from this element to the end of the zone. For recloser siting algorithm."""
return lib.PDElements_Get_TotalMiles()
def TotalCustomers():
"""(read-only) Total number of customers from this branch to the end of the zone"""
return lib.PDElements_Get_Totalcustomers()
def PctPermanent(*args):
"""Get/Set percent of faults that are permanent (require repair). Otherwise, fault is assumed to be transient/temporary."""
# Getter
if len(args) == 0:
return lib.PDElements_Get_pctPermanent()
# Setter
Value, = args
lib.PDElements_Set_pctPermanent(Value)
_columns = [
"AccumulatedL",
"FaultRate",
"FromTerminal",
"IsShunt",
"Lambda",
"Name",
"NumCustomers",
"ParentPDElement",
"RepairTime",
"SectionID",
"TotalMiles",
"TotalCustomers",
"PctPermanent",
]
__all__ = [
"AccumulatedL",
"Count",
"FaultRate",
"First",
"FromTerminal",
"IsShunt",
"Lambda",
"Name",
"Next",
"NumCustomers",
"ParentPDElement",
"RepairTime",
"SectionID",
"TotalMiles",
"TotalCustomers",
"PctPermanent",
]
```
#### File: opendssdirect3.7/opendssdirect/Text.py
```python
from __future__ import absolute_import
from ._utils import lib, get_string, CheckForError
from ._utils import codec
def Command(*args):
"""Input command string for the DSS."""
# Getter
if len(args) == 0:
return get_string(lib.Text_Get_Command())
# Setter
Value, = args
if type(Value) is not bytes:
Value = Value.encode(codec)
lib.Text_Set_Command(Value)
CheckForError()
def Result():
"""(read-only) Result string for the last command."""
return get_string(lib.Text_Get_Result())
_columns = ["Command", "Result"]
__all__ = ["Command", "Result"]
```
#### File: opendssdirect3.7/opendssdirect/Vsources.py
```python
from __future__ import absolute_import
from ._utils import lib, get_string, get_string_array
from ._utils import codec
def AllNames():
"""(read-only) Names of all Vsource objects in the circuit"""
return get_string_array(lib.Vsources_Get_AllNames)
def AngleDeg(*args):
"""
(read) Phase angle of first phase in degrees
(write) phase angle in degrees
"""
# Getter
if len(args) == 0:
return lib.Vsources_Get_AngleDeg()
# Setter
Value, = args
lib.Vsources_Set_AngleDeg(Value)
def BasekV(*args):
"""Source voltage in kV"""
# Getter
if len(args) == 0:
return lib.Vsources_Get_BasekV()
# Setter
Value, = args
lib.Vsources_Set_BasekV(Value)
def Count():
"""(read-only) Number of Vsource Object"""
return lib.Vsources_Get_Count()
def First():
"""(read-only) Sets the first VSOURCE to be active; Returns 0 if none"""
return lib.Vsources_Get_First()
def Frequency(*args):
"""Source frequency in Hz"""
# Getter
if len(args) == 0:
return lib.Vsources_Get_Frequency()
# Setter
Value, = args
lib.Vsources_Set_Frequency(Value)
def Name(*args):
"""
(read) Get Active VSOURCE name
(write) Set Active VSOURCE by Name
"""
# Getter
if len(args) == 0:
return get_string(lib.Vsources_Get_Name())
# Setter
Value, = args
if type(Value) is not bytes:
Value = Value.encode(codec)
lib.Vsources_Set_Name(Value)
def Next():
"""(read-only) Sets the next VSOURCE object to be active; returns zero if no more"""
return lib.Vsources_Get_Next()
def Phases(*args):
"""Number of phases"""
# Getter
if len(args) == 0:
return lib.Vsources_Get_Phases()
# Setter
Value, = args
lib.Vsources_Set_Phases(Value)
def PU(*args):
"""
(read) Source pu voltage.
(write) Per-unit value of source voltage based on kV
"""
# Getter
if len(args) == 0:
return lib.Vsources_Get_pu()
# Setter
Value, = args
lib.Vsources_Set_pu(Value)
_columns = ["AngleDeg", "BasekV", "Frequency", "Name", "Phases", "PU"]
__all__ = [
"AllNames",
"AngleDeg",
"BasekV",
"Count",
"First",
"Frequency",
"Name",
"Next",
"Phases",
"PU",
]
```
#### File: opendssdirect3.7/opendssdirect/XYCurves.py
```python
from __future__ import absolute_import
from ._utils import lib, get_string, get_float64_array, prepare_float64_array
from ._utils import codec
def Count():
"""(read-only) Number of XYCurve Objects"""
return lib.XYCurves_Get_Count()
def First():
"""(read-only) Sets first XYcurve object active; returns 0 if none."""
return lib.XYCurves_Get_First()
def Name(*args):
"""
(read) Name of active XYCurve Object
    (write) Set Name of active XYCurve Object
"""
# Getter
if len(args) == 0:
return get_string(lib.XYCurves_Get_Name())
# Setter
Value, = args
if type(Value) is not bytes:
Value = Value.encode(codec)
lib.XYCurves_Set_Name(Value)
def Next():
"""(read-only) Advances to next XYCurve object; returns 0 if no more objects of this class"""
return lib.XYCurves_Get_Next()
def Npts(*args):
"""Get/Set Number of points in X-Y curve"""
# Getter
if len(args) == 0:
return lib.XYCurves_Get_Npts()
# Setter
Value, = args
lib.XYCurves_Set_Npts(Value)
def XArray(*args):
"""Get/Set X values as a Array of doubles. Set Npts to max number expected if setting"""
# Getter
if len(args) == 0:
return get_float64_array(lib.XYCurves_Get_Xarray)
# Setter
Value, = args
Value, ValuePtr, ValueCount = prepare_float64_array(Value)
lib.XYCurves_Set_Xarray(ValuePtr, ValueCount)
def XScale(*args):
"""Factor to scale X values from original curve"""
# Getter
if len(args) == 0:
return lib.XYCurves_Get_Xscale()
# Setter
Value, = args
lib.XYCurves_Set_Xscale(Value)
def XShift(*args):
"""Amount to shift X value from original curve"""
# Getter
if len(args) == 0:
return lib.XYCurves_Get_Xshift()
# Setter
Value, = args
lib.XYCurves_Set_Xshift(Value)
def YArray(*args):
"""Get/Set Y values in curve; Set Npts to max number expected if setting"""
# Getter
if len(args) == 0:
return get_float64_array(lib.XYCurves_Get_Yarray)
# Setter
Value, = args
Value, ValuePtr, ValueCount = prepare_float64_array(Value)
lib.XYCurves_Set_Yarray(ValuePtr, ValueCount)
def YScale(*args):
"""
(read) Factor to scale Y values from original curve
(write) Amount to scale Y values from original curve. Represents a curve shift.
"""
# Getter
if len(args) == 0:
return lib.XYCurves_Get_Yscale()
# Setter
Value, = args
lib.XYCurves_Set_Yscale(Value)
def YShift(*args):
"""amount to shift Y valiue from original curve"""
# Getter
if len(args) == 0:
return lib.XYCurves_Get_Yshift()
# Setter
Value, = args
lib.XYCurves_Set_Yshift(Value)
def X(*args):
"""Set X value or get interpolated value after setting Y"""
# Getter
if len(args) == 0:
return lib.XYCurves_Get_x()
# Setter
Value, = args
lib.XYCurves_Set_x(Value)
def Y(*args):
"""
(read) Y value for present X or set this value then get corresponding X
(write) Set Y value or get interpolated Y value after setting X
"""
# Getter
if len(args) == 0:
return lib.XYCurves_Get_y()
# Setter
Value, = args
lib.XYCurves_Set_y(Value)
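# Illustrative usage sketch (not part of the generated module), based on the
# X()/Y() docstrings above: select a curve, write X, then read the interpolated
# Y. The curve name is hypothetical.
def _demo_xycurve_interpolation():
    Name("my_curve")     # setter form: make the curve active
    X(0.5)               # set the query point
    return Y()           # getter form: interpolated Y at X = 0.5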
_columns = [
"Name",
"Npts",
"XArray",
"XScale",
"XShift",
"YArray",
"YScale",
"YShift",
"X",
"Y",
]
__all__ = [
"Count",
"First",
"Name",
"Next",
"Npts",
"XArray",
"XScale",
"XShift",
"YArray",
"YScale",
"YShift",
"X",
"Y",
]
```
#### File: TapControl/tapcontrol/simulator_demo.py
```python
import os
import sys
import csv
import mosaik
import argparse
from datetime import datetime
from pathlib import Path
#--- Base Directory
BASE_DIR = os.getcwd()
BASE_DIR = str((Path(BASE_DIR)).parent) + "/"
#--- OpenDSS warp scripts directory
DSS_EXE_PATH = BASE_DIR + 'TapControl/tapcontrol/'
#--- Path relative to OpenDSS scripts directory
TOPO_RPATH_FILE = 'data/IEEE13Nodeckt.dss'
NWL_RPATH_FILE = 'data/IEEE13Nodeckt_NodeWithLoad.csv'
ILPQ_RPATH_FILE = 'data/IEEE13Nodeckt_InelasticLoadPQ.csv'
ACTS_RPATH_FILE = 'data/IEEE13Nodeckt_Actives_Tap.csv'
#--- NS3 executables and library directory
NS3_EXE_PATH = BASE_DIR + 'NS3Mosaik'
NS3_LIB_PATH = BASE_DIR + 'ns-allinone-3.33/ns-3.33/build/lib'
#--- Paths relative to NS3 exec program directory
ADJMAT_RPATH_FILE = DSS_EXE_PATH + 'data/IEEE13Node-adjacency_matrix.txt'
COORDS_RPATH_FILE = DSS_EXE_PATH + 'data/IEEE13Node_BusXY.csv'
APPCON_RPATH_FILE = DSS_EXE_PATH + 'data/IEEE13Node_AppConnections_Tap.csv'
#--- Application config path
APPCON_FILE = DSS_EXE_PATH + 'data/IEEE13Node_AppConnections_Tap.csv'
ACTS_FILE = DSS_EXE_PATH + ACTS_RPATH_FILE
#--- Simulators configuration
SIM_CONFIG = {
'Collector': {
'python': 'simulator_collector:Collector',
},
'ControlSim': {
'python': 'simulator_controltap:ControlSim',
},
'PFlowSim': {
'python': 'simulator_pflow:PFlowSim',
},
'PktNetSim': {
'cmd': NS3_EXE_PATH + '/NS3MosaikSim %(addr)s',
'cwd': Path( os.path.abspath( os.path.dirname( NS3_EXE_PATH ) ) ),
'env': {
'LD_LIBRARY_PATH': NS3_LIB_PATH,
'NS_LOG': "SmartgridNs3Main=all",
}
},
}
#--- Simulation total time
END_TIME = 10000 # 10 secs
#--- Application connection links
appconLinks = {}
#--- Sensors and actuators parameters
global_step_size = 100
actParams = {}
#--- Mosaik Configuration
MOSAIK_CONFIG = {
'execution_graph': False,
'sim_progress': None,
'start_timeout': 600, # seconds
'stop_timeout' : 10, # seconds
}
#--- Load application connections
def readAppConnections(appcon_file):
global appconLinks
current_directory = os.path.dirname(os.path.realpath(__file__))
pathToFile = os.path.abspath(
os.path.join(current_directory, appcon_file)
)
if not os.path.isfile(pathToFile):
print('File LoadsPerNode does not exist: ' + pathToFile)
sys.exit()
else:
with open(pathToFile, 'r') as csvFile:
csvobj = csv.reader(csvFile)
appconLinks = {(rows[0], rows[1], rows[2]) for rows in csvobj}
csvFile.close()
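# Illustrative note (not part of the original script): readAppConnections()
# consumes each CSV row as (client, server, role), where role is either
# 'sensing' or 'acting'. The node ids below are hypothetical:
#
#   611,650,sensing
#   650,611,acting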
def main():
#--- Process input arguments
parser = argparse.ArgumentParser(description='Run Smartgrid simulation')
parser.add_argument( '--appcon_file', type=str, help='application connections file', default = APPCON_FILE )
parser.add_argument( '--random_seed', type=int, help='ns-3 random generator seed', default=1 )
args = parser.parse_args()
print( 'Starting simulation with args: {0}'.format( vars( args ) ) )
readAppConnections(args.appcon_file)
    #readActives(ACTS_FILE) -- not necessary at the moment
world = mosaik.World( sim_config=SIM_CONFIG, mosaik_config=MOSAIK_CONFIG, debug=False )
create_scenario( world, args )
world.run( until=END_TIME )
def create_scenario( world, args ):
#---
#--- Simulators configuration
#---
pflowsim = world.start('PFlowSim',
topofile = DSS_EXE_PATH + TOPO_RPATH_FILE,
nwlfile = DSS_EXE_PATH + NWL_RPATH_FILE,
ilpqfile = DSS_EXE_PATH + ILPQ_RPATH_FILE,
actsfile = DSS_EXE_PATH + ACTS_RPATH_FILE,
step_size = global_step_size,
loadgen_interval = 80,
verbose = 0)
pktnetsim = world.start( 'PktNetSim',
model_name = 'TransporterModel',
eid_prefix = 'Transp_',
adjmat_file = ADJMAT_RPATH_FILE,
coords_file = COORDS_RPATH_FILE,
appcon_file = APPCON_RPATH_FILE,
linkRate = "512Kbps",
linkDelay = "15ms",
linkErrorRate = "0.0001",
start_time = 0,
stop_time = END_TIME,
random_seed = args.random_seed,
verbose = 0,
tcpOrUdp = "tcp", # transport layer protocols: tcp/udp (udp only for single client)
network = "P2P" # network architecture: P2P/CSMA/P2Pv6/CSMAv6 (supported architectures)
)
controlsim = world.start('ControlSim',
eid_prefix='Control_',
control_delay = 1,
verbose = 0)
collector = world.start('Collector',
eid_prefix='Collector_',
verbose = 0,
out_list = False,
h5_save = True,
h5_panelname = 'Collector',
h5_storename='CollectorStore.hd5')
#---
#--- Simulators Instances configuration
#---
#--- Sensor instances
sensors = []
for client, server, role in appconLinks:
if (role == 'sensing'):
created_sensor = False
for sensor in sensors:
sensor_instance = 'Sensor_' + str(client)
if (sensor_instance == sensor.eid):
created_sensor = True
if not created_sensor:
sensors.append(pflowsim.Sensor(idt = client, step_size = global_step_size, verbose = 0))
#--- Controller instances for tap control
controllers = []
for client, server, role in appconLinks:
if (role == 'acting'):
created_control = False
for controller in controllers:
controller_instance = 'Control_' + str(client)
if (controller_instance == controller.eid):
created_control = True
if not created_control:
controllers.append(controlsim.RangeControl(idt=client, vset=2178, bw=13.6125, tdelay=60))
#--- Transporter instances (Pktnet)
transporters = []
for client, server, role in appconLinks:
created_transporter = False
for transporter in transporters:
transporter_instance = 'Transp_' + str(client) + '-' + str(server)
if (transporter_instance == transporter.eid):
created_transporter = True
if not created_transporter:
transporters.append(pktnetsim.Transporter(src=client, dst=server))
#--- Actuator instances
actuators = []
for client, server, role in appconLinks:
if (role == 'acting'):
created_actuator = False
for actuator in actuators:
actuator_instance = 'Actuator_' + str(server)
if (actuator_instance == actuator.eid):
created_actuator = True
if not created_actuator:
actuators.append(pflowsim.Actuator(idt=server, step_size=global_step_size, verbose=0))
#--- Monitor instances
monitor = collector.Monitor()
#--- Prober instance
probers = []
probers.append(pflowsim.Prober(idt = "611-V3", step_size = global_step_size, verbose = 0))
probers.append(pflowsim.Prober(idt = "650-T3", step_size = global_step_size, verbose = 0))
probers.append(pflowsim.Prober(idt = "611-Load", step_size = global_step_size, verbose = 0))
probers.append(pflowsim.Prober(idt = "650-VPu3", step_size = global_step_size, verbose = 0))
#---
#--- Simulators interconnections
#---
#--- Sensor to PktNet(Transporter)
for client, server, role in appconLinks:
if (role == 'sensing'):
sensor_instance = 'Sensor_' + str(client)
transporter_instance = 'Transp_' + str(client) + '-' + str(server)
for sensor in sensors:
if (sensor_instance == sensor.eid):
for transporter in transporters:
if (transporter_instance == transporter.eid):
world.connect(sensor, transporter, 'v', 't',
weak=True, initial_data={'v': None, 't': None})
print('Connect', sensor.eid, 'to', transporter.eid)
#--- PktNet(Transporter) to Controller
for client, server, role in appconLinks:
if (role == 'sensing'):
controller_instance = 'Control_' + str(server)
transporter_instance = 'Transp_' + str(client) + '-' + str(server)
for controller in controllers:
if (controller_instance == controller.eid):
for transporter in transporters:
if (transporter_instance == transporter.eid):
world.connect(transporter, controller, 'v', 't')
print('Connect', transporter.eid, 'to', controller.eid)
#--- Sensor to Controller
# for client, server, role in appconLinks:
# if (role == 'sensing'):
# for sensor in sensors:
# sensor_instance = 'Sensor_' + str(client)
# if (sensor_instance == sensor.eid):
# for controller in controllers:
# controller_instance = 'Control_' + str(server)
# if (controller_instance == controller.eid):
# world.connect(sensor, controller, 'v', 't')
# print('Connect', sensor.eid, 'to', controller.eid)
#--- Controller to PktNet
for client, server, role in appconLinks:
if (role == 'acting'):
controller_instance = 'Control_' + str(client)
transporter_instance = 'Transp_' + str(client) + '-' + str(server)
for controller in controllers:
if (controller_instance == controller.eid):
for transporter in transporters:
if (transporter_instance == transporter.eid):
world.connect(controller, transporter, 'v', 't',
weak=True, initial_data={'v': None, 't': None})
print('Connect', controller.eid, 'to', transporter.eid)
#--- PktNet(Transporter) to Actuator
for client, server, role in appconLinks:
if (role == 'acting'):
actuator_instance = 'Actuator_' + str(server)
transporter_instance = 'Transp_' + str(client) + '-' + str(server)
for actuator in actuators:
if (actuator_instance == actuator.eid):
for transporter in transporters:
if (transporter_instance == transporter.eid):
world.connect(transporter, actuator, 'v', 't')
print('Connect', transporter.eid, 'to', actuator.eid)
#--- Controller to Actuator
# for client, server, role in appconLinks:
# if (role == 'acting'):
# for controller in controllers:
# controller_instance = 'Control_' + str(client)
# if (controller_instance == controller.eid):
# for actuator in actuators:
# actuator_instance = 'Actuator_' + str(server)
# if (actuator_instance == actuator.eid):
# print('Connect', controller.eid, 'to', actuator.eid)
# world.connect(controller, actuator, 'v', 't',
# time_shifted=True, initial_data={'v': None, 't': None})
#---
#--- Simulators to Monitor
#---
#--- Sensor to Monitor
mosaik.util.connect_many_to_one(world, sensors, monitor, 'v', 't')
for sensor in sensors:
print('Connect', sensor.eid, 'to', monitor.sid)
#--- PktNet(Transporter) to Monitor
# mosaik.util.connect_many_to_one(world, transporters, monitor, 'v', 't')
# for transporter in transporters:
# print('Connect', transporter.eid, 'to', monitor.sid)
#--- Controller to Monitor
mosaik.util.connect_many_to_one(world, controllers, monitor, 'v', 't')
for controller in controllers:
print('Connect', controller.eid, 'to', monitor.sid)
#--- Actuator to Monitor
mosaik.util.connect_many_to_one(world, actuators, monitor, 'v', 't')
for actuator in actuators:
print('Connect', actuator.eid, 'to', monitor.sid)
#--- Prober to Monitor
mosaik.util.connect_many_to_one(world, probers, monitor, 'v', 't')
for prober in probers:
print('Connect', prober.eid, 'to', monitor.sid)
if __name__ == '__main__':
sim_start_time = datetime.now()
# Run the simulation.
main()
delta_sim_time = datetime.now() - sim_start_time
print( 'simulation took {} seconds'.format( delta_sim_time.total_seconds() ) )
```
#### File: TapControl/tapcontrol/simulator_scope.py
```python
import pandas as pd
import sys
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
#--- select dataset file
if len(sys.argv) > 1:
storename = sys.argv[1]
else:
storename = 'CollectorStore.hd5'
#--- Load data
store = pd.HDFStore(storename)
df = store['Collector']
store.close()
#--- select sets
df_sets = []
df_names = []
max_t = 0
min_t = 100000000
for col in df:
df_sets.append(df[col])
df_names.append(col)
if (max_t < max(df[col]['t'])): max_t = max(df[col]['t'])
if (min_t > min(df[col]['t'])): min_t = min(df[col]['t'])
fig, axs = plt.subplots(len(df_sets)+1)
plt.tight_layout()
for i in range(len(df_sets)):
axs[i].plot(df_sets[i]['t'], df_sets[i]['v'], '.')
axs[i].set_xlim(-5, max_t+5)
axs[i].set_title(df_names[i], loc='center')
axs[i].grid(b=True, which='both', axis='both')
axs[len(df_sets)].axis('off')
axcolor = 'lightgoldenrodyellow'
axUpper = plt.axes([0.15, 0.10, 0.65, 0.03], facecolor=axcolor)
axLower = plt.axes([0.15, 0.15, 0.65, 0.03], facecolor=axcolor)
sUpper = Slider(axUpper, 'Upper', min_t+1, max_t, valinit = max_t - 10, valstep=1)
sLower = Slider(axLower, 'Lower', min_t, max_t-1, valinit = min_t + 10, valstep=1)
def update(val):
top = sUpper.val
bottom = sLower.val
if (top < bottom):
top = max_t
bottom = min_t
for i in range(len(df_sets)):
axs[i].set_xlim(bottom-5, top+5)
fig.canvas.draw_idle()
sUpper.on_changed(update)
sLower.on_changed(update)
plt.show()
``` |
{
"source": "jihoon-seo/thrift",
"score": 2
} |
#### File: test/py/TestFrozen.py
```python
from DebugProtoTest import Srv
from DebugProtoTest.ttypes import CompactProtoTestStruct, Empty, Wrapper
from DebugProtoTest.ttypes import ExceptionWithAMap, MutableException
from thrift.Thrift import TFrozenDict
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TCompactProtocol
import collections.abc  # Hashable and friends live in collections.abc (aliases removed in Python 3.10)
import unittest
class TestFrozenBase(unittest.TestCase):
def _roundtrip(self, src, dst):
otrans = TTransport.TMemoryBuffer()
optoro = self.protocol(otrans)
src.write(optoro)
itrans = TTransport.TMemoryBuffer(otrans.getvalue())
iproto = self.protocol(itrans)
return dst.read(iproto) or dst
def test_dict_is_hashable_only_after_frozen(self):
d0 = {}
        self.assertFalse(isinstance(d0, collections.abc.Hashable))
        d1 = TFrozenDict(d0)
        self.assertTrue(isinstance(d1, collections.abc.Hashable))
def test_struct_with_collection_fields(self):
pass
def test_set(self):
"""Test that annotated set field can be serialized and deserialized"""
x = CompactProtoTestStruct(set_byte_map={
frozenset([42, 100, -100]): 99,
frozenset([0]): 100,
frozenset([]): 0,
})
x2 = self._roundtrip(x, CompactProtoTestStruct())
self.assertEqual(x2.set_byte_map[frozenset([42, 100, -100])], 99)
self.assertEqual(x2.set_byte_map[frozenset([0])], 100)
self.assertEqual(x2.set_byte_map[frozenset([])], 0)
def test_map(self):
"""Test that annotated map field can be serialized and deserialized"""
x = CompactProtoTestStruct(map_byte_map={
TFrozenDict({42: 42, 100: -100}): 99,
TFrozenDict({0: 0}): 100,
TFrozenDict({}): 0,
})
x2 = self._roundtrip(x, CompactProtoTestStruct())
self.assertEqual(x2.map_byte_map[TFrozenDict({42: 42, 100: -100})], 99)
self.assertEqual(x2.map_byte_map[TFrozenDict({0: 0})], 100)
self.assertEqual(x2.map_byte_map[TFrozenDict({})], 0)
def test_list(self):
"""Test that annotated list field can be serialized and deserialized"""
x = CompactProtoTestStruct(list_byte_map={
(42, 100, -100): 99,
(0,): 100,
(): 0,
})
x2 = self._roundtrip(x, CompactProtoTestStruct())
self.assertEqual(x2.list_byte_map[(42, 100, -100)], 99)
self.assertEqual(x2.list_byte_map[(0,)], 100)
self.assertEqual(x2.list_byte_map[()], 0)
def test_empty_struct(self):
"""Test that annotated empty struct can be serialized and deserialized"""
x = CompactProtoTestStruct(empty_struct_field=Empty())
x2 = self._roundtrip(x, CompactProtoTestStruct())
self.assertEqual(x2.empty_struct_field, Empty())
def test_struct(self):
"""Test that annotated struct can be serialized and deserialized"""
x = Wrapper(foo=Empty())
self.assertEqual(x.foo, Empty())
x2 = self._roundtrip(x, Wrapper)
self.assertEqual(x2.foo, Empty())
def test_frozen_exception(self):
exc = ExceptionWithAMap(blah='foo')
with self.assertRaises(TypeError):
exc.blah = 'bar'
mutexc = MutableException(msg='foo')
mutexc.msg = 'bar'
self.assertEqual(mutexc.msg, 'bar')
def test_frozen_exception_serialization(self):
result = Srv.declaredExceptionMethod_result(
xwamap=ExceptionWithAMap(blah="error"))
deserialized = self._roundtrip(
result, Srv.declaredExceptionMethod_result())
self.assertEqual(result, deserialized)
class TestFrozen(TestFrozenBase):
def protocol(self, trans):
return TBinaryProtocol.TBinaryProtocolFactory().getProtocol(trans)
class TestFrozenAcceleratedBinary(TestFrozenBase):
def protocol(self, trans):
return TBinaryProtocol.TBinaryProtocolAcceleratedFactory(fallback=False).getProtocol(trans)
class TestFrozenAcceleratedCompact(TestFrozenBase):
def protocol(self, trans):
return TCompactProtocol.TCompactProtocolAcceleratedFactory(fallback=False).getProtocol(trans)
def suite():
suite = unittest.TestSuite()
loader = unittest.TestLoader()
suite.addTest(loader.loadTestsFromTestCase(TestFrozen))
suite.addTest(loader.loadTestsFromTestCase(TestFrozenAcceleratedBinary))
suite.addTest(loader.loadTestsFromTestCase(TestFrozenAcceleratedCompact))
return suite
if __name__ == "__main__":
unittest.main(defaultTest="suite", testRunner=unittest.TextTestRunner(verbosity=2))
``` |
{
"source": "jihoonson/druid",
"score": 2
} |
#### File: docs/_bin/generate-license.py
```python
import yaml
import json
import os
import sys
from html.parser import HTMLParser
class DependencyReportParser(HTMLParser):
# This class parses the given html file to find all dependency reports under "Project dependencies"
# and "Projection transparent dependencies" sections.
# The parser works based on the state machine and its state is updated whenever it reads a new tag.
# The state changes as below:
#
# none -> h2_start -> project_dependencies_start -> h3_start -> compile_start -> table_start -> row_start -> th_start / td_start -> th_end / td_end -> row_end -> table_end -> compile_end -> h3_end -> project_dependencies_end -> h2_end -> none
attr_index = 0
group_id = None
artifact_id = None
version = None
classifier = None
dep_type = None
license = None
state = "none"
dep_to_license = None
compatible_license_names = None
include_classifier = False
druid_module_name = None
def __init__(self, druid_module_name, compatible_license_names):
HTMLParser.__init__(self)
self.state = "none"
self.druid_module_name = druid_module_name
self.compatible_license_names = compatible_license_names
def parse(self, f):
self.dep_to_license = {}
self.feed(f.read())
return self.dep_to_license
def handle_starttag(self, tag, attrs):
# print("current: {}, start tag: {}, attrs:{} ".format(self.state, tag, attrs))
if self.state == "none":
if tag == "h2":
self.state = "h2_start"
if self.state == "h2_start":
if tag == "a":
for attr in attrs:
if attr[0] == "name" and (attr[1] == "Project_Dependencies" or attr[1] == "Project_Transitive_Dependencies"):
self.state = "project_dependencies_start"
self.include_classifier = False
if self.state == "h2_end":
if tag == "h3":
self.state = "h3_start"
if self.state == "h3_start":
if tag == "a":
for attr in attrs:
if attr[0] == "name" and attr[1] == "compile":
self.state = "compile_start"
if self.state == "h3_end":
if tag == "table":
self.state = "table_start"
if self.state == "table_start":
if tag == "tr":
self.state = "row_start"
self.clear_attr()
if self.state == "row_end":
if tag == "tr":
self.state = "row_start"
self.clear_attr()
if self.state == "row_start":
if tag == "td":
self.state = "td_start"
elif tag == "th":
self.state = "th_start"
if self.state == "th_end":
if tag == "th":
self.state = "th_start"
if self.state == "td_end":
if tag == "td":
self.state = "td_start"
def handle_endtag(self, tag):
# print("current: {}, end tag: {}".format(self.state, tag))
if self.state == "project_dependencies_start":
if tag == "a":
self.state = "project_dependencies_end"
if self.state == "h2_start":
if tag == "h2":
self.state = "h2_end"
if self.state == "project_dependencies_end":
if tag == "h2":
self.state = "h2_end"
if self.state == "compile_start":
if tag == "a":
self.state = "compile_end"
if self.state == "compile_end":
if tag == "h3":
self.state = "h3_end"
if self.state == "table_start":
if tag == "table":
self.state = "none"
if self.state == "td_start":
if tag == "td":
self.state = "td_end"
self.attr_index = self.attr_index + 1
if self.state == "th_start":
if tag == "th":
self.state = "th_end"
if self.state == "row_start":
if tag == "tr":
self.state = "row_end"
if self.state == "th_end":
if tag == "tr":
self.state = "row_end"
if self.state == "td_end":
if tag == "tr":
self.state = "row_end"
# print(json.dumps({"groupId": self.group_id, "artifactId": self.artifact_id, "version": self.version, "classifier": self.classifier, "type": self.dep_type, "license": self.license}))
if self.group_id.find("org.apache.druid") < 0:
self.dep_to_license[get_dep_key(self.group_id, self.artifact_id, self.version)] = (self.license, self.druid_module_name)
if self.state == "row_end":
if tag == "table":
self.state = "none"
def handle_data(self, data):
if self.state == "td_start":
self.set_attr(data)
elif self.state == "th_start":
if data.lower() == "classifier":
self.include_classifier = True
def clear_attr(self):
self.group_id = None
self.artifact_id = None
self.version = None
self.classifier = None
self.dep_type = None
self.license = None
self.attr_index = 0
def set_attr(self, data):
#print("set data: {}".format(data))
if self.attr_index == 0:
self.group_id = data
elif self.attr_index == 1:
self.artifact_id = data
elif self.attr_index == 2:
self.version = get_version_string(data)
elif self.attr_index == 3:
if self.include_classifier:
self.classifier = data
else:
self.dep_type = data
elif self.attr_index == 4:
if self.include_classifier:
self.dep_type = data
else:
self.set_license(data)
elif self.attr_index == 5:
if self.include_classifier:
self.set_license(data)
else:
raise Exception("Unknown attr_index [{}]".format(self.attr_index))
else:
raise Exception("Unknown attr_index [{}]".format(self.attr_index))
def set_license(self, data):
if data.upper().find("GPL") < 0:
if self.license != 'Apache License version 2.0':
self.license = self.compatible_license_names[data]
outfile = None
def get_dep_key(group_id, artifact_id, version):
return (group_id, artifact_id, version)
def build_compatible_license_names():
compatible_licenses = {}
compatible_licenses['Apache License, Version 2.0'] = 'Apache License version 2.0'
compatible_licenses['The Apache Software License, Version 2.0'] = 'Apache License version 2.0'
compatible_licenses['Apache 2.0'] = 'Apache License version 2.0'
compatible_licenses['Apache 2'] = 'Apache License version 2.0'
compatible_licenses['Apache License 2.0'] = 'Apache License version 2.0'
compatible_licenses['Apache Software License - Version 2.0'] = 'Apache License version 2.0'
compatible_licenses['The Apache License, Version 2.0'] = 'Apache License version 2.0'
compatible_licenses['Apache License version 2.0'] = 'Apache License version 2.0'
compatible_licenses['Apache License Version 2.0'] = 'Apache License version 2.0'
compatible_licenses['Apache License Version 2'] = 'Apache License version 2.0'
compatible_licenses['Apache License v2.0'] = 'Apache License version 2.0'
compatible_licenses['Apache License, version 2.0'] = 'Apache License version 2.0'
compatible_licenses['Public Domain'] = 'Public Domain'
compatible_licenses['BSD-2-Clause License'] = 'BSD-2-Clause License'
compatible_licenses['BSD-3-Clause License'] = 'BSD-3-Clause License'
compatible_licenses['New BSD license'] = 'BSD-3-Clause License'
compatible_licenses['BSD'] = 'BSD-3-Clause License'
compatible_licenses['The BSD License'] = 'BSD-3-Clause License'
compatible_licenses['BSD licence'] = 'BSD-3-Clause License'
compatible_licenses['BSD License'] = 'BSD-3-Clause License'
compatible_licenses['BSD-like'] = 'BSD-3-Clause License'
compatible_licenses['The BSD 3-Clause License'] = 'BSD-3-Clause License'
compatible_licenses['Revised BSD'] = 'BSD-3-Clause License'
compatible_licenses['New BSD License'] = 'BSD-3-Clause License'
compatible_licenses['ICU License'] = 'ICU License'
compatible_licenses['SIL Open Font License 1.1'] = 'SIL Open Font License 1.1'
compatible_licenses['CDDL 1.1'] = 'CDDL 1.1'
compatible_licenses['CDDL/GPLv2+CE'] = 'CDDL 1.1'
compatible_licenses['CDDL + GPLv2 with classpath exception'] = 'CDDL 1.1'
compatible_licenses['CDDL License'] = 'CDDL 1.1'
compatible_licenses['Eclipse Public License 1.0'] = 'Eclipse Public License 1.0'
compatible_licenses['The Eclipse Public License, Version 1.0'] = 'Eclipse Public License 1.0'
compatible_licenses['Eclipse Public License - Version 1.0'] = 'Eclipse Public License 1.0'
compatible_licenses['Eclipse Public License, Version 1.0'] = 'Eclipse Public License 1.0'
compatible_licenses['Mozilla Public License Version 2.0'] = 'Mozilla Public License Version 2.0'
compatible_licenses['Mozilla Public License, Version 2.0'] = 'Mozilla Public License Version 2.0'
compatible_licenses['Creative Commons Attribution 2.5'] = 'Creative Commons Attribution 2.5'
compatible_licenses['Creative Commons CC0'] = 'Creative Commons CC0'
compatible_licenses['CC0'] = 'Creative Commons CC0'
compatible_licenses['The MIT License'] = 'MIT License'
compatible_licenses['MIT License'] = 'MIT License'
compatible_licenses['-'] = '-'
return compatible_licenses
def module_to_upper(module):
extensions_offset = module.lower().find("extensions")
if extensions_offset < 0:
return module.upper()
elif extensions_offset == 0:
return module[0:len("extensions")].upper() + module[len("extensions"):len(module)]
else:
raise Exception("Expected extensions at 0, but {}".format(extensions_offset))
def print_outfile(string):
print(string, file=outfile)
def print_error(string):
print(string, file=sys.stderr)
def get_version_string(version):
if type(version) == str:
return version
else:
return str(version)
def print_license_phrase(license_phrase):
remaining = license_phrase
while len(remaining) > 0:
# print("remaining: {}".format(remaining))
# print("len: {}".format(len(remaining)))
if len(remaining) > 120:
            head_chunk = remaining[0:120]
            phrase_len = head_chunk.rfind(" ")
            if phrase_len < 0:
                raise Exception("Can't find whitespace in {}".format(head_chunk))
print_outfile(" {}".format(remaining[0:phrase_len]))
remaining = remaining[phrase_len:]
else:
print_outfile(" {}".format(remaining))
remaining = ""
def is_non_empty(dic, key):
if key in dic and dic[key] is not None:
if type(dic[key]) == str:
return len(dic[key]) > 0
else:
return True
else:
return False
def print_license(license):
license_phrase = "This product"
if license['license_category'] == "source":
license_phrase += " contains"
elif license['license_category'] == "binary":
license_phrase += " bundles"
license_phrase += " {}".format(license['name'])
if is_non_empty(license, 'version'):
license_phrase += " version {}".format(license['version'])
if is_non_empty(license, 'copyright'):
license_phrase += ", copyright {}".format(license['copyright'])
if is_non_empty(license, 'additional_copyright_statement'):
license_phrase += ", {}".format(license['additional_copyright_statement'])
if license['license_name'] != 'Apache License version 2.0':
license_phrase += " which is available under {}".format(license['license_name'])
if is_non_empty(license, 'additional_license_statement'):
license_phrase += ", {}".format(license['additional_license_statement'])
if is_non_empty(license, 'license_file_path'):
license_file_list = []
if type(license['license_file_path']) == list:
license_file_list.extend(license['license_file_path'])
else:
license_file_list.append(license['license_file_path'])
if len(license_file_list) == 1:
license_phrase += ". For details, see {}".format(license_file_list[0])
else:
license_phrase += ". For details, "
for each_file in license_file_list:
if each_file == license_file_list[-1]:
license_phrase += ", and {}".format(each_file)
elif each_file == license_file_list[0]:
license_phrase += "see {}".format(each_file)
else:
license_phrase += ", {}".format(each_file)
license_phrase += "."
print_license_phrase(license_phrase)
if 'source_paths' in license:
for source_path in license['source_paths']:
if type(source_path) is dict:
for class_name, path in source_path.items():
print_outfile(" {}:".format(class_name))
print_outfile(" * {}".format(path))
else:
print_outfile(" * {}".format(source_path))
if 'libraries' in license:
for library in license['libraries']:
if type(library) is not dict:
raise Exception("Expected dict but got {}[{}]".format(type(library), library))
if len(library) > 1:
raise Exception("Expected 1 groupId and artifactId, but got [{}]".format(library))
for group_id, artifact_id in library.items():
print_outfile(" * {}:{}".format(group_id, artifact_id))
def find_druid_module_name(dirpath):
ext_start = dirpath.find("/ext/")
if ext_start > 0:
# Found an extension
subpath = dirpath[(len("/ext/") + ext_start):]
ext_name_end = subpath.find("/")
if ext_name_end < 0:
raise Exception("Can't determine extension name from [{}]".format(dirpath))
else:
return subpath[0:ext_name_end]
else:
# Druid core
return "core"
def check_licenses(license_yaml, dependency_reports_root):
# Build a dictionary to facilitate comparing reported licenses and registered ones.
# These dictionaries are the mapping of (group_id, artifact_id, version) to license_name.
# Build reported license dictionary.
reported_dep_to_licenses = {}
compatible_license_names = build_compatible_license_names()
for dirpath, dirnames, filenames in os.walk(dependency_reports_root):
for filename in filenames:
if filename == "dependencies.html":
full_path = os.path.join(dirpath, filename)
# Determine if it's druid core or an extension
druid_module_name = find_druid_module_name(dirpath)
print_error("Parsing {}".format(full_path))
with open(full_path) as report_file:
parser = DependencyReportParser(druid_module_name, compatible_license_names)
reported_dep_to_licenses.update(parser.parse(report_file))
if len(reported_dep_to_licenses) == 0:
raise Exception("No dependency reports are found")
print_error("Found {} reported licenses\n".format(len(reported_dep_to_licenses)))
# Build registered license dictionary.
registered_dep_to_licenses = {}
skipping_licenses = {}
with open(license_yaml) as registry_file:
licenses_list = list(yaml.load_all(registry_file))
for license in licenses_list:
if 'libraries' in license:
for library in license['libraries']:
if type(library) is not dict:
raise Exception("Expected dict but got {}[{}]".format(type(library), library))
if len(library) > 1:
raise Exception("Expected 1 groupId and artifactId, but got [{}]".format(library))
for group_id, artifact_id in library.items():
if 'version' not in license:
raise Exception("version is missing in {}".format(license))
if 'license_name' not in license:
raise Exception("name is missing in {}".format(license))
if 'skip_dependency_report_check' in license and license['skip_dependency_report_check']:
if 'version' not in license:
version = "-"
else:
version = get_version_string(license['version'])
skipping_licenses[get_dep_key(group_id, artifact_id, version)] = license
else:
registered_dep_to_licenses[get_dep_key(group_id, artifact_id, get_version_string(license['version']))] = compatible_license_names[license['license_name']]
if len(registered_dep_to_licenses) == 0:
raise Exception("No registered licenses are found")
# Compare licenses in registry and those in dependency reports.
mismatched_licenses = []
missing_licenses = []
unchecked_licenses = []
# Iterate through registered licenses and check if its license is same with the reported one.
for key, registered_license in registered_dep_to_licenses.items():
if key in reported_dep_to_licenses: # key is (group_id, artifact_id, version)
reported_license_druid_module = reported_dep_to_licenses[key]
reported_license = reported_license_druid_module[0]
druid_module = reported_license_druid_module[1]
if reported_license is not None and reported_license != "-" and reported_license != registered_license:
group_id = key[0]
artifact_id = key[1]
version = key[2]
mismatched_licenses.append((druid_module, group_id, artifact_id, version, reported_license, registered_license))
# If we find any mismatched license, stop immediately.
if len(mismatched_licenses) > 0:
print_error("Error: found {} mismatches between reported licenses and registered licenses".format(len(mismatched_licenses)))
for mismatched_license in mismatched_licenses:
print_error("druid_module: {}, groupId: {}, artifactId: {}, version: {}, reported_license: {}, registered_license: {}".format(mismatched_license[0], mismatched_license[1], mismatched_license[2], mismatched_license[3], mismatched_license[4], mismatched_license[5]))
print_error("")
# Let's find missing licenses, which are reported but missing in the registry.
for key, reported_license_druid_module in reported_dep_to_licenses.items():
if reported_license_druid_module[0] != "-" and key not in registered_dep_to_licenses and key not in skipping_licenses:
missing_licenses.append((reported_license_druid_module[1], key[0], key[1], key[2], reported_license_druid_module[0]))
if len(missing_licenses) > 0:
print_error("Error: found {} missing licenses. These licenses are reported, but missing in the registry".format(len(missing_licenses)))
for missing_license in missing_licenses:
print_error("druid_module: {}, groupId: {}, artifactId: {}, version: {}, license: {}".format(missing_license[0], missing_license[1], missing_license[2], missing_license[3], missing_license[4]))
print_error("")
# Let's find unchecked licenses, which are registered but missing in the report.
# These licenses should be checked manually.
for key, registered_license in registered_dep_to_licenses.items():
if key not in reported_dep_to_licenses:
unchecked_licenses.append((key[0], key[1], key[2], registered_license))
elif reported_dep_to_licenses[key][0] == "-":
unchecked_licenses.append((key[0], key[1], key[2], registered_license))
if len(unchecked_licenses) > 0:
print_error("Warn: found {} unchecked licenses. These licenses are registered, but not found in dependency reports.".format(len(unchecked_licenses)))
print_error("These licenses must be checked manually.")
for unchecked_license in unchecked_licenses:
print_error("groupId: {}, artifactId: {}, version: {}, reported_license: {}".format(unchecked_license[0], unchecked_license[1], unchecked_license[2], unchecked_license[3]))
print_error("")
if len(mismatched_licenses) > 0 or len(missing_licenses) > 0:
sys.exit(1)
def print_license_name_underbar(license_name):
    underbar = "=" * len(license_name)
    print_outfile("{}\n".format(underbar))
def generate_license(apache_license_v2, license_yaml):
# Generate LICENSE.BINARY file
print_error("=== Generating the contents of LICENSE.BINARY file ===\n")
# Print Apache license first.
print_outfile(apache_license_v2)
with open(license_yaml) as registry_file:
licenses_list = list(yaml.load_all(registry_file))
# Group licenses by license_name, license_category, and then module.
licenses_map = {}
for license in licenses_list:
if license['license_name'] not in licenses_map:
licenses_map[license['license_name']] = {}
licenses_of_name = licenses_map[license['license_name']]
if license['license_category'] not in licenses_of_name:
licenses_of_name[license['license_category']] = {}
licenses_of_category = licenses_of_name[license['license_category']]
if license['module'] not in licenses_of_category:
licenses_of_category[license['module']] = []
licenses_of_module = licenses_of_category[license['module']]
licenses_of_module.append(license)
for license_name, licenses_of_name in sorted(licenses_map.items()):
print_outfile(license_name)
print_license_name_underbar(license_name)
for license_category, licenses_of_category in licenses_of_name.items():
for module, licenses in licenses_of_category.items():
print_outfile("{}/{}".format(license_category.upper(), module_to_upper(module)))
for license in licenses:
print_license(license)
print_outfile("")
print_outfile("")
# TODO: add options: debug mode
if len(sys.argv) != 5:
sys.stderr.write("usage: {} <path to apache license file> <path to license.yaml> <root to maven dependency reports> <path to output file>".format(sys.argv[0]))
sys.exit(1)
with open(sys.argv[1]) as apache_license_file:
apache_license_v2 = apache_license_file.read()
license_yaml = sys.argv[2]
dependency_reports_root = sys.argv[3]
with open(sys.argv[4], "w") as outfile:
check_licenses(license_yaml, dependency_reports_root)
generate_license(apache_license_v2, license_yaml)
``` |
{
"source": "JiHooooo/FixMatch-pytorch",
"score": 2
} |
#### File: FixMatch-pytorch/datasets/custom_dataset.py
```python
import os
import glob
import random
import logging
from PIL import Image
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset
from torchvision import datasets, transforms
from .data_utils import get_onehot
from .augmentation.randaugment import RandAugment
import copy
from .ssl_dataset import SSL_Dataset
_extension = ['jpg','png','bmp']
_label_name = ['airplane','automobile','ship', 'truck']
_model_mean = [0.485,0.456,0.406]
_model_std = [0.229,0.224,0.225]
def get_transform(train=True, image_size=224, crop_ratio=0.1, normalize_flag=True):
transforms_list = []
transforms_list.append(transforms.Resize((image_size,image_size)))
if train:
transforms_list.extend([
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(image_size, padding=int(image_size*crop_ratio))
])
if normalize_flag:
transforms_list.extend([
transforms.ToTensor(),
transforms.Normalize(_model_mean, _model_std),
])
else:
transforms_list.extend([
transforms.ToTensor(),
])
return transforms.Compose(transforms_list)
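# Usage sketch (illustrative only, not part of the original module): build the weak
# augmentation pipeline used by the datasets below, e.g.
#   train_tf = get_transform(train=True, image_size=224)
#   eval_tf = get_transform(train=False, image_size=224)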
class SelfDataset(Dataset):
"""
    SelfDataset returns a pair of image and labels (targets).
    This class supports strong augmentation for FixMatch,
    and returns both weakly and strongly augmented images.
"""
def __init__(self,
folder_path,
ssl_dataset_flag = False,
transforms=None,
use_strong_transform=False,
strong_transforms=None,
onehot=False,
*args, **kwargs):
"""
Args
folder_path: the folder where the training images are saved
##### folder structure
folder -
label1:
image1
image2
...
label2:
...
#####
        ssl_dataset_flag : whether the images in the folder have reliable labels
transforms: basic transformation of data
use_strong_transform: If True, this dataset returns both weakly and strongly augmented images.
strong_transform: list of transformation functions for strong augmentation
onehot: If True, label is converted into onehot vector.
"""
super(SelfDataset, self).__init__()
self.transforms = transforms
self.ssl_dataset_flag = ssl_dataset_flag
self.num_classes = len(_label_name)
self.label_names = _label_name
self.use_strong_transform = use_strong_transform
self.onehot = onehot
#read all image path
if isinstance(folder_path, str):
image_path_list, image_label_list = \
self.load_image_label_from_folder(folder_path)
elif isinstance(folder_path, list):
image_path_list = []
image_label_list = []
for folder_path_one in folder_path:
one_folder_image_path_list, one_folder_image_label_list = self.load_image_label_from_folder(folder_path_one)
image_path_list.extend(one_folder_image_path_list)
                image_label_list.extend(one_folder_image_label_list)
else:
raise TypeError('The type of folder path should be str or list')
if len(image_path_list) == 0:
raise ValueError("Don't find suitable image file")
if 'shuffle_seed' in kwargs.keys() and 'lb_image_num' in kwargs.keys() and not self.ssl_dataset_flag:
self.image_path_list, self.image_label_list = \
self.split_lb_ulb(image_path_list, image_label_list,
seed=kwargs['shuffle_seed'], lb_num=kwargs['lb_image_num'],
lb_flag=not use_strong_transform)
elif self.ssl_dataset_flag:
self.image_label_list = image_label_list
self.image_path_list = image_path_list
else:
self.image_path_list, self.image_label_list = \
self.split_lb_ulb(image_path_list, image_label_list)
if use_strong_transform:
if strong_transforms is None:
self.strong_transforms = copy.deepcopy(transforms)
self.strong_transforms.transforms.insert(0, RandAugment(3,5))
else:
self.strong_transforms = strong_transforms
@staticmethod
def split_lb_ulb(image_path_list, image_label_list, seed=None, lb_num=None, lb_flag=True):
# image_path_list [[image_path_of_typeA],[...], ...]
# image_label_list [[image_label_of_typeA],[...], ...]
total_image_pathes = []
total_image_labels = []
for image_pathes, image_labels in zip(image_path_list, image_label_list):
if lb_num is None or seed is None or lb_num <0 or lb_num > len(image_pathes):
total_image_pathes.extend(image_pathes)
total_image_labels.extend(image_labels)
else:
random.seed(seed)
random.shuffle(image_pathes)
random.seed(seed)
random.shuffle(image_labels)
if lb_flag:
total_image_pathes.extend(image_pathes[:lb_num])
total_image_labels.extend(image_labels[:lb_num])
else:
total_image_pathes.extend(image_pathes[lb_num:])
total_image_labels.extend(image_labels[lb_num:])
return total_image_pathes, total_image_labels
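    # Illustrative sketch (values assumed, not part of the original code): given per-class lists
    #   image_path_list  = [['a/1.jpg', 'a/2.jpg', 'a/3.jpg'], ['b/1.jpg', 'b/2.jpg']]
    #   image_label_list = [[0, 0, 0], [1, 1]]
    # split_lb_ulb(..., seed=0, lb_num=2, lb_flag=True) keeps the first 2 shuffled samples of
    # every class (the labeled subset), while lb_flag=False keeps the remaining samples of
    # every class (the unlabeled subset).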
def load_image_label_from_folder(self, folder_path):
if not self.ssl_dataset_flag:
sub_folder_list = os.listdir(folder_path)
image_path_list = []
image_label_list = []
for label_folder in sub_folder_list:
if label_folder in self.label_names:
image_pathes_one_folder = self.load_image_of_one_folder('%s/%s'%(folder_path, label_folder))
image_path_list.append(image_pathes_one_folder)
image_label_list.append([self.label_names.index(label_folder) for _ in image_pathes_one_folder])
else:
image_path_list = self.load_image_of_one_folder(folder_path)
image_label_list = [-1 for _ in image_path_list]
return image_path_list, image_label_list
@staticmethod
def load_image_of_one_folder(folder_path):
image_pathes = []
for root, dirs, files in os.walk(folder_path):
for file_name in files:
if os.path.splitext(file_name)[1][1:] in _extension:
image_pathes.append('%s/%s'%(root, file_name))
return image_pathes
def __getitem__(self, idx):
"""
If strong augmentation is not used,
return weak_augment_image, target
else:
return weak_augment_image, strong_augment_image, target
"""
#set idx-th target
if self.image_label_list is None:
target = None
else:
target_ = self.image_label_list[idx]
target = target_ if not self.onehot else get_onehot(self.num_classes, target_)
#set augmented images
#load image
image_path = self.image_path_list[idx]
image = Image.open(image_path).convert('RGB')
if self.transforms is None:
return transforms.ToTensor()(image), target
else:
if isinstance(image, np.ndarray):
image = Image.fromarray(image)
img_w = self.transforms(image)
if not self.use_strong_transform:
return img_w, target
else:
return img_w, self.strong_transforms(image), target
def __len__(self):
return len(self.image_path_list)
class SelfDataset_fold(SelfDataset):
def __init__(self,
csv_path,
ssl_dataset_flag = False,
transforms=None,
use_strong_transform=False,
strong_transforms=None,
onehot=False,
train_flag=True,
fold_num = 0,
*args, **kwargs):
"""
Args
        csv_path: the csv file that lists the training images
        ##### expected csv columns
        image_path : path to the image file
        label : class name (must be one of the configured label names)
        fold : fold index used to split the training / validation data
        #####
        ssl_dataset_flag : whether the images listed in the csv have reliable labels
transforms: basic transformation of data
use_strong_transform: If True, this dataset returns both weakly and strongly augmented images.
strong_transform: list of transformation functions for strong augmentation
onehot: If True, label is converted into onehot vector.
"""
self.transforms = transforms
self.ssl_dataset_flag = ssl_dataset_flag
self.num_classes = len(_label_name)
self.use_strong_transform = use_strong_transform
self.onehot = onehot
#read all image path
df_info = pd.read_csv(csv_path)
print('the label name : ' + str(_label_name))
self.label_names = [str(i) for i in _label_name]
if train_flag:
selected_df_info = df_info[df_info['fold'] != fold_num]
else:
selected_df_info = df_info[df_info['fold'] == fold_num]
#delete the image whose label is not included in label name
selected_df_info = selected_df_info[selected_df_info['label'].isin(self.label_names)]
image_path_list = []
image_label_list = []
for label_one in self.label_names:
selected_df_info_one_label = selected_df_info[selected_df_info['label'] == label_one]
image_path_list.append(list(selected_df_info_one_label['image_path']))
image_label_list_ori = list(selected_df_info_one_label['label'])
image_label_list.append([self.label_names.index(i) for i in image_label_list_ori])
if len(image_path_list) == 0:
raise ValueError("Don't find suitable image file")
if 'shuffle_seed' in kwargs.keys() and 'lb_image_num' in kwargs.keys() and not self.ssl_dataset_flag:
self.image_path_list, self.image_label_list = \
self.split_lb_ulb(image_path_list, image_label_list,
seed=kwargs['shuffle_seed'], lb_num=kwargs['lb_image_num'],
lb_flag=not use_strong_transform)
elif self.ssl_dataset_flag:
self.image_label_list = image_label_list
self.image_path_list = image_path_list
else:
self.image_path_list, self.image_label_list = \
self.split_lb_ulb(image_path_list, image_label_list)
if use_strong_transform:
if strong_transforms is None:
self.strong_transforms = copy.deepcopy(transforms)
self.strong_transforms.transforms.insert(0, RandAugment(3,5))
else:
self.strong_transforms = strong_transforms
class SelfDataset_multi(Dataset):
def __init__(self, csv_path,transforms = None,
seed=0, lb_num=0, lb_flag=True):
image_path_list, image_label_list = self.Image_Info_from_df(csv_path)
self.image_path_list, self.image_label_list = self.split_lb_ulb(
image_path_list, image_label_list,
seed = seed, lb_num=lb_num, lb_flag=lb_flag
)
self.transforms = transforms
self.label_num = len(_label_name)
self.label_names = _label_name
if not lb_flag:
self.strong_transforms = copy.deepcopy(transforms)
self.strong_transforms.transforms.insert(0, RandAugment(3,5))
self.use_strong_transform = True
else:
self.use_strong_transform = False
def __len__(self):
return len(self.image_path_list)
@staticmethod
def Image_Info_from_df(df_path):
try:
df = pd.read_csv(df_path,encoding="cp932")
except:
df = pd.read_csv(df_path,encoding="utf-8")
logging.info('load csv with utf-8 encoding method')
else:
logging.info('load csv with cp932 encoding method')
image_path_list = []
image_label_list = []
for index in range(len(df)):
#input image name
image_info_one = [df.iloc[index]['image_path'],]
            for label in _label_name:  # use the module-level label names; a staticmethod has no self
image_info_one.append(int(df.iloc[index][label]))
image_path_list.append(image_info_one[0])
image_label_list.append(image_info_one[1:])
return image_path_list, image_label_list
@staticmethod
def split_lb_ulb(image_path_list, image_label_list, seed=0, lb_num=0, lb_flag=True):
if lb_num <= 0 or lb_num >= len(image_path_list):
output_image_path_list = image_path_list
output_image_label_list = image_label_list
else:
random.seed(seed)
random.shuffle(image_path_list)
random.seed(seed)
random.shuffle(image_label_list)
if lb_flag:
output_image_path_list = image_path_list[:lb_num]
output_image_label_list = image_label_list[:lb_num]
else:
output_image_path_list = image_path_list[lb_num:]
output_image_label_list = image_label_list[lb_num:]
return output_image_path_list, output_image_label_list
def __getitem__(self, idx):
image_path = self.image_path_list[idx]
image = Image.open(image_path)
image = np.array(image, dtype=np.uint8)
if self.image_label_list is None:
target = np.zeros(self.label_num, dtype=np.float32)
else:
target = self.image_label_list[idx]
labels = np.zeros(self.label_num, dtype=np.float32)
for index_label in range(self.label_num):
if target[index_label] > 0:
labels[index_label] = 1
labels = torch.from_numpy(labels)
if self.transforms is None:
return transforms.ToTensor()(image), labels
else:
if isinstance(image, np.ndarray):
image = Image.fromarray(image)
img_w = self.transforms(image)
if not self.use_strong_transform:
return img_w, labels
else:
return img_w, self.strong_transforms(image), labels
```
#### File: FixMatch-pytorch/datasets/DistributedProxySampler.py
```python
import math
import torch
from torch.utils.data.distributed import DistributedSampler
class DistributedProxySampler(DistributedSampler):
"""Sampler that restricts data loading to a subset of input sampler indices.
It is especially useful in conjunction with
:class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
process can pass a DistributedSampler instance as a DataLoader sampler,
and load a subset of the original dataset that is exclusive to it.
.. note::
Input sampler is assumed to be of constant size.
Arguments:
sampler: Input data sampler.
num_replicas (optional): Number of processes participating in
distributed training.
rank (optional): Rank of the current process within num_replicas.
"""
def __init__(self, sampler, num_replicas=None, rank=None):
super(DistributedProxySampler, self).__init__(sampler, num_replicas=num_replicas, rank=rank, shuffle=False)
self.sampler = sampler
def __iter__(self):
# deterministically shuffle based on epoch
torch.manual_seed(self.epoch)
indices = list(self.sampler)
# add extra samples to make it evenly divisible
indices += indices[:(self.total_size - len(indices))]
if len(indices) != self.total_size:
raise RuntimeError("{} vs {}".format(len(indices), self.total_size))
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
if len(indices) != self.num_samples:
raise RuntimeError("{} vs {}".format(len(indices), self.num_samples))
return iter(indices)
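# Usage sketch (illustrative, not part of the original file): wrap any sampler so that each
# distributed worker iterates over a disjoint shard of its indices, e.g.
#   base_sampler = torch.utils.data.RandomSampler(dataset)
#   dist_sampler = DistributedProxySampler(base_sampler, num_replicas=world_size, rank=rank)
#   loader = torch.utils.data.DataLoader(dataset, sampler=dist_sampler, batch_size=64)
# Call dist_sampler.set_epoch(epoch) before every epoch so the shuffling seed changes.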
``` |
{
"source": "jihoro/my-ResDAVEnet-VQ",
"score": 2
} |
#### File: my-ResDAVEnet-VQ/dataloaders/image_caption_dataset.py
```python
import json
import librosa
import numpy as np
import os
import os.path
import scipy.signal
import torch
import torch.nn.functional
import torchvision.transforms as transforms
from PIL import Image
from torch.utils.data import Dataset
from dataloaders.utils import compute_spectrogram
class ImageCaptionDataset(Dataset):
def __init__(self, dataset_json_file, audio_conf=None, image_conf=None):
"""
Dataset that manages a set of paired images and audio recordings
:param dataset_json_file
:param audio_conf: Dictionary containing the sample rate, window and
the window length/stride in seconds, and normalization to perform (optional)
:param image_transform: torchvision transform to apply to the images (optional)
"""
with open(dataset_json_file, 'r') as fp:
data_json = json.load(fp)
self.data = data_json['data']
# self.image_base_path = data_json.get('image_base_path', '')
# self.audio_base_path = data_json.get('audio_base_path', '')
self.image_base_path = "/content/flickr8k_spoken_captions/imgs"
self.audio_base_path = "/content/flickr8k_spoken_captions/wavs"
self.audio_conf = audio_conf if audio_conf else {}
self.image_conf = image_conf if image_conf else {}
# image transforms
crop_size = self.image_conf.get('crop_size', 224)
center_crop = self.image_conf.get('center_crop', False)
if center_crop:
self.image_resize_and_crop = transforms.Compose(
[transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor()])
else:
self.image_resize_and_crop = transforms.Compose(
[transforms.RandomResizedCrop(crop_size), transforms.ToTensor()])
RGB_mean = self.image_conf.get('RGB_mean', [0.485, 0.456, 0.406])
RGB_std = self.image_conf.get('RGB_std', [0.229, 0.224, 0.225])
self.image_normalize = transforms.Normalize(mean=RGB_mean, std=RGB_std)
def _LoadAudio(self, path):
        y, sr = librosa.load(path, sr=None)
logspec, n_frames = compute_spectrogram(y, sr, self.audio_conf)
return logspec, n_frames
def _LoadImage(self, impath):
img = Image.open(impath).convert('RGB')
img = self.image_resize_and_crop(img)
img = self.image_normalize(img)
return img
def __getitem__(self, index):
"""
returns: image, audio, nframes
where image is a FloatTensor of size (3, H, W)
audio is a FloatTensor of size (N_freq, N_frames) for spectrogram, or (N_frames) for waveform
nframes is an integer
"""
datum = self.data[index]
audio, nframes = self._LoadAudio(os.path.join(self.audio_base_path, datum['wav']))
image = self._LoadImage(os.path.join(self.image_base_path, datum['image']))
return image, audio, nframes
def __len__(self):
return len(self.data)
``` |
{
"source": "jihuacao/Putil",
"score": 3
} |
#### File: Putil/base/arg_base.py
```python
import Putil.base.dict_base as pdb
from abc import ABCMeta, abstractmethod
import argparse
class ProjectArg(metaclass=ABCMeta):
def __init__(self, parser=None, *args, **kwargs):
self._parser = argparse.ArgumentParser() if parser is None else parser
self._save_dir = kwargs.get('save_dir', None)
self._level = kwargs.get('log_level', None)
self._debug = kwargs.get('debug_mode', None)
self._config = kwargs.get('config', None)
self._parser.add_argument('--save_dir', action='store', dest='save_dir', default=self._save_dir, help='this param specified the dir to save the result, the default is {0}'.format(self._save_dir)) if self._save_dir is not None else None
self._parser.add_argument('--log_level', action='store', dest='log_level', default=self._level, help='this param specified the log level, the default is {0}'.format(self._level)) if self._level is not None else None
self._parser.add_argument('--debug_mode', action='store_true', dest='debug_mode', default=self._debug, help='this param set the program mode if the program contain a debug method, the default is {0}'.format(self._debug)) if self._debug is True else None
self._parser.add_argument('--config', action='store', dest='config', default=self._config, help='this param set the config file path for the program if needed, the default is {0}'.format(self._config)) if self._config is not None else None
pass
@property
def parser(self):
return self._parser
pass
def args_pack(args):
'''
this function pack the args into a string with format: key1-value1_key2-value2
'''
collection = args.__dict__
return pdb.dict_back(collection)
def args_log(args, logger):
collection = args.__dict__
pdb.dict_log(collection, logger)
pass
```
#### File: Putil/base/arg_operation.py
```python
import os
import json
import argparse
##@brief save the arg(type: Namespace) to a file
# @note save \'unserized_obj\' while unserized object exist in the arg
# @param[in] args the namespace
# @param[in] file the full path of the target file
def args_save(args, file):
with open(file, 'w') as fp:
json.dump(args.__dict__, fp, indent=4, default=lambda unserized_obj: 'unserized_obj', check_circular=True, sort_keys=True)
pass
def args_extract(file):
args = argparse.Namespace()
with open(file, 'r') as fp:
args.__dict__ = json.load(fp)
return args
def args_merge(*args, **kwargs):
'''
@brief merge the args
    @note values from the args with a smaller index take priority (their keys overwrite values from later args)
@param[in] kwargs
'invert_order': bool
if invert_order is True, the args would be reverted
if invert_order is not True, the order of args would be kept
'''
if len(args) == 0:
return None
else:
invert_order = kwargs.get('invert_order', False)
arg_dicts = [arg.__dict__ for arg in (args if invert_order is False else args[::-1])][::-1]
[arg_dicts[0].update(ad) for index, ad in enumerate(arg_dicts)]
return argparse.Namespace(**arg_dicts[0])
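# Usage sketch (illustrative, not part of the original module):
#   a = argparse.Namespace(lr=0.1, epochs=10)
#   b = argparse.Namespace(lr=0.01, batch_size=32)
#   args_merge(a, b)                     -> Namespace(batch_size=32, epochs=10, lr=0.1)   # a (smaller index) wins on 'lr'
#   args_merge(a, b, invert_order=True)  -> Namespace(batch_size=32, epochs=10, lr=0.01)  # order reversed, so b wins
# note: the merge updates the __dict__ of the base namespace in place (the last arg when invert_order is False)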
```
#### File: Putil/calc/iou.py
```python
import numpy as np
from colorama import init, Fore, Back, Style
"""
Summary:
IoU Calculate:
the IoU between Box_1 and Box_2:
F(x): if x < 0 , =0; else, =x;
IoU: F(max_top - min_bottom) * F(min_right - max_left)
max_top = max(Box_1_top_y, Box_2_top_y)
min_bottom = min(Box_1_bottom_y, Box_2_bottom_y)
min_right = min(Box_1_right_x, Box_2_right_x)
max_left = max(Box_1_left_x, Box_2_left_x)
base on different parameter , generate different way to calculate the max_top, min_bottom, min_right and
max_left
max_top
===
|
----|--
Box_1<---| | |--->min_right
| ----|-----------
IoU<---|--|////| |
max_left<---|--|////| |
| |////| |
--|---- |
| | |
| | |-----> Box_2
===| |
min_bottom----------------
"""
def __iou_chw(rect1, rect2):
"""
calculate the IoU between rect1 and rect2, use the [center_y, center_x, height, width]
:param rect1:
:param rect2:
:return:
"""
y1, x1, h1, w1 = rect1
y2, x2, h2, w2 = rect2
if (abs(x1 - x2) < ((w1 + w2) / 2.0)) and (abs(y1 - y2) < ((h1 + h2) / 2.0)):
left = max((x1 - (w1 / 2.0)), (x2 - (w2 / 2.0)))
upper = max((y1 - (h1 / 2.0)), (y2 - (h2 / 2.0)))
right = min((x1 + (w1 / 2.0)), (x2 + (w2 / 2.0)))
bottom = min((y1 + (h1 / 2.0)), (y2 + (h2 / 2.0)))
inter_w = abs(left - right)
inter_h = abs(upper - bottom)
inter_square = inter_w * inter_h
union_square = (w1 * h1) + (w2 * h2) - inter_square
iou = inter_square / union_square * 1.0
inter_rect = [(upper + bottom) * 0.5, (left + right) * 0.5, bottom - upper, right - left]
else:
iou = 0
inter_rect = [None, None, None, None]
pass
return iou, inter_rect
pass
def __to_chw(*rects, **options):
TP = options.pop('TP', False)
LHW = options.pop('LHW', False)
CHW = options.pop('CHW', False)
assert np.count_nonzero([TP, LHW, CHW]) == 1, \
'TP, LHW, CHW should have only one True, but {0}'.format(np.count_nonzero([TP, LHW, CHW]))
assert len(rects) >= 1, 'no input rect'
get = []
if TP:
[get.append([(i[0] + i[2]) * 0.5, (i[1] + i[3]) * 0.5, i[2] - i[0], i[3] - i[1]]) for i in rects]
return get
if LHW:
[get.append([i[0] + 0.5 * i[2], i[1] + 0.5 * i[3], i[2], i[3]]) for i in rects]
return get
if CHW:
return rects
pass
def calc_iou(*rects, **options):
"""
    calculating the IoU of more than two rects is known to be buggy (see the fixme below)
    calculate the IoU of a group of rects
    :param rects: a group of rects
    :param options:
    :keyword TP : rects are given by two points (top-left and bottom-right) [left_y, left_x, right_y, right_x]
    :keyword LHW : rects are given by the top-left point plus height and width [left_y, left_x, height, width]
    :keyword CHW : rects are given by the center point plus height and width [center_y, center_x, height, width]
:return:
"""
    # fixme: calculating the IoU of more than two rects still has an error
TP = options.pop('TP', False)
LHW = options.pop('LHW', False)
CHW = options.pop('CHW', False)
rects = __to_chw(*rects, TP=TP, LHW=LHW, CHW=CHW)
inter_rect = rects[0]
iou = None
for i in range(1, len(rects)):
iou, inter_rect_new = __iou_chw(inter_rect, rect2=rects[i])
if None in inter_rect_new:
return iou
else:
inter_rect = inter_rect_new
return iou
pass
"""
Implement calc_iou_matrix_thw:
base on center_y_x and height width, there is algorithm:
max_top: max(-0.5 * group1_h, group_2_y - 0.5 * group2_h)
min_bottom: min(0.5 * group1_h, group_2_y + 0.5 * group2_h)
min_right: min(0.5 * group1_w, group2_x + 0.5 * group2_w)
max_left: min(-0.5 * group1_w, group2_x - 0.5 * group2_w)
use[[center_y, center_x, height, width], ....] as an example:
in order to create the IoU matrix
we should create group1_Box_M IoU group2_Box_N
we make group1 data repeat n cross row
just like:
-0.5 * group1_h:
[[group1_box_1_top_y, ..n.., group1_box_1_top_y],
[group1_box_2_top_y, ..n.., group1_box_2_top_y],
:
m
:,
[group1_box_m_top_y, ..n.., group1_box_m_top_y],
]
we make group2 data repeat m cross col
and group2 just make more one process transpose
and then use the algorithm
get then max_top, min_bottom, min_right, max_left Matrix
and then make element which lower than zeros zeroed
finally generate a m x n IoU matrix
"""
def calc_iou_matrix_ohw(
group1,
group2,
group1_h_index=2,
group1_w_index=3,
group2_y_index=0,
group2_x_index=1,
group2_h_index=2,
group2_w_index=3
):
"""
this function is for standard group1 IoU random group2
which means that the box in the group1 have the same center_y_x, and group2 carry the data
[offset_y, offset_x, height, width]๏ผ offset means the offset pixel to the standard box center
calculate the IoU matrix base on group1 and group2 which carry the parameter top_y, top_x, height and width
:param group1: [[height, width], ....] according to default group1_*_index
:param group2: [[offset_y, offset_x, height, width], ...] according to default group2_*_index
:param group1_h_index: parameter represent the index of h in group1
:param group1_w_index: parameter represent the index of 2 in group1
:param group2_y_index: parameter represent the index of y in group2
:param group2_x_index: parameter represent the index of x in group2
:param group2_h_index: parameter represent the index of h in group2
:param group2_w_index: parameter represent the index of w in group2
:return:
group1_box_0 iou group2_box_0, group1_box_0 iou group2_box_1, ..., group1_box_0 iou group2_box_(n - 1), group1_box_0 iou group2_box_n
, , , ,
group1_box_1 iou group2_box_0, ... , ..., ... , group1_box_1 iou group2_box_n
,
... ...
,
... ...
,
... ...
,
group1_box_m iou group2_box_0, ... , ..., ... , group1_box_m iou group2_box_n
"""
g_1_matrix = np.array(group1)
g_2_matrix = np.array(group2)
group_1_amount = len(g_1_matrix)
group_2_amount = len(g_2_matrix)
g_1_area_cross_row = (g_1_matrix[:, group1_h_index] * g_1_matrix[:, group1_w_index]).repeat(
group_2_amount).reshape([group_1_amount, group_2_amount])
g_2_area_cross_col = (g_2_matrix[:, group2_h_index] * g_2_matrix[:, group2_w_index]).repeat(
group_1_amount).reshape(group_2_amount, group_1_amount).T
g_1_top_y_matrix_cross_row = (-0.5 * g_1_matrix[:, group1_h_index]).repeat(
group_2_amount).reshape([group_1_amount, group_2_amount])
g_1_bottom_y_matrix_cross_row = (0.5 * g_1_matrix[:, group1_h_index]).repeat(
group_2_amount).reshape([group_1_amount, group_2_amount])
g_1_right_x_matrix_cross_row = (0.5 * g_1_matrix[:, group1_w_index]).repeat(
group_2_amount).reshape([group_1_amount, group_2_amount])
g_1_left_x_matrix_cross_row = (-0.5 * g_1_matrix[:, group1_w_index]).repeat(
group_2_amount).reshape([group_1_amount, group_2_amount])
g_2_top_y_matrix_cross_col = (g_2_matrix[:, group2_y_index] - 0.5 * g_2_matrix[:, group2_h_index]).repeat(
group_1_amount).reshape([group_2_amount, group_1_amount]).T
g_2_bottom_y_matrix_cross_col = (g_2_matrix[:, group2_y_index] + 0.5 * g_2_matrix[:, group2_h_index]).repeat(
group_1_amount).reshape(group_2_amount, group_1_amount).T
g_2_right_x_matrix_cross_col = (g_2_matrix[:, group2_x_index] + 0.5 * g_2_matrix[:, group2_w_index]).repeat(
group_1_amount).reshape([group_2_amount, group_1_amount]).T
    g_2_left_x_matrix_cross_col = (g_2_matrix[:, group2_x_index] - 0.5 * g_2_matrix[:, group2_w_index]).repeat(
group_1_amount).reshape(group_2_amount, group_1_amount).T
# calculate the overlap box
max_top = np.max(np.concatenate((np.expand_dims(g_1_top_y_matrix_cross_row, -1),
np.expand_dims(g_2_top_y_matrix_cross_col, -1)), -1), -1)
min_bottom = np.min(np.concatenate((np.expand_dims(g_1_bottom_y_matrix_cross_row, -1),
np.expand_dims(g_2_bottom_y_matrix_cross_col, -1)), -1), -1)
min_right = np.min(np.concatenate((np.expand_dims(g_1_right_x_matrix_cross_row, -1),
np.expand_dims(g_2_right_x_matrix_cross_col, -1)), -1), -1)
max_left = np.max(np.concatenate((np.expand_dims(g_1_left_x_matrix_cross_row, -1),
np.expand_dims(g_2_left_x_matrix_cross_col, -1)), -1), -1)
# calculate cross area
crossed_height = min_bottom - max_top
crossed_width = min_right - max_left
# apply ReLU
crossed_height[crossed_height < 0] = 0
crossed_width[crossed_width < 0] = 0
iou_area = crossed_height * crossed_width
iou = iou_area / (g_1_area_cross_row + g_2_area_cross_col - iou_area)
return iou
pass
"""
Implement calc_iou_matrix_thw:
base on center_y_x and height width, there is algorithm:
max_top: max(group1_y, group_2_y)
min_bottom: min(group1_y + group1_h, group_2_y + group2_h)
min_right: min(group_1_x + group1_w, group2_x + group2_w)
max_left: min(group_1_x, group2_x)
use[[center_y, center_x, height, width], ....] as an example:
in order to create the IoU matrix
we should create group1_Box_M IoU group2_Box_N
we make group1 data repeat n cross row
just like:
group1_y:
[[group1_box_1_top_y, ..n.., group1_box_1_top_y],
[group1_box_2_top_y, ..n.., group1_box_2_top_y],
:
m
:,
[group1_box_m_top_y, ..n.., group1_box_m_top_y],
]
we make group2 data repeat m cross col
and group2 just make more one process transpose
and then use the algorithm
get then max_top, min_bottom, min_right, max_left Matrix
and then make element which lower than zeros zeroed
finally generate a m x n IoU matrix
"""
def calc_iou_matrix_thw(
group1,
group2,
group1_y_index=0,
group1_x_index=1,
group1_h_index=2,
group1_w_index=3,
group2_y_index=0,
group2_x_index=1,
group2_h_index=2,
group2_w_index=3
):
"""
calculate the IoU matrix base on group1 and group2 which carry the parameter top_y, top_x, height and width
:param group1: [[top_y, top_x, height, width], ....] according to default group1_*_index
:param group2: [[top_y, top_x, height, width], ...] according to default group2_*_index
:param group1_y_index: parameter represent the index of y in group1
:param group1_x_index: parameter represent the index of x in group1
:param group1_h_index: parameter represent the index of h in group1
:param group1_w_index: parameter represent the index of 2 in group1
:param group2_y_index: parameter represent the index of y in group2
:param group2_x_index: parameter represent the index of x in group2
:param group2_h_index: parameter represent the index of h in group2
:param group2_w_index: parameter represent the index of w in group2
:return:
group1_box_0 iou group2_box_0, group1_box_0 iou group2_box_1, ..., group1_box_0 iou group2_box_(n - 1), group1_box_0 iou group2_box_n
, , , ,
group1_box_1 iou group2_box_0, ... , ..., ... , group1_box_1 iou group2_box_n
,
... ...
,
... ...
,
... ...
,
group1_box_m iou group2_box_0, ... , ..., ... , group1_box_m iou group2_box_n
"""
g_1_matrix = np.array(group1)
g_2_matrix = np.array(group2)
group_1_amount = len(g_1_matrix)
group_2_amount = len(g_2_matrix)
g_1_area_cross_row = (g_1_matrix[:, group1_h_index] * g_1_matrix[:, group1_w_index]).repeat(
group_2_amount).reshape([group_1_amount, group_2_amount])
g_2_area_cross_col = (g_2_matrix[:, group2_h_index] * g_2_matrix[:, group2_w_index]).repeat(
group_1_amount).reshape(group_2_amount, group_1_amount).T
g_1_bottom_y_matrix_cross_row = (g_1_matrix[:, group1_y_index] + g_1_matrix[:, group1_h_index]).repeat(
group_2_amount).reshape([group_1_amount, group_2_amount])
g_1_top_y_matrix_cross_row = (g_1_matrix[:, group1_y_index]).repeat(
group_2_amount).reshape([group_1_amount, group_2_amount])
g_1_right_x_matrix_cross_row = (g_1_matrix[:, group1_x_index] + g_1_matrix[:, group1_w_index]).repeat(
group_2_amount).reshape([group_1_amount, group_2_amount])
g_1_left_x_matrix_cross_row = (g_1_matrix[:, group1_x_index]).repeat(
group_2_amount).reshape([group_1_amount, group_2_amount])
g_2_bottom_y_matrix_cross_col = (g_2_matrix[:, group2_y_index] + g_2_matrix[:, group2_h_index]).repeat(
group_1_amount).reshape([group_2_amount, group_1_amount]).T
g_2_top_y_matrix_cross_col = (g_2_matrix[:, group2_y_index]).repeat(
group_1_amount).reshape(group_2_amount, group_1_amount).T
g_2_right_x_matrix_cross_col = (g_2_matrix[:, group2_x_index] + g_2_matrix[:, group2_w_index]).repeat(
group_1_amount).reshape([group_2_amount, group_1_amount]).T
g_2_left_x_matrix_cross_col = (g_2_matrix[:, group2_x_index]).repeat(
group_1_amount).reshape(group_2_amount, group_1_amount).T
# calculate the overlap box
min_bottom = np.min(
np.concatenate(
(np.expand_dims(g_1_bottom_y_matrix_cross_row, -1),
np.expand_dims(g_2_bottom_y_matrix_cross_col, -1)),
-1
),
-1
)
max_top = np.max(
np.concatenate(
(np.expand_dims(g_1_top_y_matrix_cross_row, -1),
np.expand_dims(g_2_top_y_matrix_cross_col, -1)),
-1
),
-1
)
min_right = np.min(
np.concatenate(
(np.expand_dims(g_1_right_x_matrix_cross_row, -1),
np.expand_dims(g_2_right_x_matrix_cross_col, -1)),
-1
),
-1
)
max_left = np.max(
np.concatenate(
(np.expand_dims(g_1_left_x_matrix_cross_row, -1),
np.expand_dims(g_2_left_x_matrix_cross_col, -1)),
-1
),
-1
)
# calculate cross area
crossed_height = min_bottom - max_top
crossed_width = min_right - max_left
# apply ReLU
crossed_height[crossed_height < 0] = 0
crossed_width[crossed_width < 0] = 0
iou_area = crossed_height * crossed_width
iou = iou_area / (g_1_area_cross_row + g_2_area_cross_col - iou_area)
return iou
pass
"""
Implement calc_iou_matrix_chw:
base on center_y_x and height width, there is algorithm:
max_top: max(group1_y - 0.5 * group1_h, group_2_y - 0.5 * group2_h)
min_bottom: min(group1_y + 0.5 * group1_h, group_2_y + 0.5 * group2_h)
min_right: min(group_1_x + 0.5 * group1_w, group2_x + 0.5 * group2_w)
max_left: min(group_1_x - 0.5 * group1_w, group2_x - 0.5 * group2_w)
use[[center_y, center_x, height, width], ....] as an example:
in order to create the IoU matrix
we should create group1_Box_M IoU group2_Box_N
we make group1 data repeat n cross row
just like:
group1_y - 0.5 * group1_h:
[[group1_box_1_top_y, ..n.., group1_box_1_top_y],
[group1_box_2_top_y, ..n.., group1_box_2_top_y],
:
m
:,
[group1_box_m_top_y, ..n.., group1_box_m_top_y],
]
we make group2 data repeat m cross col
and group2 just make more one process transpose
and then use the algorithm
get then max_top, min_bottom, min_right, max_left Matrix
and then make element which lower than zeros zeroed
finally generate a m x n IoU matrix
"""
def calc_iou_matrix_chw(
group1,
group2,
group1_y_index=0,
group1_x_index=1,
group1_h_index=2,
group1_w_index=3,
group2_y_index=0,
group2_x_index=1,
group2_h_index=2,
group2_w_index=3
):
"""
calculate the IoU matrix base on group1 and group2 which carry the parameter center_y, center_x, height and width
:param group1: [[center_y, center_x, height, width], ....] according to default group1_*_index
:param group2: [[center_y, center_x, height, width], ...] according to default group2_*_index
:param group1_y_index: parameter represent the index of y in group1
:param group1_x_index: parameter represent the index of x in group1
:param group1_h_index: parameter represent the index of h in group1
:param group1_w_index: parameter represent the index of 2 in group1
:param group2_y_index: parameter represent the index of y in group2
:param group2_x_index: parameter represent the index of x in group2
:param group2_h_index: parameter represent the index of h in group2
:param group2_w_index: parameter represent the index of w in group2
:return:
group1_box_0 iou group2_box_0, group1_box_0 iou group2_box_1, ..., group1_box_0 iou group2_box_(n - 1), group1_box_0 iou group2_box_n
, , , ,
group1_box_1 iou group2_box_0, ... , ..., ... , group1_box_1 iou group2_box_n
,
... ...
,
... ...
,
... ...
,
group1_box_m iou group2_box_0, ... , ..., ... , group1_box_m iou group2_box_n
"""
g_1_matrix = np.array(group1)
g_2_matrix = np.array(group2)
group_1_amount = len(g_1_matrix)
group_2_amount = len(g_2_matrix)
g_1_area_cross_row = (g_1_matrix[:, group1_h_index] * g_1_matrix[:, group1_w_index]).repeat(
group_2_amount).reshape([group_1_amount, group_2_amount])
g_2_area_cross_col = (g_2_matrix[:, group2_h_index] * g_2_matrix[:, group2_w_index]).repeat(
group_1_amount).reshape(group_2_amount, group_1_amount).T
g_1_bottom_y_matrix_cross_row = (g_1_matrix[:, group1_y_index] + 0.5 * g_1_matrix[:, group1_h_index]).repeat(
group_2_amount).reshape([group_1_amount, group_2_amount])
g_1_top_y_matrix_cross_row = (g_1_matrix[:, group1_y_index] - 0.5 * g_1_matrix[:, group1_h_index]).repeat(
group_2_amount).reshape([group_1_amount, group_2_amount])
g_1_right_x_matrix_cross_row = (g_1_matrix[:, group1_x_index] + 0.5 * g_1_matrix[:, group1_w_index]).repeat(
group_2_amount).reshape([group_1_amount, group_2_amount])
g_1_left_x_matrix_cross_row = (g_1_matrix[:, group1_x_index] - 0.5 * g_1_matrix[:, group1_w_index]).repeat(
group_2_amount).reshape([group_1_amount, group_2_amount])
g_2_bottom_y_matrix_cross_col = (g_2_matrix[:, group2_y_index] + 0.5 * g_2_matrix[:, group2_h_index]).repeat(
group_1_amount).reshape([group_2_amount, group_1_amount]).T
g_2_top_y_matrix_cross_col = (g_2_matrix[:, group2_y_index] - 0.5 * g_2_matrix[:, group2_h_index]).repeat(
group_1_amount).reshape([group_2_amount, group_1_amount]).T
g_2_right_x_matrix_cross_col = (g_2_matrix[:, group2_x_index] + 0.5 * g_2_matrix[:, group2_w_index]).repeat(
group_1_amount).reshape([group_2_amount, group_1_amount]).T
g_2_left_x_matrix_cross_col = (g_2_matrix[:, group2_x_index] - 0.5 * g_2_matrix[:, group2_w_index]).repeat(
group_1_amount).reshape([group_2_amount, group_1_amount]).T
# calculate the overlap box
min_bottom = np.min(np.concatenate((np.expand_dims(g_1_bottom_y_matrix_cross_row, -1), np.expand_dims(g_2_bottom_y_matrix_cross_col, -1)), -1), -1)
max_top = np.max(np.concatenate((np.expand_dims(g_1_top_y_matrix_cross_row, -1), np.expand_dims(g_2_top_y_matrix_cross_col, -1)), -1), -1)
min_right = np.min(np.concatenate((np.expand_dims(g_1_right_x_matrix_cross_row, -1), np.expand_dims(g_2_right_x_matrix_cross_col, -1)), -1), -1)
max_left = np.max(np.concatenate((np.expand_dims(g_1_left_x_matrix_cross_row, -1), np.expand_dims(g_2_left_x_matrix_cross_col, -1)), -1), -1)
# calculate cross area
crossed_height = min_bottom - max_top
crossed_width = min_right - max_left
# apply ReLU
crossed_height[crossed_height < 0] = 0
crossed_width[crossed_width < 0] = 0
iou_area = crossed_height * crossed_width
iou = iou_area / (g_1_area_cross_row + g_2_area_cross_col - iou_area)
return iou
pass
def __test_calc_iou():
rect1 = [0, 0, 10, 10]
rect2 = [5, 5, 10, 10]
rect3 = [75, 75, 100, 100]
iou = calc_iou(rect1, rect2, LHW=True)
try:
assert iou == 25.0 / 175
return True
except Exception:
print(Fore.RED + '>>:should be {0}, but {1}'.format(25.0 / 175, iou))
return False
pass
if __name__ == '__main__':
# todo:test iou calc
try:
assert __test_calc_iou()
print(Fore.GREEN + '-------------test_calc_iou: pass-------------')
except Exception:
print(Fore.RED + '-------------test_calc_iou: failed-------------')
pass
pass
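# Worked example (illustrative sketch, not part of the original module): two boxes in
# [center_y, center_x, height, width] form; the 5x5 overlap over a 175-pixel union should
# give an IoU of roughly 25 / 175 for the first pair and 0 for the second.
if __name__ == '__main__':
    _g1 = [[5.0, 5.0, 10.0, 10.0], [50.0, 50.0, 10.0, 10.0]]
    _g2 = [[10.0, 10.0, 10.0, 10.0]]
    print(calc_iou_matrix_chw(_g1, _g2))  # expected roughly [[0.1429], [0.0]]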
```
#### File: Putil/data/cifar.py
```python
from sys import version_info
import numpy as np
from enum import Enum
import os
if version_info.major == 3:
import pickle as pickle
elif version_info.major == 2:
import cPickle as pickle
import Putil.data.common_data as pd
class Cifar(pd.CommonDataWithAug):
class Level(Enum):
SuperClass=0
FineClass=1
def __init__(
self,
stage,
root_dir,
use_rate,
sub_data,
remain_strategy,
level,
):
pd.CommonDataWithAug.__init__(self, use_rate=use_rate, sub_data=sub_data, remain_strategy=remain_strategy)
self._root_dir = root_dir
self._level = level
self._stage = stage
self._python_root_path = os.path.join(self._root_dir, 'cifar-100-python')
self._train_file_path = os.path.join(self._python_root_path, 'train')
self._val_file_path = os.path.join(self._python_root_path, 'val')
self._test_file_path = os.path.join(self._python_root_path, 'test')
self._meta_file_path = os.path.join(self._python_root_path, 'meta')
self._data_path = self._test_file_path
with open(self._data_path, 'rb') as fp:
self._dict = pickle.load(fp, encoding='bytes')
self._data_field = list(range(0, self._dict[b'data'].shape[0]))
with open(self._meta_file_path, 'rb') as fp:
self._meta = pickle.load(fp, encoding='bytes')
        self._class_to_name = self._meta[b'coarse_label_names'] if level == Cifar100.Level.SuperClass else self._meta[b'fine_label_names']
        self._label_dict = self._dict[b'coarse_labels' if level == Cifar100.Level.SuperClass else b'fine_labels']
pass
def _generate_from_origin_index(self, index):
data = self._dict[b'data'][index, :]
data = np.reshape(data, [3, 32, 32])
label = self._label_dict[index]
return data, label,
def objname(self, index):
return str(self._class_to_name[index], encoding='utf8')
##@brief
# @note
class Cifar100(Cifar):
##@brief
# @note
# @param[in]
# @param[in]
# @return
def __init__(
self,
stage,
root_dir,
use_rate,
sub_data,
remain_strategy,
level,
):
Cifar.__init__(self, stage=stage, root_dir=root_dir, use_rate=use_rate, sub_data=sub_data, remain_strategy=remain_strategy, level=level)
pass
def _restart_process(self, restart_param):
'''
process while restart the data, process in the derived class and called by restart_data
restart_param: the argv which the derived class need, dict
'''
pass
def _inject_operation(self, inject_param):
'''
operation while the epoch_done is False, process in the derived class and called by inject_operation
injecct_param: the argv which the derived class need, dict
'''
pass
pass
```
#### File: Putil/data/convert_to_input.py
```python
from abc import ABCMeta, abstractmethod
from enum import Enum
class IOConvertor(metaclass=ABCMeta):
class IODirection(Enum):
InputConvertion = 0
OutputConvertion = 1
Unknow = 2
pass
@abstractmethod
def __call__(self, *args):
'''
@brief call this obj return the data for input
@note
@param[in] args
the input data
'''
def __init__(self, io):
'''
@brief
@note
@param[in] io
IOConvertor.IODirection, represent the direction of the data
InputConvertion: change the data from GeneralData(which could be generated by DataCommon) to NetIn
OutputConvertion: change the data from NetOut to GeneralData(which could be used by DataCommon)
Unknow: do nothing
'''
self._io = io
pass
pass
ConvertToInput = IOConvertor
class IOConvertorNoOp(ConvertToInput):
def __init__(self):
ConvertToInput.__init__(self, IOConvertor.IODirection.Unknow)
def __call__(self, *args):
'''
@brief call this obj return the data for input
@note
@param[in] args
the input data
'''
return args
pass
ConvertToInputNoOp = IOConvertorNoOp
Encode = IOConvertor
```
#### File: Putil/data/io_convertor_with_torch_module.py
```python
import torch
from torch.nn import Module
from abc import abstractmethod
import Putil.base.logger as plog
from Putil.data.io_convertor import IOConvertor
class IOConvertorModule(IOConvertor, Module):
def __init__(self, io):
Module.__init__(self)
IOConvertor.__init__(self, io)
pass
def __call__(self, *args):
return self.forward(*args)
@abstractmethod
def forward(self, *args):
pass
```
#### File: data/vision_common_convert/image_convertor.py
```python
from enum import Enum
import cv2
import numpy as np
import Putil.base.logger as plog
import Putil.data.convert_to_input as convert_to_input
import Putil.function.gaussian as Gaussion
bbox_convertor_logger = plog.PutilLogConfig('bbox_convertor').logger()
bbox_convertor_logger.setLevel(plog.DEBUG)
BBoxConvertToCenterBoxLogger = bbox_convertor_logger.getChild('BBoxConvertToCenterBox')
BBoxConvertToCenterBoxLogger.setLevel(plog.DEBUG)
class ImageConvertToInputMethod(convert_to_input.ConvertToInput):
def __init__(self):
pass
def __call__(self, *args):
image = args[0]
image_id = args[1]
return image, np.array(image_id)
```
#### File: data/vision_data_aug/pepperand_salt_augment.py
```python
import Putil.data.augment as paug
import numpy as np
class PepperandSaltAugment(paug.Augment):
def __init__(self):
paug.Augment.__init__(self)
pass
def augment(self, data):
        '''
        generate one noise field per (mu, sigma) pair in self._config and concatenate
        it with the original data along axis 0
        (note: despite the class name, the generated noise is Gaussian)
        '''
dc = []
dc.append(data)
for mu in self._config['mu']:
for sigma in self._config['sigma']:
noise = np.random.normal(mu, sigma, data.size)
noise = np.reshape(noise, data.shape)
dc.append(noise)
pass
pass
ret = np.concatenate(dc, axis=0)
return ret
pass
pass
```
#### File: deep_learning/base/aug_factory.py
```python
import Putil.base.logger as plog
logger = plog.PutilLogConfig('aug_factory').logger()
logger.setLevel(plog.DEBUG)
import Putil.demo.deep_learning.base.aug as standard
from util import aug as project
def aug_factory(args, property_type='', **kwargs):
'''
@brief
@note
@param[in]
args
aug_source: the aug source follow the format source_one-source_two-source-three-...
aug_name: the aug name follow the format aug_one-aug_two-aug_three-...
'''
model = '{}.{}'.format(args.aug_sources[property_type], args.aug_names[property_type])
logger.info('aug: {}|{}'.format(model, property_type))
return eval('{}(args, property_type)'.format(model))
def aug_arg_factory(parser, source, name, property_type='', **kwargs):
#import pdb; pdb.set_trace()
model = '{}.{}Arg'.format(source, name)
logger.info('aug_arg: {}|{}'.format(model, property_type))
eval('{}(parser, property_type)'.format(model))
```
#### File: deep_learning/base/auto_stop.py
```python
from abc import abstractmethod, ABCMeta
import Putil.base.logger as plog
logger = plog.PutilLogConfig('auto_stop').logger()
logger.setLevel(plog.DEBUG)
from Putil.trainer.auto_stop import auto_stop as AutoStop
from Putil.trainer.auto_stop import AutoStop as _DefaultAutoStop
def common_auto_stop_arg(parser, property_type='', **kwargs):
pass
def DefaultAutoStop(args, property_type='', **kwargs):
'''
@param[in] args
args.auto_stop_patience
args.auto_stop_mode
'''
def generate_default_auto_stop():
return _DefaultAutoStop.generate_AutoStop_from_args(args, property_type, **kwargs)
return generate_default_auto_stop
def DefaultAutoStopArg(parser, property_type='', **kwargs):
_DefaultAutoStop.generate_args(parser, property_type, **kwargs)
```
#### File: base/backbone_impl/backbone.py
```python
def common_backbone_arg(parser, property_type='', **kwargs):
parser.add_argument('--{}backbone_arch'.format(property_type), type=str, default='', action='store', \
help='specify the arch of the backbone, such 19 for backbone_name with vgg')
parser.add_argument('--{}backbone_downsample_rate'.format(property_type), type=int, default=None, action='store', \
help='specify the downsample rate for the backbone')
parser.add_argument('--{}backbone_pretrained'.format(property_type), default=False, action='store_true', \
help='load the pretrained backbone weight or not')
parser.add_argument('--{}backbone_weight_path'.format(property_type), type=str, default='', action='store', \
help='specify the pre-trained model for the backbone, use while in finetune mode, '\
'if the weight is specify, the backbone weight would be useless')
pass
class Backbone:
##@brief
# @param[in] args.backbone_pretrained
def __init__(self, args, property_type='', **kwargs):
self._args = args
self._backbone_pretrained = eval('args.{}backbone_pretrained'.format(property_type))
self._backbone_arch = eval('args.{}backbone_arch'.format(property_type))
self._backbone_weight_path = eval('args.{}backbone_weight_path'.format(property_type))
pass
def get_backbone_pretrained(self):
return self._backbone_pretrained
backbone_pretrained = property(get_backbone_pretrained)
def get_backbone_arch(self):
return self._backbone_arch
backbone_arch = property(get_backbone_arch)
def get_backbone_weight_path(self):
return self._backbone_weight_path
backbone_weight_path = property(get_backbone_weight_path)
pass
##@brief base common Backbone for 2D data
# @
class VisionBackbone(Backbone):
##@brief
# @param[in] args for the Backbone
# @param[in] args.backbone_downsample_rate specify the downsample rate for the backbone
def __init__(self, args, property_type='', **kwargs):
Backbone.__init__(self, args, property_type, **kwargs)
self._backbone_downsample_rate = eval('args.{}backbone_downsample_rate'.format(property_type))
pass
pass
##@brief base common Backbone for 2D data
# @
class DDBackbone(VisionBackbone):
##@brief
# @param[in] args for the Backbone
# @param[in] args.backbone_downsample_rate specify the downsample rate for the backbone
def __init__(self, args, property_type='', **kwargs):
VisionBackbone.__init__(self, args, property_type, **kwargs)
pass
pass
class DDBackboneWithResolution(DDBackbone):
def __init__(self, args, property_type='', **kwargs):
DDBackbone.__init__(self, args, property_type, **kwargs)
self._resolution_output = dict()
pass
def get_resolution_output(self):
return self._resolution_output
resolution_output = property(get_resolution_output)
```
#### File: deep_learning/base/data_sampler_factory.py
```python
import Putil.base.logger as plog
logger = plog.PutilLogConfig('data_sampler_factory').logger()
logger.setLevel(plog.DEBUG)
from Putil.demo.deep_learning.base import data_sampler as standard
from util import data_sampler as project
def data_sampler_factory(args, data_sampler_source, data_sampler_name, property_type='', **kwargs):
if args.framework == 'torch':
pass
else:
raise NotImplementedError('data_loader of framework: {} is not implemented'.format(args.framework))
data_sampler = '{}.{}'.format(data_sampler_source, data_sampler_name)
return eval('{}(args, property_type, **kwargs)'.format(data_sampler))
def data_sampler_arg_factory(parser, source, name, property_type='', **kwargs):
arg = '{}.{}Arg'.format(source, name)
logger.info('data_sampler_arg: {}'.format(arg))
return eval('{}(parser, property_type, **kwargs)'.format(arg))
```
#### File: deep_learning/base/dataset_factory.py
```python
import Putil.base.logger as plog
logger = plog.PutilLogConfig('data_factory').logger()
logger.setLevel(plog.DEBUG)
import Putil.demo.deep_learning.base.dataset as standard
from util import dataset as project
def dataset_factory(args, property_type='', **kwargs):
'''
@brief
@note
@param[in]
args
data_name: the main type of the data
data_source: from standard or project
'''
model = '{}.{}'.format(args.dataset_sources[property_type], args.dataset_names[property_type])
logger.info('dataset: {}'.format(model))
return eval('{}(args, property_type, **kwargs)'.format(model))
def dataset_arg_factory(parser, source, name, property_type='', **kwargs):
arg = '{}.{}Arg'.format(source, name)
logger.info('dataset_arg: {}'.format(arg))
return eval('{}(parser, property_type, **kwargs)'.format(arg))
```
#### File: base/decode/center_net.py
```python
from ..decode import Decode
from torch.nn import Module
class CenterNetDecode(Decode, Module):
def __init__(self, args):
Decode.__init__(self, args)
Module.__init__(self)
self._threshold = args.center_net_decode_threshold
pass
def forward(self, x):
pass
pass
```
#### File: deep_learning/base/decode.py
```python
import copy
from abc import abstractmethod
from torch.nn import Module
##@brief
# @note
class Decode:
'''
@brief
    @note decodes the model output and produces results that can be used directly
'''
def __init__(self, args, property_type='', **kwargs):
self._fit_to_decode_input = kwargs.get('fit_to_indicator_input', None)
pass
##@brief decode the data
    # @note when output is None, the decoding is performed from datas alone
    # @param[in] datas the datas produced by the dataset
    # @param[in] output the output of the model
def __call__(self, datas, output=None):
kargs = self._fit_to_decode_input(datas, output) if self._fit_to_decode_input is not None else (datas, output)
return self._call_impl(*kargs)
@abstractmethod
def _call_impl(self, *kargs, **kwargs):
pass
pass
def CenterNetDecode(args):
pass
def CenterNetDecodeArg(parser, property_type='', **kwargs):
parser.add_argument('--{}center_net_decode_threshold'.format(property_type), type=float, default=0.1, action='store', \
)
pass
class _DefaultDecode(Decode, Module):
def __init__(self, args, property_type='', **kwargs):
Decode.__init__(self, args, property_type, **kwargs)
Module.__init__(self)
pass
def _call_impl(self, *kargs, **kwargs):
if kargs[1] is None:
return kargs[0]
else:
return kargs[1]
pass
pass
def DefaultDecode(args, property_type='', **kwargs):
temp_args = copy.deepcopy(args)
def generate_default_decode():
        return _DefaultDecode(temp_args, property_type, **kwargs)
return generate_default_decode
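# Usage sketch (illustrative, not part of the original module): the factory returns a builder
# so that construction can be deferred, e.g.
#   build_decode = DefaultDecode(args)
#   decode = build_decode()
#   result = decode(datas)           # datas are passed through when no model output is given
#   result = decode(datas, output)   # the model output is returned when it is provided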
def DefaultDecodeArg(parser, property_type='', **kwargs):
pass
```
#### File: deep_learning/base/encode_factory.py
```python
import Putil.base.logger as plog
logger = plog.PutilLogConfig('encode_factory').logger()
logger.setLevel(plog.DEBUG)
import Putil.demo.deep_learning.base.encode as standard
from util import encode as project
def encode_factory(args, property_type='', **kwargs):
'''
@brief
@note
@param[in]
args
encode_name: the main type of the encode
encode_source: from standard or project
'''
if args.framework == 'torch':
pass
else:
raise NotImplementedError('encode of framework: {} is not implemented'.format(args.framework))
model = '{0}.{1}'.format(args.encode_sources[property_type], args.encode_names[property_type])
logger.info('encode: {}|{}'.format(model, property_type))
return eval('{}(args, property_type, **kwargs)'.format(model))
def encode_arg_factory(parser, source, name, property_type='', **kwargs):
arg = '{}.{}Arg'.format(source, name)
logger.info('encode_arg: {}'.format(arg))
return eval('{}(parser, property_type, **kwargs)'.format(arg))
```
#### File: deep_learning/base/factory.py
```python
class Factory:
def __init__(self):
pass
def arg_factory():
print('r')
pass
arg_factory = staticmethod(arg_factory)
class t(Factory):
def __init__(self):
Factory.__init__(self)
pass
t.arg_factory()
```
#### File: deep_learning/base/horovod.py
```python
def horovod(framework):
if framework == 'torch':
import horovod.torch as hvd
return hvd
elif framework == 'tf':
import horovod.tensorflow as hvd
return hvd
pass
def horovod_arg(parser):
parser.add_argument('--hvd_reduce_mode', type=str, action='store', default='Average', \
help='the reduce mode for horovod, supports Average,Sum and AdaSum')
parser.add_argument('--hvd_compression_mode', type=str, action='store', default='none', \
help='the compression mode for horovod, supports none, mro and fp16')
pass
def horovod_reduce_mode_is_adasum(args):
return args.hvd_reduce_mode == 'AdaSum'
def horovod_reduce_mode_is_average(args):
return args.hvd_reduce_mode == 'Average'
def horovode_reduce_mode_is_sum(args):
return args.hvd_reduce_mode == 'Sum'
```
#### File: deep_learning/base/indicator_factory.py
```python
import Putil.base.logger as plog
logger = plog.PutilLogConfig('indicator_factory').logger()
logger.setLevel(plog.DEBUG)
import Putil.demo.deep_learning.base.indicator as standard
from util import indicator as project
def indicator_factory(args, source, name, property_type='', **kwargs):
'''
@brief
@note
@param[in]
args
indicator_name: the main type of the indicator
indicator_source: from standard or project
'''
if args.framework == 'torch':
pass
else:
raise NotImplementedError('indicator of framework: {} is not implemented'.format(args.framework))
model = '{0}.{1}'.format(source, name)
return eval('{}(args, property_type, **kwargs)'.format(model))
##@brief
# @note
# @param[in] source
def indicator_arg_factory(parser, source, name, property_type='', **kwargs):
arg = '{}.{}Arg'.format(source, name)
logger.info('indicator_arg: {}'.format(arg))
return eval('{}(parser, property_type, **kwargs)'.format(arg))
```
#### File: deep_learning/base/indicator_statistic.py
```python
from abc import abstractmethod
from torch.nn import Module
import copy
class IndicatorStatistic:
'''
@brief
    @note receives the per-step decode output of train or evaluate during the training stage,
    and produces the overall statistics after the last step
'''
def __init__(self, args, property_type='', **kwargs):
pass
@abstractmethod
def add_step_output(self, *input):
'''
@brief
        @note the decode output of every step is passed into this function for statistical processing
'''
pass
@abstractmethod
def statistic_out(self):
'''
@brief
        @note after the last step has run and its decode output has been passed to add_step_output,
        this function can be called to compute the statistics and return an object that implements a comparison function
'''
pass
pass
class _DefaultIndicatorStatistic(IndicatorStatistic):
def __init__(self, args, property_type='', **kwargs):
IndicatorStatistic.__init__(self, args, property_type, **kwargs)
pass
pass
def DefaultIndicatorStatistic(args, property_type='', **kwargs):
temp_args = copy.deepcopy(args)
def generate_default_indicator_statistic():
return _DefaultIndicatorStatistic(temp_args, property_type, **kwargs)
return generate_default_indicator_statistic
def DefaultIndicatorStatisticArg(parser, property_type='', **kwargs):
pass
```
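The contract above is easiest to see with a concrete subclass. The sketch below is hypothetical (not part of the repo): it accumulates classification hits per step and reports accuracy from `statistic_out`; the `(prediction, target)` step layout and the import path are assumptions.

```python
# Hypothetical accuracy statistic illustrating the add_step_output/statistic_out contract.
import torch
from Putil.demo.deep_learning.base.indicator_statistic import IndicatorStatistic  # assumed import path

class AccuracyStatistic(IndicatorStatistic):
    def __init__(self, args, property_type='', **kwargs):
        IndicatorStatistic.__init__(self, args, property_type, **kwargs)
        self._hit = 0
        self._total = 0

    def add_step_output(self, *input):
        prediction, target = input            # assumed step layout: (logits, labels)
        self._hit += (prediction.argmax(dim=1) == target).sum().item()
        self._total += target.numel()

    def statistic_out(self):
        return self._hit / max(self._total, 1)  # a plain float stands in for the comparable object
```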
#### File: deep_learning/base/loss_factory.py
```python
import Putil.base.logger as plog
logger = plog.PutilLogConfig('loss_factory').logger()
logger.setLevel(plog.DEBUG)
import Putil.demo.deep_learning.base.loss as standard
from util import loss as project
def loss_factory(args, source, name, property_type='', **kwargs):
'''
@brief
@note
@param[in]
args
loss_name: the main type of the loss
loss_source: from standard or project
'''
if args.framework == 'torch':
pass
else:
raise NotImplementedError('loss of framework: {} is not implemented'.format(args.framework))
model = '{0}.{1}'.format(source, name)
return eval('{}(args, property_type, **kwargs)'.format(model))
def loss_arg_factory(parser, source, name, property_type='', **kwargs):
arg = '{}.{}Arg'.format(source, name)
logger.info('loss_arg: {}'.format(arg))
return eval('{}(parser, property_type, **kwargs)'.format(arg))
```
#### File: base/second_stage_bounding_box_prediction/dcn_feature_calibration.py
```python
๏ปฟ# coding=utf-8
import torch
class RotateRectangleDCNFeatureCalibration(torch.nn.Module):
def __init__(self):
torch.nn.Module.__init__(self)
pass
```
#### File: deep_learning/base/util.py
```python
import re
from enum import Enum
import shutil
import numpy as np
from colorama import Fore
import torch
import os
#from Putil.base import logger as plog
#logger = plog.PutilLogConfig('util').logger()
#logger.setLevel(plog.DEBUG)
from Putil.demo.deep_learning.base import horovod
import Putil.base.save_fold_base as psfb
##@brief indicates which mode the current run is in
# @note this differs from Stage: within one run there may be several stages,
# e.g. TrainEvaluate denotes the Evaluate stage inside RunStage.Train
class RunStage(Enum):
Train=0
Evaluate=1
Test=2
def find_repeatable_environ(base_name):
temp = set([k if re.search(base_name, k) is not None else None for k in os.environ.keys()])
temp.discard(None)
return temp
def get_relatived_environ(base_name):
return {property_type.replace(base_name, ''): os.environ[property_type] for property_type in find_repeatable_environ(base_name)}
def complete_environ(source_dict, target_dict, default_content):
# complete target_dict with the property types that exist in source_dict but are missing from target_dict
[None if property_type in target_dict.keys() else target_dict.update({property_type: default_content}) \
for property_type, name in source_dict.items()]
pass
def empty_tensor_factory(framework, **kwargs):
def generate_empty_tensor_factory_func():
if framework == 'torch':
# tensor operation
def torch_generate_empty_tensor():
return torch.Tensor([])
return torch_generate_empty_tensor
else:
raise NotImplementedError('empty_tensor_factory in framework: {} is not implemented'.format(framework))
pass
return generate_empty_tensor_factory_func
def string_to_torch_tensor(_str, code='utf-16'):
return torch.from_numpy(np.frombuffer(_str.encode(code), dtype=get_np_dtype_from_code(code)))
def get_np_dtype_from_code(code):
return np.int16 if code == 'utf-16' else np.int8 if code == 'utf-8' else None
def get_code_from_np_dtype(np_dtype):
return ('utf-16', np.uint16) if np_dtype == np.int16 else ('utf-8', np.uint8) if np_dtype == np.int8 else None
def torch_tensor_to_string(tensor, code='utf-16'):
n = tensor.numpy()
return n.astype(get_code_from_np_dtype(n.dtype)[1]).tobytes().decode(get_code_from_np_dtype(n.dtype)[0])
def make_sure_the_save_dir(name, run_stage, save_dir, weight_path, weight_epoch, debug, framework):
hvd = horovod.horovod(framework)
if run_stage == RunStage.Train:
if (weight_path == '' or weight_epoch is None) and hvd.rank() == 0:
bsf = psfb.BaseSaveFold(
use_date=True if not debug else False, \
use_git=True if not debug else False, \
should_be_new=True if not debug else False, \
base_name='{}{}'.format(name if name != '' else 'Unnamed', '-debug' if debug else ''))
bsf.mkdir(save_dir)
save_dir = bsf.FullPath
code = 'utf-16'
save_dir_tensor = string_to_torch_tensor(save_dir, code)
save_dir_tensor = hvd.broadcast_object(save_dir_tensor, 0, 'save_dir')
save_dir = torch_tensor_to_string(save_dir_tensor, code)
elif hvd.rank() == 0 and weight_path is not None and weight_epoch is not None:
save_dir = os.path.dirname(weight_path)
code = 'utf-16'
save_dir_tensor = string_to_torch_tensor(save_dir, code)
save_dir_tensor = hvd.broadcast_object(save_dir_tensor, 0, 'save_dir')
save_dir = torch_tensor_to_string(save_dir_tensor, code)
elif hvd.rank() != 0:
code = 'utf-16'
save_dir_tensor = string_to_torch_tensor(save_dir, code)
save_dir_tensor = hvd.broadcast_object(save_dir_tensor, 0, 'save_dir')
save_dir = torch_tensor_to_string(save_dir_tensor, code)
else:
raise RuntimeError('this should not happen')
print(Fore.GREEN + 'rank {} final get save dir: {}'.format(hvd.rank(), save_dir) + Fore.RESET)
return save_dir
pass
def generate_train_time_dir_name(train_time):
return 'train_time-{}'.format(train_time)
def subdir_base_on_train_time(root_dir, train_time, prefix):
'''
@brief generate the subdirectory name for the given train_time under the root directory
'''
return os.path.join(root_dir, '{}{}'.format('' if prefix == '' else '{}-'.format(prefix), generate_train_time_dir_name(train_time)))
def train_time_matched(train_time, subdir):
res = re.search(generate_train_time_dir_name(train_time), subdir)
return res is not None, res
#def get_train_time_from_subdir(subdir):
# return
def _tensorboard_event_and_the_train_time(file_name):
_split_by_point = file_name.split('.')
is_event = _split_by_point[0] == 'events' and _split_by_point[1] == 'out' and _split_by_point[2] == 'tfevents'
train_time = int(file_name.split('-')[-1]) if is_event else None
return is_event, train_time
def _get_trained_result(path, train_time):
'''
@brief collect the result content related to the given train_time under the root directory,
and hand it to clean_train_result and similar helpers for processing
'''
files = []
dirs = []
#
#dirs.append(os.path.join(path, subdir_base_on_train_time(path, train_time)))
#
_contents = os.listdir(path)
for _content in _contents:
matched, res = train_time_matched(train_time, _content)
dirs.append(os.path.join(path, _content)) if matched else None
pass
return dirs, files
def clean_train_result(path, train_time):
dirs, files = _get_trained_result(path, train_time)
#sync = hvd.broadcast_object(torch.BoolTensor([True]), 0, 'sync_before_checking_remove_file')
_remove = input(Fore.RED + 'remove the files: {} dir: {} (y/n):'.format(files, dirs))
[os.remove(_file) for _file in files] if _remove in ['y', 'Y'] else None
[shutil.rmtree(_dir) for _dir in dirs] if _remove in ['Y', 'y'] else None
pass
def fix_one_env_param(param):
if isinstance(param, bool):
return param
elif isinstance(param, str):
if param in ['False', 'false']:
return False
elif param in ['True', 'true']:
return True
elif param in ['None', 'none']:
return None
elif param in ['Train', 'train', 'Evaluate', 'evaluate', 'Test', 'test']:
if param in ['Train', 'train']:
return RunStage.Train
elif param in ['Evaluate', 'evaluate']:
return RunStage.Evaluate
else:
return RunStage.Test
pass
elif param in ['Torch', 'torch']:
return 'torch'
elif param in ['tf', 'tensorflow']:
return 'tf'
else:
return param
pass
elif isinstance(param, None.__class__):
return param
else:
raise NotImplementedError('fix param with type {} is not implemented'.format(param.__class__.__name__))
pass
def fix_env_param(param):
check_multi_param = param.split('.')
if len(check_multi_param) != 1:
temp_params = []
for param in check_multi_param:
temp_params.append(fix_one_env_param(param))
pass
return temp_params
else:
return fix_one_env_param(param)
pass
def print_env_param(param, env_name):
print(Fore.GREEN + 'param: {}:{} | type: {}'.format(env_name, param, param.__class__.__name__) + Fore.RESET)
def make_sure_the_train_time(run_stage, save_dir, framework):
hvd = horovod.horovod(framework)
if run_stage == RunStage.Train:
#if hvd.rank() == 0 and args.weight_epoch is None and args.weight_epoch is None:
# print(Fore.GREEN + 'get the untrained train time is 0' + Fore.RESET)
# args.train_time = 0
# train_time_tensor = torch.IntTensor([args.train_time])
# train_time_tensor = hvd.broadcast_object(train_time_tensor, 0, 'train_time')
#elif hvd.rank() == 0 and (args.weight_path != '') and (args.weight_epoch is not None):
if hvd.rank() == 0:# and (args.weight_path != '') and (args.weight_epoch is not None):
item_list = os.listdir(save_dir)
max_time = 0
for _item in item_list:
if os.path.isdir(os.path.join(save_dir, _item)):
name_part = _item.split('-')
if name_part[-2] == 'train_time':
max_time = max(max_time, int(name_part[-1]))
train_time = max_time + 1
print(Fore.GREEN + 'get the trained train time is {}'.format(train_time) + Fore.RESET)
train_time_tensor = torch.IntTensor([train_time])
train_time_tensor = hvd.broadcast_object(train_time_tensor, 0, 'train_time')
train_time = train_time
elif hvd.rank() != 0:
print(Fore.GREEN + 'wait for the root rank share the train_time' + Fore.RESET)
train_time_tensor = torch.IntTensor([-1])
train_time_tensor = hvd.broadcast_object(train_time_tensor, 0, 'train_time')
train_time = train_time_tensor.item()
else:
raise RuntimeError('this should not happen')
pass
print(Fore.GREEN + 'rank {} final get train time: {}'.format(hvd.rank(), train_time) + Fore.RESET)
return train_time
pass
pass
```
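A few illustrative conversions performed by `fix_env_param` (assuming the repo's dependencies such as torch and colorama are installed, since this module imports them):

```python
from Putil.demo.deep_learning.base.util import fix_env_param, RunStage

assert fix_env_param('True') is True
assert fix_env_param('none') is None
assert fix_env_param('train') is RunStage.Train
# dot-separated values are split and converted element-wise
assert fix_env_param('train.evaluate') == [RunStage.Train, RunStage.Evaluate]
```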
#### File: demo/deep_learning/test_reload_plog_lib.py
```python
import Putil.base.logger as plog
logger = plog.PutilLogConfig('a').logger()
logger.setLevel(plog.DEBUG)
import test_reload_plog_lib_lib as lib
# this is very important
def a():
logger.debug('debug')
logger.info('info')
logger.warning('warning')
logger.error('error')
logger.fatal('fatal')
lib.b()
```
#### File: deep_learning/util/encode.py
```python
from Putil.demo.deep_learning.base.encode import Encode
class DefaultEncode(Encode):
def __init__(self, args):
Encode.__init__(self, args)
def __call__(self, *input):
raise NotImplementedError('DefaultEncode is not implemented')
pass
```
#### File: deep_learning/util/fit_to_indicator_input.py
```python
import copy
from abc import abstractmethod, ABCMeta
import Putil.base.logger as plog
logger = plog.PutilLogConfig('fit_data_to_input').logger()
logger.setLevel(plog.DEBUG)
from Putil.demo.deep_learning.base import util
##@brief FitToIndicatorInput provides a callable that receives datas and output:
# datas is the Dataset output and output is the model output; it produces the target data handed to the Loss to compute the loss; this object is invoked inside the Loss
class FitToIndicatorInput(metaclass=ABCMeta):
##@brief
# @param[in] args
# @param[in] property_type
# @param[in] kwargs
def __init__(self, args, property_type='', **kwargs):
pass
def __call__(self, datas, output):
return self._call_impl(datas, output)
@abstractmethod
def _call_impl(self, *kargs, **kwargs):
pass
class _DefaultFitToIndicatorInput(FitToIndicatorInput):
def __init__(self, args, property_type='', **kwargs):
FitToIndicatorInput.__init__(self, args, property_type, **kwargs)
self._args = args
pass
def _call_impl(self, *kargs, **kwargs):
'''
@brief generate the input for the backbone
'''
return kargs[0][1], kargs[1]
def DefaultFitToIndicatorInput(args, property_type='', **kwargs):
'''
@param[in] args
'''
temp_args = copy.deepcopy(args)
def generate_default_fit_data_to_input():
return _DefaultFitToIndicatorInput(temp_args, property_type, **kwargs)
return generate_default_fit_data_to_input
def DefaultFitToIndicatorInputArg(parser, property_type='', **kwargs):
pass
```
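A hedged usage sketch of the default fit object. The import path is an assumption based on the file location, and `args` can be any argparse namespace since the default implementation ignores it:

```python
import argparse
import torch
from util.fit_to_indicator_input import DefaultFitToIndicatorInput  # assumed import path

args = argparse.Namespace()
fit = DefaultFitToIndicatorInput(args)()            # the factory returns a builder; call it to get the object
datas = (torch.zeros(4, 3, 8, 8), torch.zeros(4, dtype=torch.long))  # (input, target) as a Dataset might yield
output = torch.zeros(4, 10)                         # model output
target, prediction = fit(datas, output)             # the default picks datas[1] and passes output through
```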
#### File: deep_learning/util/indicator.py
```python
from Putil.demo.deep_learning.base.indicator import Indicator
class DefaultIndicator(Indicator):
def __init__(self, args):
Indicator.__init__(self, args)
pass
def forward(self, *input):
raise NotImplementedError('DefaultIndicator is not implemented')
```
#### File: Putil/ILSVRC2012/ILSVRCS_statistic.py
```python
import os
import sys
# === import project path ===
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath)
# ===========================
import base.default_excutable_argument as dea
import pandas as pd
from PIL import Image
import os
import copy
import queue
import threading
import multiprocessing
from multiprocessing import Manager
from multiprocessing import Pool
from multiprocessing import cpu_count
multiprocessing.freeze_support()
ILSVRC_train_root = '/data/ILSVRC2012/train'
save_to = '/data/process_data/caojihua/ILSVRC/'
# collect information into a PD
statistic_sample = 'statistic_sample.csv'
statistic_label = 'statistic_label.csv'
run_time_message = 'statistic_info.txt'
process_amount = cpu_count()
argument = dea.Argument()
parser = argument.parser
parser.add_argument(
'--train_root',
action='store',
dest='TrainRoot',
type=str,
default='',
help='if this flag is set, the program just test train in perform'
)
args = parser.parse_args()
def ifp_listening(ifp, queue):
while True:
msg = queue.get()
if msg == 'end':
ifp.write('killed')
break
ifp.write(msg)
ifp.flush()
pass
ifp.close()
pass
def ifp_write(queue, msg):
queue.put(msg)
pass
def read_image_information(class_dir, sample_list, image_info_queue, ifp_queue):
for sample_element in sample_list:
sample_dir = os.path.join(class_dir, sample_element)
try:
im = Image.open(sample_dir)
width, height = im.size
channel = im.layers
image_info_queue.put(
[False, {'image_name': sample_element, 'height': height, 'width': width, 'channel': channel}])
del im
except Exception as ex:
ifp_write(ifp_queue, '{0} failed {1}\n'.format(sample_dir, ex.args))
pass
pass
image_info_queue.put([True, {}])
pass
def deal_with_class(classes, ifp_queue):
df_for_label = pd.DataFrame(columns=['class_dir', 'reflect_name'])
df_for_sample = pd.DataFrame(columns=['class', 'image_name', 'height', 'width', 'channel'])
l = 0
while l < len(classes):
class_element = classes[l]
try:
print('deal with {0}'.format(class_element))
df_for_label = df_for_label.append({'class_dir': class_element, 'reflect_name': class_element},
ignore_index=True)
class_dir = os.path.join(ILSVRC_train_root, class_element)
sample_list = os.listdir(class_dir)
# add to queue
image_info_queue = queue.Queue()
read_thread = threading.Thread(target=read_image_information,
args=(class_dir, sample_list, image_info_queue, ifp_queue))
read_thread.start()
base_dict = {'class': class_element}
sample_ = list()
while True:
element = image_info_queue.get()
if element[0] is False:
base_dict.update(element[1])
sample_.append(copy.deepcopy(base_dict))
pass
else:
break
pass
pass
read_thread.join()
df_for_sample = df_for_sample.append(sample_, ignore_index=True)
del sample_
pass
except Exception as ex:
ifp_write(ifp_queue, '{0}\n'.format(ex.args))
pass
l += 1
print('pid: {0}, dealt: {1}, remaining: {2}'.format(os.getpid(), l, len(classes) - l))
pass
print('done:{0}'.format(classes))
return df_for_sample, df_for_label
def deal_with_ilsvrc(info_save_to, sample_save_to, label_save_to):
global process_amount
class_list = os.listdir(ILSVRC_train_root)
# split class_list into process_amount parts
seperate_class_list = []
if process_amount > len(class_list):
process_amount = len(class_list)
else:
pass
base_len = len(class_list) // process_amount
end_len = len(class_list) % process_amount + base_len
start = 0
length = base_len
for i in range(0, process_amount):
seperate_class_list.append(class_list[start: length])
start = start + base_len
if i != process_amount - 2:
length = start + base_len
pass
else:
length = start + end_len
assert(sum([len(i) for i in seperate_class_list]) == len(class_list))
ifp_queue = Manager().Queue()
process_list = []
pool = Pool(processes=process_amount)
with open(info_save_to, 'w') as ifp:
pool.apply_async(ifp_listening, args=(ifp, ifp_queue))
for scl in seperate_class_list:
# process = pool.apply_async(test, args=(1,))
process = pool.apply_async(deal_with_class, args=(scl, ifp_queue))
# process.start()
process_list.append(process)
pass
pool.close()
pool.join()
pass
sample_pd_collection = []
label_pd_collection = []
for pl in process_list:
s, l = pl.get()
sample_pd_collection.append(s)
label_pd_collection.append(l)
pass
label_pd = pd.concat(label_pd_collection, ignore_index=True)
sample_pd = pd.concat(sample_pd_collection, ignore_index=True)
label_pd.to_csv(label_save_to)
sample_pd.to_csv(sample_save_to)
pass
if __name__ == '__main__':
deal_with_ilsvrc(os.path.join(save_to, run_time_message), os.path.join(save_to, statistic_sample),
os.path.join(save_to, statistic_label))
```
#### File: Putil/np/util.py
```python
import numpy as np
from Putil.tf import util
import Putil.loger as plog
import sys
root_logger = plog.PutilLogConfig("NpUtil").logger()
root_logger.setLevel(plog.DEBUG)
NpTypeLogger = root_logger.getChild("NpTypeLogger")
NpTypeLogger.setLevel(plog.DEBUG)
_np_type = {
0.32: np.float32,
32: np.int32,
0.64: np.float64,
64: np.int64,
8: np.uint8,
-8: np.int8,
0.16: np.float16,
16: np.int16
}
class np_type:
def __init__(self, label):
try:
self._type = _np_type[label]
except KeyError as e:
NpTypeLogger.error('key : {0} is not supported\n{1}'.format(label, e))
sys.exit()
self._label = label
pass
@property
def Type(self):
return self._type
def to_tf(self):
return util._tf_type[self._label]
pass
```
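The label scheme encodes float types as fractions and integer types as (signed) bit widths. A small check (TensorFlow must be importable as well, since this module cross-imports `Putil.tf.util`):

```python
import numpy as np
from Putil.np.util import np_type

assert np_type(0.32).Type is np.float32
assert np_type(64).Type is np.int64
assert np_type(-8).Type is np.int8
```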
#### File: jihuacao/Putil/path.py
```python
import os
def touch_dir(wanted_dir):
not_exist_collection = []
while os.path.exists(wanted_dir) is not True and wanted_dir != '':
wanted_dir, step = os.path.split(wanted_dir)
not_exist_collection.append(step)
pass
while len(not_exist_collection) != 0:
step = not_exist_collection.pop()
wanted_dir = os.path.join(wanted_dir, step)
os.mkdir(wanted_dir)
pass
pass
```
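`touch_dir` walks up the path until it finds an existing ancestor, then creates the missing levels in order. A quick demonstration:

```python
import os
import tempfile
from Putil.path import touch_dir

base = tempfile.mkdtemp()
wanted = os.path.join(base, 'a', 'b', 'c')
touch_dir(wanted)                 # creates a, then a/b, then a/b/c
assert os.path.isdir(wanted)
```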
#### File: test/base/test_save_fold_base.py
```python
import Putil.base.save_fold_base as psfb
import os
def test_base_save_fold():
root_dir = './test/test_generation/base/test_save_fold'
bsf = psfb.BaseSaveFold(use_git=True, use_date=True, base_name='test_base_save_fold', should_be_new=True)
bsf.mkdir(root_dir)
assert os.path.split(bsf.FullPath)[0] == root_dir
os.rmdir(bsf.FullPath)
pass
if __name__ == '__main__':
test_base_save_fold()
pass
```
#### File: test/data/conftest.py
```python
import pytest
def pytest_addoption(parser):
parser.addoption("--username", action="store", help="input useranme")
parser.addoption('--cifar_root_dir', action='store', default='', help='the root dir for the cifar100')
pass
# option-parsing fixture
@pytest.fixture
def params(request):
params = {}
params['cifar_root_dir'] = request.config.getoption('--cifar_root_dir')
return params
```
#### File: test/data/test_cifar.py
```python
import numpy as np
import cv2
import pytest
from Putil.data.cifar import Cifar100
from Putil.trainer import util
from Putil.data.aug import AugFuncNoOp, AugNode
from Putil.data.convert_to_input import ConvertToInputNoOp
from Putil.data.data_type_adapter import DataTypeAdapterNoOp
from Putil.data.torch_151_data.dataloader import DataLoader
from Putil.data.torch_151_data.sampler import BatchSampler, Sampler, RandomSampler
def test_cirfar100(params):
cifar = Cifar100(util.Stage.Train, params['cifar_root_dir'], 1.0, None, Cifar100.RemainStrategy.Drop, Cifar100.Level.FineClass)
root_node = AugNode(AugFuncNoOp())
root_node.add_child(AugNode(AugFuncNoOp()))
root_node.freeze_node()
cifar.set_aug_node_root(root_node)
cifar.set_convert_to_input_method(ConvertToInputNoOp())
cifar.set_data_type_adapter(DataTypeAdapterNoOp())
d, l, = cifar[0]
cv2.imwrite('test/data/result/test_cifar100/read_one.jpg', np.transpose(d, (1, 2, 0)), cv2.IMWRITE_PAM_FORMAT_RGB)
sampler = BatchSampler(RandomSampler(cifar), 8, True)
data_loader = DataLoader(cifar, sampler=sampler)
pass
```
#### File: test/data/test_poisson.py
```python
import cv2
import os
import numpy as np
code_patch_dir = '/data2/process_data/caojihua/data/code_patches/'
background_dir = '/data2/Public_Data/COCO/unzip_data/2017/train2017'
codes = os.listdir(code_patch_dir)
bgs = os.listdir(background_dir)
#In[]
def read_img(code_patch_dir, background_dir, code_name, bg_name):
code_path = os.path.join(code_patch_dir, code_name)
code_img = cv2.imread(code_path, cv2.IMREAD_GRAYSCALE)
bg_path = os.path.join(background_dir, bg_name)
bg_img = cv2.imread(bg_path, cv2.IMREAD_GRAYSCALE)
return code_img, bg_img
code_img, bg_img = read_img(code_patch_dir, background_dir, codes[1], bgs[0])
code_img = cv2.resize(code_img, (bg_img.shape[1] // 4, bg_img.shape[0] // 4))
print(code_img.shape)
print(bg_img.shape)
import matplotlib.pyplot as plt
plt.imshow(code_img, cmap='gray')
plt.show()
plt.imshow(bg_img, cmap='gray')
plt.show()
#In[]
code_img_bgr = cv2.cvtColor(code_img, cv2.COLOR_GRAY2BGR)
bg_img_bgr = cv2.cvtColor(bg_img, cv2.COLOR_GRAY2BGR)
mask = 255 * np.ones(code_img_bgr.shape, code_img_bgr.dtype)
center = (bg_img_bgr.shape[0] // 2 , bg_img_bgr.shape[1] // 2)
result = cv2.seamlessClone(code_img_bgr, bg_img_bgr, mask, center, cv2.NORMAL_CLONE)
plt.imshow(result)
plt.show()
#In[]
code_img_bgr = cv2.cvtColor(code_img, cv2.COLOR_GRAY2BGR)
bg_img_bgr = cv2.cvtColor(bg_img, cv2.COLOR_GRAY2BGR)
code_img_bgr_fill_to_bg = np.zeros(bg_img_bgr.shape, bg_img_bgr.dtype)
h_begin = code_img_bgr_fill_to_bg.shape[0] // 2 - code_img_bgr.shape[0] // 2
w_begin = code_img_bgr_fill_to_bg.shape[1] // 2 - code_img_bgr.shape[1] // 2
code_img_bgr_fill_to_bg[h_begin: code_img_bgr.shape[0] + h_begin, w_begin: code_img_bgr.shape[1] + w_begin, :] = code_img_bgr
plt.imshow(code_img_bgr_fill_to_bg)
plt.show()
mask = 255 * np.ones(bg_img_bgr.shape, bg_img_bgr.dtype)
mask[h_begin: code_img_bgr.shape[0] + h_begin, w_begin: code_img_bgr.shape[1] + w_begin, :] = 0
mask[0: h_begin - code_img_bgr.shape[0] // 2, 0: w_begin - code_img_bgr.shape[1] // 2, :] = 0
bg_img_bgr[h_begin: code_img_bgr.shape[0] + h_begin, w_begin: code_img_bgr.shape[1] + w_begin, :] = 0
plt.imshow(mask)
plt.show()
center = (bg_img_bgr.shape[0] // 2 , bg_img_bgr.shape[1] // 2)
result = cv2.seamlessClone(bg_img_bgr, code_img_bgr_fill_to_bg, mask, center, cv2.MIXED_CLONE)
print(result.shape)
plt.imshow(result)
plt.show()
#In[]
print(cv2.seamlessClone.__doc__)
#In[]
import numpy as np
from PIL import Image # Python image Library
from scipy import sparse
from scipy.sparse import linalg
class SeamlessEditingTool:
def __init__(self, ref, target, mask):
self.ref = np.array(Image.open(ref))
self.target = np.array(Image.open(target))
self.mask = np.array(Image.open(mask))
self.height, self.width, blank = self.ref.shape
# (width, height)-tuple
self.newImage = Image.new('RGB', (self.width, self.height))
# index of mask
# map coordinate of pixels to be calculated to index_map according to
# mask
self.maskidx2Corrd = []
# map coordinates of neighbourhoods to mask indices
self.Coord2indx = -1 * np.ones([self.height, self.width])
# True if q \in N_p \bigcap \Sigma
# False elsewise
# at boundary
self.if_strict_interior = [] # left, right, top, bottom
idx = 0
for i in range(self.height):
for j in range(self.width):
if self.mask[i, j, 0] == 255:
self.maskidx2Corrd.append([i, j])
self.if_strict_interior.append([
i > 0 and self.mask[i - 1, j, 0] == 255,
i < self.height - 1 and self.mask[i + 1, j, 0] == 255,
j > 0 and self.mask[i, j - 1, 0] == 255,
j < self.width - 1 and self.mask[i, j + 1, 0] == 255
])
self.Coord2indx[i][j] = idx
idx += 1
# number of mask
N = idx
self.b = np.zeros([N, 3])
self.A = np.zeros([N, N])
def create_possion_equation(self):
# Using the finite difference method
N = self.b.shape[0]
for i in range(N):
# for every pixel in interior and boundary
self.A[i, i] = 4
x, y = self.maskidx2Corrd[i]
if self.if_strict_interior[i][0]:
self.A[i, int(self.Coord2indx[x - 1, y])] = -1
if self.if_strict_interior[i][1]:
self.A[i, int(self.Coord2indx[x + 1, y])] = -1
if self.if_strict_interior[i][2]:
self.A[i, int(self.Coord2indx[x, y - 1])] = -1
if self.if_strict_interior[i][3]:
self.A[i, int(self.Coord2indx[x, y + 1])] = -1
# Row-based linked list sparse matrix
# This is an efficient structure for
# constructing sparse matrices incrementally.
self.A = sparse.lil_matrix(self.A, dtype=int)
for i in range(N):
flag = np.mod(
np.array(self.if_strict_interior[i], dtype=int) + 1, 2)
x, y = self.maskidx2Corrd[i]
for j in range(3):
self.b[i, j] = 4 * self.ref[x, y, j] - self.ref[x - 1, y, j] - \
self.ref[x + 1, y, j] - self.ref[x,
y - 1, j] - self.ref[x, y + 1, j]
self.b[i, j] += flag[0] * self.target[x - 1, y, j] + \
flag[1] * self.target[x + 1, y, j] + flag[2] * \
self.target[x, y - 1, j] + \
flag[3] * self.target[x, y + 1, j]
def possion_solver(self):
self.create_possion_equation()
# Use Conjugate Gradient iteration to solve A x = b
x_r = linalg.cg(self.A, self.b[:, 0])[0]
x_g = linalg.cg(self.A, self.b[:, 1])[0]
x_b = linalg.cg(self.A, self.b[:, 2])[0]
self.newImage = self.target
for i in range(self.b.shape[0]):
x, y = self.maskidx2Corrd[i]
self.newImage[x, y, 0] = np.clip(x_r[i], 0, 255)
self.newImage[x, y, 1] = np.clip(x_g[i], 0, 255)
self.newImage[x, y, 2] = np.clip(x_b[i], 0, 255)
self.newImage = Image.fromarray(self.newImage)
return self.newImage
if __name__ == "__main__":
test = 0
if test == 1:
ref = "mona-source.jpg"
target = "mona-target.jpg"
mask = "mona-mask.jpg"
tools = SeamlessEditingTool(ref, target, mask)
newImage = tools.possion_solver()
newImage.save('mona-leber-final.jpg')
else:
ref = "sealion-source.jpg"
target = "duck-target.jpg"
mask = "duck-mask.jpg"
tools = SeamlessEditingTool(ref, target, mask)
newImage = tools.possion_solver()
newImage.save('duck-sealion-final.jpg')
```
#### File: vision_data_aug/detection/test_rectangle.py
```python
import os
image_path = os.path.join(os.path.split(os.path.split(os.path.abspath(__file__))[0])[0], 'test_image.jpg')
#In[]:
import numpy as np
import os
import random
import cv2
import Putil.data.aug as pAug
from Putil.data.common_data import CommonDataWithAug
from Putil.data.vision_data_aug.detection.rectangle import HorizontalFlip as BH
from Putil.data.vision_data_aug.image_aug import HorizontalFlip as IH
from Putil.data.vision_data_aug.detection.rectangle import HorizontalFlipCombine as HFC
from Putil.data.vision_data_aug.detection.rectangle import RandomResampleCombine as RRC
from Putil.data.vision_data_aug.detection.rectangle import RandomTranslateConbine as RTC
from Putil.data.vision_data_aug.detection.rectangle import RandomRotateCombine as RRB
from Putil.data.vision_data_aug.detection.rectangle import RandomShearCombine as RSC
from Putil.data.vision_data_aug.detection.rectangle import VerticalFlipCombine as VFC
from Putil.data.vision_data_aug.detection.rectangle import RandomHSVCombine as RHC
from Putil.data.vision_data_aug.detection.rectangle import SizeFloatCombine as SFC
from Putil.data.aug import AugFunc
image_wh = (800, 800)
class Data(CommonDataWithAug):
def _restart_process(self, restart_param):
'''
process while restart the data, process in the derived class and called by restart_data
restart_param: the argv which the derived class need, dict
'''
pass
def _inject_operation(self, inject_param):
'''
operation while the epoch_done is False, process in the derived class and called by inject_operation
injecct_param: the argv which the derived class need, dict
'''
pass
def __init__(self):
CommonDataWithAug.__init__(self)
self._data_field = [0]
def _generate_from_origin_index(self, index):
image = np.zeros(shape=[image_wh[1], image_wh[0], 3], dtype=np.uint8)
assert image is not None
begin = 20
bboxes = [
[begin, begin, image.shape[1] // 2 - begin, image.shape[0] // 2 - begin],
[begin, begin + image.shape[0] // 2, image.shape[1] // 2 - 2 * begin, image.shape[0] // 2 - 2 * begin],
[begin + image.shape[1] // 2, begin, image.shape[1] // 2 - 2 * begin, image.shape[0] // 2 - 2 * begin],
[begin + image.shape[1] // 2, begin + image.shape[0] // 2, image.shape[1] // 2 - 2 * begin, image.shape[0] // 2 - 2 * begin]
] # LTWHCR
color = {0: (125, 0, 0), 1: (0, 125, 0), 2: (0, 0, 125), 3: (125, 125, 0)}
for index, bbox in enumerate(bboxes):
image[bbox[1]: bbox[1] + bbox[3], bbox[0]: bbox[0] + bbox[2], :] = color[index]
bboxes = np.array(bboxes, dtype=np.float64).tolist()
image = (image / 255).astype(np.float32)
return image, bboxes
class CombineAugFuncHF(AugFunc):
def __init__(self):
AugFunc.__init__(self)
self._aug = HFC()
pass
def __call__(self, *args):
image = args[0]
bboxes = args[1]
image, bboxes = self._aug(image, bboxes)
return image, bboxes
@property
def name(self):
return self._aug.name
pass
class CombineAugFuncVF(AugFunc):
def __init__(self):
AugFunc.__init__(self)
self._aug = VFC()
pass
def __call__(self, *args):
image = args[0]
bboxes = args[1]
image, bboxes = self._aug(image, bboxes)
return image, bboxes
@property
def name(self):
return self._aug.name
pass
class CombineAugFuncRRC(AugFunc):
def __init__(self):
AugFunc.__init__(self)
self._aug = RRC(scale=1)
pass
def __call__(self, *args):
image = args[0]
bboxes = args[1]
image, bboxes = self._aug(image, bboxes)
return image, bboxes
@property
def name(self):
return self._aug.name
pass
class CombineAugFuncRTC(AugFunc):
def __init__(self):
AugFunc.__init__(self)
self._aug = RTC(translate=0.5)
pass
def __call__(self, *args):
image = args[0]
bboxes = args[1]
image, bboxes = self._aug(image, bboxes)
return image, bboxes
@property
def name(self):
return self._aug.name
pass
class CombineAugFuncRRB(AugFunc):
def __init__(self):
AugFunc.__init__(self)
self._aug = RRB(50)
pass
def __call__(self, *args):
image = args[0]
bboxes = args[1]
image, bboxes = self._aug(image, bboxes)
return image, bboxes
@property
def name(self):
return self._aug.name
pass
class CombineAugFuncRSC(AugFunc):
def __init__(self):
AugFunc.__init__(self)
self._aug = RSC(0.9)
pass
def __call__(self, *args):
image = args[0]
bboxes = args[1]
image, bboxes = self._aug(image, bboxes)
return image, bboxes
@property
def name(self):
return self._aug.name
pass
class CombineAugFuncRHC(pAug.AugFunc):
def __init__(self):
self._aug = RHC(0.0, 50.0, 50.0)
pass
def __call__(self, *args):
image = args[0]
bboxes = args[1]
image, bboxes = self._aug(image, bboxes)
return image, bboxes
@property
def name(self):
return self._aug.name
class CombineAugFuncSFC(pAug.AugFunc):
def __init__(self):
self._aug = SFC((0.5, 1.0), (0.5, 1.0))
pass
def __call__(self, *args):
image = args[0]
bboxes = args[1]
image, bboxes = self._aug(image, bboxes)
return image, bboxes
root_node = pAug.AugNode(pAug.AugFuncNoOp())
root_node.add_child(pAug.AugNode(pAug.AugFuncNoOp()))
HFNode = root_node.add_child(pAug.AugNode(CombineAugFuncHF()))
#HFNode.add_child(pAug.AugNode(CombineAugFuncRRC()))
#HFNode.add_child(pAug.AugNode(pAug.AugFuncNoOp()))
VFNode = root_node.add_child(pAug.AugNode(CombineAugFuncVF()))
RRCNode = root_node.add_child(pAug.AugNode(CombineAugFuncRRC()))
#RRCNode.add_child(pAug.AugNode(CombineAugFuncHF()))
#RRCNode.add_child(pAug.AugNode(pAug.AugFuncNoOp()))
RTCNode = root_node.add_child(pAug.AugNode(CombineAugFuncRTC()))
RRBNode = root_node.add_child(pAug.AugNode(CombineAugFuncRRB()))
RSCNode = root_node.add_child(pAug.AugNode(CombineAugFuncRSC()))
RHCNode = root_node.add_child(pAug.AugNode(CombineAugFuncRHC()))
SFCNode = root_node.add_child(pAug.AugNode(CombineAugFuncSFC()))
root_node.freeze_node()
for index in range(0, len(root_node)):
node = root_node[index]
print('name: {0}'.format(node.func.name))
pass
data = Data()
data.set_aug_node_root(root_node)
import matplotlib.pyplot as plt
import matplotlib.patches as patches
print(len(data))
rect_color = ['m', 'c', 'y', 'w']
for index in range(0, len(data)):
image, bboxes = data[index]
#print(bboxes)
#print(image.shape)
print(np.max(image))
assert image.shape == (image_wh[0], image_wh[1], 3), 'image shape: {0}'.format(image.shape)
plt.imshow(image[:, :, ::-1])
currentAxis=plt.gca()
for i, bbox in enumerate(bboxes):
#cv2.rectangle(image, (bbox[0], bbox[1]), (bbox[0] + bbox[2], bbox[1] + bbox[3]), (0, 255, 0), thickness=5)
rect = patches.Rectangle(bbox[0: 2], bbox[2], bbox[3], linewidth=2, edgecolor=rect_color[i], facecolor='none')
currentAxis.add_patch(rect)
pass
plt.show()
pass
```
#### File: data/vision_data_aug/_test_base.py
```python
import cv2
def contrast(img0):
img1 = cv2.cvtColor(img0, cv2.COLOR_BGR2GRAY) # convert the color image to grayscale
m, n = img1.shape
# pad the image matrix by one pixel on every side
img1_ext = cv2.copyMakeBorder(img1,1,1,1,1,cv2.BORDER_REPLICATE) / 1.0 # dividing by 1.0 converts uint8 to float for the later computation
rows_ext,cols_ext = img1_ext.shape
b = 0.0
for i in range(1,rows_ext-1):
for j in range(1,cols_ext-1):
b += ((img1_ext[i,j]-img1_ext[i,j+1])**2 + (img1_ext[i,j]-img1_ext[i,j-1])**2 +
(img1_ext[i,j]-img1_ext[i+1,j])**2 + (img1_ext[i,j]-img1_ext[i-1,j])**2)
cg = b/(4*(m-2)*(n-2)+3*(2*(m-2)+2*(n-2))+2*4) # normalisation: 4 neighbour pairs per interior pixel, 3 per edge pixel, 2 per corner
print(cg)
```
#### File: data/vision_data_aug/test_image_contrast_aug.py
```python
import os
os.chdir(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))
from Putil.data.vision_data_aug.image_aug import Contrast
import numpy as np
from matplotlib import pyplot as plt
import cv2
import numpy as np
def contrast(img0):
img1 = cv2.cvtColor(img0, cv2.COLOR_BGR2GRAY) # convert the color image to grayscale
m, n = img1.shape
# pad the image matrix by one pixel on every side
img1_ext = cv2.copyMakeBorder(img1,1,1,1,1,cv2.BORDER_REPLICATE) / 1.0 # dividing by 1.0 converts uint8 to float for the later computation
rows_ext,cols_ext = img1_ext.shape
b = 0.0
for i in range(1,rows_ext-1):
for j in range(1,cols_ext-1):
b += ((img1_ext[i,j]-img1_ext[i,j+1])**2 + (img1_ext[i,j]-img1_ext[i,j-1])**2 +
(img1_ext[i,j]-img1_ext[i+1,j])**2 + (img1_ext[i,j]-img1_ext[i-1,j])**2)
cg = b/(4*(m-2)*(n-2)+3*(2*(m-2)+2*(n-2))+2*4) # normalisation: 4 neighbour pairs per interior pixel, 3 per edge pixel, 2 per corner
print(cg)
image = cv2.imread('./test/data/vision_data_aug/test_image.jpg')
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
print(np.mean(image[:, :, 0] * 0.299) + np.mean(image[:, :, 1] * 0.587) + np.mean(image[:, :, 2] * 0.114))
contrast(image)
plt.imshow(image)
plt.show()
#image = np.zeros(shape=[100, 100, 3], dtype=np.float32)
_contrast = Contrast()
_contrast.contrast = 100
image_with_contrast = _contrast(image)
print(np.mean(image_with_contrast[:, :, 0] * 0.299) + np.mean(image_with_contrast[:, :, 1] * 0.587) + np.mean(image_with_contrast[:, :, 2] * 0.114))
contrast(image_with_contrast)
plt.imshow(image_with_contrast)
plt.show()
```
#### File: test/test_pytest/test_pytest.py
```python
import pytest
def test_params(params):
print('asdas:{}'.format(params))
```
#### File: test/TestUsingExtern/Matplotlib300.py
```python
from optparse import OptionParser
parser = OptionParser(usage="usage %prog [options] arg1 arg2")
level_default = 'Debug'
parser.add_option(
'--Level',
action='store',
type=str,
help='set the log level'
'default: {0}'.format(level_default)
)
test_scatter_hist_default = False
parser.add_option(
'--test_scatter_hist',
action='store_true',
default=test_scatter_hist_default,
help='if you want to test scatter_hist, set this flag'
'default: {0}'.format(test_scatter_hist_default)
)
(options, args) = parser.parse_args()
import Putil.base.logger as plog
plog.PutilLogConfig.config_log_level(stream=plog.LogReflect(options.Level).Level)
plog.PutilLogConfig.config_handler(plog.stream_method)
plog.PutilLogConfig.config_format(
"%(filename)s: %(lineno)d: %(levelname)s: %(name)s: %(message)s")
root_logger = plog.PutilLogConfig('test/calc/test_estimate').logger()
root_logger.setLevel(plog.DEBUG)
ScatterHistLogger = root_logger.getChild("ScatterHist")
ScatterHistLogger.setLevel(plog.DEBUG)
def ScatterHist():
pass
if __name__ == '__main__':
pass
```
#### File: tf/param/test_auto_save.py
```python
from optparse import OptionParser
import Putil.loger as plog
from colorama import Fore
import functools
import Putil.test.test_helper as th
parser = OptionParser(usage='usage %prog [options] arg1 arg2')
level_default = 'Debug'
parser.add_option(
'--level',
action='store',
dest='Level',
type=str,
default=level_default,
help='specify the log level for the app'
'default: {0}'.format(level_default)
)
parser.add_option(
'--test_ImproveSave',
action='store_true',
default=False,
dest='TestImproveSave',
help='set this flag while you want to test ImproveSave'
)
(options, args) = parser.parse_args()
plog.PutilLogConfig.config_log_level(stream=plog.LogReflect(options.Level).Level)
plog.PutilLogConfig.config_handler(plog.stream_method)
plog.PutilLogConfig.config_format(
"%(filename)s: %(lineno)d: %(levelname)s: %(name)s: %(message)s")
root_logger = plog.PutilLogConfig('tf/test/tf/param/test_auto_save').logger()
root_logger.setLevel(plog.DEBUG)
TestImproveSaveLogger = root_logger.getChild('TestImproveSave')
TestImproveSaveLogger.setLevel(plog.DEBUG)
import Putil.tf.param.auto_save as auto_save
class model:
def __init__(self):
self._auto_save = auto_save.ImproveSave(5).UseDefaultDecider(max=True).SetIndicatorGet(self.Output).CheckAutoSave()
self._data = [0.0, 1.0, 1.2, 1.3, 1.4, 1.1, 1.3, 1.7, 1.9, 1.0, 1.5]
self._i = -1
pass
def ModelCheck(self):
if self._auto_save.Save() is True:
TestImproveSaveLogger.debug('save in acc: {0}'.format(self.Output()))
return True
else:
return False
pass
@property
def AutoSave(self):
return self._auto_save
def TrainCv(self):
self._i += 1
pass
@property
def Data(self):
return self._data
def Output(self):
return self.Data[self._i]
def __test_improve_save():
print(th.information(0, 'start testing imrpove_save', Fore.GREEN) + Fore.RESET)
m = model()
print(m._auto_save._regular)
assert m._auto_save.IndicatorGetter == m.Output
assert m._auto_save.DecisionGenerator == m._auto_save._decider
hit_target = [1, 2, 3, 4, 7, 8]
for i in range(0, 11):
m.TrainCv()
if m.ModelCheck() is True:
if i in hit_target:
pass
else:
print(th.information(0, 'test improve_save failed', Fore.LIGHTRED_EX) + Fore.RESET)
pass
pass
pass
print(th.information(0, 'test improve_save successful', Fore.LIGHTGREEN_EX) + Fore.RESET)
pass
if __name__ == '__main__':
if options.TestImproveSave:
__test_improve_save()
```
#### File: Putil/tf/model_cv_example.py
```python
import tensorflow as tf
class Model:
def __init__(self, **options):
"""
init the model, do some member variance declaration
:param options:
"""
# todo: save the total dict param
# todo: maybe {"arch_com": {}, "opt":{}, "arch_def":{}, "training":{}}
self._param = None
# todo: save the param which shared while using self._param_arch_def to build arch, this is to avoid to much
# todo: param write and change in the config file, but you should complement the arch_def using this arch_com
# todo: maybe {"base_model": "name", "regularizer_weight": float, "loss_type": "name", "param_dtype": float,
# todo: "moving_decay": float, ...}
self._param_arch_com = None
# todo: save the param which is used for opt-init, maybe: { "opt": "name", "opt_param":{ ...}}
self._param_opt = None
# todo: save the param which is used for building the arch, with arch_com
# todo: maybe {"name from base model": { ...some param for build}}
self._param_arch_def = None
# todo: save the param which is used in training time
# todo: maybe {"summary_path": "path", "val_epoch": int, "display_batch": int,
# todo: "epoch": int, "batch_size": int, ...}
self._param_training = None
# :
self._placeholder = dict()
# todo: collect the name which represent model train outputs dict keys
self._train_result_reflect = []
# todo: collect the name which represent model val outputs dict keys
self._val_result_reflect = []
self._sess = tf.Session()
self._loss = None
self._train_op = None
self._opt = None
self._step = None
pass
# todo: make the placeholder needed in model run
def __make_placeholder(self, data):
return {}
pass
# todo: process the param input to special param in the self, for easier use
def extract_param(self, param):
pass
# todo: build the model, complement self._loss with loss for training
def __build_with_loss(self):
pass
# todo: complement the self._opt for training
def __build_opt(self):
pass
# todo: init the model which will call in every cv estimate
def re_init(self):
# : reset the graph to default
tf.reset_default_graph()
# :
self.__build_with_loss()
# : complement the self._sess
self._sess = tf.Session()
# : make self._step
self._step = tf.Variable(0, trainable=True)
# : complement the opter
self.__build_opt()
# : make exponential_moving_average
_ema = tf.train.ExponentialMovingAverage(self._param_arch_com['moving_average'], num_updates=self._step)
# : dependent on the _ema.apply build self._train_op
with tf.control_dependencies([_ema.apply(tf.trainable_variables())]):
self._train_op = self._opt.minimize(self._loss, self._step)
pass
# todo: input data which generated by the DataGenerator and train on these data as one batch
# todo: return the result you want to estimate packed into dictionary
def TrainCV(self, data):
# todo: feed the placeholder
feed = self.__make_placeholder(data)
# todo: run the loss and train and ***
self._sess.run([], feed_dict=feed)
# todo: return the result want to estimate: TrainResultReflect
return {}
pass
# todo: input data which generated by DataGenerator and Val on these data as one batch
# todo: return the result you want to estimate packed into dictionary
def Val(self, data):
feed = self.__make_placeholder(data)
self._sess.run([], feed_dict=feed)
return {}
# : return the reflect dict represent the result name which create int the train
@property
def TrainResultReflect(self):
return self._train_result_reflect
# : return the reflect dict represent the result name which create int the val
@property
def ValResultReflect(self):
return self._val_result_reflect
pass
```
#### File: Putil/tf/model.py
```python
from abc import ABCMeta, abstractmethod
import tensorflow as tf
class Net(metaclass=ABCMeta):
def __init__(self, net_name):
pass
pass
class Model(metaclass=ABCMeta):
def __init__(self, graph_name):
self._save_dir = None
self._weight_fold_name = None
self._summary_fold_name = None
tf.GraphKeys.TRAIN = 'Train'
self._train_summary_key = tf.GraphKeys.TRAIN
tf.GraphKeys.EVALUATE = 'Evaluate'
self._evaluate_summary_key = tf.GraphKeys.EVALUATE
tf.GraphKeys.TEST = 'Test'
self._test_summary_key = tf.GraphKeys.TEST
with tf.variable_scope(graph_name):
self._training = tf.placeholder(dtype=tf.bool, shape=[], name='training')
self._step = tf.placeholder
self._loss = None
self._train_op = None
pass
```
#### File: Putil/tf/util.py
```python
import tensorflow as tf
from Putil.np import util
import Putil.loger as plog
import sys
root_logger = plog.PutilLogConfig("TfUtil").logger()
root_logger.setLevel(plog.DEBUG)
TfTypeLogger = root_logger.getChild("TfTypeLogger")
TfTypeLogger.setLevel(plog.DEBUG)
_tf_type = {
0.32: tf.float32,
32: tf.int32,
0.64: tf.float64,
64: tf.int64,
8: tf.uint8,
-8: tf.int8,
0.16: tf.float16,
16: tf.int16
}
class tf_type:
def __init__(self, label):
try:
self._type = _tf_type[label]
except KeyError as e:
TfTypeLogger.error('key: {0} is not supported\n{1}'.format(label, e))
sys.exit()
self._label = label
pass
@property
def Type(self):
return self._type
def to_np(self):
return util._np_type[self._label]
@staticmethod
def to_np(tf_dtype):
for _reflect in _tf_type.items():
if _reflect[1] == tf_dtype:
return util._np_type[_reflect[0]]
else:
pass
pass
pass
```
#### File: torch/attention/KQV.py
```python
import torch
from Putil.torch.functional import correlated_weight
from Putil.torch.util import TorchNoOpModule
class KQV(torch.nn.Module):
def __init__(self, dims, key_func, query_func, value_func):
torch.nn.Module.__init__(self)
self._dims = dims
self._key_func = key_func
self._query_func = query_func
self._value_func = value_func
pass
def forward(self, x):
key = self._key_func(x)
query = self._query_func(x)
value = self._value_func(x)
return correlated_weight(key, query, value, self._dims)
pass
class KQV2DPixel(KQV):
def __init__(self, in_channels, mid_channels):
key_func = torch.nn.Conv2d(in_channels=in_channels, out_channels=mid_channels, \
kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
query_func = torch.nn.Conv2d(in_channels=in_channels, out_channels=mid_channels, \
kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
value_func = torch.nn.Conv2d(in_channels=in_channels, out_channels=mid_channels, \
kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
KQV.__init__(self, [2, 3], key_func, query_func, value_func)
pass
pass
class NonLocal(KQV2DPixel):
def __init__(self, in_channels, mid_channels):
KQV2DPixel.__init__(self, in_channels, mid_channels)
self._rebuild = torch.nn.Conv2d(in_channels=mid_channels, out_channels=in_channels, \
kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
pass
def forward(self, x):
pixel_refine_with_attension = KQV2DPixel.forward(self, x)
rebuild = self._rebuild(pixel_refine_with_attension)
return torch.add(x, rebuild)
pass
class KQV2DChannel(KQV):
def __init__(self, in_channels, mid_channels):
key_func = torch.nn.Conv2d(in_channels=in_channels, out_channels=mid_channels, \
kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
query_func = torch.nn.Conv2d(in_channels=in_channels, out_channels=mid_channels, \
kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
value_func = torch.nn.Conv2d(in_channels=in_channels, out_channels=mid_channels, \
kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
KQV.__init__(self, [1], key_func, query_func, value_func)
pass
pass
class NonHeightNonWidthNoChannel(torch.nn.Module):
def __init__(self, in_channels, kernel_size, mid_channels):
pass
pass
class NonLocalNonChannel(NonLocal):
def __init__(self, in_channels, mid_channels):
NonLocal.__init__(self, in_channels, mid_channels)
pass
```
#### File: vision/object_detection/box.py
```python
import copy
import torch
# tlwh: boxes, shape[batch, 4, ...], box format: (top_left_x, top_left_y, width, height)
# tlbr: boxes, shape[batch, 4, ...], box format: (top_left_x, top_left_y, bottom_right_x, bottom_right_y)
# cxcywh: boxes, shape[batch, 4, ...], box format: (center_x, center_y, width, height)
##@brief
# @note
# @param[in] boxes, shape[batch, 4, ...], box format: (top_left_x, top_left_y, width, height)
# @return cxcywh: boxes, shape[batch, 4, ...], box format: (center_x, center_y, width, height)
def _tlwh_to_cxcywh(box):
box = torch.cat([box[:, 0: 2] + box[:, 2: 4] / 2, box[:, 2: 4]], dim=1)
return box
##@brief convert boxes from tlwh layout to tlbr
# @note
# @param[in] boxes, shape[batch, 4, ...], box format: (top_left_x, top_left_y, width, height)
# @return tlbr: boxes, shape[batch, 4, ...], box format: (top_left_x, top_left_y, bottom_right_x, bottom_right_y)
def _tlwh_to_tlbr(box):
box = torch.cat([box[:, 0: 2], box[:, 0: 2] + box[:, 2: 4]], dim=1)
return box
##@brief
# @note
# @param[in] boxes, shape[batch, 4, ...], box can be the output of _tlwh_to_tlbr with format: (top_left_x, top_left_y, width, height)
# @return top_left_x, top_left_y, width, height, shape[batch๏ผ 1๏ผ ...] for every element
def _to_xywh(box):
return box[:, 0: 1], box[:, 1: 2], box[:, 2: 3], box[:, 3: 4]
##@brief
# @note
# @param[in] boxes, shape[batch, 4, ...], box can be the output of _tlwh_to_tlbr with format: (top_left_x, top_left_y, bottom_right_x, bottom_right_y)
# @return top_left_x, top_left_y, bottom_right_x, bottom_right_y, shape[batch๏ผ 1๏ผ ...] for every element
def _to_xyxy(box):
return box[:, 0: 1], box[:, 1: 2], box[:, 2: 3], box[:, 3: 4]
##@brief
# @note
# @param[in] boxes, shape[batch, 4, ...], box can be the output of _tlwh_to_cxcywh with format: (center_x, center_y, bottom_right_x, bottom_right_y)
# @return center_x, center_y, bottom_right_x, bottom_right_y, shape[batch๏ผ 1๏ผ ...] for every element
def _to_cxcywh(box):
return box[:, 0: 1], box[:, 1: 2], box[:, 2: 3], box[:, 3: 4]
##@brief compute the area of a box
# @note
# @param[in]
# @param[in]
# @param[in]
# @param[in]
# @return
def _box_area(x1, y1, x2, y2):
return (x2 - x1) * (y2 - y1)
##@brief get the intersection (cap) box
# @note
# @return
def _cap_box(x11, y11, x12, y12, x21, y21, x22, y22):
cap_x1 = torch.max(x11, x21)
cap_y1 = torch.max(y11, y21)
cap_x2 = torch.min(x12, x22)
cap_y2 = torch.min(y12, y22)
return cap_x1, cap_y1, cap_x2, cap_y2
##@brief get the smallest axis-aligned rectangle that contains both boxes
# @note
# @return
def _argmin_box(x11, y11, x12, y12, x21, y21, x22, y22):
closure_x1 = torch.min(x11, x21)
closure_y1 = torch.min(y11, y21)
closure_x2 = torch.max(x12, x22)
closure_y2 = torch.max(y12, y22)
return closure_x1, closure_y1, closure_x2, closure_y2
##@brief compute the intersection (cap) area
# @note
# @return
def _cap(x11, y11, x12, y12, x21, y21, x22, y22):
cap_x1, cap_y1, cap_x2 ,cap_y2 = _cap_box(x11, y11, x12, y12, x21, y21, x22, y22)
mask = (cap_y2 > cap_y1) * (cap_x2 > cap_x1)
cap = (cap_x2 * mask - cap_x1 * mask) * (cap_y2 * mask - cap_y1 * mask) # cap
return cap
##@brief compute the union (cup) area
# @note
# @return
def _cup(x11, y11, x12, y12, x21, y21, x22, y22, cap):
cup = (x12 - x11) * (y12 - y11) + (x22 - x21) * (y22 - y21) - cap
return cup
##@brief compute both the intersection (cap) and union (cup) areas
# @note this function exists because the cap and cup computations share intermediate results; computing them together avoids redundant work
# @return
def _cap_cup(x11, y11, x12, y12, x21, y21, x22, y22):
cap = _cap(x11, y11, x12, y12, x21, y21, x22, y22)
cup = _cup(x11, y11, x12, y12, x21, y21, x22, y22, cap)
return cap, cup
```
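A worked example for the helpers above with two overlapping 2x2 boxes (intersection 1, union 7), useful for sanity-checking the layout conventions:

```python
import torch
from Putil.torch.indicator.vision.object_detection import box

b1 = torch.tensor([[0.0, 0.0, 2.0, 2.0]])   # tlwh, shape [batch, 4]
b2 = torch.tensor([[1.0, 1.0, 2.0, 2.0]])
x11, y11, x12, y12 = box._to_xyxy(box._tlwh_to_tlbr(b1))
x21, y21, x22, y22 = box._to_xyxy(box._tlwh_to_tlbr(b2))
cap, cup = box._cap_cup(x11, y11, x12, y12, x21, y21, x22, y22)
print(cap.item(), cup.item())               # 1.0 7.0, so IoU = 1/7
```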
#### File: vision/object_detection/diou.py
```python
import torch
from Putil.torch.indicator.vision.object_detection import box
from Putil.torch.indicator.vision.object_detection import iou
def _square_center_distance(cx1, cy1, cx2, cy2):
return torch.pow(cx2 - cx1, 2) + torch.pow(cy2 - cy1, 2)
##@brief
# @note
# https://arxiv.org/pdf/1911.08287.pdf
# DIoU = IoU - \
# @param[in] pre
# float or double, positivated, [batch_size, ..., one_box] content of box: (top_left_x, top_left_y, width, height)
# @prarm[in] gt
# float or double, positivated, [batch_size, ..., one_box] content of box: (top_left_x, top_left_y, width, height)
# @ret
# 0: the iou [batch_size, ..., 1]
# 1: the giou [batch_size, ..., 1]
class DIoU(iou.iou):
def iou_index(self):
return 0
def __init__(self):
iou.iou.__init__(self)
pass
def forward(self, box1, box2):
cxcywh1 = box._tlwh_to_cxcywh(box1)
cx1, cy1, _, _ = box._to_cxcywh(cxcywh1)
cxcywh2 = box._tlwh_to_cxcywh(box2)
cx2, cy2, _, _ = box._to_cxcywh(cxcywh2)
tlbr1 = box._tlwh_to_tlbr(box1)
tlbr2 = box._tlwh_to_tlbr(box2)
x11, y11, x12, y12 = box._to_xyxy(tlbr1)
x21, y21, x22, y22 = box._to_xyxy(tlbr2)
_iou = iou._iou(x11, y11, x12, y12, x21, y21, x22, y22)
cbox_x1, cbox_y1, cbox_x2, cbox_y2 = box._argmin_box(x11, y11, x12, y12, x21, y21, x22, y22)
_area_c = box._box_area(cbox_x1, cbox_y1, cbox_x2, cbox_y2)
_square_d = _square_center_distance(cx1, cy1, cx2, cy2)
diou = _iou - _square_d / (torch.pow(_area_c, 2) + 1e-32)
return diou, _iou, _area_c, _square_d
```
#### File: vision/object_detection/giou.py
```python
import torch
from torch.nn import Module
from Putil.torch.indicator.vision.object_detection import box
from Putil.torch.indicator.vision.object_detection import iou
def _argmin_area(x11, y11, x12, y12, x21, y21, x22, y22):
cx1, cy1, cx2, cy2 = box._argmin_box(x11, y11, x12, y12, x21, y21, x22, y22)
area_c = box._box_area(cx1, cy1, cx2, cy2)
return area_c
##@brief
# @note
# https://arxiv.org/pdf/1902.09630.pdf
# C=\underset{C}{argmin}{(pre\cup gt\subseteq C)}
# GIoU=IoU-\frac{{{\parallel C -(pre\cup gt)\parallel}_0}}{{\parallel C\parallel}_0}
# IoU cannot be optimized directly when it equals 0, and bbox regression is not equivalent to IoU regression, so GIoU is proposed as an optimizable objective
# @param[in] pre
# float or double, positivated, [batch_size, one_box, ...] content of box:
# (top_left_x + any_x_shift, top_left_y + any_y_shift, width, height)
# @prarm[in] gt
# float or double, positivated, [batch_size, one_box, ...] content of box:
# (top_left_x + any_x_shift, top_left_y + any_y_shift, width, height)
# @ret
# 0: the iou [batch_size, ..., 1]
# 1: the giou [batch_size, ..., 1]
class GIoU(iou.iou):
def iou_index(self):
return 1
def __init__(self):
iou.iou.__init__(self)
pass
def forward(self, box1, box2):
box1 = box._tlwh_to_tlbr(box1)
box2 = box._tlwh_to_tlbr(box2)
x11, y11, x12, y12 = box._to_xyxy(box1)
x21, y21, x22, y22 = box._to_xyxy(box2)
cap, cup = box._cap_cup(x11, y11, x12, y12, x21, y21, x22, y22)
_iou = iou._cap_cup_iou(cap, cup)
_area_c = _argmin_area(x11, y11, x12, y12, x21, y21, x22, y22)
_giou = _iou - (_area_c - cup) / (_area_c + 1e-32)
return _iou, _giou
```
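With the same two 2x2 boxes as in the box.py example (IoU = 1/7, enclosing-box area C = 9, union = 7), the standard definition gives GIoU = 1/7 - (9 - 7)/9, roughly -0.079. A short check:

```python
import torch
from Putil.torch.indicator.vision.object_detection.giou import GIoU

b1 = torch.tensor([[0.0, 0.0, 2.0, 2.0]])   # tlwh
b2 = torch.tensor([[1.0, 1.0, 2.0, 2.0]])
_iou, _giou = GIoU()(b1, b2)
print(_iou.item(), _giou.item())            # ~0.143 and ~-0.079
```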
#### File: vision/object_detection/iou.py
```python
from abc import ABCMeta, abstractmethod
import torch
from Putil.torch.indicator.vision.object_detection import box
##@brief compute the IoU
# @note
# @return
def _iou(x11, y11, x12, y12, x21, y21, x22, y22):
cap, cup = box._cap_cup(x11, y11, x12, y12, x21, y21, x22, y22)
return cap / cup
def _cap_cup_iou(cap, cup):
return cap / cup
##@brief compute IoU on tensors shaped [batch, box, ...]; a box is laid out as [top_left_x, top_left_y, width, height]
# and the result is [batch, 1, ...] where the second dimension holds the IoU value. When a cell contains no gt_box it is
# represented by [0, 0, 0, 0], so the IoU obtained in the no-gt case differs and needs special care; computing the
# MeanIoU of a batch normally has to account for this.
# @note
class iou(torch.nn.Module):
def __init__(self):
torch.nn.Module.__init__(self)
pass
##@brief return the index of the plain IoU value within this object's forward output (some subclasses return several
# values, including intermediate data as well as the base IoU); this interface makes the corresponding IoU easy to locate
# @return int the index
@abstractmethod
def iou_index(self):
pass
@abstractmethod
def iou_mean(self, iou):
pass
class MeanIoU(torch.nn.Module):
def __init__(self):
torch.nn.Module.__init__(self)
pass
def forward(self, iou, obj_gt):
iou_filtered = iou * obj_gt
iou = torch.nansum(iou_filtered) / ((torch.isnan(iou_filtered).eq(False) * obj_gt).sum() + 1e-32)
return iou
##@brief
# @note
class IoU(iou):
def iou_index(self):
return 0
def __init__(self):
iou.__init__(self)
pass
def forward(self, box1, box2):
box1 = box._tlwh_to_tlbr(box1)
box2 = box._tlwh_to_tlbr(box2)
x11, y11, x12, y12 = box._to_xyxy(box1)
x21, y21, x22, y22 = box._to_xyxy(box2)
iou = _iou(x11, y11, x12, y12, x21, y21, x22, y22)
return iou,
```
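A minimal pairing of `IoU` with `MeanIoU` (the `obj_gt` mask marks cells that actually contain a ground-truth box):

```python
import torch
from Putil.torch.indicator.vision.object_detection.iou import IoU, MeanIoU

pred = torch.tensor([[0.0, 0.0, 2.0, 2.0]])   # tlwh
gt = torch.tensor([[1.0, 1.0, 2.0, 2.0]])
obj_gt = torch.tensor([[1.0]])                # 1 where a gt box exists, 0 otherwise
iou_value, = IoU()(pred, gt)                  # forward returns a one-element tuple
mean_iou = MeanIoU()(iou_value, obj_gt)
print(iou_value.item(), mean_iou.item())      # both ~0.143 for this single positive cell
```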
#### File: torch/loss/ghm_loss.py
```python
import torch
import torch.nn as nn
class GHM_Loss(nn.Module):
def __init__(self, bins, alpha):
super(GHM_Loss, self).__init__()
self._bins = bins
self._alpha = alpha
self._last_bin_count = None
def _g2bin(self, g):
return torch.floor(g * (self._bins - 0.0001)).long()
def _custom_loss(self, x, target, weight):
raise NotImplementedError
def _custom_loss_grad(self, x, target):
raise NotImplementedError
def forward(self, x, target):
g = torch.abs(self._custom_loss_grad(x, target))
bin_idx = self._g2bin(g)
bin_count = torch.zeros((self._bins))
for i in range(self._bins):
bin_count[i] = (bin_idx == i).sum().item()
N = x.size(0)
nonempty_bins = (bin_count > 0).sum().item()
gd = bin_count * nonempty_bins
gd = torch.clamp(gd, min=0.0001)
beta = N / gd
return self._custom_loss(x, target, beta[bin_idx])
class GHMC_Loss(GHM_Loss):
def __init__(self, bins, alpha):
super(GHMC_Loss, self).__init__(bins, alpha)
def _custom_loss(self, x, target, weight):
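# NOTE: 'device' below is not defined in this file; the original code appears to assume a
# module-level device (e.g. device = torch.device('cuda')) defined elsewhere in the project.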
return torch.sum((torch.nn.NLLLoss(reduce=False)(torch.log(x),target)).mul(weight.to(device).detach()))/torch.sum(weight.to(device).detach())
def _custom_loss_grad(self, x, target):
x=x.cpu().detach()
target=target.cpu()
return torch.tensor([x[i,target[i]] for i in range(target.shape[0])])-target
```
#### File: Putil/torch/one_hot.py
```python
import torch
from torch.nn import Module
class OneHot(Module):
def __init__(self, amount):
pass
def forward(self, x, dim):
pass
```
#### File: torch/optimization/combine_optimization.py
```python
import torch
class CombineOptimization:
def __init__(self, **optimizations):
self._optimizations = optimizations
pass
def step(self, closure=None):
for index, (k, v) in enumerate(self._optimizations.items()):
v.step()
pass
pass
def load_state_dict(self, state_dict, unexisted_strategy):
for index, (k, v) in enumerate(self._optimizations.items()):
if k in state_dict.dict():
v.load_state_dict(state_dict[k])
pass
else:
pass
pass
pass
```
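A hedged example that wires two optimizers over different parameter groups through `CombineOptimization`:

```python
import torch
from Putil.torch.optimization.combine_optimization import CombineOptimization

backbone = torch.nn.Linear(8, 8)
head = torch.nn.Linear(8, 2)
opt = CombineOptimization(
    backbone=torch.optim.SGD(backbone.parameters(), lr=0.1),
    head=torch.optim.Adam(head.parameters(), lr=1e-3))

loss = head(backbone(torch.randn(4, 8))).sum()
loss.backward()
opt.step()    # steps every wrapped optimizer in turn
```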
#### File: torch/pretrained_model/mobilenet1.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
class MobileNet(nn.Module):
def __init__(self):
super(MobileNet, self).__init__()
def conv_bn(inp, oup, stride):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU(inplace=True)
)
def conv_dw(inp, oup, stride):
return nn.Sequential(
nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
nn.BatchNorm2d(inp),
nn.ReLU(inplace=True),
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU(inplace=True),
)
self.model = nn.Sequential(
conv_bn(3, 32, 2),
conv_dw(32, 64, 1),
conv_dw(64, 128, 2),
conv_dw(128, 128, 1),
conv_dw(128, 256, 2),
conv_dw(256, 256, 1),
conv_dw(256, 512, 2),
conv_dw(512, 512, 1),
conv_dw(512, 512, 1),
conv_dw(512, 512, 1),
conv_dw(512, 512, 1),
conv_dw(512, 512, 1),
conv_dw(512, 1024, 2),
conv_dw(1024, 1024, 1),
nn.AvgPool2d(7),
)
self.fc = nn.Linear(1024, 1000)
def forward(self, x):
x = self.model(x)
x = x.view(-1, 1024)
x = self.fc(x)
return x
def get_bn_before_relu(self):
bn1 = self.model[3][-2]
bn2 = self.model[5][-2]
bn3 = self.model[11][-2]
bn4 = self.model[13][-2]
return [bn1, bn2, bn3, bn4]
def get_channel_num(self):
return [128, 256, 512, 1024]
def extract_feature(self, x, preReLU=False):
feat1 = self.model[3][:-1](self.model[0:3](x))
feat2 = self.model[5][:-1](self.model[4:5](F.relu(feat1)))
feat3 = self.model[11][:-1](self.model[6:11](F.relu(feat2)))
feat4 = self.model[13][:-1](self.model[12:13](F.relu(feat3)))
out = self.model[14](F.relu(feat4))
out = out.view(-1, 1024)
out = self.fc(out)
if not preReLU:
feat1 = F.relu(feat1)
feat2 = F.relu(feat2)
feat3 = F.relu(feat3)
feat4 = F.relu(feat4)
return [feat1, feat2, feat3, feat4], out
def mobilenet_v1(*args, **kwargs):
return MobileNet(*args, **kwargs)
```
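A quick shape check of the MobileNet feature extractor above, assuming ImageNet-sized 224x224 inputs so the final 7x7 average pool matches the feature map.
```python
import torch

net = mobilenet_v1()
x = torch.randn(1, 3, 224, 224)
feats, logits = net.extract_feature(x, preReLU=True)
print([tuple(f.shape) for f in feats])   # channel counts 128, 256, 512, 1024
print(tuple(logits.shape))               # (1, 1000)
```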
#### File: torch/pretrained_model/vgg.py
```python
from enum import Enum
from torchviz.dot import make_dot
import torch
from torch import nn
from torch.nn import Module
from torchvision.models import vgg
from torchvision.models.utils import load_state_dict_from_url
from torch.autograd import Variable
import Putil.base.logger as plog
vgg_logger = plog.PutilLogConfig('vgg').logger()
vgg_logger.setLevel(plog.DEBUG)
VGGLogger = vgg_logger.getChild('VGG')
VGGLogger.setLevel(plog.DEBUG)
def make_layers(cfg, downsample, batch_norm=False):
resolution_output = []
layers = []
in_channels = 3
downsample_time = 0
final_cfg = list()
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
resolution_output.append(layers[-1])
downsample_time += 1
if downsample == 2 ** downsample_time:
final_cfg.append(v)
break
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
pass
final_cfg.append(v)
pass
seq = nn.Sequential(*layers)
return seq, resolution_output, final_cfg
class VGG(Module):
class VGGArch(Enum):
vgg11 = 'vgg11'
vgg13 = 'vgg13'
vgg16 = 'vgg16'
vgg19 = 'vgg19'
vgg11_bn = 'vgg11_bn'
vgg13_bn = 'vgg13_bn'
vgg16_bn = 'vgg16_bn'
vgg19_bn = 'vgg19_bn'
vgg_arch_url_dic = {
VGGArch.vgg11.name: {'cfg': vgg.cfgs['A'], 'url': vgg.model_urls['vgg11']},
VGGArch.vgg13.name: {'cfg': vgg.cfgs['B'], 'url': vgg.model_urls['vgg13']},
VGGArch.vgg16.name: {'cfg': vgg.cfgs['D'], 'url': vgg.model_urls['vgg16']},
VGGArch.vgg19.name: {'cfg': vgg.cfgs['E'], 'url': vgg.model_urls['vgg19']},
VGGArch.vgg11_bn.name: {'cfg': vgg.cfgs['A'], 'url': vgg.model_urls['vgg11_bn']},
VGGArch.vgg13_bn.name: {'cfg': vgg.cfgs['B'], 'url': vgg.model_urls['vgg13_bn']},
VGGArch.vgg16_bn.name: {'cfg': vgg.cfgs['D'], 'url': vgg.model_urls['vgg16_bn']},
VGGArch.vgg19_bn.name: {'cfg': vgg.cfgs['E'], 'url': vgg.model_urls['vgg19_bn']}
}
def __init__(self, vgg_arch, downsample, model_dir, load_pretrained):
Module.__init__(self)
self.features, self._resolution_output, self._final_cfg = make_layers(
VGG.vgg_arch_url_dic[vgg_arch]['cfg'], downsample)
if load_pretrained:
VGGLogger.info('load pretrained: path: {} url: {}'.format(model_dir, VGG.vgg_arch_url_dic[vgg_arch]['url']))
state_dict = load_state_dict_from_url(VGG.vgg_arch_url_dic[vgg_arch]['url'], progress=True, model_dir=model_dir)
self.load_state_dict(state_dict, strict=False)
def forward(self, x):
return self.features(x)
@property
def resolution_output(self):
return self._resolution_output
@property
def final_cfg(self):
return self._final_cfg
pass
```
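A hypothetical construction sketch for the VGG backbone above; it assumes an older torchvision where `vgg.cfgs`, `vgg.model_urls` and `torchvision.models.utils.load_state_dict_from_url` exist, plus the Putil logging package, and it skips the pretrained download.
```python
import torch

backbone = VGG(VGG.VGGArch.vgg16.name, downsample=16, model_dir=None,
               load_pretrained=False)
features = backbone(torch.randn(1, 3, 224, 224))
print(tuple(features.shape), backbone.final_cfg)   # (1, 512, 14, 14) at 1/16 resolution
```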
#### File: Putil/torch/summary_writer.py
```python
import torch.multiprocessing as mp
from multiprocessing.managers import BaseManager
from tensorboardX import SummaryWriter
class Writer:
def __init__(self):
self._writer = SummaryWriter('./torch')
pass
pass
BaseManager.register('writer', Writer)
BaseManager.register('OWriter', SummaryWriter)
def main():
manager = BaseManager()
manager.start()
w = manager.writer()
mp.spawn(test, args=(w, ), nprocs=2)
w = manager.OWriter()
mp.spawn(test, args=(w, ), nprocs=2)
def test(n, w):
print(w)
if __name__ == '__main__':
main()
```
#### File: Putil/VGG16/standard_model.py
```python
import tensorflow as tf
class Model:
def __init__(self):
pass
def _max_pool(self, bottom, name):
return tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME', name=name)
    def _conv_layer(self, bottom, name):
        # NOTE: in the original, `with tf.variable_scope(name):` sat inside the
        # tf.layers.conv2d argument list, which is a syntax error; the scope now
        # wraps the call. `grow`, `param_dtype`, `regularize_weight` and
        # `layers` (tf.contrib.layers) are assumed to be defined elsewhere.
        with tf.variable_scope(name):
            conv = tf.layers.conv2d(
                bottom,
                filters=grow,
                kernel_size=(3, 3),
                strides=(1, 1),
                padding="same",
                kernel_initializer=tf.variance_scaling_initializer(mode='fan_avg', dtype=param_dtype),
                kernel_regularizer=layers.l2_regularizer(regularize_weight),
                bias_initializer=tf.zeros_initializer(dtype=param_dtype),
                bias_regularizer=layers.l2_regularizer(regularize_weight),
                name='conv'
            )
conv_biases = self.get_bias(name)
bias = tf.nn.bias_add(conv, conv_biases)
relu = tf.nn.relu(bias)
return relu
# Input should be an rgb image [batch, height, width, 3]
# values scaled [0, 1]
def build(self, feed, train=False):
self.relu1_1 = self._conv_layer(feed, "conv1_1")
self.relu1_2 = self._conv_layer(self.relu1_1, "conv1_2")
self.pool1 = self._max_pool(self.relu1_2, 'pool1')
self.relu2_1 = self._conv_layer(self.pool1, "conv2_1")
self.relu2_2 = self._conv_layer(self.relu2_1, "conv2_2")
self.pool2 = self._max_pool(self.relu2_2, 'pool2')
self.relu3_1 = self._conv_layer(self.pool2, "conv3_1")
self.relu3_2 = self._conv_layer(self.relu3_1, "conv3_2")
self.relu3_3 = self._conv_layer(self.relu3_2, "conv3_3")
self.pool3 = self._max_pool(self.relu3_3, 'pool3')
self.relu4_1 = self._conv_layer(self.pool3, "conv4_1")
self.relu4_2 = self._conv_layer(self.relu4_1, "conv4_2")
self.relu4_3 = self._conv_layer(self.relu4_2, "conv4_3")
self.pool4 = self._max_pool(self.relu4_3, 'pool4')
self.relu5_1 = self._conv_layer(self.pool4, "conv5_1")
self.relu5_2 = self._conv_layer(self.relu5_1, "conv5_2")
self.relu5_3 = self._conv_layer(self.relu5_2, "conv5_3")
self.pool5 = self._max_pool(self.relu5_3, 'pool5')
pass
pass
if __name__ == '__main__':
vgg16 = Model()
feed = tf.placeholder(tf.float32, [10, 224, 224, 3], name='feed')
vgg16.build(feed)
pass
```
#### File: Putil/visual/matplotlib_plot.py
```python
import random
class random_type:
def __init__(self):
self._line_type = ['-', '--', ':']
self._color = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w']
self._marker = [
'.', ',', 'o', 'v', '^', '>', '<',
'1', '2', '3', '4', 's', 'p', '*',
'h', 'H', '+', 'x', 'D', 'd', '|', '_'
]
self._len_l = len(self._line_type)
self._len_co = len(self._color)
self._len_ma = len(self._marker)
self._used = list()
pass
def type_gen(self, **options):
color = options.pop('color', None)
marker = options.pop('marker', None)
line = options.pop('line', None)
if line is not None:
pass
else:
line = self._line_type[random.choice(list(range(0, self._len_l)))]
pass
if color is not None:
pass
else:
color = self._color[random.choice(list(range(0, self._len_co)))]
pass
if marker is not None:
pass
else:
marker = self._marker[random.choice(list(range(0, self._len_ma)))]
type = '{0}{1}{2}'.format(color, marker, line)
self._used.append(type)
return type
pass
def no_repeat(self, type):
if type in self._used:
return True
else:
return False
pass
pass
```
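A small usage sketch for random_type above: draw a few random matplotlib format strings and plot with them (matplotlib and numpy are assumed to be installed).
```python
import matplotlib.pyplot as plt
import numpy as np

gen = random_type()
x = np.linspace(0.0, 1.0, 20)
for power in range(1, 4):
    fmt = gen.type_gen()            # e.g. 'r*-': color + marker + line style
    plt.plot(x, x ** power, fmt)
plt.show()
```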
#### File: Putil/Yolo/yolo2_model_base.py
```python
import tensorflow as tf
import tensorflow.contrib.layers as layers
from colorama import Fore
import numpy as np
import random
import Putil.np.util as npu
import Putil.tf.util as tfu
import Putil.base.logger as plog
root_logger = plog.PutilLogConfig('yolo2ModelBase').logger()
root_logger.setLevel(plog.DEBUG)
Yolo2BuildLogger = root_logger.getChild('Yolo2Build')
Yolo2BuildLogger.setLevel(plog.DEBUG)
Yolo2GenerateFeedLogger = root_logger.getChild('Yolo2GenerateFeed')
Yolo2GenerateFeedLogger.setLevel(plog.DEBUG)
StandardYolo2GenerateLogger = root_logger.getChild('StandardYolo2Generate')
StandardYolo2GenerateLogger.setLevel(plog.DEBUG)
Yolo2GenerateLogger = root_logger.getChild('Yolo2Generate')
Yolo2GenerateLogger.setLevel(plog.DEBUG)
Yolo2GenerateILogger = root_logger.getChild('Yolo2GenerateI')
Yolo2GenerateILogger.setLevel(plog.DEBUG)
assert tf.__version__ == '1.6.0', Fore.RED + 'version of tensorflow should be 1.6.0'
class Yolo2Build:
def __init__(self, net_output, class_num, prior_h, prior_w, scalar, _dtype):
self._net_output = net_output
self._class_amount = class_num
self.__check_prior(prior_h, prior_w)
self._prior_height = prior_h
self._prior_width = prior_w
self._cluster_object_count = len(prior_h)
self._scalar = scalar
self._dtype = _dtype
self._pro__ = self.GeneratePro()
self._output_tensor = self._pro__['pro']
self._anchor_pro__ = self._pro__['anchor']
self._precision_pro__ = self._pro__['precision']
self._class_pro__ = self._pro__['class']
self._y_pro__ = self._pro__['y']
self._x_pro__ = self._pro__['x']
self._h_pro__ = self._pro__['h']
self._w_pro__ = self._pro__['w']
self._output_loss__, self._place_gt_result__, self._iou_result__ = self.AppendLoss()
self._gt_one_hot_class__ = self._place_gt_result__['class']
self._gt_feed_class__ = self._place_gt_result__['feed_class']
self._gt_y_offset__ = self._place_gt_result__['y_offset']
self._gt_y__ = self._place_gt_result__['y']
self._gt_y_feed__ = self._place_gt_result__['y_feed']
self._gt_x_offset__ = self._place_gt_result__['x_offset']
self._gt_x__ = self._place_gt_result__['x']
self._gt_x_feed__ = self._place_gt_result__['x_feed']
self._gt_h__ = self._place_gt_result__['h']
self._gt_h_feed__ = self._place_gt_result__['h_feed']
self._gt_w__ = self._place_gt_result__['w']
self.__gt_w_feed__ = self._place_gt_result__['w_feed']
self._anchor_mask__ = self._place_gt_result__['anchor_mask']
self._negative_anchor_mask__ = self._place_gt_result__['negative_anchor_mask']
self._total_loss__ = self._output_loss__['total_loss']
self._anchor_loss__ = self._output_loss__['anchor_loss']
self._precision_loss__ = self._output_loss__['precision_loss']
self._class_loss__ = self._output_loss__['class_loss']
self._indicator_mean_iou = self._output_loss__['mean_iou']
self._indicator_classify_top_one_acc = self._output_loss__['classify_top_one_acc']
self._gt_iou__ = self._iou_result__
pass
@property
def IndicatorClassifyTopOneAcc(self):
return self._indicator_classify_top_one_acc
@property
def IndicatorIoU(self):
return self._indicator_mean_iou
@property
def NewOutput(self):
return self._net_output
@property
def ClassAmount(self):
return self._class_amount
@property
def PriorHeight(self):
return self._prior_height
@property
def PriorWidth(self):
return self._prior_width
@property
def ClusterObjectAmount(self):
return self._cluster_object_count
@property
def Scalar(self):
return self._scalar
@property
def Dtype(self):
return self._dtype
@property
def Pro(self):
return self._output_tensor
@property
def AnchorPro(self):
return self._anchor_pro__
@property
def PrecisionPro(self):
return self._precision_pro__
@property
def ClassPro(self):
return self._class_pro__
@property
def YPro(self):
return self._y_pro__
@property
def XPro(self):
return self._x_pro__
@property
def HPro(self):
return self._h_pro__
@property
def WPro(self):
return self._w_pro__
@property
def GtOneHotClass(self):
return self._gt_one_hot_class__
@property
def GtClassFeed(self):
return self._gt_feed_class__
@property
def GtYOffset(self):
return self._gt_y_offset__
@property
def GtY(self):
return self._gt_y__
@property
def GtYFeed(self):
return self._gt_y_feed__
@property
def GtXOffset(self):
return self._gt_x_offset__
@property
def GtX(self):
return self._gt_x__
@property
def GtxFeed(self):
return self._gt_x_feed__
@property
def GtH(self):
return self._gt_h__
@property
def GtHFeed(self):
return self._gt_h_feed__
@property
def GtW(self):
return self._gt_w__
@property
def GtWFeed(self):
return self.__gt_w_feed__
@property
def AnchorMask(self):
return self._anchor_mask__
@property
def NegativateAnchorMask(self):
return self._negative_anchor_mask__
@property
def TotalLoss(self):
return self._total_loss__
@property
def AnchorLoss(self):
return self._anchor_loss__
@property
def PrecisionLoss(self):
return self._precision_loss__
@property
def ClassLoss(self):
return self._class_loss__
@property
def GtIou(self):
return self._gt_iou__
def __check_prior(self, prior_h, prior_w):
# check failed throw exception
return True
pass
def GeneratePro(self):
return gen_pro(self._net_output, self._class_amount, self._cluster_object_count, self._dtype)
pass
def AppendLoss(self):
return append_yolo2_loss(self._pro__, self._class_amount, self._prior_height, self._prior_width, self._scalar,
self._dtype)
pass
pass
def append_yolo2_loss(
yolo2_net_feature,
class_num,
prior_h,
prior_w,
scalar,
_dtype=0.32
):
"""
:param yolo2_net_feature: feature from base net output
:param class_num: the count of the class with background
:param prior_h: prior height list or 1-D ndarray
:param prior_w: prior width list or 1-D ndarray
:param scalar: down sample scalar
:param _dtype: model parameter dtype, default 0.32
:return:
"""
assert len(prior_w) == len(prior_h), Fore.RED + 'prior height should be same length with prior width'
print(Fore.YELLOW + '-------generate yolo2 loss---------')
print(Fore.GREEN + 'class_num : ', class_num)
print(Fore.GREEN + 'prior_h : ', prior_h)
print(Fore.GREEN + 'prior_w : ', prior_w)
print(Fore.GREEN + 'scalar : ', scalar)
cluster_object_count = len(prior_w)
place_gt_result = __PlaceGT(cluster_object_count=cluster_object_count, _dtype=_dtype).Place
place_process_result = __place_process(
place_gt_result,
class_num,
prior_h,
prior_w,
scalar=scalar,
_dtype=_dtype
)
pro_result_read_result = __pro_result_reader(
split_pro_result=yolo2_net_feature)
calc_iou_result = __calc_iou(
pro_result_read_result=pro_result_read_result,
place_process_result=place_process_result,
scalar=scalar,
prior_h=prior_h,
prior_w=prior_w,
_dtype=_dtype
)
loss = __calc_loss(
split_pro_result=yolo2_net_feature,
gt_process_result=place_process_result,
calc_iou_result=calc_iou_result)
print(Fore.YELLOW + '-------generate yolo2 loss done---------')
return loss, place_process_result, calc_iou_result
# generates the placeholders for the full feed; designed to be easy to use and generate
# gt is the ground-truth data
# 'class' : int, includes the background and every kind of object
# 'p_mask' : set to 1.0 at cell locations which hold an object and 0.0 elsewhere
# 'n_mask' : set to 1.0 at cell locations which do not contain any object and 0.0 elsewhere
# 'y': object center y shift from the top-left point in the cell, set to 0.0 for cells that contain no object
# 'x': object center x shift from the top-left point in the cell, set to 0.0 for cells that contain no object
# relationship between the real (center_y, center_x, height, width) and (y_shift, x_shift, h_shift, w_shift):
class __PlaceGT:
def __init__(self, cluster_object_count, _dtype):
gt_place = dict()
dtype = tfu.tf_type(_dtype).Type
with tf.name_scope('GT'):
gt_place['class'] = tf.placeholder(dtype=tf.int32, shape=[None, None, None, cluster_object_count],
name='class')
# set 0.0 in the cell which does not contain any object except background
gt_place['y'] = tf.placeholder(dtype=dtype, shape=[None, None, None, cluster_object_count],
name='y')
gt_place['x'] = tf.placeholder(dtype=dtype, shape=[None, None, None, cluster_object_count],
name='x')
# !!!!important: because of the follow process in (__place_process), hw should not contain negative and zero
# !!!!suggest fill prior value in the cell location which does not contain any object
gt_place['h'] = tf.placeholder(dtype=dtype, shape=[None, None, None, cluster_object_count],
name='h')
gt_place['w'] = tf.placeholder(dtype=dtype, shape=[None, None, None, cluster_object_count],
name='w')
# the mask frequently used in calc loss
gt_place['p_mask'] = tf.placeholder(dtype=dtype, shape=[None, None, None, 1], name='p_mask')
gt_place['n_mask'] = tf.placeholder(dtype=dtype, shape=[None, None, None, 1], name='n_mask')
# avoid learning illegal anchor
gt_place['anchor_mask'] = tf.placeholder(
dtype=dtype, shape=[None, None, None, cluster_object_count], name='anchor_mask')
pass
self._gt_place = gt_place
pass
@property
def Place(self):
return self._gt_place
def __generate(self):
return self._gt_place
pass
@property
def Class(self):
return self._gt_place['class']
@property
def Y(self):
return self._gt_place['y']
@property
def X(self):
return self._gt_place['x']
@property
def H(self):
return self._gt_place['h']
@property
def W(self):
return self._gt_place['w']
@property
def PMask(self):
return self._gt_place['p_mask']
@property
def NMask(self):
return self._gt_place['n_mask']
@property
def LegalAnchor(self):
return self._gt_place['anchor_mask']
pass
# : the pro tensor is not easy to used in calc loss, make same process in this function, this function should make
# : sure gradient can propagate directly
def __split_pro_ac(pro, class_num, cluster_object_count):
"""
to split the pro of yolo2 into several part
:param pro: the pro of gen_pro
:param class_num: class amount
:param cluster_object_count: prior anchor amount
:return:
{'pro': pro, 'anchor': anchor_pro, 'precision': precision_pro, 'class': class_pro,
'y': y_pro, 'x': x_pro, 'h': h_pro, 'w': w_pro}
pro: the input pro [y, x, h, w, precision, class_part, .., ..., ]
anchor: take all anchor and concat then[batch, cell_height, cell_width, cluster_object_count, 4(include [y, x, h, w])]
precision:[batch, cell_height, cell_width, cluster_object_count]
class:[batch * cell_height * cell_width * cluster_object_count, class_amount]
y: [batch, cell_height, cell_width, cluster_object_count]
x: [batch, cell_height, cell_width, cluster_object_count]
h: [batch, cell_height, cell_width, cluster_object_count]
w: [batch, cell_height, cell_width, cluster_object_count]
"""
with tf.name_scope('split_and_pro'):
# generate all part y x: sigmoid; h w: None; precision: sigmoid; class: part softmax
with tf.name_scope('total_split'):
with tf.name_scope('y_part'):
y_part = pro[:, :, :, 0: ((cluster_object_count - 1) * (4 + 1 + class_num) + 1): 4 + 1 + class_num]
y_pro = y_part
pass
with tf.name_scope('x_part'):
x_part = pro[:, :, :, 1: ((cluster_object_count - 1) * (4 + 1 + class_num) + 2): 4 + 1 + class_num]
x_pro = x_part
pass
with tf.name_scope('h_part'):
h_part = pro[:, :, :, 2: ((cluster_object_count - 1) * (4 + 1 + class_num) + 3): 4 + 1 + class_num]
h_pro = h_part
pass
with tf.name_scope('w_part'):
w_part = pro[:, :, :, 3: ((cluster_object_count - 1) * (4 + 1 + class_num) + 4): 4 + 1 + class_num]
w_pro = w_part
pass
with tf.name_scope('precision_part'):
precision_part = pro[:, :, :,
4: ((cluster_object_count - 1) * (4 + 1 + class_num) + 5): 4 + 1 + class_num]
precision_pro = precision_part
pass
with tf.name_scope('class_part'):
class_part = tf.reshape(pro, [-1, 4 + 1 + class_num])
class_part = class_part[:, 5::]
class_pro = class_part
pass
pass
with tf.name_scope('anchor_pro'):
anchor_pro = tf.concat(
[tf.expand_dims(y_pro, axis=-1), tf.expand_dims(x_pro, -1), tf.expand_dims(h_pro, -1),
tf.expand_dims(w_pro, -1)],
axis=-1)
return {'pro': pro, 'anchor': anchor_pro, 'precision': precision_pro, 'class': class_pro,
'y': y_pro, 'x': x_pro, 'h': h_pro, 'w': w_pro}
pass
# : this function is used to generate the standard pro in yolo-version2 network, which split into
# {'pro': pro, 'anchor': anchor_pro, 'precision': precision_pro, 'class': class_pro,
# 'y': y_pro, 'x': x_pro, 'h': h_pro, 'w': w_pro}
def gen_pro(other_new_feature, class_num, cluster_object_count, _dtype=0.32):
"""
pro = {'pro': pro, 'anchor': anchor_pro, 'precision': precision_pro, 'class': class_pro,
'y': y_pro, 'x': x_pro, 'h': h_pro, 'w': w_pro}
:param other_new_feature: base net feature
:param class_num:
:param cluster_object_count:
:return:
"""
print(Fore.YELLOW + '-----------generate yolo2 base pro---------')
print(Fore.GREEN + 'class_num : ', class_num)
print(Fore.GREEN + 'cluster_object_count : ', cluster_object_count)
feature_chanel = other_new_feature.shape.as_list()[-1]
dtype = tfu.tf_type(_dtype).Type
with tf.name_scope('yolo_pro'):
weight = tf.get_variable(
name='compress_w',
shape=[1, 1, feature_chanel, cluster_object_count * (class_num + 4 + 1)],
initializer=layers.variance_scaling_initializer(seed=0.5, mode='FAN_AVG'),
dtype=dtype
)
# bias = tf.get_variable(
# name='compress_b',
# shape=[cluster_object_count * (class_num + 4 + 1)],
# initializer=layers.variance_scaling_initializer(seed=0.5, mode='FAN_AVG'),
# dtype=dtype
# )
conv = tf.nn.conv2d(other_new_feature, weight, [1, 1, 1, 1], padding='SAME', name='conv')
# add = tf.nn.bias_add(conv, bias, name='bias_add')
add = conv
pass
pro = __split_pro_ac(add, class_num, cluster_object_count)
return pro
pass
# : the result of place_gt are not easy to used to calc loss, make some process in the function
def __place_process(gt_place_result, class_num, prior_h, prior_w, scalar, _dtype):
"""
process the placeholder for using in the network easier
:param gt_place_result: the result of placeholder
:param class_num: the count of the class type
:param prior_h: prior height list
:param prior_w: prior width list
:param scalar: down sample scalar
:return:
"""
dtype = tfu.tf_type(_dtype).Type
gt_process = dict()
assert len(prior_h) == len(prior_w), Fore.RED + 'len of the prior_h and prior_w should be the same'
with tf.name_scope('gt_place_process'):
gt_process_one_hot = tf.one_hot(
gt_place_result['class'],
class_num,
1.0,
0.0,
name='one_hot',
dtype=dtype)
gt_process['class'] = tf.reshape(gt_process_one_hot, [-1, class_num], name='one_hot_reshape')
gt_process['feed_class'] = gt_place_result['class']
gt_process['y_offset'] = tf.floormod(gt_place_result['y'], scalar)
gt_process['y'] = tf.div(gt_process['y_offset'], scalar)
gt_process['y_feed'] = gt_place_result['y']
gt_process['x_offset'] = tf.floormod(gt_place_result['x'], scalar)
gt_process['x'] = tf.div(gt_process['x_offset'], scalar)
gt_process['x_feed'] = gt_place_result['x']
gt_process['h'] = tf.log(tf.div(gt_place_result['h'] + 1.0e-32, prior_h))
gt_process['h_feed'] = gt_place_result['h']
gt_process['w'] = tf.log(tf.div(gt_place_result['w'] + 1.0e-32, prior_w))
gt_process['w_feed'] = gt_place_result['w']
gt_process['anchor_mask'] = gt_place_result['anchor_mask']
gt_process['negative_anchor_mask'] = 1 - gt_place_result['anchor_mask']
pass
return gt_process
# : to read the pro result, avoid the gradient propagate from precision loss to the network twice
def __pro_result_reader(split_pro_result):
"""
read the pro result, avoid the gradient propagate from precision loss to the network twice
:param split_pro_result: __split_pro result
:return:
"""
pro_result_read = dict()
pro_result_read['y'] = tf.identity(split_pro_result['y'], name='y_read')
pro_result_read['x'] = tf.identity(split_pro_result['x'], name='x_read')
pro_result_read['h'] = tf.identity(split_pro_result['h'], name='h_read')
pro_result_read['w'] = tf.identity(split_pro_result['w'], name='w_read')
return pro_result_read
pass
# : use gt_anchor and anchor_pro to calc iou, output used to calc the precision loss
def __calc_iou(pro_result_read_result, place_process_result, scalar, prior_h, prior_w, _dtype):
yt = place_process_result['y_offset']
xt = place_process_result['x_offset']
ht = place_process_result['h_feed']
wt = place_process_result['w_feed']
anchor_mask = place_process_result['anchor_mask']
with tf.name_scope('calc_iou'):
yp = pro_result_read_result['y'] * scalar
xp = pro_result_read_result['x'] * scalar
hp = tf.multiply(tf.exp(pro_result_read_result['h']), prior_h)
wp = tf.multiply(tf.exp(pro_result_read_result['w']), prior_w)
min_bottom = tf.reduce_min(
tf.concat([tf.expand_dims(yp + 0.5 * hp, -1), tf.expand_dims(yt + 0.5 * ht, -1)], axis=-1), axis=-1)
max_top = tf.reduce_max(
tf.concat([tf.expand_dims(yp - 0.5 * hp, -1), tf.expand_dims(yt - 0.5 * ht, -1)], axis=-1), axis=-1)
min_right = tf.reduce_min(
tf.concat([tf.expand_dims(xp + 0.5 * wp, -1), tf.expand_dims(xt + 0.5 * wt, -1)], axis=-1), axis=-1)
max_left = tf.reduce_max(
tf.concat([tf.expand_dims(xp - 0.5 * wp, -1), tf.expand_dims(xt - 0.5 * wt, -1)], axis=-1), axis=-1)
cross_area = tf.multiply(tf.nn.relu(min_right - max_left), tf.nn.relu(min_bottom - max_top), name='cross_area')
pass
all_iou = tf.div(
cross_area,
tf.subtract(tf.add(tf.multiply(ht, wt, name='gt_area'), tf.multiply(hp, wp, name='pre_area')), cross_area),
name='all_iou')
iou = tf.multiply(all_iou, anchor_mask, name='iou_apply_anchor_mask')
return iou
pass
# : generate the loss op
def __calc_loss(split_pro_result, gt_process_result, calc_iou_result, anchor_loss_weight=1.0, precision_loss_weight=1.0,
class_loss_weight=1.0, lambda_obj=1.0, lambda_nobj=1.0):
y_pro = split_pro_result['y']
x_pro = split_pro_result['x']
h_pro = split_pro_result['h']
w_pro = split_pro_result['w']
precision_pro = split_pro_result['precision']
class_pro = split_pro_result['class']
anchor_mask = gt_process_result['anchor_mask']
negative_anchor_mask = gt_process_result['negative_anchor_mask']
gt_y = gt_process_result['y']
gt_x = gt_process_result['x']
gt_h = gt_process_result['h']
gt_w = gt_process_result['w']
gt_class = gt_process_result['class']
legal_anchor_amount = tf.add(tf.reduce_sum(anchor_mask, name='legal_anchor_amount'), 1.0e-32, name='avoid_zero')
negative_anchor_amount = tf.add(tf.reduce_sum(negative_anchor_mask, name='negative_anchor_amount'), 1.0e-32,
name='avoid_zero')
with tf.name_scope('process'):
anchor_mask_reshape = tf.reshape(anchor_mask, [-1])
pass
with tf.name_scope('indicator'):
with tf.name_scope('iou'):
iou = tf.div(tf.reduce_sum(tf.multiply(calc_iou_result, anchor_mask, name='apply_anchor_mask')),
legal_anchor_amount, name='average_iou')
pass
with tf.name_scope('top_one_classify_acc'):
# important:
# if pro_class or class_pro has all zero or equal data just like [[0, 0], ...]
# it would make the top location mixed
# so we must calculate the score to make it more sense:
# correct_score_sum / legal_anchor_amount
wrong_mask = tf.multiply(tf.cast(tf.subtract(tf.argmax(class_pro, axis=-1),
tf.argmax(gt_class, axis=-1)), dtype=anchor_mask.dtype),
anchor_mask_reshape)
correct_mask = tf.multiply(tf.subtract(1.0, wrong_mask), anchor_mask_reshape)
# correct_count = tf.count_nonzero(correct_mask, dtype=legal_anchor_amount.dtype)
correct_score_amount = tf.reduce_sum(tf.multiply(tf.reduce_max(tf.nn.softmax(class_pro, axis=-1)), correct_mask))
# classify_top_one_acc = tf.multiply(tf.div(correct_count, legal_anchor_amount),
# tf.div(correct_score_amount, legal_anchor_amount))
classify_top_one_acc = tf.div(correct_score_amount, legal_anchor_amount)
pass
pass
with tf.name_scope('loss'):
with tf.name_scope('anchor_loss'):
# yx loss part
with tf.name_scope('yx_loss'):
yx_loss = tf.add(
tf.square(tf.subtract(y_pro, gt_y, name='y_sub') * anchor_mask, name='y_square'),
tf.square(tf.subtract(x_pro, gt_x, name='x_sub') * anchor_mask, name='x_square'),
name='y_x_add')
pass
# hw loss part
with tf.name_scope('hw_loss'):
hw_loss = tf.add(
tf.square(tf.subtract(tf.sqrt(h_pro * anchor_mask, name='h_pro_sqrt'),
tf.sqrt(gt_h * anchor_mask, name='gt_h_sqrt'), name='h_sub'),
name='h_square'),
tf.square(tf.subtract(tf.sqrt(w_pro * anchor_mask, name='w_pro_sqrt'),
tf.sqrt(gt_w * anchor_mask, name='gt_w_sqrt'), name='w_sub'),
name='w_square'),
name='hw_add')
pass
# anchor loss
anchor_loss = tf.add(
tf.multiply(
lambda_obj,
tf.div(
tf.reduce_sum(yx_loss, name='batch_sum'),
legal_anchor_amount,
name='yx_anchor_obj_mean'),
name='apply_lambda_weight'),
tf.multiply(
lambda_obj,
tf.div(
tf.reduce_sum(hw_loss, name='batch_sum'),
legal_anchor_amount,
name='hw_anchor_obj_mean'),
name='apply_lambda_weight'),
name='anchor_loss_sum'
)
anchor_loss = tf.multiply(anchor_loss, anchor_loss_weight, name='apply_anchor_loss_weight')
# anchor_loss = gt_w
pass
with tf.name_scope('precision_loss'):
precision_loss_all = tf.square(tf.subtract(precision_pro, calc_iou_result))
p_precision_loss = lambda_obj * tf.div(
tf.reduce_sum(tf.multiply(precision_loss_all, anchor_mask, name='apply_anchor_mask')),
legal_anchor_amount)
n_precision_loss = lambda_nobj * tf.div(
tf.reduce_sum(tf.multiply(precision_loss_all, negative_anchor_mask, name='apply_negative_anchor_mask')),
negative_anchor_amount)
precision_loss = tf.add(p_precision_loss, n_precision_loss, name='sum')
precision_loss = tf.multiply(precision_loss, precision_loss_weight, name='apply_precision_loss_weight')
# precision_loss = tf.add(p_precision_loss, n_precision_loss, name='loss')
pass
with tf.name_scope('class_loss'):
# class calc softmax entropy loss and multiply the anchor mask
class_loss_whole = tf.multiply(
tf.nn.softmax_cross_entropy_with_logits_v2(labels=gt_class, logits=class_pro),
tf.reshape(anchor_mask, [-1]),
name='class_loss')
_class_loss = tf.multiply(
lambda_obj,
tf.div(tf.reduce_sum(class_loss_whole, name='batch_sum'), legal_anchor_amount,
name='class_anchor_obj_mean'),
name='apply_lambda_weight')
# _class_loss = class_loss_whole
# _class_loss = legal_anchor_amount
class_loss = tf.multiply(_class_loss, class_loss_weight, name='apply_class_loss_weight')
pass
total_loss = tf.add(anchor_loss, tf.add(precision_loss, class_loss), name='total_loss')
pass
return {'total_loss': total_loss, 'anchor_loss': anchor_loss, 'precision_loss': precision_loss,
'class_loss': class_loss, 'mean_iou': iou, 'classify_top_one_acc': classify_top_one_acc}
pass
import six
import abc
@six.add_metaclass(abc.ABCMeta)
class Yolo2GenerateI(object):
"""
use normal information to generate the tensor feeding into the network build with above function
generate: y, x, w, h, class, obj_mask, nobj_mask, anchor_mask
"""
@abc.abstractmethod
def _default_generate_feed_function(self, param):
pass
@abc.abstractmethod
def CheckGenerateFeedParamFit(self, param):
pass
@abc.abstractmethod
def _default_generate_result_function(self, param):
pass
@abc.abstractmethod
def CheckGenerateResultParamFit(self, param):
pass
pass
@six.add_metaclass(abc.ABCMeta)
class Yolo2Generate(Yolo2GenerateI):
def __init__(self):
self._generate_feed_function = self._default_generate_feed_function
self._generate_result_function = self._default_generate_result_function
pass
def GenerateFeed(self, param):
Yolo2GenerateLogger.info('-->GenerateFeed')
return self._generate_feed_function(param)
pass
def GenerateResult(self, param):
return self._generate_result_function(param)
pass
pass
"""
StandardYolo2Generate:
the paper use:
the center of prior anchor is locate at (i * scalar, j * scalar)
anchor mask:[batch, cell_height, cell_width, prior_anchor_amount]
        every obj gets one and only one nearest anchor to predict it
        any anchor that does not hold an obj is rejected: place zero
        any anchor that crosses the edge of the image is rejected: place zero
    *************************************important*************************************
    (some conditions:
    if more than one prior anchor reaches the same maximum IoU with gt_x,
    they are all assigned to that gt_x at the same time
    if no prior remains to provide the gt prediction, this gt is abandoned)
#####################################important#####################################
obj mask:[batch, cell_height, cell_width, prior_anchor_amount]
any cell does not hold any obj place zero
any cell does hold any obj place one
nobj mask:[batch, cell_height, cell_width, prior_anchor_amount]
any cell does not hold any obj place one
any cell does hold any obj place zero
y:[batch, cell_height, cell_width, prior_anchor_amount]
y = (real_center_y % scalar) / scalar
x:[batch, cell_height, cell_width, prior_anchor_amount]
x = (real_center_x % scalar) / scalar
h:[batch, cell_height, cell_width, prior_anchor_amount]
h = ln(real_height / prior_height)
w:[batch, cell_height, cell_width, prior_anchor_amount]
w = ln(real_width / prior_width)
class:[batch, cell_height, cell_width, prior_anchor_amount]
class = obj_represent_int
"""
import Putil.calc.estimate as es
class StandardYolo2Generate(Yolo2Generate):
def __init__(self, prior_hw, scalar):
Yolo2Generate.__init__(self)
self.feed_height = None
self.feed_width = None
self.y = None
self.x = None
self.h = None
self.w = None
self.anchor_mask = None
self.classify = None
self.obj_mask = None
self.nobj_mask = None
self.scalar = scalar
self._dtype = None
self.prior_hw = prior_hw
self.anchor_amount = len(prior_hw)
pass
def __update_feed_shape(self, feed_height, feed_width, anchor_amount, _dtype):
"""
while any parameter include batch, feed_height, feed_width, anchor_amount, _dtype
changed, the tensor shape or type should be changed
this is the function for updating the tensor
:return:
"""
self._dtype = _dtype
self.feed_height = feed_height
self.feed_width = feed_width
self.anchor_amount = anchor_amount
dtype = npu.np_type(self._dtype).Type
self.y = np.zeros(shape=[1, self.feed_height, self.feed_width, self.anchor_amount], dtype=dtype)
self.x = np.zeros(shape=[1, self.feed_height, self.feed_width, self.anchor_amount], dtype=dtype)
self.h = np.zeros(shape=[1, self.feed_height, self.feed_width, self.anchor_amount], dtype=dtype)
self.w = np.zeros(shape=[1, self.feed_height, self.feed_width, self.anchor_amount], dtype=dtype)
self.anchor_mask = np.zeros(shape=[1, self.feed_height, self.feed_width, self.anchor_amount], dtype=dtype)
self.classify = np.zeros(shape=[1, self.feed_height, self.feed_width, self.anchor_amount], dtype=dtype)
pass
# unused
def __cross_edge_anchor_reject(self):
"""
process the anchor mask to make the anchor which cross the edge of image zero
:param batch:
:param feed_height:
:param feed_width:
:param anchor_amount:
:param _dtype:
:return:
"""
anchor_mask_shape = self.anchor_mask.shape
# calculate the allowed band for box to expand in the anchor mask[batch, feed_height, feed_width, anchor_amount, 4]
# 4: [top_height_band, bottom_height_band, left_width_band, right_width_band]
max_band_h = anchor_mask_shape[1]
max_band_w = anchor_mask_shape[2]
top_height_band = np.expand_dims(
np.linspace(0, max_band_h - 1, num=max_band_h).repeat(max_band_w).reshape([max_band_h, max_band_w]), -1)
bottom_height_band = np.expand_dims(
np.linspace(0, max_band_h - 1, num=max_band_h)[::-1].repeat(max_band_w).reshape([max_band_h, max_band_w]),
-1)
left_width_band = np.expand_dims(
np.linspace(0, max_band_w - 1, num=max_band_w).repeat(max_band_h).reshape([max_band_w, max_band_h]).T, -1)
right_width_band = np.expand_dims(
np.linspace(0, max_band_w - 1, num=max_band_w)[::-1].repeat(max_band_h).reshape([max_band_w, max_band_h]).T,
-1)
band_t_b_l_r = np.concatenate((top_height_band, bottom_height_band, left_width_band, right_width_band), -1)
# calculate the prior wh expand in the anchor mask[batch, feed_height, feed_width, anchor_amount, 4]
# subtract, replace negative by zero, multiply, and then concat
prior_expand_t_b_1_r_list = []
rejected = []
for i in self.prior_hw:
expand = np.array([0.5 * i[0], 0.5 * i[0], 0.5 * i[1], 0.5 * i[1]]).repeat(max_band_h * max_band_w).reshape(
max_band_h, max_band_w, 4)
expanded = band_t_b_l_r - expand
expanded[expanded < 0] = 0
rejected.append(expanded[:, :, 0] * expanded[:, :, 1] * expanded[:, :, 2] * expanded[:, :, 3])
pass
rejected = np.concatenate(rejected)
# multiply the anchor mask
return rejected
pass
def __find_same_cell_location(self, scalar, gt_box, classify):
"""
use scalar and gt_box to generate same cell format
[[gt_box, ...](the box in the same cell, [cell], [offset, ...]...]
gt_box: [y, x, h, w]; cell: [cell_y=gt_box.y//scalar, cell_x=gt_box.x//scalar];
offset: [offset_y=gt_box.y%scalar, offset_x=gt_box.x%scalar]
:param scalar:
:param gt_box:
:return:
"""
format = list()
# sort by y**2 + x**2 get the index
array_gt_box = np.array(gt_box)
order = (array_gt_box[:, 0] ** 2 + array_gt_box[:, 1] ** 2).argsort()
killed = []
for i in zip(order, range(0, len(order))):
index_ = i[0]
order_index_ = i[1]
if index_ in killed:
continue
pass
cell_y = gt_box[index_][0] // scalar
cell_x = gt_box[index_][1] // scalar
offset_y = gt_box[index_][0] % scalar
offset_x = gt_box[index_][1] % scalar
format.append([[]])
format[-1][0].append(gt_box[index_])
format[-1].append([cell_y, cell_x])
format[-1].append([])
format[-1][2].append([offset_y, offset_x])
format[-1].append([])
format[-1][3].append(classify[index_])
for j in order[order_index_ + 1:]:
if (gt_box[index_][0] // scalar == gt_box[j][0] // scalar) & (
gt_box[index_][1] // scalar == gt_box[j][1] // scalar):
# add to the format and add to killed
offset_y = gt_box[j][0] % (scalar - 1)
offset_x = gt_box[j][1] % (scalar - 1)
format[-1][0].append(gt_box[j])
format[-1][2].append([offset_y, offset_x])
format[-1][3].append(classify[j])
killed.append(j)
pass
else:
break
pass
pass
pass
return format
pass
def _default_generate_feed_function(self, param):
"""
:param param: dict
gt_box: support batch [[obj_0_yxhwc, obj_1_yxhwc, ...obj_n_yxhwc..., ], sample_1, sample_2, ....]
feed_height:
feed_width:
class:
_dtype:
:return:
"""
StandardYolo2GenerateLogger.debug('-->_default_generate_feed_function')
gt_box = param['gt_box']
feed_height = param['feed_height']
feed_width = param['feed_width']
classify = param['class']
_dtype = param['_dtype']
if (feed_height != self.feed_height or feed_width != self.feed_width) \
or (self.feed_height is None and self.feed_width is None) or (_dtype != self._dtype):
self.__update_feed_shape(feed_height, feed_width, self.anchor_amount, _dtype)
gt_format = self.__find_same_cell_location(scalar=self.scalar, gt_box=gt_box, classify=classify)
for i in gt_format:
ohw = np.concatenate([i[0], i[2]], -1)
iou_matrix = es.calc_iou_matrix_ohw(
self.prior_hw,
ohw,
group1_h_index=0,
group1_w_index=1,
group2_y_index=4,
group2_x_index=5,
group2_h_index=2,
group2_w_index=3
)
box__ = i[0]
cell__ = i[1]
offset__ = i[2]
classify__ = i[3]
cell_obj_amount = len(i[0])
cell_y__ = cell__[0]
cell_x__ = cell__[1]
for j in range(0, cell_obj_amount):
max_iou = np.max(iou_matrix)
location = np.where(iou_matrix == max_iou)
for k in zip(location[0], location[1]):
anchor__ = k[0]
obj__ = k[1]
box_item__ = box__[obj__]
prior_box_ = self.prior_hw[anchor__]
offset_ = offset__[obj__]
self.anchor_mask[0, cell_y__, cell_x__, anchor__] = 1.0
self.y[0, cell_y__, cell_x__, anchor__] = box_item__[0]
self.x[0, cell_y__, cell_x__, anchor__] = box_item__[1]
self.h[0, cell_y__, cell_x__, anchor__] = box_item__[2]
self.w[0, cell_y__, cell_x__, anchor__] = box_item__[3]
self.classify[0, cell_y__, cell_x__, anchor__] = classify__[obj__]
# self.anchor_mask[]
pass
pass
pass
return {'y': self.y, 'x': self.x, 'h': self.h, 'w': self.w, 'class': self.classify,
'anchor_mask': self.anchor_mask}
pass
@property
def FindSameCellLocation(self):
return self.__find_same_cell_location
pass
def __divide_anchor(self, gt_format_item):
"""
use the output of __find_same_cell_location to divide anchor's owner
:param gt_format:
:return:
"""
pass
def CheckGenerateFeedParamFit(self, param):
return True
pass
def _default_generate_result_function(self, param):
"""
:param param:
:return:
"""
threshold = param['threshold']
y_pro__ = param['y']
x_pro__ = param['x']
h_pro__ = param['h']
w_pro__ = param['w']
shape = y_pro__.shape
precision_pro__ = param['precision']
class_pro__ = param['class']
ret = list()
cell_accept = np.where(precision_pro__ >= threshold)
for location in zip(cell_accept[0], cell_accept[1], cell_accept[2], cell_accept[3]):
batch_l = location[0]
cell_height_l = location[1]
cell_width_l = location[2]
anchor_l = location[3]
pre_l = precision_pro__[batch_l, cell_height_l, cell_width_l, anchor_l]
y_l = self.scalar * (cell_height_l + y_pro__[batch_l, cell_height_l, cell_width_l, anchor_l])
x_l = self.scalar * (cell_width_l + x_pro__[batch_l, cell_height_l, cell_width_l, anchor_l])
h_l = self.prior_hw[anchor_l][0] * np.exp(h_pro__[batch_l, cell_height_l, cell_width_l, anchor_l])
w_l = self.prior_hw[anchor_l][1] * np.exp(w_pro__[batch_l, cell_height_l, cell_width_l, anchor_l])
class_get = class_pro__[
batch_l * (shape[0] * shape[1] * shape[2]) + cell_height_l * shape[2] + cell_width_l * shape[
3] + anchor_l, :]
class_l = np.where(class_get == np.max(class_get))
ret.append({'y': y_l, 'x': x_l, 'h': h_l, 'w': w_l, 'class': class_l, 'pre': pre_l})
pass
return ret
pass
def CheckGenerateResultParamFit(self, param):
return True
pass
pass
``` |
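A worked numeric check of the box encoding described in the StandardYolo2Generate docstring above, assuming a downsample scalar of 32 and a single prior box of height 64 and width 64 (all numbers are made up for illustration).
```python
import numpy as np

scalar, prior_h, prior_w = 32, 64.0, 64.0
center_y, center_x, height, width = 100.0, 70.0, 80.0, 48.0

cell_y, cell_x = int(center_y // scalar), int(center_x // scalar)   # (3, 2)
y = (center_y % scalar) / scalar                                    # 0.125
x = (center_x % scalar) / scalar                                    # 0.1875
h = np.log(height / prior_h)                                        # ln(1.25)
w = np.log(width / prior_w)                                         # ln(0.75)

# decoding reverses the encoding, as in _default_generate_result_function
decoded_y = scalar * (cell_y + y)      # 100.0
decoded_h = prior_h * np.exp(h)        # 80.0
print(cell_y, cell_x, y, x, h, w, decoded_y, decoded_h)
```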
{
"source": "jihuncho7/Film_ex",
"score": 2
} |
#### File: Film_ex/api/views.py
```python
from django.http import HttpResponse
from django.shortcuts import redirect
from rest_framework import viewsets, permissions, generics, status
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.decorators import api_view
# from .models import FilmList
# Create your views here.
def HelloAPI(request):
request.session['jwt'] = "<PASSWORD>"
return HttpResponse(request.session.get('jwt'))
def Home(request):
user = request.session.get('user')
return redirect('http://localhost:8080')
# class FilmViewSet(viewsets.ModelViewSet):
# queryset = FilmList.objects.all()
# serializer_class = FilmSerializer
# content that is filled in on creation
# def perform_create(self, serializer):
# serializer.save(user = self.request.user)
``` |
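A hypothetical urls.py wiring for the views above; the path strings and module layout are assumptions made for illustration.
```python
from django.urls import path

from api import views

urlpatterns = [
    path('hello/', views.HelloAPI, name='hello-api'),
    path('', views.Home, name='home'),
]
```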
{
"source": "jihunchoi/nyu-mll-spinn",
"score": 3
} |
#### File: spinn/util/misc.py
```python
import numpy as np
from collections import deque
import json
def debug_gradient(model, losses):
model.zero_grad()
for name, loss in losses:
print(name)
loss.backward(retain_variables=True)
stats = [
(p.grad.norm().data[0],
p.grad.max().data[0],
p.grad.min().data[0],
p.size()) for p in model.parameters()]
for s in stats:
print(s)
print()
model.zero_grad()
class GenericClass(object):
def __init__(self, **kwargs):
super(GenericClass, self).__init__()
for k, v in kwargs.items():
setattr(self, k, v)
def __repr__(self):
s = "{}"
return s.format(self.__dict__)
class Args(GenericClass):
pass
class Vocab(GenericClass):
pass
class Example(GenericClass):
pass
def time_per_token(num_tokens, total_time):
return sum(total_time) / float(sum(num_tokens))
def count_parse2(parse, index, const_parsed=[]):
"""
    To-do: this is more efficient than count_parse() but is currently broken, fix it!
Compute Constituents Parsed metric for ListOps style examples.
"""
after = parse[index:]
before = parse[:index]
between = after[: after.index("]")]
o_b = between.count("(") # open, between
c_b = between.count(")") # close, between
end = after.index("]")
cafter = after[end+1:]
stop = None
stop_list = []
for item in cafter:
stop_list.append(")" == item)
if stop_list[-1] == False:
break
if False in stop_list:
stop = stop_list.index(False)
else:
stop = None
cafter = cafter[: stop]
c_a = cafter.count(")")
stop = None
stop_list = []
for item in before[::-1] :
stop_list.append("(" == item)
if stop_list[-1] == False:
break
if False in stop_list:
stop = len(before) - stop_list.index(False) - 1
else:
stop = None
cbefore = before[stop:]
o_a = cbefore.count("(")
ints = sum(c.isdigit() for c in between) + between.count("-")
op = o_a + o_b
cl = c_a + c_b
if op >= ints and cl >= ints:
if op == ints+1 or cl == ints+1:
const_parsed.append(1)
parse[index - o_a : index + len(between) + 1 + c_a] = '-'
mathops = ["[MAX", "[MIN", "[MED", "[SM"]
some_ops = list(set(mathops) & set(parse))
ops_i = [parse[::-1].index(m) for m in some_ops]
if len(ops_i) != 0:
op_i = min([i for i in ops_i if i >= 0])
index = len(parse) - op_i - 1
count_parse2(parse, index, const_parsed)
return sum(const_parsed)
def count_parse(parse, index, const_parsed=[]):
"""
Compute Constituents Parsed metric for ListOps style examples.
"""
mathops = ["[MAX", "[MIN", "[MED", "[SM"]
if "]" in parse:
after = parse[index:]
before = parse[:index]
between = after[: after.index("]")]
nest_check = [m in between[1:] for m in mathops]
if True in nest_check:
op_i = nest_check.index(True)
nested_i = after[1:].index(mathops[op_i]) + 1
nested = after[nested_i : ]
c = count_parse(parse, index+nested_i, const_parsed)
cc = count_parse(parse, index, const_parsed)
else:
o_b = between.count("(") # open, between
c_b = between.count(")") # close, between
end = after.index("]")
cafter = after[end+1:]
stop = None
stop_list = []
for item in cafter:
stop_list.append(")" == item)
if stop_list[-1] == False:
break
if False in stop_list:
stop = stop_list.index(False)
else:
stop = None
cafter = cafter[: stop]
c_a = cafter.count(")")
stop = None
stop_list = []
for item in before[::-1] :
stop_list.append("(" == item)
if stop_list[-1] == False:
break
if False in stop_list:
stop = len(before) - stop_list.index(False) - 1
else:
stop = None
cbefore = before[stop:]
o_a = cbefore.count("(")
ints = sum(c.isdigit() for c in between) + between.count("-")
op = o_a + o_b
cl = c_a + c_b
if op >= ints and cl >= ints:
if op == ints+1 or cl == ints+1:
const_parsed.append(1)
parse[index - o_a : index + len(between) + 1 + c_a] = '-'
return sum(const_parsed)
class Accumulator(object):
"""Accumulator. Makes it easy to keep a trailing list of statistics."""
def __init__(self, maxlen=None):
self.maxlen = maxlen
self.cache = dict()
def add(self, key, val):
self.cache.setdefault(key, deque(maxlen=self.maxlen)).append(val)
def get(self, key, clear=True):
ret = self.cache.get(key, [])
if clear:
try:
del self.cache[key]
except BaseException:
pass
return ret
def get_avg(self, key, clear=True):
return np.array(self.get(key, clear)).mean()
class EvalReporter(object):
def __init__(self):
super(EvalReporter, self).__init__()
self.report = []
def save_batch(self,
preds,
target,
example_ids,
output,
sent1_transitions=None,
sent2_transitions=None,
sent1_trees=None,
sent2_trees=None,
cp_metric=False):
'''Saves a batch. Transforms the batch from column-centric information
(information split by columns) to row-centric (by EvalSentence).'''
cp = 0
cp_max = 0
mathops = ["[MAX", "[MIN", "[MED", "[SM"]
b = [preds.view(-1), target.view(-1), example_ids, output]
for i, (pred, truth, eid, output) in enumerate(zip(*b)):
sent = {}
sent['example_id'] = eid
sent['prediction'] = pred
sent['truth'] = truth
sent['output'] = [str(output_val) for output_val in output]
if sent1_transitions is not None:
sent['sent1_transitions'] = sent1_transitions[i].tolist()
if sent2_transitions is not None:
sent['sent2_transitions'] = sent2_transitions[i].tolist()
if sent1_trees is not None:
sent['sent1_tree'] = sent1_trees[i]
if cp_metric:
parse = sent1_trees[i].split()
some_ops = list(set(mathops) & set(parse))
ops_i = [parse.index(m) for m in some_ops]
op_i = min([i for i in ops_i if i >= 0])
cp_max += parse.count("]")
cp += count_parse(parse, op_i, const_parsed=[])
if sent2_trees is not None:
sent['sent2_tree'] = sent2_trees[i]
self.report.append(sent)
if cp_metric:
return cp, cp_max
def write_report(self, filename):
'''Commits the report to a file.'''
with open(filename, 'w') as f:
for example in self.report:
json.dump(example, f, sort_keys=True)
f.write('\n')
def PrintParamStatistics(name, param):
data = param.data.cpu().numpy()
print(name, end=' ')
print("Mean:", np.mean(data), end=' ')
print("Std:", np.std(data), end=' ')
print("Min:", np.min(data), end=' ')
print("Max:", np.max(data))
def recursively_set_device(inp, gpu):
if hasattr(inp, 'keys'):
for k in list(inp.keys()):
inp[k] = recursively_set_device(inp[k], gpu)
elif isinstance(inp, list):
return [recursively_set_device(ii, gpu) for ii in inp]
elif isinstance(inp, tuple):
return (recursively_set_device(ii, gpu) for ii in inp)
elif hasattr(inp, 'cpu'):
if gpu >= 0:
inp = inp.cuda()
else:
inp = inp.cpu()
return inp
``` |
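A tiny usage sketch for the Accumulator class above: keep a trailing window of statistics and read back the running mean (the window length and values are made up).
```python
acc = Accumulator(maxlen=3)
for loss in [0.9, 0.7, 0.5, 0.3]:
    acc.add('loss', loss)
print(acc.get_avg('loss'))   # mean of the last 3 values -> 0.5
```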
{
"source": "jihunchoi/probability",
"score": 2
} |
#### File: python/distributions/mvn_diag_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
from scipy import stats
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_case
from tensorflow.python.framework import test_util
tfd = tfp.distributions
@test_util.run_all_in_graph_and_eager_modes
class MultivariateNormalDiagTest(test_case.TestCase):
"""Well tested because this is a simple override of the base class."""
def setUp(self):
self._rng = np.random.RandomState(42)
def testScalarParams(self):
mu = -1.
diag = -5.
with self.assertRaisesRegexp(ValueError, "at least 1 dimension"):
tfd.MultivariateNormalDiag(mu, diag)
def testVectorParams(self):
mu = [-1.]
diag = [-5.]
dist = tfd.MultivariateNormalDiag(mu, diag, validate_args=True)
self.assertAllEqual([3, 1], dist.sample(3).shape)
def testDistWithBatchShapeOneThenTransformedThroughSoftplus(self):
# This complex combination of events resulted in a loss of static shape
# information when tensor_util.constant_value(self._needs_rotation) was
# being used incorrectly (resulting in always rotating).
# Batch shape = [1], event shape = [3]
mu = tf.zeros((1, 3))
diag = tf.ones((1, 3))
base_dist = tfd.MultivariateNormalDiag(mu, diag, validate_args=True)
dist = tfd.TransformedDistribution(
base_dist, validate_args=True, bijector=tfp.bijectors.Softplus())
samps = dist.sample(5) # Shape [5, 1, 3].
self.assertAllEqual([5, 1], dist.log_prob(samps).shape)
def testMean(self):
mu = [-1., 1]
diag = [1., -5]
dist = tfd.MultivariateNormalDiag(mu, diag, validate_args=True)
self.assertAllEqual(mu, self.evaluate(dist.mean()))
def testMeanWithBroadcastLoc(self):
mu = [-1.]
diag = [1., -5]
dist = tfd.MultivariateNormalDiag(mu, diag, validate_args=True)
self.assertAllEqual([-1., -1.], self.evaluate(dist.mean()))
def testEntropy(self):
mu = [-1., 1]
diag = [-1., 5]
diag_mat = np.diag(diag)
scipy_mvn = stats.multivariate_normal(mean=mu, cov=diag_mat**2)
dist = tfd.MultivariateNormalDiag(mu, diag, validate_args=True)
self.assertAllClose(
scipy_mvn.entropy(), self.evaluate(dist.entropy()), atol=1e-4)
def testSample(self):
mu = [-1., 1]
diag = [1., -2]
dist = tfd.MultivariateNormalDiag(mu, diag, validate_args=True)
samps = self.evaluate(dist.sample(int(1e3), seed=0))
cov_mat = self.evaluate(tf.matrix_diag(diag))**2
self.assertAllClose(mu, samps.mean(axis=0), atol=0., rtol=0.05)
self.assertAllClose(cov_mat, np.cov(samps.T), atol=0.05, rtol=0.05)
def testSingularScaleRaises(self):
mu = [-1., 1]
diag = [1., 0]
dist = tfd.MultivariateNormalDiag(mu, diag, validate_args=True)
with self.assertRaisesOpError("Singular"):
self.evaluate(dist.sample())
def testSampleWithBroadcastScale(self):
# mu corresponds to a 2-batch of 3-variate normals
mu = np.zeros([2, 3])
# diag corresponds to no batches of 3-variate normals
diag = np.ones([3])
dist = tfd.MultivariateNormalDiag(mu, diag, validate_args=True)
mean = dist.mean()
self.assertAllEqual([2, 3], mean.shape)
self.assertAllClose(mu, self.evaluate(mean))
n = int(1e3)
samps = self.evaluate(dist.sample(n, seed=0))
cov_mat = self.evaluate(tf.matrix_diag(diag))**2
sample_cov = np.matmul(
samps.transpose([1, 2, 0]), samps.transpose([1, 0, 2])) / n
self.assertAllClose(mu, samps.mean(axis=0), atol=0.10, rtol=0.05)
self.assertAllClose([cov_mat, cov_mat], sample_cov, atol=0.10, rtol=0.05)
def testCovariance(self):
mvn = tfd.MultivariateNormalDiag(loc=tf.zeros([2, 3], dtype=tf.float32))
self.assertAllClose(
np.diag(np.ones([3], dtype=np.float32)),
self.evaluate(mvn.covariance()))
mvn = tfd.MultivariateNormalDiag(
loc=tf.zeros([3], dtype=tf.float32), scale_identity_multiplier=[3., 2.])
self.assertAllEqual([2], mvn.batch_shape)
self.assertAllEqual([3], mvn.event_shape)
self.assertAllClose(
np.array([[[3., 0, 0], [0, 3, 0], [0, 0, 3]], [[2, 0, 0], [0, 2, 0],
[0, 0, 2]]])**2.,
self.evaluate(mvn.covariance()))
mvn = tfd.MultivariateNormalDiag(
loc=tf.zeros([3], dtype=tf.float32), scale_diag=[[3., 2, 1], [4, 5, 6]])
self.assertAllEqual([2], mvn.batch_shape)
self.assertAllEqual([3], mvn.event_shape)
self.assertAllClose(
np.array([[[3., 0, 0], [0, 2, 0], [0, 0, 1]], [[4, 0, 0], [0, 5, 0],
[0, 0, 6]]])**2.,
self.evaluate(mvn.covariance()))
def testVariance(self):
mvn = tfd.MultivariateNormalDiag(loc=tf.zeros([2, 3], dtype=tf.float32))
self.assertAllClose(
np.ones([3], dtype=np.float32), self.evaluate(mvn.variance()))
mvn = tfd.MultivariateNormalDiag(
loc=tf.zeros([3], dtype=tf.float32), scale_identity_multiplier=[3., 2.])
self.assertAllClose(
np.array([[3., 3, 3], [2, 2, 2]])**2., self.evaluate(mvn.variance()))
mvn = tfd.MultivariateNormalDiag(
loc=tf.zeros([3], dtype=tf.float32), scale_diag=[[3., 2, 1], [4, 5, 6]])
self.assertAllClose(
np.array([[3., 2, 1], [4, 5, 6]])**2., self.evaluate(mvn.variance()))
def testStddev(self):
mvn = tfd.MultivariateNormalDiag(loc=tf.zeros([2, 3], dtype=tf.float32))
self.assertAllClose(
np.ones([3], dtype=np.float32), self.evaluate(mvn.stddev()))
mvn = tfd.MultivariateNormalDiag(
loc=tf.zeros([3], dtype=tf.float32), scale_identity_multiplier=[3., 2.])
self.assertAllClose(
np.array([[3., 3, 3], [2, 2, 2]]), self.evaluate(mvn.stddev()))
mvn = tfd.MultivariateNormalDiag(
loc=tf.zeros([3], dtype=tf.float32), scale_diag=[[3., 2, 1], [4, 5, 6]])
self.assertAllClose(
np.array([[3., 2, 1], [4, 5, 6]]), self.evaluate(mvn.stddev()))
def testMultivariateNormalDiagWithSoftplusScale(self):
mu = [-1.0, 1.0]
diag = [-1.0, -2.0]
dist = tfd.MultivariateNormalDiagWithSoftplusScale(
mu, diag, validate_args=True)
samps = self.evaluate(dist.sample(1000, seed=0))
cov_mat = self.evaluate(tf.matrix_diag(tf.nn.softplus(diag))**2)
self.assertAllClose(mu, samps.mean(axis=0), atol=0.1)
self.assertAllClose(cov_mat, np.cov(samps.T), atol=0.1)
def testMultivariateNormalDiagNegLogLikelihood(self):
num_draws = 50
dims = 3
x = np.zeros([num_draws, dims], dtype=np.float32)
x_pl = tf.placeholder_with_default(input=x, shape=[None, dims], name="x")
mu_var = tf.get_variable(
name="mu",
shape=[dims],
dtype=tf.float32,
initializer=tf.constant_initializer(1.))
self.evaluate([tf.global_variables_initializer()])
def neg_log_likelihood(mu):
mvn = tfd.MultivariateNormalDiag(
loc=mu, scale_diag=tf.ones(shape=[dims], dtype=tf.float32))
# Typically you'd use `mvn.log_prob(x_pl)` which is always at least as
# numerically stable as `tf.log(mvn.prob(x_pl))`. However in this test
# we're testing a bug specific to `prob` and not `log_prob`;
# http://stackoverflow.com/q/45109305. (The underlying issue was not
# related to `Distributions` but that `reduce_prod` didn't correctly
# handle negative indexes.)
return -tf.reduce_sum(tf.log(mvn.prob(x_pl)))
grad_neg_log_likelihood = self.compute_gradients(
neg_log_likelihood, args=[mu_var])
self.assertEqual(1, len(grad_neg_log_likelihood))
self.assertAllClose(
grad_neg_log_likelihood[0],
np.tile(num_draws, dims),
rtol=1e-6,
atol=0.)
def testDynamicBatchShape(self):
if tf.executing_eagerly():
return
loc = np.float32(self._rng.rand(1, 1, 2))
scale_diag = np.float32(self._rng.rand(1, 1, 2))
mvn = tfd.MultivariateNormalDiag(
loc=tf.placeholder_with_default(input=loc, shape=[None, None, 2]),
scale_diag=tf.placeholder_with_default(
input=scale_diag, shape=[None, None, 2]))
self.assertListEqual(mvn.batch_shape.as_list(), [None, None])
self.assertListEqual(mvn.event_shape.as_list(), [2])
def testDynamicEventShape(self):
if tf.executing_eagerly():
return
loc = np.float32(self._rng.rand(2, 3, 2))
scale_diag = np.float32(self._rng.rand(2, 3, 2))
mvn = tfd.MultivariateNormalDiag(
loc=tf.placeholder_with_default(input=loc, shape=[2, 3, None]),
scale_diag=tf.placeholder_with_default(
input=scale_diag, shape=[2, 3, None]))
self.assertListEqual(mvn.batch_shape.as_list(), [2, 3])
self.assertListEqual(mvn.event_shape.as_list(), [None])
def testKLDivIdenticalGradientDefined(self):
dims = 3
loc = tf.zeros([dims], dtype=tf.float32)
def self_kl_divergence(loc):
mvn = tfd.MultivariateNormalDiag(
loc=loc, scale_diag=np.ones([dims], dtype=np.float32))
return tfd.kl_divergence(mvn, mvn)
gradients = self.compute_gradients(self_kl_divergence, args=[loc])
self.assertAllEqual(
np.ones_like(gradients, dtype=np.bool),
np.isfinite(gradients))
if __name__ == "__main__":
tf.test.main()
```
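A small interactive sketch of the distribution exercised by the tests above, assuming a TF 1.x / tensorflow_probability environment with eager execution enabled.
```python
import tensorflow as tf
import tensorflow_probability as tfp

tf.enable_eager_execution()
dist = tfp.distributions.MultivariateNormalDiag(loc=[-1., 1.], scale_diag=[1., 2.])
print(dist.mean().numpy())               # [-1.  1.]
print(dist.stddev().numpy())             # [1. 2.]
print(dist.log_prob([0., 0.]).numpy())   # log-density at the origin
```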
#### File: optimizer/linesearch/hager_zhang.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import smart_cond
__all__ = [
'hager_zhang',
]
# Container to hold the function value and the derivative at a given point.
# Each entry is a scalar tensor of real dtype. Used for internal data passing.
_FnDFn = collections.namedtuple('_FnDFn', ['x', 'f', 'df'])
# Valuetype to hold information for the floating point precision data.
_FloatInfo = collections.namedtuple('_FloatInfo', ['eps', 'nextfloat'])
def _machine_eps(dtype):
"""Returns the machine epsilon for the supplied dtype."""
if isinstance(dtype, tf.DType):
dtype = dtype.as_numpy_dtype()
return np.finfo(dtype).eps
def _next_after(x):
"""Computes the next larger floating point number.
Tensorflow analogue of the function np.nextafter.
Ideally, this function should have a native C++ kernel in TF core but until
that is done, we are forced to use this implementation.
Args:
x: `Tensor` of real dtype and any shape. The value for which to compute
the next floating point number.
Returns:
A tuple containing the following attributes:
eps: The floating point resolution at `x`. A tensor of same shape and
dtype as the input `x`.
nextfloat: `Tensor` of the same dtype and shape as `x`. The smallest
value `y` which is greater than `x` for that dtype.
"""
cond = lambda epsilon: ~tf.equal(x + epsilon / 2, x)
body = lambda epsilon: epsilon / 2
epsilon = tf.while_loop(cond, body, (tf.ones_like(x),))
return _FloatInfo(eps=epsilon, nextfloat=x + epsilon)
HagerZhangLineSearchResult = collections.namedtuple(
'HagerZhangLineSearchResults', [
'converged', # Whether a pt satisfying Wolfe/Approx wolfe was found.
'failed', # Whether the line search failed. It can fail if the
# objective function or the gradient are not finite at
# an evaluation point.
'func_evals', # Number of function evaluations made.
'iterations', # Number of line search iterations made.
'left_pt', # The left end point of the final bracketing interval.
# If converged is True, it is equal to 'right_pt'.
# Otherwise, it corresponds to the last interval computed.
'objective_at_left_pt', # The function value at the left end point.
# If converged is True, it is equal to
# `objective_at_right_pt`. Otherwise, it
# corresponds to the last interval computed.
'grad_objective_at_left_pt', # The derivative of the function at the
# left end point. If converged is True,
# it is equal to
# `grad_objective_at_right_pt`.
# Otherwise, it corresponds to the last
# interval computed.
'right_pt', # The right end point of the final bracketing interval.
# If converged is True, it is equal to 'left_pt'.
# Otherwise, it corresponds to the last interval computed.
'objective_at_right_pt', # The function value at the right end point.
# If converged is True, it is equal to
# `objective_at_left_pt`.
# Otherwise, it corresponds to the last
# interval computed.
'grad_objective_at_right_pt' # The derivative of the function at the
# right end point. If converged is True,
# it is equal to
# `grad_objective_at_left_pt`.
# Otherwise it corresponds to the last
# interval computed.
])
def hager_zhang(value_and_gradients_function,
initial_step_size=None,
objective_at_zero=None,
grad_objective_at_zero=None,
objective_at_initial_step_size=None,
grad_objective_at_initial_step_size=None,
threshold_use_approximate_wolfe_condition=1e-6,
shrinkage_param=0.66,
expansion_param=5.0,
sufficient_decrease_param=0.1,
curvature_param=0.9,
step_size_shrink_param=0.1,
max_iterations=50,
name=None):
"""The Hager Zhang line search algorithm.
Performs an inexact line search based on the algorithm of
[Hager and Zhang (2006)][2].
The univariate objective function `value_and_gradients_function` is typically
generated by projecting
a multivariate objective function along a search direction. Suppose the
multivariate function to be minimized is `g(x1,x2, .. xn)`. Let
(d1, d2, ..., dn) be the direction along which we wish to perform a line
search. Then the projected univariate function to be used for line search is
```None
f(a) = g(x1 + d1 * a, x2 + d2 * a, ..., xn + dn * a)
```
The directional derivative along (d1, d2, ..., dn) is needed for this
procedure. This also corresponds to the derivative of the projected function
`f(a)` with respect to `a`. Note that this derivative must be negative for
`a = 0` if the direction is a descent direction.
The usual stopping criteria for the line search is the satisfaction of the
(weak) Wolfe conditions. For details of the Wolfe conditions, see
ref. [3]. On a finite precision machine, the exact Wolfe conditions can
be difficult to satisfy when one is very close to the minimum and as argued
by [Hager and Zhang (2005)][1], one can only expect the minimum to be
determined within square root of machine precision. To improve the situation,
they propose to replace the Wolfe conditions with an approximate version
depending on the derivative of the function which is applied only when one
is very close to the minimum. The following algorithm implements this
enhanced scheme.
### Usage:
Primary use of line search methods is as an internal component of a class of
optimization algorithms (called line search based methods as opposed to
trust region methods). Hence, the end user will typically not want to access
line search directly. In particular, inexact line search should not be
confused with a univariate minimization method. The stopping criteria of line
search is the satisfaction of Wolfe conditions and not the discovery of the
minimum of the function.
With this caveat in mind, the following example illustrates the standalone
usage of the line search.
```python
# Define a quadratic target with minimum at 1.3.
value_and_gradients_function = lambda x: ((x - 1.3) ** 2, 2 * (x-1.3))
# Set initial step size.
step_size = tf.constant(0.1)
ls_result = tfp.optimizer.linesearch.hager_zhang(
value_and_gradients_function, initial_step_size=step_size)
# Evaluate the results.
with tf.Session() as session:
results = session.run(ls_result)
# Ensure convergence.
assert(results.converged)
# If the line search converged, the left and the right ends of the
# bracketing interval are identical.
assert(results.left_pt == results.right_pt)
# Print the number of evaluations and the final step size.
print("Final Step Size: %f, Evaluations: %d" % (results.left_pt,
                                                results.func_evals))
```
### References:
[1]: William W. Hager, Hongchao Zhang. A new conjugate gradient method with
guaranteed descent and an efficient line search. SIAM J. Optim., Vol 16. 1,
pp. 170-172. 2005.
https://www.math.lsu.edu/~hozhang/papers/cg_descent.pdf
[2]: William W. Hager, Hongchao Zhang. Algorithm 851: CG_DESCENT, a conjugate
gradient method with guaranteed descent. ACM Transactions on Mathematical
Software, Vol 32., 1, pp. 113-137. 2006.
http://users.clas.ufl.edu/hager/papers/CG/cg_compare.pdf
[3]: Jorge Nocedal, Stephen J. Wright. Numerical Optimization. Springer Series in
Operations Research. pp 33-36. 2006
Args:
value_and_gradients_function: A Python callable that accepts a real scalar
tensor and returns a tuple of scalar tensors of real dtype containing
the value of the function and its derivative at that point.
In usual optimization application, this function would be generated by
projecting the multivariate objective function along some specific
direction. The direction is determined by some other procedure but should
be a descent direction (i.e. the derivative of the projected univariate
function must be negative at 0.).
initial_step_size: (Optional) Scalar positive `Tensor` of real dtype. The
initial value to try to bracket the minimum. Default is `1.` as a float32.
Note that this point need not necessarily bracket the minimum for the line
search to work correctly but the supplied value must be greater than
0. A good initial value will make the search converge faster.
objective_at_zero: (Optional) Scalar `Tensor` of real dtype. If supplied,
the value of the function at `0.`. If not supplied, it will be computed.
grad_objective_at_zero: (Optional) Scalar `Tensor` of real dtype. If
supplied, the derivative of the function at `0.`. If not supplied, it
will be computed.
objective_at_initial_step_size: (Optional) Scalar `Tensor` of real dtype.
If supplied, the value of the function at `initial_step_size`.
If not supplied, it will be computed.
grad_objective_at_initial_step_size: (Optional) Scalar `Tensor` of real
dtype. If supplied, the derivative of the function at
`initial_step_size`. If not supplied, it will be computed.
threshold_use_approximate_wolfe_condition: Scalar positive `Tensor`
of real dtype. Corresponds to the parameter 'epsilon' in
[Hager and Zhang (2006)][2]. Used to estimate the
threshold at which the line search switches to approximate Wolfe
conditions.
shrinkage_param: Scalar positive Tensor of real dtype. Must be less than
`1.`. Corresponds to the parameter `gamma` in
[Hager and Zhang (2006)][2].
If the secant**2 step does not shrink the bracketing interval by this
proportion, a bisection step is performed to reduce the interval width.
expansion_param: Scalar positive `Tensor` of real dtype. Must be greater
than `1.`. Used to expand the initial interval in case it does not bracket
a minimum. Corresponds to `rho` in [Hager and Zhang (2006)][2].
sufficient_decrease_param: Positive scalar `Tensor` of real dtype.
Bounded above by the curvature param. Corresponds to `delta` in the
terminology of [Hager and Zhang (2006)][2].
curvature_param: Positive scalar `Tensor` of real dtype. Bounded above
by `1.`. Corresponds to 'sigma' in the terminology of
[Hager and Zhang (2006)][2].
step_size_shrink_param: Positive scalar `Tensor` of real dtype. Bounded
above by `1`. If the supplied step size is too big (i.e. either the
objective value or the gradient at that point is infinite), this factor
is used to shrink the step size until it is finite.
max_iterations: Positive scalar `Tensor` of integral dtype or None. The
maximum number of iterations to perform in the line search. The number of
iterations used to bracket the minimum are also counted against this
parameter.
name: (Optional) Python str. The name prefixed to the ops created by this
function. If not supplied, the default name 'hager_zhang' is used.
Returns:
results: A namedtuple containing the following attributes.
converged: Boolean scalar `Tensor`. Whether a point satisfying
Wolfe/Approx wolfe was found.
func_evals: Scalar int32 `Tensor`. Number of function evaluations made.
left_pt: Scalar `Tensor` of same dtype as `initial_step_size`. The
left end point of the final bracketing interval. If converged is True,
it is equal to `right_pt`. Otherwise, it corresponds to the last
interval computed.
objective_at_left_pt: Scalar `Tensor` of same dtype as
`objective_at_initial_step_size`. The function value at the left
end point. If converged is True, it is equal to `objective_at_right_pt`.
Otherwise, it corresponds to the last interval computed.
grad_objective_at_left_pt: Scalar `Tensor` of same dtype as
`grad_objective_at_initial_step_size`. The derivative of the function
at the left end point. If converged is True,
it is equal to `grad_objective_at_right_pt`. Otherwise it
corresponds to the last interval computed.
right_pt: Scalar `Tensor` of same dtype as `initial_step_size`.
The right end point of the final bracketing interval.
If converged is True, it is equal to `left_pt`. Otherwise,
it corresponds to the last interval computed.
objective_at_right_pt: Scalar `Tensor` of same dtype as
`objective_at_initial_step_size`.
The function value at the right end point. If converged is True, it
is equal to `objective_at_left_pt`. Otherwise, it corresponds to the last
interval computed.
grad_objective_at_right_pt: Scalar `Tensor` of same dtype as
`grad_objective_at_initial_step_size`.
The derivative of the function at the right end point.
If converged is True, it is equal to `grad_objective_at_left_pt`.
Otherwise it corresponds to the last interval computed.
"""
with tf.name_scope(name, 'hager_zhang',
[initial_step_size,
objective_at_zero,
grad_objective_at_zero,
objective_at_initial_step_size,
grad_objective_at_initial_step_size,
threshold_use_approximate_wolfe_condition,
shrinkage_param,
expansion_param,
sufficient_decrease_param,
curvature_param]):
val_0, val_c_input, f_lim, prepare_evals = _prepare_args(
value_and_gradients_function,
initial_step_size,
objective_at_initial_step_size,
grad_objective_at_initial_step_size,
objective_at_zero,
grad_objective_at_zero,
threshold_use_approximate_wolfe_condition)
valid_inputs = (_is_finite(val_0) & (val_0.df < 0) &
tf.is_finite(val_c_input.x) & (val_c_input.x > 0))
def _invalid_inputs_fn():
return HagerZhangLineSearchResult(
converged=tf.convert_to_tensor(False, name='converged'),
failed=tf.convert_to_tensor(True, name='failed'),
func_evals=prepare_evals,
iterations=tf.convert_to_tensor(0),
left_pt=val_0.x,
objective_at_left_pt=val_0.f,
grad_objective_at_left_pt=val_0.df,
right_pt=val_0.x,
objective_at_right_pt=val_0.f,
grad_objective_at_right_pt=val_0.df)
def _valid_inputs_fn():
"""Performs bracketing and line search if inputs are valid."""
# If the value or the gradient at the supplied step is not finite,
# we attempt to repair it.
step_size_too_large = ~(tf.is_finite(val_c_input.df) &
tf.is_finite(val_c_input.f))
def _is_too_large_fn():
return _fix_step_size(value_and_gradients_function,
val_c_input,
step_size_shrink_param)
val_c, fix_evals = smart_cond.smart_cond(
step_size_too_large,
_is_too_large_fn,
lambda: (val_c_input, 0))
# Check if c is fixed now.
valid_at_c = _is_finite(val_c) & (val_c.x > 0)
def _failure_fn():
# If c is still not good, just return 0.
return HagerZhangLineSearchResult(
converged=tf.convert_to_tensor(True, name='converged'),
failed=tf.convert_to_tensor(False, name='failed'),
func_evals=prepare_evals + fix_evals,
iterations=tf.convert_to_tensor(0),
left_pt=val_0.x,
objective_at_left_pt=val_0.f,
grad_objective_at_left_pt=val_0.df,
right_pt=val_0.x,
objective_at_right_pt=val_0.f,
grad_objective_at_right_pt=val_0.df)
def success_fn():
"""Bracketing and searching to do if all inputs are valid."""
result = _bracket_and_search(
value_and_gradients_function,
val_0,
val_c,
f_lim,
max_iterations,
shrinkage_param=shrinkage_param,
expansion_param=expansion_param,
sufficient_decrease_param=sufficient_decrease_param,
curvature_param=curvature_param)
converged = tf.convert_to_tensor(result.found_wolfe, name='converged')
return HagerZhangLineSearchResult(
converged=converged,
failed=tf.convert_to_tensor(result.failed, name='failed'),
func_evals=result.num_evals + prepare_evals + fix_evals,
iterations=result.iteration,
left_pt=result.left.x,
objective_at_left_pt=result.left.f,
grad_objective_at_left_pt=result.left.df,
right_pt=result.right.x,
objective_at_right_pt=result.right.f,
grad_objective_at_right_pt=result.right.df)
return smart_cond.smart_cond(
valid_at_c,
true_fn=success_fn,
false_fn=_failure_fn)
return smart_cond.smart_cond(
valid_inputs,
true_fn=_valid_inputs_fn,
false_fn=_invalid_inputs_fn)
def _fix_step_size(value_and_gradients_function,
val_c_input,
step_size_shrink_param):
"""Shrinks the input step size until the value and grad become finite."""
# The maximum iterations permitted are determined as the number of halvings
# it takes to reduce 1 below the machine epsilon of the given dtype.
iter_max = np.ceil(-np.log2(_machine_eps(val_c_input.x.dtype)))
def _cond(i, c, f_c, df_c): # pylint: disable=unused-argument
return (i < iter_max) & ~(tf.is_finite(f_c) & tf.is_finite(df_c))
def _body(i, c, f_c, df_c): # pylint: disable=unused-argument
next_c = c * step_size_shrink_param
return (i + 1, next_c) + value_and_gradients_function(next_c)
evals, next_c, next_f, next_df = tf.while_loop(
_cond,
_body,
(0, val_c_input.x, val_c_input.f, val_c_input.df))
return _FnDFn(x=next_c, f=next_f, df=next_df), evals
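# Illustrative note, not part of the original module: iter_max above evaluates
# to 23 for float32 (eps = 2**-23) and 52 for float64 (eps = 2**-52), i.e. the
# number of halvings needed to shrink 1 down to the machine epsilon.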
_LineSearchInnerResult = collections.namedtuple('_LineSearchInnerResult', [
'iteration',
'found_wolfe',
'failed',
'num_evals',
'left',
'right'])
def _bracket_and_search(
value_and_gradients_function,
val_0,
val_c,
f_lim,
max_iterations,
shrinkage_param=None,
expansion_param=None,
sufficient_decrease_param=None,
curvature_param=None):
"""Brackets the minimum and performs a line search.
Args:
value_and_gradients_function: A Python callable that accepts a real scalar
tensor and returns a tuple of scalar tensors of real dtype containing
the value of the function and its derivative at that point.
In usual optimization application, this function would be generated by
projecting the multivariate objective function along some specific
direction. The direction is determined by some other procedure but should
be a descent direction (i.e. the derivative of the projected univariate
function must be negative at 0.).
val_0: Instance of `_FnDFn` containing the value and gradient of the
objective at 0. The gradient must be negative (i.e. must be a descent
direction).
val_c: Instance of `_FnDFn` containing the initial step size and the value
and gradient of the objective at the initial step size. The step size
must be positive and finite.
f_lim: Scalar `Tensor` of float dtype.
max_iterations: Positive scalar `Tensor` of integral dtype. The maximum
number of iterations to perform in the line search. The number of
iterations used to bracket the minimum are also counted against this
parameter.
shrinkage_param: Scalar positive Tensor of real dtype. Must be less than
`1.`. Corresponds to the parameter `gamma` in [Hager and Zhang (2006)][2].
expansion_param: Scalar positive `Tensor` of real dtype. Must be greater
than `1.`. Used to expand the initial interval in case it does not bracket
a minimum. Corresponds to `rho` in [Hager and Zhang (2006)][2].
sufficient_decrease_param: Positive scalar `Tensor` of real dtype.
Bounded above by the curvature param. Corresponds to `delta` in the
terminology of [Hager and Zhang (2006)][2].
curvature_param: Positive scalar `Tensor` of real dtype. Bounded above
by `1.`. Corresponds to 'sigma' in the terminology of
[Hager and Zhang (2006)][2].
Returns:
A namedtuple containing the following fields.
iteration: A scalar int32 `Tensor`. The number of iterations consumed.
found_wolfe: A scalar boolean `Tensor`. Indicates whether a point
satisfying the Wolfe conditions has been found. If this is True, the
interval will be degenerate (i.e. left and right below
will be identical).
failed: A scalar boolean `Tensor`. Indicates if invalid function or
gradient values were encountered (i.e. infinity or NaNs).
num_evals: A scalar int32 `Tensor`. The total number of function
evaluations made.
left: Instance of _FnDFn. The position and the associated value and
derivative at the updated left end point of the interval.
right: Instance of _FnDFn. The position and the associated value and
derivative at the updated right end point of the interval.
"""
bracket_result = _bracket(
value_and_gradients_function,
val_0,
val_c,
f_lim,
max_iterations,
expansion_param=expansion_param)
# If the bracketing failed, or we have already exhausted all the allowed
# iterations, we return an error.
failed = (~tf.convert_to_tensor(bracket_result.bracketed) |
tf.greater_equal(bracket_result.iteration, max_iterations))
def _bracketing_failed_fn():
return _LineSearchInnerResult(
iteration=bracket_result.iteration,
found_wolfe=False,
failed=True,
num_evals=bracket_result.num_evals,
left=val_0,
right=val_c)
def _bracketing_success_fn():
"""Performs line search."""
result = _line_search_after_bracketing(
value_and_gradients_function,
val_0,
bracket_result.left,
bracket_result.right,
f_lim,
bracket_result.iteration,
max_iterations,
sufficient_decrease_param=sufficient_decrease_param,
curvature_param=curvature_param,
shrinkage_param=shrinkage_param)
return _LineSearchInnerResult(
iteration=result.iteration,
found_wolfe=result.found_wolfe,
failed=result.failed,
num_evals=bracket_result.num_evals + result.num_evals,
left=result.left,
right=result.right)
return smart_cond.smart_cond(
failed,
true_fn=_bracketing_failed_fn,
false_fn=_bracketing_success_fn)
def _line_search_after_bracketing(
value_and_gradients_function,
val_0,
initial_left,
initial_right,
f_lim,
starting_iteration,
max_iterations,
sufficient_decrease_param=None,
curvature_param=None,
shrinkage_param=None):
"""The main loop of line search after the minimum has been bracketed.
Args:
value_and_gradients_function: A Python callable that accepts a real scalar
tensor and returns a tuple of scalar tensors of real dtype containing
the value of the function and its derivative at that point.
In usual optimization application, this function would be generated by
projecting the multivariate objective function along some specific
direction. The direction is determined by some other procedure but should
be a descent direction (i.e. the derivative of the projected univariate
function must be negative at 0.).
val_0: Instance of `_FnDFn` containing the value and gradient of the
objective at 0. The gradient must be negative (i.e. must be a descent
direction).
initial_left: Instance of _FnDFn. The value and derivative of the function
evaluated at the left end point of the bracketing interval.
initial_right: Instance of _FnDFn. The value and derivative of the function
evaluated at the right end point of the bracketing interval.
f_lim: Scalar `Tensor` of float dtype.
starting_iteration: Scalar integer `Tensor` of the same dtype as
`max_iterations`. The number of iterations that have already been
consumed by the bracketing.
max_iterations: Positive scalar `Tensor` of integral dtype. The maximum
number of iterations to perform in the line search. The number of
iterations used to bracket the minimum are also counted against this
parameter.
sufficient_decrease_param: Positive scalar `Tensor` of real dtype.
Bounded above by the curvature param. Corresponds to `delta` in the
terminology of [Hager and Zhang (2006)][2].
curvature_param: Positive scalar `Tensor` of real dtype. Bounded above
by `1.`. Corresponds to 'sigma' in the terminology of
[Hager and Zhang (2006)][2].
shrinkage_param: Scalar positive Tensor of real dtype. Must be less than
`1.`. Corresponds to the parameter `gamma` in [Hager and Zhang (2006)][2].
Returns:
A namedtuple containing the following fields.
iteration: A scalar int32 `Tensor`. The number of iterations consumed.
found_wolfe: A scalar boolean `Tensor`. Indicates whether a point
satisfying the Wolfe conditions has been found. If this is True, the
interval will be degenerate (i.e. left and right below
will be identical).
failed: A scalar boolean `Tensor`. Indicates if invalid function or
gradient values were encountered (i.e. infinity or NaNs).
num_evals: A scalar int32 `Tensor`. The total number of function
evaluations made.
left: Instance of _FnDFn. The position and the associated value and
derivative at the updated left end point of the interval.
right: Instance of _FnDFn. The position and the associated value and
derivative at the updated right end point of the interval.
"""
def _loop_cond(iteration, found_wolfe, failed, evals, val_left, val_right): # pylint:disable=unused-argument
"""Loop condition."""
eps = _next_after(val_right.x).eps
interval_shrunk = (val_right.x - val_left.x) <= eps
found_wolfe = tf.convert_to_tensor(found_wolfe)
return ((iteration < max_iterations) &
~(found_wolfe | failed | interval_shrunk))
def _loop_body(iteration, found_wolfe, failed, evals, val_left, val_right): # pylint:disable=unused-argument
"""The loop body."""
iteration += 1
secant2_result = _secant2(
value_and_gradients_function,
val_0,
val_left,
val_right,
f_lim,
sufficient_decrease_param=sufficient_decrease_param,
curvature_param=curvature_param)
evals += secant2_result.num_evals
def _failed_fn():
return _LineSearchInnerResult(
iteration=iteration,
found_wolfe=False,
failed=True,
num_evals=evals,
left=val_left,
right=val_right)
def _found_wolfe_fn():
return _LineSearchInnerResult(
iteration=iteration,
found_wolfe=True,
failed=False,
num_evals=evals,
left=secant2_result.left,
right=secant2_result.right)
def _default_fn():
"""Default action."""
new_width = secant2_result.right.x - secant2_result.left.x
old_width = val_right.x - val_left.x
sufficient_shrinkage = new_width < shrinkage_param * old_width
def _sufficient_shrinkage_fn():
"""Action to perform if secant2 shrank the interval sufficiently."""
func_is_flat = (
(_next_after(val_left.f).nextfloat >= val_right.f) &
(_next_after(secant2_result.left.f).nextfloat >=
secant2_result.right.f))
is_flat_retval = _LineSearchInnerResult(
iteration=iteration,
found_wolfe=True,
failed=False,
num_evals=evals,
left=secant2_result.left,
right=secant2_result.left)
not_is_flat_retval = _LineSearchInnerResult(
iteration=iteration,
found_wolfe=False,
failed=False,
num_evals=evals,
left=secant2_result.left,
right=secant2_result.right)
return smart_cond.smart_cond(
func_is_flat,
true_fn=lambda: is_flat_retval,
false_fn=lambda: not_is_flat_retval)
def _insufficient_shrinkage_fn():
"""Action to perform if secant2 didn't shrink the interval enough."""
update_result = _line_search_inner_bisection(
value_and_gradients_function,
secant2_result.left,
secant2_result.right,
f_lim)
return _LineSearchInnerResult(
iteration=iteration,
found_wolfe=False,
failed=update_result.failed,
num_evals=evals + update_result.num_evals,
left=update_result.left,
right=update_result.right)
return smart_cond.smart_cond(
sufficient_shrinkage,
true_fn=_sufficient_shrinkage_fn,
false_fn=_insufficient_shrinkage_fn)
return smart_cond.smart_case([
(secant2_result.failed, _failed_fn),
(secant2_result.found_wolfe, _found_wolfe_fn)
], default=_default_fn, exclusive=False)
initial_args = _LineSearchInnerResult(
iteration=starting_iteration,
found_wolfe=False,
failed=False,
num_evals=0,
left=initial_left,
right=initial_right)
raw_results = tf.while_loop(
_loop_cond,
_loop_body,
loop_vars=initial_args,
parallel_iterations=1)
# Check if we terminated because of interval being shrunk in which case
# we return success.
effective_wolfe = (
raw_results.found_wolfe | # Found Wolfe, or
(
~tf.convert_to_tensor(raw_results.failed, name='failed')
& # We didn't fail and didn't exceed the iterations.
(raw_results.iteration < max_iterations)))
return _LineSearchInnerResult(
iteration=raw_results.iteration,
found_wolfe=effective_wolfe,
failed=raw_results.failed,
num_evals=raw_results.num_evals,
left=raw_results.left,
right=raw_results.right)
def _line_search_inner_bisection(
value_and_gradients_function,
val_left,
val_right,
f_lim):
"""Performs bisection and updates the interval."""
midpoint = (val_left.x + val_right.x) / 2
f_m, df_m = value_and_gradients_function(midpoint)
val_mid = _FnDFn(x=midpoint, f=f_m, df=df_m)
val_mid_finite = _is_finite(val_mid)
def _success_fn():
"""Action to take if the midpoint evaluation succeeded."""
update_result = _update(
value_and_gradients_function,
val_left,
val_right,
val_mid,
f_lim)
return _UpdateResult(
failed=update_result.failed,
num_evals=update_result.num_evals + 1,
left=update_result.left,
right=update_result.right)
def _failed_fn():
return _UpdateResult(
failed=True,
num_evals=1,
left=val_left,
right=val_right)
return smart_cond.smart_cond(
val_mid_finite,
true_fn=_success_fn,
false_fn=_failed_fn)
def _satisfies_wolfe(val_0,
val_c,
f_lim,
sufficient_decrease_param,
curvature_param):
"""Checks whether the Wolfe or approx Wolfe conditions are satisfied.
The Wolfe conditions are a set of stopping criteria for an inexact line search
algorithm. Let f(a) be the function value along the search direction and
df(a) the derivative along the search direction evaluated a distance 'a'.
Here 'a' is the distance along the search direction. The Wolfe conditions are:
```None
f(a) <= f(0) + delta * a * df(0) (Armijo/Sufficient decrease condition)
df(a) >= sigma * df(0) (Weak curvature condition)
```
`delta` and `sigma` are two user supplied parameters satisfying:
`0 < delta < sigma <= 1.`. In the following, delta is called
`sufficient_decrease_param` and sigma is called `curvature_param`.
On a finite precision machine, the Wolfe conditions are difficult to satisfy
when one is close to the minimum. Hence, Hager-Zhang propose replacing
the sufficient decrease condition with the following condition on the
derivative in the vicinity of a minimum.
```None
df(a) <= (2 * delta - 1) * df(0) (Approx Wolfe sufficient decrease)
```
This condition is only used if one is near the minimum. This is tested using
```None
f(a) <= f(0) + epsilon * |f(0)|
```
The following function checks both the Wolfe and approx Wolfe conditions.
Here, `epsilon` is a small positive constant. In the following, the argument
`f_lim` corresponds to the product: epsilon * |f(0)|.
Args:
val_0: Instance of _FnDFn. The function and derivative value at 0.
val_c: Instance of _FnDFn. The function and derivative value at the
point to be tested.
f_lim: Scalar `Tensor` of real dtype. The function value threshold for
the approximate Wolfe conditions to be checked.
sufficient_decrease_param: Positive scalar `Tensor` of real dtype.
Bounded above by the curvature param. Corresponds to 'delta' in the
terminology of [Hager and Zhang (2006)][2].
curvature_param: Positive scalar `Tensor` of real dtype. Bounded above
by `1.`. Corresponds to 'sigma' in the terminology of
[Hager and Zhang (2005)][1].
Returns:
is_satisfied: A scalar boolean `Tensor` which is True if either the
Wolfe or approximate Wolfe conditions are satisfied.
"""
exact_wolfe_suff_dec = (sufficient_decrease_param * val_0.df >=
(val_c.f - val_0.f) / val_c.x)
wolfe_curvature = val_c.df >= curvature_param * val_0.df
exact_wolfe = exact_wolfe_suff_dec & wolfe_curvature
approx_wolfe_applies = val_c.f <= f_lim
approx_wolfe_suff_dec = ((2 * sufficient_decrease_param - 1) * val_0.df
>= val_c.df)
approx_wolfe = approx_wolfe_applies & approx_wolfe_suff_dec & wolfe_curvature
is_satisfied = exact_wolfe | approx_wolfe
return is_satisfied
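# Worked example, for illustration only (not part of the original module):
# for f(a) = (a - 1)**2 we have f(0) = 1 and df(0) = -2. At the trial point
# a = 1 (the minimizer), f = 0 and df = 0. With delta = 0.1 and sigma = 0.9:
#   sufficient decrease: 0.1 * (-2) = -0.2 >= (0 - 1) / 1 = -1.0  -> True
#   curvature:           0 >= 0.9 * (-2) = -1.8                   -> True
# so the exact Wolfe conditions hold and `is_satisfied` is True.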
def _secant(a, b, dfa, dfb):
"""Returns the secant interpolation for the minimum.
The secant method is a technique for finding roots of nonlinear functions.
When finding the minimum, one applies the secant method to the derivative
of the function.
For an arbitrary function and a bounding interval, the secant approximation
can produce the next point which is outside the bounding interval. However,
with the assumption of the opposite slope condition on the interval [a, b], the new
point c is always bracketed by [a,b]. Note that by assumption,
f'(a) < 0 and f'(b) > 0.
Hence c is a weighted average of a and b and thus always in [a, b].
Args:
a: A scalar real `Tensor`. The left end point of the initial interval.
b: A scalar real `Tensor`. The right end point of the initial interval.
dfa: A scalar real `Tensor`. The derivative of the function at the
left end point (i.e. a).
dfb: A scalar real `Tensor`. The derivative of the function at the
right end point (i.e. b).
Returns:
approx_minimum: A scalar real `Tensor`. An approximation to the point
at which the derivative vanishes.
"""
return (a * dfb - b * dfa) / (dfb - dfa)
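# Worked example, for illustration only: with f(x) = (x - 1)**2 we have
# df(x) = 2 * (x - 1). For a = 0, b = 2 the derivatives are dfa = -2, dfb = 2,
# so _secant(0., 2., -2., 2.) = (0 * 2 - 2 * (-2)) / (2 - (-2)) = 1, which is
# the exact minimizer because the derivative is linear in x.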
_Secant2Result = collections.namedtuple('_Secant2Results', [
'found_wolfe',
'failed',
'num_evals',
'left',
'right'])
def _secant2(value_and_gradients_function,
val_0,
val_left,
val_right,
f_lim,
sufficient_decrease_param=0.1,
curvature_param=0.9,
name=None):
"""Performs the secant square procedure of <NAME>.
Given an interval that brackets a root, this procedure performs an update of
both end points using two intermediate points generated using the secant
interpolation. For details see the steps S1-S4 in [Hager and Zhang (2006)][2].
The interval [a, b] must satisfy the opposite slope conditions described in
the documentation for '_update'.
Args:
value_and_gradients_function: A Python callable that accepts a real
scalar tensor and returns a tuple
containing the value of the function and its derivative at that point.
val_0: Instance of _FnDFn. The function and derivative value at 0.
val_left: Instance of _FnDFn. The value and derivative of the function
evaluated at the left end point of the bracketing interval (labelled 'a'
above).
val_right: Instance of _FnDFn. The value and derivative of the function
evaluated at the right end point of the bracketing interval (labelled 'b'
above).
f_lim: Scalar `Tensor` of real dtype. The function value threshold for
the approximate Wolfe conditions to be checked.
sufficient_decrease_param: Positive scalar `Tensor` of real dtype.
Bounded above by the curvature param. Corresponds to 'delta' in the
terminology of [Hager and Zhang (2006)][2].
curvature_param: Positive scalar `Tensor` of real dtype. Bounded above
by `1.`. Corresponds to 'sigma' in the terminology of
[Hager and Zhang (2006)][2].
name: (Optional) Python str. The name prefixed to the ops created by this
function. If not supplied, the default name 'secant2' is used.
Returns:
A namedtuple containing the following fields.
found_wolfe: A scalar boolean `Tensor`. Indicates whether a point
satisfying the Wolfe conditions has been found. If this is True, the
interval will be degenerate (i.e. val_left_bar and val_right_bar below
will be identical).
failed: A scalar boolean `Tensor`. Indicates if invalid function or
gradient values were encountered (i.e. infinity or NaNs).
num_evals: A scalar int32 `Tensor`. The total number of function
evaluations made.
left: Instance of _FnDFn. The position and the associated value and
derivative at the updated left end point of the interval.
right: Instance of _FnDFn. The position and the associated value and
derivative at the updated right end point of the interval.
"""
with tf.name_scope(name, 'secant2',
[val_0,
val_left,
val_right,
f_lim,
sufficient_decrease_param,
curvature_param]):
a, dfa = val_left.x, val_left.df
b, dfb = val_right.x, val_right.df
c = _secant(a, b, dfa, dfb) # This will always be s.t. a <= c <= b
fc, dfc = value_and_gradients_function(c)
val_c = _FnDFn(x=c, f=fc, df=dfc)
secant_failed = ~_is_finite(val_c)
def _secant_failed_fn():
return _Secant2Result(
found_wolfe=False,
failed=True,
num_evals=1,
left=val_left,
right=val_right)
secant_failed_case = secant_failed, _secant_failed_fn
found_wolfe = _satisfies_wolfe(
val_0,
val_c,
f_lim,
sufficient_decrease_param=sufficient_decrease_param,
curvature_param=curvature_param)
def _found_wolfe_fn():
return _Secant2Result(
found_wolfe=True,
failed=False,
num_evals=1,
left=val_c,
right=val_c)
found_wolfe_case = found_wolfe, _found_wolfe_fn
def _default_fn():
"""Default action."""
inner_result = _secant2_inner(
value_and_gradients_function,
val_0,
val_c,
val_left,
val_right,
f_lim,
sufficient_decrease_param=sufficient_decrease_param,
curvature_param=curvature_param)
return _Secant2Result(
found_wolfe=inner_result.found_wolfe,
failed=inner_result.failed,
num_evals=inner_result.num_evals + 1,
left=inner_result.left,
right=inner_result.right)
return smart_cond.smart_case([
secant_failed_case,
found_wolfe_case
], default=_default_fn, exclusive=False)
def _secant2_inner(value_and_gradients_function,
val_0,
val_c,
val_left,
val_right,
f_lim,
sufficient_decrease_param=None,
curvature_param=None):
"""Helper function for secant square."""
update_result = _update(value_and_gradients_function,
val_left,
val_right,
val_c,
f_lim)
update_worked = ~tf.convert_to_tensor(update_result.failed)
def _failed_fn():
return _Secant2Result(
found_wolfe=False,
failed=True,
num_evals=update_result.num_evals,
left=val_left,
right=val_right)
def _success_fn():
"""Graph to execute when the update above succeeded."""
def _do_secant(val_1, val_2):
return _secant(val_1.x, val_2.x, val_1.df, val_2.df), True
next_c, is_new = smart_cond.smart_case([
(tf.equal(update_result.right.x, val_c.x),
lambda: _do_secant(val_right, update_result.right)),
(tf.equal(update_result.left.x, val_c.x),
lambda: _do_secant(val_left, update_result.left))
], default=lambda: (val_c.x, False))
in_range = ((update_result.left.x <= next_c) &
(next_c <= update_result.right.x))
in_range_and_new = in_range & is_new
def in_range_and_new_fn():
"""Action to take when a new trial point is generated."""
f_c, df_c = value_and_gradients_function(next_c)
val_c = _FnDFn(x=next_c, f=f_c, df=df_c)
inner_result = _secant2_inner_update(
value_and_gradients_function,
val_0,
val_c,
update_result.left,
update_result.right,
f_lim,
check_stopping_condition=True,
sufficient_decrease_param=sufficient_decrease_param,
curvature_param=curvature_param)
return _Secant2Result(
found_wolfe=inner_result.found_wolfe,
failed=inner_result.failed,
num_evals=update_result.num_evals + inner_result.num_evals + 1,
left=inner_result.left,
right=inner_result.right)
in_range_and_new_case = in_range_and_new, in_range_and_new_fn
def _in_range_not_new_fn():
"""Action to take when no new trial point is generated."""
inner_result = _secant2_inner_update(
value_and_gradients_function,
val_0,
val_c,
update_result.left,
update_result.right,
f_lim,
check_stopping_condition=True,
sufficient_decrease_param=sufficient_decrease_param,
curvature_param=curvature_param)
return _Secant2Result(
found_wolfe=inner_result.found_wolfe,
failed=inner_result.failed,
num_evals=update_result.num_evals + inner_result.num_evals,
left=inner_result.left,
right=inner_result.right)
in_range_not_new_case = in_range, _in_range_not_new_fn
def _default_fn():
return _Secant2Result(
found_wolfe=False,
failed=False,
num_evals=update_result.num_evals,
left=update_result.left,
right=update_result.right)
return smart_cond.smart_case([
in_range_and_new_case,
in_range_not_new_case,
], default=_default_fn)
return smart_cond.smart_cond(
update_worked,
true_fn=_success_fn,
false_fn=_failed_fn)
def _secant2_inner_update(value_and_gradients_function,
val_0,
val_c,
val_left,
val_right,
f_lim,
check_stopping_condition=True,
sufficient_decrease_param=None,
curvature_param=None):
"""Helper function for secant-square step."""
def _default_fn():
"""Default Action."""
update_result = _update(value_and_gradients_function,
val_left,
val_right,
val_c,
f_lim)
return _Secant2Result(
found_wolfe=False,
failed=update_result.failed,
num_evals=update_result.num_evals,
left=update_result.left,
right=update_result.right)
if not check_stopping_condition:
return _default_fn()
def _secant_failed_fn():
return _Secant2Result(
found_wolfe=False,
failed=True,
num_evals=0,
left=val_left,
right=val_right)
secant_failed = ~_is_finite(val_c)
secant_failed_case = secant_failed, _secant_failed_fn
found_wolfe = _satisfies_wolfe(
val_0,
val_c,
f_lim,
sufficient_decrease_param=sufficient_decrease_param,
curvature_param=curvature_param)
def _found_wolfe_fn():
return _Secant2Result(
found_wolfe=True,
failed=False,
num_evals=0,
left=val_c,
right=val_c)
# If we have found a point satisfying the Wolfe conditions,
# we have converged.
found_wolfe_case = found_wolfe, _found_wolfe_fn
return smart_cond.smart_case([
secant_failed_case,
found_wolfe_case
], default=_default_fn, exclusive=False)
_UpdateResult = collections.namedtuple('_UpdateResult', [
'failed', # Boolean indicating whether the update failed.
'num_evals', # The total number of objective evaluations performed.
'left', # The left end point (instance of _FnDFn).
'right' # The right end point (instance of _FnDFn).
])
def _update(value_and_gradients_function,
val_left,
val_right,
val_trial,
f_lim):
"""Squeezes a bracketing interval containing the minimum.
Given an interval which brackets a minimum and a point in that interval,
finds a smaller nested interval which also brackets the minimum. If the
supplied point does not lie in the bracketing interval, the current interval
is returned.
The requirement of the interval bracketing a minimum is expressed through the
opposite slope conditions. Assume the left end point is 'a', the right
end point is 'b', the function to be minimized is 'f' and the derivative is
'df'. The update procedure relies on the following conditions being satisfied:
'''
f(a) <= f(0) + epsilon (1)
df(a) < 0 (2)
df(b) > 0 (3)
'''
In the first condition, epsilon is a small positive constant. The condition
demands that the function value at the left end point not be much bigger than
the value at the starting point (i.e. at 0). This condition is easy to satisfy because by
assumption, we are in a direction where the function value is decreasing.
The second and third conditions together demand that there is at least one
zero of the derivative in between a and b.
In addition to the interval, the update algorithm requires a third point to
be supplied. Usually, this point would lie within the interval [a, b]. If the
point is outside this interval, the current interval is returned. If the
point lies within the interval, the behaviour of the function and derivative
value at this point is used to squeeze the original interval in a manner that
preserves the opposite slope conditions.
For further details of this component, see the procedure U0-U3 on page 123 of
the [Hager and Zhang (2006)][2] article.
Note that this function does not explicitly verify whether the opposite slope
conditions are satisfied for the supplied interval. It is assumed that this
is so.
Args:
value_and_gradients_function: A Python callable that accepts a real scalar
tensor and returns a tuple containing the value of the function and its
derivative at that point.
val_left: Instance of _FnDFn. The value and derivative of the function
evaluated at the left end point of the bracketing interval (labelled 'a'
above).
val_right: Instance of _FnDFn. The value and derivative of the function
evaluated at the right end point of the bracketing interval (labelled 'b'
above).
val_trial: Instance of _FnDFn. The value and derivative of the function
evaluated at the trial point to be used to shrink the interval (labelled
'c' above).
f_lim: Scalar `Tensor` of real dtype. The function value threshold for
the approximate Wolfe conditions to be checked.
Returns:
A namedtuple containing the following fields
failed: A boolean scalar `Tensor` indicating whether the objective
function failed to yield a finite value at the trial points.
num_evals: A scalar int32 `Tensor`. The total number of function evaluations
made.
left: Instance of _FnDFn. The position and the associated value and
derivative at the updated left end point of the interval.
right: Instance of _FnDFn. The position and the associated value and
derivative at the updated right end point of the interval.
"""
left, right = val_left.x, val_right.x
trial, f_trial, df_trial = val_trial.x, val_trial.f, val_trial.df
# If the intermediate point is not in the interval, do nothing.
def _out_of_range_fn():
return _UpdateResult(
failed=False,
num_evals=0,
left=val_left,
right=val_right)
is_out_of_range = (trial < left) | (trial > right)
out_of_range_case = is_out_of_range, _out_of_range_fn
# The new point is a valid right end point (has positive derivative).
def _can_update_right_fn():
return _UpdateResult(
failed=False,
num_evals=0,
left=val_left,
right=val_trial)
can_update_right = (df_trial >= 0), _can_update_right_fn
# The new point is a valid left end point because it has negative slope
# and the value at the point is not too large.
def _can_update_left_fn():
return _UpdateResult(
failed=False,
num_evals=0,
left=val_trial,
right=val_right)
can_update_left = (f_trial <= f_lim), _can_update_left_fn
def _default_fn():
"""Default Action."""
bisection_result = _bisect(value_and_gradients_function,
val_left,
val_trial,
f_lim)
return _UpdateResult(
failed=bisection_result.failed,
num_evals=bisection_result.num_evals,
left=bisection_result.left,
right=bisection_result.right)
return smart_cond.smart_case(
[
out_of_range_case,
can_update_right,
can_update_left
],
default=_default_fn,
exclusive=False)
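# Branch sketch, for illustration only: with f(x) = (x - 1)**2, the bracketing
# interval [0, 2] and the trial point c = 1.5, df(1.5) = 1 >= 0, so the
# `can_update_right` case fires and the interval shrinks to [0, 1.5]. With
# c = 0.5 instead, df(0.5) = -1 < 0 and f(0.5) = 0.25 <= f_lim (f_lim is just
# above f(0) = 1 here), so `can_update_left` fires and the interval becomes
# [0.5, 2].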
_BisectionResult = collections.namedtuple('_BisectionResult', [
'stopped', # Boolean indicating whether bisection terminated gracefully.
'failed', # Boolean indicating whether objective evaluation failed.
'num_evals', # The total number of objective evaluations performed.
'left', # The left end point (instance of _FnDFn).
'right' # The right end point (instance of _FnDFn).
])
def _bisect(value_and_gradients_function,
initial_left,
initial_right,
f_lim):
"""Bisects an interval and updates to satisfy opposite slope conditions.
Corresponds to the step U3 in [Hager and Zhang (2006)][2].
Args:
value_and_gradients_function: A Python callable that accepts a real scalar
tensor and returns a tuple of scalar tensors of real dtype containing
the value of the function and its derivative at that point.
In usual optimization application, this function would be generated by
projecting the multivariate objective function along some specific
direction. The direction is determined by some other procedure but should
be a descent direction (i.e. the derivative of the projected univariate
function must be negative at `0.`).
initial_left: Instance of _FnDFn. The value and derivative of the function
evaluated at the left end point of the current bracketing interval.
initial_right: Instance of _FnDFn. The value and derivative of the function
evaluated at the right end point of the current bracketing interval.
f_lim: Scalar `Tensor` of real dtype. The function value threshold for
the approximate Wolfe conditions to be checked.
Returns:
A namedtuple containing the following fields:
stopped: A scalar boolean tensor. Indicates whether the bisection
loop terminated normally (i.e. by finding an interval that satisfies
the opposite slope conditions).
failed: A scalar boolean tensor. Indicates whether the objective function
failed to produce a finite value.
num_evals: A scalar int32 tensor. The number of value and gradients
function evaluations.
final_left: Instance of _FnDFn. The value and derivative of the function
evaluated at the left end point of the bracketing interval found.
final_right: Instance of _FnDFn. The value and derivative of the function
evaluated at the right end point of the bracketing interval found.
"""
def _loop_cond(stopped, failed, evals, val_left, val_right): # pylint:disable=unused-argument
"""Loop conditional."""
stopped = tf.convert_to_tensor(stopped) # Ensure it is a tensor so | works.
eps = _next_after(val_right.x).eps
# Factor of 2 needed because we are computing the midpoint in the loop
# body and if the interval width falls below twice the epsilon,
# the mid point will be indistinguishable from the endpoints and we will
# have an infinite loop.
interval_too_small = (val_right.x - val_left.x) <= 2 * eps
return ~(stopped | failed | interval_too_small)
# The case where the proposed point has negative slope but the value is
# too high. It is not a valid left end point but along with the current left
# end point, it encloses another minima. The following loop tries to narrow
# the interval so that it satisfies the opposite slope conditions.
def _loop_body(stopped, failed, evals, val_left, val_right): # pylint:disable=unused-argument
"""Updates the right end point to satisfy the opposite slope conditions."""
val_left_x = val_left.x
mid_pt = (val_left_x + val_right.x) / 2
f_mid, df_mid = value_and_gradients_function(mid_pt)
# The case conditions.
val_mid = _FnDFn(x=mid_pt, f=f_mid, df=df_mid)
evals += 1
mid_failed = ~_is_finite(val_mid)
def _failed_fn():
return _BisectionResult(
stopped=False,
failed=True,
num_evals=evals,
left=val_left,
right=val_right)
failed_case = (mid_failed, _failed_fn)
def _valid_right_fn():
return _BisectionResult(
stopped=True,
failed=False,
num_evals=evals,
left=val_left,
right=val_mid)
# The new point can be a valid right end point.
valid_right_case = (df_mid >= 0), _valid_right_fn
# It is a valid left end pt.
valid_left = (df_mid < 0) & (f_mid <= f_lim)
# Note that we must return found = False in this case because our target
# is to find a good right end point and improvements to the left end point
# are coincidental. Hence the loop must continue until we exit via
# the valid_right case.
def _valid_left_fn():
return _BisectionResult(
stopped=False,
failed=False,
num_evals=evals,
left=val_mid,
right=val_right)
valid_left_case = valid_left, _valid_left_fn
# To be explicit, this action applies when the new point has a positive
# slope but the function value at that point is too high. This is the
# same situation with which we started the loop in the first place. Hence
# we should just replace the old right end point and continue to loop.
def _default_fn():
return _BisectionResult(
stopped=False,
failed=False,
num_evals=evals,
left=val_left,
right=val_mid)
return smart_cond.smart_case([
failed_case,
valid_right_case,
valid_left_case
], default=_default_fn, exclusive=False)
initial_args = _BisectionResult(
stopped=tf.convert_to_tensor(False),
failed=False,
num_evals=0,
left=initial_left,
right=initial_right)
raw_results = tf.while_loop(_loop_cond,
_loop_body,
initial_args,
parallel_iterations=1)
return _BisectionResult(
stopped=(raw_results.stopped | ~raw_results.failed),
failed=raw_results.failed,
num_evals=raw_results.num_evals,
left=raw_results.left,
right=raw_results.right)
_BracketResult = collections.namedtuple('_BracketResult', [
'iteration', # Number of iterations taken to bracket.
'bracketed', # Boolean indicating whether bracketing succeeded.
'failed', # Boolean indicating whether objective evaluation failed.
'num_evals', # The total number of objective evaluations performed.
'left', # The left end point (instance of _FnDFn).
'right' # The right end point (instance of _FnDFn).
])
def _bracket(value_and_gradients_function,
val_0,
val_c,
f_lim,
max_iterations,
expansion_param=5.0):
"""Brackets the minimum given an initial starting point.
Applies the Hager Zhang bracketing algorithm to find an interval containing
a region with points satisfying Wolfe conditions. Uses the supplied initial
step size 'c' to find such an interval. The only condition on 'c' is that
it should be positive. For more details see steps B0-B3 in
[Hager and Zhang (2006)][2].
Args:
value_and_gradients_function: A Python callable that accepts a real scalar
tensor and returns a tuple containing the value of the function and
its derivative at that point.
val_0: Instance of _FnDFn. The function and derivative value at 0.
val_c: Instance of _FnDFn. The value and derivative of the function
evaluated at the initial trial point (labelled 'c' above).
f_lim: Scalar `Tensor` of real dtype. The function value threshold for
the approximate Wolfe conditions to be checked.
max_iterations: Int32 scalar `Tensor`. The maximum number of iterations
permitted.
expansion_param: Scalar positive `Tensor` of real dtype. Must be greater
than `1.`. Used to expand the initial interval in case it does not bracket
a minimum.
Returns:
A namedtuple with the following fields.
iterations: An int32 scalar `Tensor`. The number of iterations performed.
Bounded above by `max_iterations` parameter.
bracketed: A boolean scalar `Tensor`. True if the minimum has been
bracketed by the returned interval.
failed: A boolean scalar `Tensor`. True if an error was encountered
while bracketing.
num_evals: An int32 scalar `Tensor`. The number of times the objective was
evaluated.
left: Instance of _FnDFn. The position and the associated value and
derivative at the updated left end point of the interval found.
right: Instance of _FnDFn. The position and the associated value and
derivative at the updated right end point of the interval found.
"""
def _cond(iteration, bracketed, failed, *ignored_args): # pylint:disable=unused-argument
"""Loop cond."""
retval = tf.logical_not(bracketed | failed | (iteration >= max_iterations))
return retval
def _body(iteration, bracketed, failed, evals, val_left, val_right): # pylint:disable=unused-argument
"""Loop body to find the bracketing interval."""
iteration += 1
def _not_finite_fn():
return _BracketResult(iteration=iteration,
bracketed=False,
failed=True,
num_evals=evals,
left=val_left,
right=val_right)
# Check that the function or gradient are finite and quit if they aren't.
not_finite = ~_is_finite(val_left, val_right)
case0 = (not_finite, _not_finite_fn)
def _right_pt_found_fn():
return _BracketResult(iteration=iteration,
bracketed=True,
failed=False,
num_evals=evals,
left=val_left,
right=val_right)
# If the right point has an increasing derivative, then [left, right]
# encloses a minimum and we are done.
case1 = ((val_right.df >= 0), _right_pt_found_fn)
# This case applies if the point has negative derivative (i.e. it is almost
# suitable as a left endpoint).
def _case2_fn():
"""Case 2."""
bisection_result = _bisect(
value_and_gradients_function,
val_0,
val_right,
f_lim)
return _BracketResult(
iteration=iteration,
bracketed=True,
failed=bisection_result.failed,
num_evals=evals + bisection_result.num_evals,
left=bisection_result.left,
right=bisection_result.right)
case2 = (val_right.f > f_lim), _case2_fn
def _default_fn():
"""Expansion."""
next_right = expansion_param * val_right.x
f_next_right, df_next_right = value_and_gradients_function(next_right)
val_next_right = _FnDFn(x=next_right, f=f_next_right, df=df_next_right)
failed = ~_is_finite(val_next_right)
return _BracketResult(
iteration=iteration,
bracketed=False,
failed=failed,
num_evals=evals + 1,
left=val_right,
right=val_next_right)
return smart_cond.smart_case(
[
case0,
case1,
case2
],
default=_default_fn,
exclusive=False)
initial_vars = _BracketResult(
iteration=tf.convert_to_tensor(0),
bracketed=False,
failed=False,
num_evals=0,
left=val_0,
right=val_c)
return tf.while_loop(
_cond,
_body,
initial_vars,
parallel_iterations=1)
def _is_finite(val_1, val_2=None):
"""Checks if the supplied values are finite.
Args:
val_1: Instance of _FnDFn. The function and derivative value.
val_2: (Optional) Instance of _FnDFn. The function and derivative value.
Returns:
is_finite: Scalar boolean `Tensor` indicating whether the function value
and the gradient in `val_1` (and optionally) in `val_2` are all finite.
"""
val_1_finite = tf.is_finite(val_1.f) & tf.is_finite(val_1.df)
if val_2 is not None:
return val_1_finite & tf.is_finite(val_2.f) & tf.is_finite(val_2.df)
return val_1_finite
def _prepare_args(value_and_gradients_function,
initial_step_size,
objective_at_initial_step_size,
grad_objective_at_initial_step_size,
objective_at_zero,
grad_objective_at_zero,
threshold_use_approximate_wolfe_condition):
"""Prepares the arguments for the line search initialization.
Args:
value_and_gradients_function: A Python callable that accepts a real
scalar tensor and returns a tuple containing the value of the
function and its derivative at that point.
initial_step_size: Scalar positive `Tensor` of real dtype. The
initial value to try to bracket the minimum. Default is `1.`.
Note that this point need not necessarily bracket the minimum for the
line search to work correctly. However, a good value for it will make the
search converge faster.
objective_at_initial_step_size: Scalar `Tensor` of real dtype.
If supplied, the value of the function at `initial_step_size`. If None,
it will be computed.
grad_objective_at_initial_step_size: Scalar `Tensor` of real dtype.
If supplied, the
derivative of the function at `initial_step_size`. If None, it
will be computed.
objective_at_zero: Scalar `Tensor` of real dtype. If supplied, the value
of the function at `0.`. If it is None, it will be computed.
grad_objective_at_zero: Scalar `Tensor` of real dtype. If supplied, the
derivative of the function at `0.`. If None, it will be computed.
threshold_use_approximate_wolfe_condition: Scalar positive `Tensor` of
real dtype. Corresponds to the parameter 'epsilon' in
[Hager and Zhang (2006)][2]. Used to estimate the
threshold at which the line search switches to approximate Wolfe
conditions.
Returns:
val_0: An instance of `_FnDFn` containing the value and derivative of the
function at `0.`.
val_initial: An instance of `_FnDFn` containing the value and derivative of
the function at `initial_step_size`.
f_lim: Scalar `Tensor` of real dtype. The function value threshold for
the approximate Wolfe conditions to be checked.
eval_count: Scalar int32 `Tensor`. The number of target function
evaluations made by this function.
"""
eval_count = tf.convert_to_tensor(0)
if initial_step_size is not None:
initial_step_size = tf.convert_to_tensor(initial_step_size)
else:
initial_step_size = tf.convert_to_tensor(1.0, dtype=tf.float32)
if (objective_at_initial_step_size is None or
grad_objective_at_initial_step_size is None):
(
objective_at_initial_step_size,
grad_objective_at_initial_step_size
) = value_and_gradients_function(initial_step_size)
eval_count += 1
val_initial = _FnDFn(x=initial_step_size,
f=objective_at_initial_step_size,
df=grad_objective_at_initial_step_size)
x_0 = tf.zeros_like(initial_step_size)
if objective_at_zero is not None:
objective_at_zero = tf.convert_to_tensor(objective_at_zero)
if grad_objective_at_zero is not None:
grad_objective_at_zero = tf.convert_to_tensor(grad_objective_at_zero)
if objective_at_zero is None or grad_objective_at_zero is None:
(
objective_at_zero,
grad_objective_at_zero
) = value_and_gradients_function(x_0)
eval_count += 1
val_0 = _FnDFn(x=x_0, f=objective_at_zero, df=grad_objective_at_zero)
f_lim = objective_at_zero + (threshold_use_approximate_wolfe_condition *
tf.abs(objective_at_zero))
return val_0, val_initial, f_lim, eval_count
def _to_str(x):
"""Converts a bool tensor to a string with True/False values."""
x = tf.convert_to_tensor(x)
if x.dtype == tf.bool:
return tf.where(x, tf.fill(x.shape, 'True'), tf.fill(x.shape, 'False'))
return x
# A convenience function useful while debugging in the graph mode.
def _print(pass_through_tensor, values):
"""Wrapper for tf.Print which supports lists and namedtuples for printing."""
flat_values = []
for value in values:
# Checks if it is a namedtuple.
if hasattr(value, '_fields'):
for field in value._fields:
flat_values.extend([field, _to_str(getattr(value, field))])
continue
if isinstance(value, (list, tuple)):
for v in value:
flat_values.append(_to_str(v))
continue
flat_values.append(_to_str(value))
return tf.Print(pass_through_tensor, flat_values)
``` |
{
"source": "jihungen/lightweight-rest-tester",
"score": 2
} |
#### File: lightweight-rest-tester/rest_tester/command_line.py
```python
import unittest
import click
import sys
from rest_tester.main import generate_test_functions, run_test_functions
from rest_tester.options import Options
class TestsContainer(unittest.TestCase):
pass
@click.command()
@click.argument('test_suites_dir', type=click.Path(exists=True))
@click.option('--base_url', default=None, type=str, help='Base URL of API.')
@click.option('--auth', default=None, type=str, help='Authentication information: "user:pass"')
@click.option('--insecure', is_flag=True)
def main(test_suites_dir, base_url, auth, insecure):
options = Options(base_url=base_url, auth=auth, insecure=insecure)
generate_test_functions(TestsContainer, test_suites_dir, options)
was_successful = run_test_functions(TestsContainer)
if not was_successful:
print('Testing was NOT successful!')
sys.exit(1)
```
#### File: rest_tester/function/multiple.py
```python
from . import TestFunctionBuilder, TestFunction
class MultipleTargetsTestFunctionBuilder(TestFunctionBuilder):
def __init__(self, setting):
self._targets = setting.targets
def build(self):
test_function_name = self._generate_name(self._targets[0].api)
def test_function_aggregated(test_self):
for idx, target in enumerate(self._targets):
curr_test_function_name = test_function_name + ' (%s)' % str(idx + 1)
test_function = self._build_test_function(curr_test_function_name, target.api, target.tests)
test_function(test_self)
return [TestFunction(test_function_name, test_function_aggregated)]
```
#### File: rest_tester/setting/api.py
```python
from .method import TestMethod
class API(object):
KEY_URL = 'url'
KEY_METHOD = 'method'
KEY_PARAMS = 'params'
KEY_DATA = 'data'
def __init__(self, api_data, options):
self._url = api_data[self.KEY_URL]
self._method = TestMethod(api_data[self.KEY_METHOD])
self._params = api_data.get(self.KEY_PARAMS, {})
self._data = api_data.get(self.KEY_DATA)
self._options = options
@property
def url(self):
if self._options.base_url is not None:
return self._options.base_url + self._url
return self._url
@property
def method(self):
return self._method.method
@property
def params(self):
return self._params
@params.setter
def params(self, value):
self._params = value
@property
def data(self):
return self._data
@property
def auth(self):
return self._options.auth
@property
def insecure(self):
return self._options.insecure
```
#### File: test/function/__init__.py
```python
import time
def run_test_function_list(test_function_list, test_self):
time.sleep(1)
for test_function in test_function_list:
test_function.test_function(test_self)
```
#### File: test/function/test_single.py
```python
import os
import unittest
from requests.exceptions import Timeout
from jsonschema import ValidationError
from rest_tester.function.single import SingleTargetTestFunctionBuilder
from rest_tester.options import Options
from rest_tester.setting import TestSetting
from test import load_json_data
from test.function import run_test_function_list
class TestSingleTargetTestFunctionBuilder(unittest.TestCase):
current_dir_path = os.path.dirname(__file__)
def test_get(self):
json_file = '%s/resources/test_function_single_get.json' % self.current_dir_path
test_function_list = self.generate_test_functions(json_file)
run_test_function_list(test_function_list, self)
def test_get_unexpected_timeout(self):
json_file = '%s/resources/test_function_single_get_unexpected_timeout.json' % self.current_dir_path
test_function_list = self.generate_test_functions(json_file)
with self.assertRaises(Timeout):
run_test_function_list(test_function_list, self)
def test_get_unexpected_status_code(self):
json_file = '%s/resources/test_function_single_get_unexpected_status_code.json' % self.current_dir_path
test_function_list = self.generate_test_functions(json_file)
with self.assertRaises(AssertionError):
run_test_function_list(test_function_list, self)
def test_get_unexpected_json_schema(self):
json_file = '%s/resources/test_function_single_get_unexpected_json_schema.json' % self.current_dir_path
test_function_list = self.generate_test_functions(json_file)
with self.assertRaises(ValidationError):
run_test_function_list(test_function_list, self)
def test_delete(self):
json_file = '%s/resources/test_function_single_delete.json' % self.current_dir_path
test_function_list = self.generate_test_functions(json_file)
run_test_function_list(test_function_list, self)
def test_patch(self):
json_file = '%s/resources/test_function_single_patch.json' % self.current_dir_path
test_function_list = self.generate_test_functions(json_file)
run_test_function_list(test_function_list, self)
def test_post(self):
json_file = '%s/resources/test_function_single_post.json' % self.current_dir_path
test_function_list = self.generate_test_functions(json_file)
run_test_function_list(test_function_list, self)
def test_put(self):
json_file = '%s/resources/test_function_single_put.json' % self.current_dir_path
test_function_list = self.generate_test_functions(json_file)
run_test_function_list(test_function_list, self)
def test_put_multi_params(self):
json_file = '%s/resources/test_function_single_put_multi_params.json' % self.current_dir_path
test_function_list = self.generate_test_functions(json_file)
run_test_function_list(test_function_list, self)
def test_put_unexpected_status_code(self):
json_file = '%s/resources/test_function_single_put_unexpected_status_code.json' % self.current_dir_path
test_function_list = self.generate_test_functions(json_file)
with self.assertRaises(AssertionError):
run_test_function_list(test_function_list, self)
@staticmethod
def generate_test_functions(json_file):
json_data = load_json_data(json_file)
setting = TestSetting(json_data, Options())
builder = SingleTargetTestFunctionBuilder(setting)
return builder.build()
if __name__ == '__main__':
unittest.main()
```
#### File: lightweight-rest-tester/test/__init__.py
```python
import json
def are_equal_lists(list1, list2):
"""Check if two lists contain same items regardless of their orders."""
return len(list1) == len(list2) and all(list1.count(i) == list2.count(i) for i in list1)
def load_json_data(json_file_path):
with open(json_file_path, 'r') as json_file:
return json.load(json_file)
```
#### File: test/setting/test_api.py
```python
import unittest
from rest_tester.options import Options
from rest_tester.setting import API
class TestAPI(unittest.TestCase):
def test_entire_information(self):
api_data = {
"url": "https://jsonplaceholder.typicode.com/comments",
"method": "get",
"params": {
"postId": 1
},
"data": {
"title": "foo",
"body": "bar",
"userId": 1
}
}
api = API(api_data, Options())
self.assertEqual(api_data['url'], api.url)
self.assertEqual(api_data['method'], api.method)
self.assertEqual(api_data['params'], api.params)
self.assertEqual(api_data['data'], api.data)
def test_essential_information(self):
api_data = {
"url": "https://jsonplaceholder.typicode.com/comments",
"method": "get"
}
api = API(api_data, Options())
self.assertEqual(api_data['url'], api.url)
self.assertEqual(api_data['method'], api.method)
def test_add_base_url(self):
base_url = "https://jsonplaceholder.typicode.com"
api_data = {
"url": "/comments",
"method": "get"
}
api = API(api_data, Options(base_url=base_url))
self.assertEqual(base_url + api_data['url'], api.url)
self.assertEqual(api_data['method'], api.method)
def test_add_auth(self):
user = 'user'
password = '<PASSWORD>'
api_data = {
"url": "/comments",
"method": "get"
}
api = API(api_data, Options(auth=user + ':' + password))
expected = (user, password)
self.assertEqual(expected, api.auth)
def test_add_insecure(self):
insecure = True
api_data = {
"url": "/comments",
"method": "get"
}
api = API(api_data, Options(insecure=insecure))
self.assertTrue(api.insecure)
def test_missing_information(self):
api_data = {
"params": {
"postId": 1
}
}
with self.assertRaises(KeyError):
API(api_data, Options())
if __name__ == '__main__':
unittest.main()
```
#### File: test/setting/test_setting.py
```python
import os
import unittest
from rest_tester.options import Options
from test.setting import convert_api_to_dict, convert_tests_to_dict
from rest_tester.setting import TestSetting, TestTarget
from test import load_json_data
class TestTestSetting(unittest.TestCase):
current_dir_path = os.path.dirname(__file__)
def test_single(self):
json_file = '%s/resources/test_setting_single.json' % self.current_dir_path
json_data = load_json_data(json_file)
setting = TestSetting(json_data, Options())
self.assertFalse(setting.has_multiple_targets())
self.assertEqual(
json_data[TestTarget.KEY_API],
convert_api_to_dict(setting.targets[0].api)
)
self.assertEqual(
json_data[TestTarget.KEY_TESTS],
convert_tests_to_dict(setting.targets[0].tests)
)
def test_multiple(self):
json_file = '%s/resources/test_setting_multiple.json' % self.current_dir_path
json_data = load_json_data(json_file)
setting = TestSetting(json_data, Options())
self.assertTrue(setting.has_multiple_targets())
for idx, target in enumerate(setting.targets):
self.assertEqual(
json_data[idx][TestTarget.KEY_API],
convert_api_to_dict(target.api)
)
self.assertEqual(
json_data[idx][TestTarget.KEY_TESTS],
convert_tests_to_dict(target.tests)
)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jihunhamm/MinimaxFilter",
"score": 2
} |
#### File: MinimaxFilter/src/filterAlg_Linear.py
```python
import numpy as np
class FilterAlg:
pass
'''
def __init__(self,X,hyperparams):
self.X = X
self.hyperparams = hyperparams
return
def g(self,u):
pass
return
'''
class Linear(FilterAlg):
'''
def __init__(self, X, d):
self.X = X
self.d = d
def reset(self, X, d):
self.X = X
self.d = d
'''
@staticmethod
def init(hparams):
D = hparams['D']
d = hparams['d']
# random normal
u = np.random.normal(size=(D*d,))
return u
@staticmethod
def g(u,X,hparams):
d = hparams['d']
#l = hparams['l']
D,N = X.shape
W = u.reshape((D,d))
return np.dot(W.T,X)
@staticmethod
def dgdu(u,X,hparams): # u.size x d x N = D*d x d*N
d = hparams['d']
D,N = X.shape
# Jacobian: u.size x d x N
#d = hparams['d']
#l = hparams['l']
#g(X;u) = [u1...ud]'X
# dgiduj = I[i=j]*X
dg = np.zeros((D,d,d,N))
for i in range(d):
dg[:,i,i,:] = X
return dg.reshape((D*d, d, N))
```
#### File: MinimaxFilter/src/minimaxFilter.py
```python
import numpy as np
from scipy.optimize import minimize
import kiwiel
#reload(kiwiel)
import alternatingOptim
def init(W0,method1):
if (method1=='kiwiel'):
pass
elif (method1=='alt'):
alternatingOptim.run.tsum = np.zeros(W0.flatten().shape)
def run(W0,W1,W2,rho,method1,maxiter_main,*args):
#X,y1,y2,\
#filt0,alg1,alg2,hparams0,hparams1,hparams2):
# util: min_u min_w f1(u,w) = min_u -max_w -f1(u,w)
# priv: max_u min_v f2(u,v) = -min_u -min_v f2(u,v) = -min_u max_v -f2(u,v)
# Joint task: min_u [-rho*max_w -f1(u,w) + max_v -f2(u,v)] = min_u Phi(u)
# where Phi(u) = -rho*max_w -f1(u,w) + max_v -f2(u,v)
# = -rho Phi1(u) + Phi2(u), where
# Phi1(u) = max_w -f1, Phi2(u) = max_v -f2
# Also let f(u,wv) = rho*f1(u,w) - f2(u,v).
# Note, max_uv f(u,wv) = rho*max_w f1(u,w) + max_v -f2(u,v)
# is not the same as Phi(u) = -rho*Phi1(u) + Phi2(u)
u = W0.flatten()
w = W1.flatten()
v = W2.flatten()
'''#% check gradients
if 0
u = randn(size(u)); v = randn(size(v)); w = randn(size(v));
check_gradient(@(u)ftemp1(u,w,v,X(:,1:100),y1(:,1:100),y2(:,1:100),K1,K2,rho,lambda0,lambda1,lambda2),u(:));
%
q = randn(size(u));
check_gradient(@(v)f_lin(u,v,q,X(:,1:100),y1(:,1:100),K1,lambda0,lambda1),v(:));
%
end
'''
if (method1=='kiwiel'):
u,wv = kiwiel.run(u,(w,v),maxiter_main,_f,_dfdu,_Phi,_Phi_lin,rho,*args)
elif (method1=='alt'):
u,wv = alternatingOptim.run(u,(w,v),maxiter_main,_f,_dfdu,_Phi,_Phi_lin,rho,*args)
else:
        print('Unknown method')
exit()
w,v = wv
#% Check dfdx
#function [fval,dfdu] = ftemp1(u,w,v,X,y1,y2,K1,K2,rho,lambda0,lambda1,lambda2)
# [fval,~,dfdu] = f_joint(u,w,v,X,y1,y2,K1,K2,rho,lambda0,lambda1,lambda2);
#end
return (u,w,v)
def _f(u,wv,\
rho,X,y1,y2,filt0,alg1,alg2,hparams0,hparams1,hparams2):
# f(u,wv) = rho*f1(u,w) - f2(u,v).
D,N = X.shape
w,v = wv
G = filt0.g(u,X,hparams0)
#d = G.shape[0]
f1 = alg1.f(w,G,y1,hparams1)
f2 = alg2.f(v,G,y2,hparams2)
fval = rho*f1 - f2 + .5*hparams0['l']*(u**2).sum()
assert np.isnan(fval)==False
return fval
def _dfdu(u,wv,\
rho,X,y1,y2,filt0,alg1,alg2,hparams0,hparams1,hparams2):
# f(u,wv) = rho*f1(u,w) - f2(u,v).
D,N = X.shape
w,v = wv
d = hparams0['d']
# dgdu: u.size x d x Nsub
# If dgdu is too large, subsample X and limit dgdu
MAX_MEMORY = 6.*1024*1024*1024 # 6GB
Nsub = round(MAX_MEMORY/(u.size*d*8.))
Nsub = min(Nsub,N)
ind = np.random.choice(range(N),size=(Nsub,),replace=False)
tG = filt0.g(u,X[:,ind],hparams0)
dgdu = filt0.dgdu(u,X[:,ind],hparams0).reshape((u.size,d*Nsub)) # u.size x d x N
df1 = alg1.dfdu(w,tG,y1[ind],dgdu,hparams1)
df2 = alg2.dfdu(v,tG,y2[ind],dgdu,hparams2)
dfdu = rho*df1 - df2 + hparams0['l']*u
assert np.isnan(dfdu).any()==False
return dfdu
def _Phi(u,wv,maxiter,\
rho,X,y1,y2,filt0,alg1,alg2,hparams0,hparams1,hparams2):
# Phi(u) = -rho*max_w -f1(u,w) + max_v -f2(u,v)
# = -rho Phi1(u) + Phi2(u), where
# Phi1(u) = max_w -f1, Phi2(u) = max_v -f2
w,v = wv
# Phi1(u) = max_w -f_util(u,w) = -min_w f_util(u,w)
G = filt0.g(u,X,hparams0)
res = minimize(alg1.f, w, args=(G,y1,hparams1),\
method='BFGS', jac=alg1.dfdv, options={'disp':False, 'maxiter':maxiter})
w = res.x
Phiu1 = -res.fun
# Phi2(u) = max_v -f_priv(u,v) = -min_v f_priv(u,v)
res = minimize(alg2.f, v, args=(G,y2,hparams2),\
method='BFGS',jac=alg2.dfdv, options={'disp':False, 'maxiter':maxiter})
v = res.x
Phiu2 = -res.fun
Phiu = -rho*Phiu1 + Phiu2 + .5*hparams0['l']*(u**2).sum()
assert np.isnan(w).any()==False
assert np.isnan(v).any()==False
assert np.isnan(Phiu)==False
return (Phiu,(w,v))
'''
def _dPhidu(u,wv,maxiter,\
rho,X,y1,y2,filt0,alg1,alg2,hparams0,hparams1,hparams2):
# dPhidu = rho* df1du(u,wh) - df2du(u,vh)
w,v = wv
G = filt0.g(u,X,hparams0)
# Phi1(u) = max_w -f_util(u,w) = -min_w f_util(u,w)
res = minimize(alg1.f, w, args=(G,y1,hparams1),\
method='BFGS', jac=alg1.dfdv, options={'disp':False, 'maxiter':maxiter})
w = res.x
#Phiu1 = -res.fun
# Phi2(u) = max_v -f_priv(u,v) = -min_v f_priv(u,v)
res = minimize(alg2.f, v, args=(G,y2,hparams2),\
method='BFGS',jac=alg2.dfdv, options={'disp':False, 'maxiter':maxiter})
v = res.x
#Phiu2 = -res.fun
dPhiu = _dfdu(u,(w,v),\
rho,X,y1,y2,filt0,alg1,alg2,hparams0,hparams1,hparams2)
assert np.isnan(w).any()==False
assert np.isnan(v).any()==False
assert np.isnan(dPhiu).any()==False
return dPhiu
'''
def _Phi_lin(u,wv,q,maxiter,\
rho,X,y1,y2,filt0,alg1,alg2,hparams0,hparams1,hparams2):
w,v = wv
d = hparams1['d']
N = X.shape[1]
# dgdu: u.size x d x Nsub
    # If dgdu is too large, subsample X so that dgdu stays within MAX_MEMORY
MAX_MEMORY = 8.*1024*1024*1024 # 8GB
Nsub = round(MAX_MEMORY/(u.size*d*8.))
Nsub = min(Nsub,N)
ind = np.random.choice(range(N),size=(Nsub,),replace=False)
tG = filt0.g(u,X[:,ind],hparams0)
dgdu = filt0.dgdu(u,X[:,ind],hparams0).reshape((u.size,d*Nsub)) # u.size x d x N
# Phi_lin(u) = -rho*max_w -f1(u,w) + max_v -f2(u,v)
# = -rho Phi1lin(u) + Phi2lin(u), where
# Phi1lin(u) = max_w -f1lin, Phi2lin(u) = max_v -f2lin
res = minimize(alg1.flin, w, args=(q,tG,y1[ind],dgdu,hparams1), \
method='BFGS',jac=alg1.dflindv, options={'disp':False, 'maxiter':maxiter})
w = res.x
Phiu1 = -res.fun
res = minimize(alg2.flin, v, args=(q,tG,y2[ind],dgdu,hparams2),\
method='BFGS',jac=alg2.dflindv, options={'disp':False, 'maxiter':maxiter})
v = res.x
Phiu2 = -res.fun
Phiu = -rho*Phiu1 + Phiu2 + .5*hparams0['l']*(u**2).sum()
assert np.isnan(w).any()==False
assert np.isnan(v).any()==False
assert np.isnan(Phiu)==False
return (Phiu,(w,v))
def selftest1():
## Linear filter
import privacyLDA
from filterAlg_Linear import Linear
from learningAlg import mlogreg
# Generate data
D0 = 5
K1 = 2
K2 = 3
NperClass = 100
N = NperClass*K1*K2
#l = 1.0e-3
X = np.zeros((D0,NperClass,K1,K2))
y1 = np.zeros((NperClass,K1,K2),dtype=int)
y2 = np.zeros((NperClass,K1,K2),dtype=int)
bias1 = np.random.normal(scale=1.0,size=(D0,K1))
bias2 = np.random.normal(scale=1.0,size=(D0,K2))
for k1 in range(K1):
for k2 in range(K2):
X[:,:,k1,k2] = \
np.random.normal(scale=1.0, size=(D0,NperClass)) \
+ np.tile(bias1[:,k1].reshape((D0,1)),(1,NperClass)) \
+ np.tile(bias2[:,k2].reshape((D0,1)),(1,NperClass))
y1[:,k1,k2] = k1*np.ones((NperClass,))
y2[:,k1,k2] = k2*np.ones((NperClass,))
X = X.reshape((D0,N))
y1 = y1.reshape((N,))
y2 = y2.reshape((N,))
    Ntrain = int(np.floor(N / 2.))
#Ntest = N - Ntrain
ind = np.random.choice(range(N),size=(N,),replace=False)
ind_train = ind[:Ntrain]
ind_test = ind[Ntrain:]
###########################################################################
maxiter = 30
maxiter_main = 1
maxiter_final = 50
rho = 10.
lambda0 = 1e-8
lambda1 = 1e-8
lambda2 = 1e-8
d = 2
hparams0 = {'d':d, 'l':lambda0, 'D':D0}
hparams1 = {'K':K1, 'l':lambda1, 'd':d}
hparams2 = {'K':K2,'l':lambda2, 'd':d}
if False:
U,dd = privacyLDA.run(X[:,ind_train],y1[ind_train],y2[ind_train])
w0_init = U[:,0:d].flatten()
else:
w0_init = Linear.init(hparams0)
#print (W0**2).sum()
w1_init = mlogreg.init(hparams1)
w2_init = mlogreg.init(hparams2)
    print("\n\nKiwiel's method")
w0 = w0_init
w1 = w1_init
w2 = w2_init
for iter in range(maxiter):
#print (W0**2).sum()
G_train = Linear.g(w0,X[:,ind_train],hparams0)
# Full training
tW1,f1 = mlogreg.train(G_train,y1[ind_train],hparams1,None,maxiter_final)
tW2,f2 = mlogreg.train(G_train,y2[ind_train],hparams2,None,maxiter_final)
# Testing error
G_test = Linear.g(w0,X[:,ind_test],hparams0)
rate1,_ = mlogreg.accuracy(tW1,G_test,y1[ind_test])
rate2,_ = mlogreg.accuracy(tW2,G_test,y2[ind_test])
        print('rate_tar= %.2f, rate_subj= %.2f' % (rate1, rate2))
# run one iteration
w0,w1,w2 = run(w0,w1,w2,rho,'kiwiel',maxiter_main,\
X[:,ind_train],y1[ind_train],y2[ind_train],\
Linear,mlogreg,mlogreg,\
hparams0,hparams1,hparams2)
#val = _f(w0,(w1,w2),\
# rho,X[:,ind_train],y1[ind_train],y2[ind_train],\
# NN1,mlogreg,mlogreg,\
# hparams0,hparams1,hparams2)
#print 'val=', val, '\n'
    print('\n\nAlternating optimization')
w0 = w0_init
w1 = w1_init
w2 = w2_init
for iter in range(maxiter):
#print (W0**2).sum()
G_train = Linear.g(w0,X[:,ind_train],hparams0)
# Full training
tW1,f1 = mlogreg.train(G_train,y1[ind_train],hparams1,None,maxiter_final)
tW2,f2 = mlogreg.train(G_train,y2[ind_train],hparams2,None,maxiter_final)
# Testing error
G_test = Linear.g(w0,X[:,ind_test],hparams0)
rate1,_ = mlogreg.accuracy(tW1,G_test,y1[ind_test])
rate2,_ = mlogreg.accuracy(tW2,G_test,y2[ind_test])
        print('rate_tar= %.2f, rate_subj= %.2f' % (rate1, rate2))
# run one iteration
w0,w1,w2 = run(w0,w1,w2,rho,'alt',maxiter_main,\
X[:,ind_train],y1[ind_train],y2[ind_train],\
Linear,mlogreg,mlogreg,\
hparams0,hparams1,hparams2)
def selftest2():
## Simple neural network filter
from filterAlg_NN import NN1
from learningAlg import mlogreg
# Generate data
D0 = 5
K1 = 2
K2 = 3
NperClass = 100
N = NperClass*K1*K2
#l = 1.0e-3
X = np.zeros((D0,NperClass,K1,K2))
y1 = np.zeros((NperClass,K1,K2),dtype=int)
y2 = np.zeros((NperClass,K1,K2),dtype=int)
bias1 = np.random.normal(scale=1.0,size=(D0,K1))
bias2 = np.random.normal(scale=1.0,size=(D0,K2))
for k1 in range(K1):
for k2 in range(K2):
X[:,:,k1,k2] = \
np.random.normal(scale=1.0, size=(D0,NperClass)) \
+ np.tile(bias1[:,k1].reshape((D0,1)),(1,NperClass)) \
+ np.tile(bias2[:,k2].reshape((D0,1)),(1,NperClass))
y1[:,k1,k2] = k1*np.ones((NperClass,))
y2[:,k1,k2] = k2*np.ones((NperClass,))
X = X.reshape((D0,N))
y1 = y1.reshape((N,))
y2 = y2.reshape((N,))
    Ntrain = int(np.floor(N / 2.))
#Ntest = N - Ntrain
ind = np.random.choice(range(N),size=(N,),replace=False)
ind_train = ind[:Ntrain]
ind_test = ind[Ntrain:]
###########################################################################
maxiter = 30
maxiter_main = 1
maxiter_final = 50
rho = 10.
lambda0 = 1e-8
lambda1 = 1e-8
lambda2 = 1e-8
d = 2
hparams0 = {'D':D0, 'nhs':[3,3,d], 'activation':'sigmoid', 'l':lambda0}
hparams1 = {'K':K1, 'l':lambda1, 'd':d}
hparams2 = {'K':K2,'l':lambda2, 'd':d}
w0_init = NN1.init(hparams0)
w1_init = mlogreg.init(hparams1)
w2_init = mlogreg.init(hparams2)
    print("\n\nKiwiel's method")
w0 = w0_init
w1 = w1_init
w2 = w2_init
for iter in range(maxiter):
#print (W0**2).sum()
G_train = NN1.g(w0,X[:,ind_train],hparams0)
# Full training
tW1,f1 = mlogreg.train(G_train,y1[ind_train],hparams1,None,maxiter_final)
tW2,f2 = mlogreg.train(G_train,y2[ind_train],hparams2,None,maxiter_final)
# Testing error
G_test = NN1.g(w0,X[:,ind_test],hparams0)
rate1,_ = mlogreg.accuracy(tW1,G_test,y1[ind_test])
rate2,_ = mlogreg.accuracy(tW2,G_test,y2[ind_test])
        print('rate_tar= %.2f, rate_subj= %.2f' % (rate1, rate2))
# run one iteration
w0,w1,w2 = run(w0,w1,w2,rho,'kiwiel',maxiter_main,\
X[:,ind_train],y1[ind_train],y2[ind_train],\
NN1,mlogreg,mlogreg,\
hparams0,hparams1,hparams2)
#val = _f(w0,(w1,w2),\
# rho,X[:,ind_train],y1[ind_train],y2[ind_train],\
# NN1,mlogreg,mlogreg,\
# hparams0,hparams1,hparams2)
#print 'val=', val, '\n'
    print('\n\nAlternating optimization')
w0 = w0_init
w1 = w1_init
w2 = w2_init
for iter in range(maxiter):
#print (W0**2).sum()
G_train = NN1.g(w0,X[:,ind_train],hparams0)
# Full training
tW1,f1 = mlogreg.train(G_train,y1[ind_train],hparams1,None,maxiter_final)
tW2,f2 = mlogreg.train(G_train,y2[ind_train],hparams2,None,maxiter_final)
# Testing error
G_test = NN1.g(w0,X[:,ind_test],hparams0)
rate1,_ = mlogreg.accuracy(tW1,G_test,y1[ind_test])
rate2,_ = mlogreg.accuracy(tW2,G_test,y2[ind_test])
        print('rate_tar= %.2f, rate_subj= %.2f' % (rate1, rate2))
# run one iteration
w0,w1,w2 = run(w0,w1,w2,rho,'alt',maxiter_main,\
X[:,ind_train],y1[ind_train],y2[ind_train],\
NN1,mlogreg,mlogreg,\
hparams0,hparams1,hparams2)
'''
################################################################################
## Compare kiwiel and alternating
maxiter = 100
w0_init = NN1.init(hparams0)
w1_init = mlogreg.init(hparams1)
w2_init = mlogreg.init(hparams2)
w0,w1,w2 = run(w0_init,w1_init,w2_init,rho,'kiwiel',maxiter,\
X[:,ind_train],y1[ind_train],y2[ind_train],\
NN1,mlogreg,mlogreg,\
hparams0,hparams1,hparams2)
G_train = NN1.g(w0,X[:,ind_train],hparams0)
# Full training
tW1,f1 = mlogreg.train(G_train,y1[ind_train],hparams1,None,maxiter_final)
tW2,f2 = mlogreg.train(G_train,y2[ind_train],hparams2,None,maxiter_final)
# Testing error
G_test = NN1.g(w0,X[:,ind_test],hparams0)
rate1,_ = mlogreg.accuracy(tW1,G_test,y1[ind_test])
rate2,_ = mlogreg.accuracy(tW2,G_test,y2[ind_test])
print 'Kiwiel: rate_tar= %.2f, rate_subj= %.2f' % (rate1,rate2)
w0,w1,w2 = run(w0_init,w1_init,w2_init,rho,'alternating',maxiter,\
X[:,ind_train],y1[ind_train],y2[ind_train],\
NN1,mlogreg,mlogreg,\
hparams0,hparams1,hparams2)
G_train = NN1.g(w0,X[:,ind_train],hparams0)
# Full training
tW1,f1 = mlogreg.train(G_train,y1[ind_train],hparams1,None,maxiter_final)
tW2,f2 = mlogreg.train(G_train,y2[ind_train],hparams2,None,maxiter_final)
# Testing error
G_test = NN1.g(w0,X[:,ind_test],hparams0)
rate1,_ = mlogreg.accuracy(tW1,G_test,y1[ind_test])
rate2,_ = mlogreg.accuracy(tW2,G_test,y2[ind_test])
print 'Alternating:rate_tar= %.2f, rate_subj= %.2f' % (rate1,rate2)
'''
```
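The comments in `run` and `_f` above describe the minimax objective in prose. Restated as formulas purely for readability (writing the `hparams0['l']` regularizer as lambda_0), the quantities the module works with are:

```latex
f(u, w, v) = \rho\, f_1(u, w) - f_2(u, v) + \tfrac{\lambda_0}{2}\,\lVert u \rVert^2,
\qquad
\Phi(u) = -\rho \max_{w}\bigl(-f_1(u, w)\bigr) + \max_{v}\bigl(-f_2(u, v)\bigr) + \tfrac{\lambda_0}{2}\,\lVert u \rVert^2 .
```

`_Phi` evaluates the two inner maximizations with BFGS, and `_Phi_lin` does the same for the linearized objectives (`alg.flin`) that `kiwiel.run` consumes.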
#### File: MinimaxFilter/src/sparse_autoencoder.py
```python
import numpy as np
def sigmoid(x):
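    # Numerically stable sigmoid: 1/(1+exp(-x)) is used for x >= 0 and
    # exp(x)/(1+exp(x)) for x < 0, so np.exp never receives a large positive argument.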
indp = np.where(x>=0)
indn = np.where(x<0)
tx = np.zeros(x.shape)
tx[indp] = 1./(1.+np.exp(-x[indp]))
tx[indn] = np.exp(x[indn])/(1.+np.exp(x[indn]))
return tx
def sigmoid_prime(x):
return sigmoid(x) * (1 - sigmoid(x))
def KL_divergence(x, y):
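    # Bernoulli KL divergence KL(x || y); 1e-20 is added inside the logs to avoid log(0).
    # Intended as the sparsity penalty, though the cost functions below keep sparsity_delta at 0.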
return x * (np.log(x+1E-20)-np.log(y+1E-20)) + (1 - x) * (np.log(1 - x+1E-20) - np.log(1 - y+1E-20))
def initialize(hidden_size, visible_size):
r = np.sqrt(6) / np.sqrt(hidden_size + visible_size + 1)
W1 = np.random.random((hidden_size, visible_size)) * 2 * r - r
W2 = np.random.random((visible_size, hidden_size)) * 2 * r - r
b1 = np.zeros(hidden_size, dtype=np.float64)
b2 = np.zeros(visible_size, dtype=np.float64)
theta = np.concatenate((W1.reshape(hidden_size * visible_size),
W2.reshape(hidden_size * visible_size),
b1.reshape(hidden_size),
b2.reshape(visible_size)))
return theta
def sparse_autoencoder_cost(theta, visible_size, hidden_size,
lambda_, sparsity_param, beta, data):
W1 = theta[0:hidden_size * visible_size].reshape(hidden_size, visible_size)
W2 = theta[hidden_size * visible_size:2 * hidden_size * visible_size].reshape(visible_size, hidden_size)
b1 = theta[2 * hidden_size * visible_size:2 * hidden_size * visible_size + hidden_size]
b2 = theta[2 * hidden_size * visible_size + hidden_size:]
m = data.shape[1]
z2 = W1.dot(data) + np.tile(b1, (m, 1)).transpose()
a2 = sigmoid(z2)
z3 = W2.dot(a2) + np.tile(b2, (m, 1)).transpose()
h = sigmoid(z3)
cost = np.sum((h - data) ** 2) / (2 * m) + \
(lambda_ / 2) * (np.sum(W1 ** 2) + np.sum(W2 ** 2))# + \
sparsity_delta = 0
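    # Note: sparsity_delta is fixed at 0 here, so the KL sparsity penalty and the
    # beta weight have no effect on the gradients computed below.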
delta3 = -(data - h) * sigmoid_prime(z3)
delta2 = (W2.transpose().dot(delta3) + beta * sparsity_delta) * sigmoid_prime(z2)
W1grad = delta2.dot(data.transpose()) / m + lambda_ * W1
W2grad = delta3.dot(a2.transpose()) / m + lambda_ * W2
b1grad = np.sum(delta2, axis=1) / m
b2grad = np.sum(delta3, axis=1) / m
grad = np.concatenate((W1grad.reshape(hidden_size * visible_size),
W2grad.reshape(hidden_size * visible_size),
b1grad.reshape(hidden_size),
b2grad.reshape(visible_size)))
return cost, grad
def sparse_autoencoder(theta, hidden_size, visible_size, data):
W1 = theta[0:hidden_size * visible_size].reshape(hidden_size, visible_size)
b1 = theta[2 * hidden_size * visible_size:2 * hidden_size * visible_size + hidden_size]
m = data.shape[1]
z2 = W1.dot(data) + np.tile(b1, (m, 1)).transpose()
a2 = sigmoid(z2)
return a2
def sparse_autoencoder_linear_cost(theta, visible_size, hidden_size,
lambda_, sparsity_param, beta, data):
W1 = theta[0:hidden_size * visible_size].reshape(hidden_size, visible_size)
W2 = theta[hidden_size * visible_size:2 * hidden_size * visible_size].reshape(visible_size, hidden_size)
b1 = theta[2 * hidden_size * visible_size:2 * hidden_size * visible_size + hidden_size]
b2 = theta[2 * hidden_size * visible_size + hidden_size:]
m = data.shape[1]
z2 = W1.dot(data) + np.tile(b1, (m, 1)).transpose()
a2 = sigmoid(z2)
z3 = W2.dot(a2) + np.tile(b2, (m, 1)).transpose()
h = z3
cost = np.sum((h - data) ** 2) / (2 * m) + \
(lambda_ / 2) * (np.sum(W1 ** 2) + np.sum(W2 ** 2))
sparsity_delta = 0.
delta3 = -(data - h)
delta2 = (W2.transpose().dot(delta3) + beta * sparsity_delta) * sigmoid_prime(z2)
W1grad = delta2.dot(data.transpose()) / m + lambda_ * W1
W2grad = delta3.dot(a2.transpose()) / m + lambda_ * W2
b1grad = np.sum(delta2, axis=1) / m
b2grad = np.sum(delta3, axis=1) / m
grad = np.concatenate((W1grad.reshape(hidden_size * visible_size),
W2grad.reshape(hidden_size * visible_size),
b1grad.reshape(hidden_size),
b2grad.reshape(visible_size)))
return cost, grad
``` |
{
"source": "jihun-hong/snap-python",
"score": 3
} |
#### File: snapx/classes/coreviews.py
```python
from collections.abc import Mapping
from .attrdict import AttributeDict
class AtlasView(Mapping):
"""This is a view into a dict-of-dict-like structure.
This view shows a certain node's neighbors and their edge attributes.
Note that unlike NetworkX's AltasView, we need both the
underlying graph and the ID for the node of interest in
order to accomplish the same effect, hence the difference
in the API."""
__slots__ = ("_graph", "_node")
def __getstate__(self):
raise NotImplementedError("TODO")
def __setstate__(self, state):
raise NotImplementedError("TODO")
def __init__(self, g, n):
"""Initialize with the input node and graph"""
self._graph = g
self._node = n
if not isinstance(n, int):
raise TypeError("Node ID must be int.")
if n not in g:
raise KeyError("Node must be present in graph.")
def __len__(self):
return sum(1 for d in self)
def __iter__(self):
ni = self._graph.snap_graph.GetNI(self._node)
for dst in ni.GetOutEdges():
yield dst
def __getitem__(self, key):
return AttributeDict(self._graph, (self._node, key))
def copy(self):
raise NotImplementedError("TODO")
def __str__(self):
strs = []
for k in iter(self):
strs.append(str(k) + ": " + str(self[k]))
return "{" + ", ".join(strs) + "}"
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, self.__str__())
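# Usage sketch (hypothetical graph `g` containing node 0): iterating AtlasView(g, 0)
# yields the out-neighbors of node 0, and AtlasView(g, 0)[nbr] returns an
# AttributeDict over the attributes of the (0, nbr) edge, similar in spirit to
# NetworkX's G.adj[0] view.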
class AdjacencyView(Mapping):
"""This is a view into a dict-of-dict-of-dict-like data structure.
This view shows all nodes' neighbors and their edge attributes.
"""
__slots__ = ("_graph",)
def __init__(self, g):
self._graph = g
def __getstate__(self):
raise NotImplementedError("TODO")
def __setstate__(self, state):
raise NotImplementedError("TODO")
def __getitem__(self, node):
return AtlasView(self._graph, node)
def __len__(self):
return sum(1 for n in self)
def __iter__(self):
for n in self._graph:
yield n
def __str__(self):
strs = []
for n in iter(self):
strs.append(str(n) + ": " + str(self[n]))
return "{" + ", ".join(strs) + "}"
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, self.__str__())
def copy(self):
raise NotImplementedError("TODO")
class FilterAtlas(Mapping): # nodedict, nbrdict, keydict
def __init__(self, d, NODE_OK):
self._atlas = d
self.NODE_OK = NODE_OK
def __len__(self):
return sum(1 for n in self)
def __iter__(self):
try: # check that NODE_OK has attr 'nodes'
node_ok_shorter = 2 * len(self.NODE_OK.nodes) < len(self._atlas)
except AttributeError:
node_ok_shorter = False
if node_ok_shorter:
return (n for n in self.NODE_OK.nodes if n in self._atlas)
return (n for n in self._atlas if self.NODE_OK(n))
def __getitem__(self, key):
if key in self._atlas and self.NODE_OK(key):
return self._atlas[key]
raise KeyError("Key {} not found".format(key))
def copy(self):
try: # check that NODE_OK has attr 'nodes'
node_ok_shorter = 2 * len(self.NODE_OK.nodes) < len(self._atlas)
except AttributeError:
node_ok_shorter = False
if node_ok_shorter:
return {u: self._atlas[u] for u in self.NODE_OK.nodes if u in self._atlas}
return {u: d for u, d in self._atlas.items() if self.NODE_OK(u)}
def __str__(self):
return str({nbr: self[nbr] for nbr in self})
def __repr__(self):
return "{}({}, {})".format(self.__class__.__name__, self._atlas.__repr__(), self.NODE_OK._repr__())
class FilterAdjacency(Mapping): # edgedict
def __init__(self, d, NODE_OK, EDGE_OK):
self._atlas = d
self.NODE_OK = NODE_OK
self.EDGE_OK = EDGE_OK
def __len__(self):
return sum(1 for n in self)
def __iter__(self):
try: # check that NODE_OK has attr 'nodes'
node_ok_shorter = 2 * len(self.NODE_OK.nodes) < len(self._atlas)
except AttributeError:
node_ok_shorter = False
if node_ok_shorter:
return (n for n in self.NODE_OK.nodes if n in self._atlas)
return (n for n in self._atlas if self.NODE_OK(n))
def __getitem__(self, node):
if node in self._atlas and self.NODE_OK(node):
def new_node_ok(nbr):
return self.NODE_OK(nbr) and self.EDGE_OK(node, nbr)
return FilterAtlas(self._atlas[node], new_node_ok)
raise KeyError("Key {} not found".format(key))
def copy(self):
try: # check that NODE_OK has attr 'nodes'
node_ok_shorter = 2 * len(self.NODE_OK.nodes) < len(self._atlas)
except AttributeError:
node_ok_shorter = False
if node_ok_shorter:
return {
u: {
v: d
for v, d in self._atlas[u].items()
if self.NODE_OK(v)
if self.EDGE_OK(u, v)
}
for u in self.NODE_OK.nodes
if u in self._atlas
}
return {
u: {v: d for v, d in nbrs.items() if self.NODE_OK(v) if self.EDGE_OK(u, v)}
for u, nbrs in self._atlas.items()
if self.NODE_OK(u)
}
def __str__(self):
return str({nbr: self[nbr] for nbr in self})
def __repr__(self):
return "{}({}, {}, {})".format(self.__class__.__name__, self._atlas.__repr__(), self.NODE_OK__repr__(), self.EDGE_OK.__repr__())
class FilterMultiAdjacency(FilterAdjacency): # multiedgedict
def __getitem__(self, node):
if node in self._atlas and self.NODE_OK(node):
def edge_ok(nbr, key):
return self.NODE_OK(nbr) and self.EDGE_OK(node, nbr, key)
return FilterMultiInner(self._atlas[node], self.NODE_OK, edge_ok)
raise KeyError("Key {} not found".format(node))
def copy(self):
try: # check that NODE_OK has attr 'nodes'
node_ok_shorter = 2 * len(self.NODE_OK.nodes) < len(self._atlas)
except AttributeError:
node_ok_shorter = False
if node_ok_shorter:
my_nodes = self.NODE_OK.nodes
return {
u: {
v: {k: d for k, d in kd.items() if self.EDGE_OK(u, v, k)}
for v, kd in self._atlas[u].items()
if v in my_nodes
}
for u in my_nodes
if u in self._atlas
}
return {
u: {
v: {k: d for k, d in kd.items() if self.EDGE_OK(u, v, k)}
for v, kd in nbrs.items()
if self.NODE_OK(v)
}
for u, nbrs in self._atlas.items()
if self.NODE_OK(u)
}
```
#### File: snap-python/swig/setup.py
```python
import os
import platform
import shutil
from setuptools import Extension, setup
from setuptools.command.build_ext import build_ext
import distutils.dir_util
SNAPPY_VERSION = '6.0.0'
class SwigExtension(Extension):
def __init__(self, name, sourcedir=''):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class PkgBuild(build_ext):
def run(self):
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
if not os.path.exists(extdir):
os.makedirs(extdir)
snap_obj = '_snap.so'
if platform.uname()[0].find('Windows') == 0:
snap_obj = '_snap.pyd'
# SWIG generated SNAP .py
shutil.copy('snap.py', extdir)
# compiled SNAP object library
shutil.copy(snap_obj, extdir)
# __init__ to import snapx as a module
shutil.copy('snap/__init__.py', extdir)
# snapx implementation
distutils.dir_util.copy_tree(
'../snapx/snapx', os.path.join(extdir, 'snapx'))
with open('../README.md') as f:
LONG_DESCRIPTION = f.read()
setup(
name='snap-stanford',
version=SNAPPY_VERSION,
author="snap.stanford.edu",
description="SNAP (Stanford Network Analysis Platform) Python",
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
url="http://snap.stanford.edu",
license="3-clause BSD, http://snap.stanford.edu/snap/license.html",
classifiers=[
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: BSD License",
"Operating System :: MacOS",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX :: Linux",
"Topic :: Scientific/Engineering"
],
zip_safe=False,
cmdclass=dict(build_ext=PkgBuild),
ext_modules=[SwigExtension('snap.')],
)
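# Assumed local workflow (not part of the original file): once SWIG has produced
# snap.py and _snap.so next to this script, `pip install .` or `python setup.py bdist_wheel`
# triggers PkgBuild.build_extension, which copies those artifacts plus the snapx
# sources into the package that gets installed.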
``` |
{
"source": "jihunim/uiautomator2",
"score": 2
} |
#### File: uiautomator2/cli/install.py
```python
import re
import time
import requests
import uiautomator2
from logzero import logger
import logging
logger.setLevel(logging.DEBUG)
# def oppo_install(self, apk_url):
#     m = hashlib.md5()
#     m.update(apk_url.encode('utf-8'))
#     key = m.hexdigest()[8:16]
#     dst = "/sdcard/atx-" + key + ".apk"  # the pushed APK is removed automatically after installation
#     d = uiautomator2.connect(self._device_url)
#     print(d.info, dst)
#     d.push_url(apk_url, dst)
#     # d.push("/Users/codeskyblue/workdir/atxcrawler/apks/ApiDemos-debug.apk", dst)  # For debug
#     with d.session("com.coloros.filemanager") as s:
#         s(text=u"所有文件").click()  # "All files"
#         s(className="android.widget.ListView").scroll.to(textContains=key)
#         s(textContains=key).click()
#         btn_done = d(className="android.widget.Button", text=u"完成")  # "Done"
#         while not btn_done.exists:
#             s(text="继续安装旧版本").click_exists()  # "Keep installing the old version"
#             s(text="无视风险安装").click_exists()  # "Install despite the risk"
#             s(text="重新安装").click_exists()  # "Reinstall"
#             # automatically clean up the leftover installer package
#             if s(resourceId=
#                     "com.android.packageinstaller:id/install_confirm_panel"
#                     ).exists:
#                 # tap <Install> through an offset click
#                 s(resourceId=
#                     "com.android.packageinstaller:id/bottom_button_layout"
#                     ).click(offset=(0.75, 0.5))
#         btn_done.click()
# def vivo_install(self, apk_url):
#     print("Vivo detected, open u2 watchers")
#     u = uiautomator2.connect_wifi(self._device_url)
#     u.watcher("AUTO_INSTALL").when(
#         textMatches="好|安装", className="android.widget.Button").click()  # "OK" or "Install"
#     u.watchers.watched = True
#     self.pm_install(apk_url)
def install_apk(device_url, apk_url):
"""
Args:
device_url: udid, device_ip or serial(when usb connected)
"""
psurl = pkgserv_addr(device_url)
_http_install_apk(psurl, apk_url)
def pkgserv_addr(device_url):
"""
ๆ นๆฎ่ฎพๅค็บฟ่ทๅๅฐatxserver็ๅฐๅ๏ผ็ถๅๅ่ทๅๅฐu2init็ๅฐๅ๏ผ็ดๆฅๅๅณๅฎๆฏๆ ็บฟๅฎ่ฃ
่ฟๆฏๆๅจๅฎ่ฃ
Returns:
Package API url
"""
logger.info("device url %s", device_url)
d = uiautomator2.connect(device_url)
devinfo = d.device_info
serial = devinfo['serial']
logger.info("serial %s, udid %s", serial, devinfo['udid'])
aserver_url = devinfo.get(
"serverUrl",
"http://wifiphone.nie.netease.com") # TODO(atx-agent should udpate)
logger.info("atx-server url %s", aserver_url)
r = requests.get(
aserver_url + "/devices/" + devinfo['udid'] + "/info").json()
pvd = r.get('provider')
if not pvd:
logger.info("u2init not connected")
return "http://" + d.wlan_ip + ":7912/packages"
pkg_url = 'http://%s:%d/devices/%s/pkgs' % (pvd['ip'], pvd['port'], serial)
logger.info("package url %s", pkg_url)
return pkg_url
def _http_install_apk(pkg_restapi, apk_url):
""" install apk from u2init """
resp = requests.post(pkg_restapi, data={"url": apk_url}).json()
if not resp.get('success'):
raise RuntimeError(resp.get('description'))
id = resp['data']['id']
logger.info("install id %s", id)
_wait_installed(pkg_restapi + "/" + id)
def _wait_installed(query_url):
""" query until install finished """
while True:
data = safe_getjson(query_url)
status = data.get('status')
logger.debug("%s %s", status, data.get('description'))
if status in ("success", "failure"):
break
time.sleep(1)
def safe_getjson(url):
""" get rest api """
r = requests.get(url).json()
desc = r.get('description')
if not r.get('success'):
raise RuntimeError(desc)
return r.get('data')
def main():
# ins = U2Installer("http://localhost:17000")
# apk_url = "http://arch.s3.netease.com/hzdev-appci/h35_trunk_RelWithDebInfo_373397.apk" # 1.8G large apk
apk_url = "https://gohttp.nie.netease.com/tools/apks/qrcodescan-2.6.0-green.apk"
# command line
# python -m uiautomator2.cli install 10.240.174.43 http://arch.s3.netease.com/hzdev-appci/h35_trunk_RelWithDebInfo_373397.apk -s http://wifiphone.nie.netease.com
# psurl = pkgserv_addr("3578298f")
psurl = pkgserv_addr("10.242.163.69")
install_apk(psurl, apk_url)
if __name__ == '__main__':
main()
``` |
{
"source": "Jihunn-Kim/khu_capstone_1",
"score": 2
} |
#### File: khu_capstone_1/demo/demo.py
```python
import sys
from PyQt5 import QtCore, QtWidgets, QtGui
from PyQt5.QtWidgets import (QApplication, QWidget, QLabel, QVBoxLayout, QLineEdit, QPlainTextEdit, QStyleFactory, QTableWidget,
                             QAbstractItemView, QTableWidgetItem, QGridLayout, QPushButton, QCheckBox, QComboBox, QHeaderView,
QSpacerItem, QSizePolicy)
from PyQt5.QtCore import pyqtSlot, pyqtSignal, QThread, QWaitCondition, QMutex, Qt
from PyQt5.QtGui import QPalette, QPixmap, QBrush, QFont
import torch
import pandas as pd
import numpy as np
import time
import model
# import qtmodern.styles
# import random
class MyThread(QThread):
    # signal declaration
change_value = pyqtSignal(object)
def __init__(self):
QThread.__init__(self)
self.cond = QWaitCondition()
self.mutex = QMutex()
self._status = False
self._attack = False
self._read_speed = 1000
self.consume = dict()
self.consume['stop'] = 'Stop!!'
def __del__(self):
self.wait()
    # run inference and log the results
def run(self):
net = model.OneNet(self.packet_num)
# net.load_state_dict(torch.load('model_weight_%d.pth' % self.packet_num))
net.load_state_dict(torch.load('99.pth', map_location='cpu'))
net.to(self.device)
net.eval()
packet_state = torch.zeros(1, model.STATE_DIM).to(self.device)
inference_count = 0
accuracy = 0.0
normal_idx = 0
abnormal_idx = 0
te_no_load = np.load('./fuzzy_tensor_normal_numpy.npy')
te_ab_load = np.load('./fuzzy_tensor_abnormal_numpy.npy')
no_load = np.load('./fuzzy_normal_numpy.npy')
ab_load = np.load('./fuzzy_abnormal_numpy.npy')
while True:
self.mutex.lock()
if not self._status:
self.consume['type'] = 'end'
self.change_value.emit(self.consume)
self.cond.wait(self.mutex)
if not self._attack:
inputs = torch.from_numpy(te_no_load[normal_idx]).float()
labels = 1
else:
inputs = torch.from_numpy(te_ab_load[abnormal_idx]).float()
labels = 0
inputs = inputs.to(self.device)
with torch.no_grad():
time_temp = time.time()
outputs, packet_state = net(inputs, packet_state)
time_temp = time.time() - time_temp
packet_state = torch.autograd.Variable(packet_state, requires_grad=False)
_, preds = torch.max(outputs, 1)
inference_count += 1
# print(preds.item(), labels)
if preds.item() == labels:
self.consume['check'] = 'ok'
accuracy += 1.0
else:
self.consume['check'] = 'no'
accuracy += 0.0
self.consume['type'] = 'start'
self.consume['acc'] = accuracy / inference_count * 100.0
self.consume['time'] = round(time_temp, 6)
            # wrap the read index around at the end of the data
if not self._attack:
self.consume['packet'] = no_load[normal_idx]
normal_idx += 1
if normal_idx == len(no_load):
normal_idx = 0
else:
self.consume['packet'] = ab_load[abnormal_idx]
abnormal_idx += 1
if abnormal_idx == len(ab_load):
abnormal_idx = 0
self.change_value.emit(self.consume)
            self.msleep(self._read_speed)  # sleep provided by QThread
self.mutex.unlock()
def toggle_status(self):
self._status = not self._status
if self._status:
self.cond.wakeAll()
def toggle_attack(self):
self._attack = not self._attack
def parameter(self, packet_num, device):
self.packet_num = packet_num
self.device = device
def set_speed(self, value):
self._read_speed = int(value)
@property
def status(self):
return self._status
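# Usage sketch: the GUI thread constructs MyThread, calls parameter(...) and start();
# run() blocks on cond.wait(mutex) whenever _status is False, and toggle_status()
# flips the flag and wakes it with cond.wakeAll().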
class MyApp(QWidget):
def __init__(self):
super().__init__()
self.prev_packet_num = 0
self.setupUI()
def setupUI(self):
self.setWindowTitle("Detection")
self.resize(740, 400)
        # main horizontal layout
self.main_horizontalLayout = QtWidgets.QHBoxLayout()
        # left vertical layout
self.left_verticalLayout = QtWidgets.QVBoxLayout()
        # area that displays the packets
self.scrollArea = QtWidgets.QScrollArea()
self.scrollArea.setWidgetResizable(True)
# self.packet_area = QPlainTextEdit()
# self.scrollArea.setWidget(self.packet_area)
        # table section begins
self.table = QTableWidget()
self.table.setSelectionMode(QAbstractItemView.SingleSelection)
        # the row and column counts must be set before the table widget can be used
self.table.setColumnCount(10)
self.table.setRowCount(0)
# column header
self.table.setHorizontalHeaderLabels(["ID"])
        self.table.horizontalHeaderItem(0).setTextAlignment(Qt.AlignCenter)  # header alignment
        self.table.setEditTriggers(QAbstractItemView.NoEditTriggers)  # disable editing
        self.table.setShowGrid(False)  # hide grid lines
        self.table.verticalHeader().setVisible(False)  # hide the row header
        # table section ends
self.scrollArea.setWidget(self.table)
self.left_verticalLayout.addWidget(self.scrollArea)
#
        # area that displays the accuracy
self.accuracy_horizontalLayout = QtWidgets.QHBoxLayout()
self.accuracy_groupBox = QtWidgets.QGroupBox()
self.accuracy_groupBox.setTitle("Log")
self.accuracy_formLayout = QtWidgets.QGridLayout()
# self.accuracy_formLayout.setRowStretch(0, 1)
# self.accuracy_formLayout.setRowStretch(2, 1)
# self.accuracy_formLayout.setRowStretch(4, 1)
self.now_accuracy = QLabel("?")
self.accuracy_formLayout.addWidget(QLabel("Accuracy:"), 0, 0)
self.accuracy_formLayout.addWidget(self.now_accuracy, 0, 1)
self.now_inference_time = QLabel("?")
self.accuracy_formLayout.addWidget(QLabel("Inference Time:"), 1, 0)
self.accuracy_formLayout.addWidget(self.now_inference_time, 1, 1)
self.accuracy_formLayout.setAlignment(Qt.AlignLeft)
self.accuracy_groupBox.setLayout(self.accuracy_formLayout)
self.accuracy_horizontalLayout.addWidget(self.accuracy_groupBox)
self.left_verticalLayout.addLayout(self.accuracy_horizontalLayout)
self.left_verticalLayout.setStretchFactor(self.scrollArea, 3)
self.left_verticalLayout.setStretchFactor(self.accuracy_horizontalLayout, 1)
#
        # end of the left side
self.main_horizontalLayout.addLayout(self.left_verticalLayout)
        # right side begins
        # right vertical layout
self.right_verticalLayout = QtWidgets.QVBoxLayout()
        # number of packets to read
self.parameter_groupBox = QtWidgets.QGroupBox()
self.parameter_groupBox.setTitle("Parameter")
        # grid inside the group box
self.parameter_formLayout = QtWidgets.QGridLayout()
self.packet_num_line = QLineEdit()
self.parameter_formLayout.addWidget(QLabel("Packet num:"), 0, 0)
self.parameter_formLayout.addWidget(self.packet_num_line, 0, 1)
self.parameter_formLayout.addWidget(QLabel("(1 ~ 1)"), 1, 0)
self.parameter_formLayout.addWidget(QLabel(""), 2, 0) # grid spacing ...?
        # combo box for selecting the CSV read speed
self.time_combo = QComboBox()
self.time_combo.addItems(["0.25s", "0.5s", "1.0s", "0.1s"])
self.parameter_formLayout.addWidget(QLabel("Packet read speed:"), 3, 0)
self.parameter_formLayout.addWidget(self.time_combo, 3, 1)
        # buttons
self.start_pushButton = QtWidgets.QPushButton("Start")
self.start_pushButton.setCheckable(True)
self.start_pushButton.toggled.connect(self.start_toggle)
self.attack_pushButton = QtWidgets.QPushButton("Attack")
self.attack_pushButton.setCheckable(True)
self.attack_pushButton.toggled.connect(self.attack_toggle)
self.parameter_formLayout.addWidget(QLabel(""), 4, 0) # grid spacing ...?
self.parameter_formLayout.addWidget(QLabel(""), 5, 0) # grid spacing ...?
# self.parameter_formLayout.setRowStretch(4, 1)
# self.parameter_formLayout.setRowStretch(2, 1)
# vspacer = QtGui.QSpacerItem(
# QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
# layout.addItem(vspacer, last_row, 0, 1, -1)
# hspacer = QtGui.QSpacerItem(
# QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
# layout.addItem(hspacer, 0, last_column, -1, 1)
self.parameter_formLayout.addWidget(self.start_pushButton, 6, 0)
self.parameter_formLayout.addWidget(self.attack_pushButton, 6, 1)
self.parameter_formLayout.setRowStretch(7, 1)
# self.parameter_formLayout.setVerticalSpacing(50)
# self.parameter_formLayout.setContentsMargins(5, 5, 5, 5) # left, top, right, bottom
self.parameter_groupBox.setLayout(self.parameter_formLayout)
self.right_verticalLayout.addWidget(self.parameter_groupBox)
self.main_horizontalLayout.addLayout(self.right_verticalLayout)
self.main_horizontalLayout.setStretchFactor(self.left_verticalLayout, 2)
self.main_horizontalLayout.setStretchFactor(self.right_verticalLayout, 1)
        # end of the right side
self.setLayout(self.main_horizontalLayout)
self.table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
self.show()
def start_demo(self):
        # validate the input
packet_num = self.packet_num_line.text()
if packet_num == '':
print("Empty Value Not Allowed")
self.packet_num_line.setFocus()
return
packet_num = int(packet_num)
if packet_num < 1 or packet_num > 1:
print("too many packet")
self.packet_num_line.setFocus()
return
else:
self.packet_num_line.clearFocus()
        # initialization
self.add_spanRow_text('Start!! Please wait')
        # the packet setting changed, so create a new inference thread
if self.prev_packet_num != packet_num:
self.prev_packet_num = packet_num
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.predict_thread = MyThread()
self.predict_thread.parameter(packet_num, device)
self.predict_thread.change_value.connect(self.update_line_edit)
self.predict_thread.start()
csv_read_speed = float(self.time_combo.currentText()[:-1])
self.predict_thread.set_speed(csv_read_speed * 1000)
def update_line_edit(self, consume):
if consume['type'] == 'start':
self.now_accuracy.setText(str(consume['acc']))
self.now_inference_time.setText(str(consume['time']))
            if consume['check'] == 'ok':  # correct prediction
color = QtGui.QColor(150, 255, 150) # Red, Green, Blue, Alpha
else:
color = QtGui.QColor(255, 150, 150)
next_row = self.table.rowCount()
            self.table.insertRow(next_row)  # add a row
col_idx = 0
for consume_packet in consume['packet']:
self.table.setItem(next_row, col_idx, QTableWidgetItem(str(consume_packet)))
self.table.item(next_row, col_idx).setBackground(color)
col_idx += 1
self.table.scrollToBottom()
else:
self.add_spanRow_text(consume['stop'])
def add_row_text(self, text):
next_row = self.table.rowCount()
        self.table.insertRow(next_row)  # add a row
self.table.setItem(next_row, 0, QTableWidgetItem(text))
self.table.scrollToBottom()
def add_spanRow_text(self, text):
next_row = self.table.rowCount()
        self.table.insertRow(next_row)  # add a row
        self.table.setSpan(next_row, 0, 1, 10)  # create a 1 x 10 span
self.table.setItem(next_row, 0, QTableWidgetItem(text))
self.table.scrollToBottom()
@pyqtSlot(bool)
def start_toggle(self, state):
# self.start_pushButton.setStyleSheet("background-color: %s" % ({True: "green", False: "red"}[state]))
self.start_pushButton.setText({True: "Stop", False: "Start"}[state])
self.packet_num_line.setEnabled({True: False, False: True}[state])
if state:
self.start_demo()
else:
# self.packet_area.appendPlainText('Trying to stop..')
self.add_spanRow_text('Trying to stop..')
self.predict_thread.toggle_status()
@pyqtSlot(bool)
def attack_toggle(self, state):
# self.attack_pushButton.setStyleSheet("background-color: %s" % ({True: "green", False: "red"}[state]))
self.attack_pushButton.setText({True: "Stop", False: "Attack"}[state])
self.predict_thread.toggle_attack()
if __name__ == "__main__":
app = QApplication(sys.argv)
# qtmodern.styles.light(app)
# app.setStyle(QStyleFactory.create('Fusion'))
ex = MyApp()
sys.exit(app.exec_())
```
#### File: Jihunn-Kim/khu_capstone_1/fed_train.py
```python
import copy
import argparse
import time
import math
import numpy as np
import os
from collections import OrderedDict
import torch
import torch.optim as optim
import torch.nn as nn
import model
import utils
import dataset
# for google colab reload
import importlib
importlib.reload(model)
importlib.reload(utils)
importlib.reload(dataset)
## paramter
# shared
criterion = nn.CrossEntropyLoss()
C = 0.1
#
# prox
mu = 0.001
#
# time weight
twa_exp = 1.1
#
# dynamic weight
H = 0.5
P = 0.1
G = 0.1
R = 0.1
alpha, beta, gamma = 40.0/100.0, 40.0/100.0, 20.0/100.0
#
## end
def add_args(parser):
parser.add_argument('--packet_num', type=int, default=1,
help='packet number used in training, 1 ~ 3 for OneNet')
parser.add_argument('--model', type=str, default='cnn',
                        help="model used for training, 'one' or 'cnn'")
parser.add_argument('--fold_num', type=int, default=0,
help='5-fold, 0 ~ 4')
parser.add_argument('--batch_size', type=int, default=128,
help='input batch size for training')
parser.add_argument('--lr', type=float, default=0.001,
help='learning rate')
parser.add_argument('--n_nets', type=int, default=100,
help='number of workers in a distributed cluster')
parser.add_argument('--comm_type', type=str, default='edge',
help='type of communication, [fedavg, fedprox, fedtwa, feddw, edge]')
parser.add_argument('--comm_round', type=int, default=50,
help='how many round of communications we shoud use')
parser.add_argument('--weight_save_path', type=str, default='./weights',
help='model weight save path')
args = parser.parse_args(args=[])
return args
def test_model(fed_model, args, testloader, device, cr):
fed_model.to(device)
fed_model.eval()
cnt = 0
step_acc = 0.0
with torch.no_grad():
if args.model == 'one':
packet_state = torch.zeros(args.batch_size, model.STATE_DIM).to(device)
for i, (inputs, labels) in enumerate(testloader):
inputs, labels = inputs.to(device), labels.to(device)
if args.model == 'one':
outputs, packet_state = fed_model(inputs, packet_state)
packet_state = torch.autograd.Variable(packet_state, requires_grad=False)
elif args.model == 'cnn':
outputs = fed_model(inputs)
_, preds = torch.max(outputs, 1)
cnt += inputs.shape[0]
corr_sum = torch.sum(preds == labels.data)
step_acc += corr_sum.double()
if i % 200 == 0:
print('test [%4d/%4d] acc: %.3f' % (i, len(testloader), (step_acc / cnt).item()))
# break
fed_accuracy = (step_acc / cnt).item()
print('test acc', fed_accuracy)
fed_model.to('cpu')
fed_model.train()
torch.save(fed_model.state_dict(), os.path.join(args.weight_save_path, '%s_%d_%.4f.pth' % (args.comm_type, cr, fed_accuracy)))
def start_fedavg(fed_model, args,
train_data_set,
data_idx_map,
net_data_count,
testloader,
edges,
device):
print("start fed avg")
num_edge = int(max(C * args.n_nets, 1))
total_data_count = 0
for _, data_count in net_data_count.items():
total_data_count += data_count
print("total data: %d" % total_data_count)
for cr in range(1, args.comm_round + 1):
print("Communication round : %d" % (cr))
np.random.seed(cr) # make sure for each comparison, select the same clients each round
selected_edge = np.random.choice(args.n_nets, num_edge, replace=False)
print("selected edge", selected_edge)
for edge_progress, edge_index in enumerate(selected_edge):
train_data_set.set_idx_map(data_idx_map[edge_index])
if args.model == 'one':
sampler = dataset.BatchIntervalSampler(len(train_data_set), args.batch_size)
train_loader = torch.utils.data.DataLoader(train_data_set, batch_size=args.batch_size, sampler=sampler,
shuffle=False, num_workers=2, drop_last=True)
elif args.model == 'cnn':
train_loader = torch.utils.data.DataLoader(train_data_set, batch_size=args.batch_size,
shuffle=False, num_workers=2)
print("[%2d/%2d] edge: %d, data len: %d" % (edge_progress, len(selected_edge), edge_index, len(train_data_set)))
edges[edge_index] = copy.deepcopy(fed_model)
edges[edge_index].to(device)
edges[edge_index].train()
edge_opt = optim.Adam(params=edges[edge_index].parameters(), lr=args.lr)
# train
if args.model == 'one':
packet_state = torch.zeros(args.batch_size, model.STATE_DIM).to(device)
for data_idx, (inputs, labels) in enumerate(train_loader):
inputs, labels = inputs.to(device), labels.to(device)
if args.model == 'one':
edge_pred, packet_state = edges[edge_index](inputs, packet_state)
packet_state = torch.autograd.Variable(packet_state, requires_grad=False)
elif args.model == 'cnn':
edge_pred = edges[edge_index](inputs)
edge_opt.zero_grad()
edge_loss = criterion(edge_pred, labels)
edge_loss.backward()
edge_opt.step()
edge_loss = edge_loss.item()
if data_idx % 100 == 0:
print('[%4d] loss: %.3f' % (data_idx, edge_loss))
# break
edges[edge_index].to('cpu')
# cal weight using fed avg
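        # FedAvg: w_global = sum_k (n_k / n_total) * w_k, applied entry-by-entry
        # over the collected client state_dicts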
update_state = OrderedDict()
for k, edge in enumerate(edges):
local_state = edge.state_dict()
for key in fed_model.state_dict().keys():
if k == 0:
update_state[key] = local_state[key] * (net_data_count[k] / total_data_count)
else:
update_state[key] += local_state[key] * (net_data_count[k] / total_data_count)
fed_model.load_state_dict(update_state)
if cr % 10 == 0 and cr > 35:
test_model(fed_model, args, testloader, device, cr)
def start_fedprox(fed_model, args,
train_data_set,
data_idx_map,
testloader,
device):
print("start fed prox")
num_edge = int(max(C * args.n_nets, 1))
fed_model.to(device)
for cr in range(1, args.comm_round + 1):
print("Communication round : %d" % (cr))
edge_weight_dict = {}
fed_weight_dict = {}
for fed_name, fed_param in fed_model.named_parameters():
edge_weight_dict[fed_name] = []
fed_weight_dict[fed_name] = fed_param
np.random.seed(cr) # make sure for each comparison, select the same clients each round
selected_edge = np.random.choice(args.n_nets, num_edge, replace=False)
print("selected edge", selected_edge)
for edge_progress, edge_index in enumerate(selected_edge):
train_data_set.set_idx_map(data_idx_map[edge_index])
if args.model == 'one':
sampler = dataset.BatchIntervalSampler(len(train_data_set), args.batch_size)
train_loader = torch.utils.data.DataLoader(train_data_set, batch_size=args.batch_size, sampler=sampler,
shuffle=False, num_workers=2, drop_last=True)
elif args.model == 'cnn':
train_loader = torch.utils.data.DataLoader(train_data_set, batch_size=args.batch_size,
shuffle=False, num_workers=2)
print("[%2d/%2d] edge: %d, data len: %d" % (edge_progress, len(selected_edge), edge_index, len(train_data_set)))
edge_model = copy.deepcopy(fed_model)
edge_model.to(device)
edge_model.train()
edge_opt = optim.Adam(params=edge_model.parameters(),lr=args.lr)
# train
packet_state = torch.zeros(args.batch_size, model.STATE_DIM).to(device)
for data_idx, (inputs, labels) in enumerate(train_loader):
inputs, labels = inputs.to(device), labels.to(device)
if args.model == 'one':
edge_pred, packet_state = edge_model(inputs, packet_state)
packet_state = torch.autograd.Variable(packet_state, requires_grad=False)
elif args.model == 'cnn':
edge_pred = edge_model(inputs)
edge_opt.zero_grad()
edge_loss = criterion(edge_pred, labels)
# prox term
fed_prox_reg = 0.0
for edge_name, edge_param in edge_model.named_parameters():
fed_prox_reg += ((mu / 2) * torch.norm((fed_weight_dict[edge_name] - edge_param))**2)
edge_loss += fed_prox_reg
edge_loss.backward()
edge_opt.step()
edge_loss = edge_loss.item()
if data_idx % 100 == 0:
print('[%4d] loss: %.3f' % (data_idx, edge_loss))
# break
edge_model.to('cpu')
# save edge weight
for edge_name, edge_param in edge_model.named_parameters():
edge_weight_dict[edge_name].append(edge_param)
fed_model.to('cpu')
# cal weight, / number of edge
for fed_name, fed_param in fed_model.named_parameters():
fed_param.data.copy_( sum(weight / num_edge for weight in edge_weight_dict[fed_name]) )
fed_model.to(device)
if cr % 10 == 0:
test_model(fed_model, args, testloader, device, cr)
fed_model.to(device)
def start_fedtwa(fed_model, args,
train_data_set,
data_idx_map,
net_data_count,
testloader,
edges,
device):
# TEFL, without asynchronous model update
print("start fed temporally weighted aggregation")
time_stamp = [0 for worker in range(args.n_nets)]
num_edge = int(max(C * args.n_nets, 1))
total_data_count = 0
for _, data_count in net_data_count.items():
total_data_count += data_count
print("total data: %d" % total_data_count)
for cr in range(1, args.comm_round + 1):
print("Communication round : %d" % (cr))
np.random.seed(cr) # make sure for each comparison, select the same clients each round
selected_edge = np.random.choice(args.n_nets, num_edge, replace=False)
print("selected edge", selected_edge)
for edge_progress, edge_index in enumerate(selected_edge):
time_stamp[edge_index] = cr
train_data_set.set_idx_map(data_idx_map[edge_index])
if args.model == 'one':
sampler = dataset.BatchIntervalSampler(len(train_data_set), args.batch_size)
train_loader = torch.utils.data.DataLoader(train_data_set, batch_size=args.batch_size, sampler=sampler,
shuffle=False, num_workers=2, drop_last=True)
elif args.model == 'cnn':
train_loader = torch.utils.data.DataLoader(train_data_set, batch_size=args.batch_size,
shuffle=False, num_workers=2)
print("[%2d/%2d] edge: %d, data len: %d" % (edge_progress, len(selected_edge), edge_index, len(train_data_set)))
edges[edge_index] = copy.deepcopy(fed_model)
edges[edge_index].to(device)
edges[edge_index].train()
edge_opt = optim.Adam(params=edges[edge_index].parameters(), lr=args.lr)
# train
packet_state = torch.zeros(args.batch_size, model.STATE_DIM).to(device)
for data_idx, (inputs, labels) in enumerate(train_loader):
inputs, labels = inputs.float().to(device), labels.long().to(device)
if args.model == 'one':
edge_pred, packet_state = edges[edge_index](inputs, packet_state)
packet_state = torch.autograd.Variable(packet_state, requires_grad=False)
elif args.model == 'cnn':
edge_pred = edges[edge_index](inputs)
edge_opt.zero_grad()
edge_loss = criterion(edge_pred, labels)
edge_loss.backward()
edge_opt.step()
edge_loss = edge_loss.item()
if data_idx % 100 == 0:
print('[%4d] loss: %.3f' % (data_idx, edge_loss))
# break
edges[edge_index].to('cpu')
        # compute aggregation weights from the clients' time stamps
        # the paper uses (cr - time_stamp[k]); that gave a higher error here, so an offset of 2 is applied below
update_state = OrderedDict()
for k, edge in enumerate(edges):
local_state = edge.state_dict()
for key in fed_model.state_dict().keys():
if k == 0:
                    update_state[key] = local_state[key] * (net_data_count[k] / total_data_count) * math.pow(twa_exp, -(cr - 2 - time_stamp[k]))
                else:
                    update_state[key] += local_state[key] * (net_data_count[k] / total_data_count) * math.pow(twa_exp, -(cr - 2 - time_stamp[k]))
fed_model.load_state_dict(update_state)
if cr % 10 == 0:
test_model(fed_model, args, testloader, device, cr)
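# Illustrative sketch (not from the original repository; twa_exp is defined elsewhere
# in this file and math.e is assumed here only for the numbers below): the exponent
# -(cr - 2 - time_stamp[k]) boosts clients whose weights were refreshed recently.
def _twa_factor_example(cr, last_selected_round, twa_exp=math.e):  # hypothetical helper
    return math.pow(twa_exp, -(cr - 2 - last_selected_round))
# e.g. at cr = 5 a client selected in round 4 contributes ~2.72x its base share,
# while one last selected in round 1 contributes only ~0.14x.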
def start_feddw(fed_model, args,
train_data_set,
data_idx_map,
net_data_count,
testloader,
local_test_loader,
edges,
device):
print("start fed Node-aware Dynamic Weighting")
worker_selected_frequency = [0 for worker in range(args.n_nets)]
num_edge = int(max(G * args.n_nets, 1))
# cal data weight for selecting participants
total_data_count = 0
for _, data_count in net_data_count.items():
total_data_count += data_count
print("total data: %d" % total_data_count)
total_data_weight = 0.0
net_weight_dict = {}
for net_key, data_count in net_data_count.items():
net_data_count[net_key] = data_count / total_data_count
net_weight_dict[net_key] = total_data_count / data_count
total_data_weight += net_weight_dict[net_key]
for net_key, data_count in net_weight_dict.items():
net_weight_dict[net_key] = net_weight_dict[net_key] / total_data_weight
# end
worker_local_accuracy = [0 for worker in range(args.n_nets)]
for cr in range(1, args.comm_round + 1):
print("Communication round : %d" % (cr))
# select participants
candidates = []
sum_frequency = sum(worker_selected_frequency)
if sum_frequency == 0:
sum_frequency = 1
for worker_index in range(args.n_nets):
candidates.append((H * worker_selected_frequency[worker_index] / sum_frequency + (1 - H) * net_weight_dict[worker_index], worker_index))
candidates = sorted(candidates)[:int(R * args.n_nets)]
candidates = [temp[1] for temp in candidates]
np.random.seed(cr)
selected_edge = np.random.choice(candidates, num_edge, replace=False)
# end select
# weighted frequency
avg_selected_frequency = sum(worker_selected_frequency) / len(worker_selected_frequency)
weighted_frequency = [P * (avg_selected_frequency - worker_frequency) for worker_frequency in worker_selected_frequency]
frequency_prime = min(weighted_frequency)
weighted_frequency = [frequency + frequency_prime + 1 for frequency in weighted_frequency]
        # end weighted
print("selected edge", selected_edge)
for edge_progress, edge_index in enumerate(selected_edge):
worker_selected_frequency[edge_index] += 1
train_data_set.set_idx_map(data_idx_map[edge_index])
if args.model == 'one':
sampler = dataset.BatchIntervalSampler(len(train_data_set), args.batch_size)
train_loader = torch.utils.data.DataLoader(train_data_set, batch_size=args.batch_size, sampler=sampler,
shuffle=False, num_workers=2, drop_last=True)
elif args.model == 'cnn':
train_loader = torch.utils.data.DataLoader(train_data_set, batch_size=args.batch_size,
shuffle=False, num_workers=2)
print("[%2d/%2d] edge: %d, data len: %d" % (edge_progress, len(selected_edge), edge_index, len(train_data_set)))
edges[edge_index] = copy.deepcopy(fed_model)
edges[edge_index].to(device)
edges[edge_index].train()
edge_opt = optim.Adam(params=edges[edge_index].parameters(), lr=args.lr)
# train
if args.model == 'one':
packet_state = torch.zeros(args.batch_size, model.STATE_DIM).to(device)
for data_idx, (inputs, labels) in enumerate(train_loader):
inputs, labels = inputs.to(device), labels.to(device)
if args.model == 'one':
edge_pred, packet_state = edges[edge_index](inputs, packet_state)
packet_state = torch.autograd.Variable(packet_state, requires_grad=False)
elif args.model == 'cnn':
edge_pred = edges[edge_index](inputs)
edge_opt.zero_grad()
edge_loss = criterion(edge_pred, labels)
edge_loss.backward()
edge_opt.step()
edge_loss = edge_loss.item()
if data_idx % 100 == 0:
print('[%4d] loss: %.3f' % (data_idx, edge_loss))
# break
# get edge accuracy using subset of testset
edges[edge_index].eval()
print("[%2d/%2d] edge: %d, cal local accuracy" % (edge_progress, len(selected_edge), edge_index))
cnt = 0
step_acc = 0.0
with torch.no_grad():
if args.model == 'one':
packet_state = torch.zeros(args.batch_size, model.STATE_DIM).to(device)
for inputs, labels in local_test_loader:
inputs, labels = inputs.to(device), labels.to(device)
if args.model == 'one':
edge_pred, packet_state = edges[edge_index](inputs, packet_state)
packet_state = torch.autograd.Variable(packet_state, requires_grad=False)
elif args.model == 'cnn':
edge_pred = edges[edge_index](inputs)
_, preds = torch.max(edge_pred, 1)
loss = criterion(edge_pred, labels)
cnt += inputs.shape[0]
corr_sum = torch.sum(preds == labels.data)
step_acc += corr_sum.double()
# break
worker_local_accuracy[edge_index] = (step_acc / cnt).item()
print('edge local accuracy', worker_local_accuracy[edge_index])
edges[edge_index].to('cpu')
# cal weight dynamically
sum_accuracy = sum(worker_local_accuracy)
sum_weighted_frequency = sum(weighted_frequency)
update_state = OrderedDict()
for k, edge in enumerate(edges):
local_state = edge.state_dict()
for key in fed_model.state_dict().keys():
if k == 0:
update_state[key] = local_state[key] \
* (net_data_count[k] * alpha \
+ worker_local_accuracy[k] / sum_accuracy * beta \
+ weighted_frequency[k] / sum_weighted_frequency * gamma)
else:
update_state[key] += local_state[key] \
* (net_data_count[k] * alpha \
+ worker_local_accuracy[k] / sum_accuracy * beta \
+ weighted_frequency[k] / sum_weighted_frequency * gamma)
fed_model.load_state_dict(update_state)
if cr % 10 == 0:
test_model(fed_model, args, testloader, device, cr)
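# Illustrative sketch (alpha, beta and gamma are defined elsewhere in this file; the
# 0.4/0.4/0.2 split below is only an assumed example): FedDW mixes each client's data
# share, local-accuracy share and (inverted) selection-frequency share into one weight.
def _feddw_weight_example(data_share, accuracy_share, frequency_share,
                          alpha=0.4, beta=0.4, gamma=0.2):  # hypothetical constants
    return data_share * alpha + accuracy_share * beta + frequency_share * gamma
# e.g. _feddw_weight_example(0.5, 0.3, 0.2) ~= 0.36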
def start_only_edge(args,
train_data_set,
data_idx_map,
testloader,
edges,
device):
print("start only edge")
total_epoch = int(args.comm_round * C)
for cr in range(1, total_epoch + 1):
print("Edge round : %d" % (cr))
edge_accuracy_list = []
for edge_index, edge_model in enumerate(edges):
train_data_set.set_idx_map(data_idx_map[edge_index])
if args.model == 'one':
sampler = dataset.BatchIntervalSampler(len(train_data_set), args.batch_size)
train_loader = torch.utils.data.DataLoader(train_data_set, batch_size=args.batch_size, sampler=sampler,
shuffle=False, num_workers=2, drop_last=True)
elif args.model == 'cnn':
train_loader = torch.utils.data.DataLoader(train_data_set, batch_size=args.batch_size,
shuffle=False, num_workers=2)
print("edge[%2d/%2d] data len: %d" % (edge_index, len(edges), len(train_data_set)))
edge_model.to(device)
edge_model.train()
edge_opt = optim.Adam(params=edge_model.parameters(),lr=args.lr)
# train
packet_state = torch.zeros(args.batch_size, model.STATE_DIM).to(device)
for data_idx, (inputs, labels) in enumerate(train_loader):
inputs, labels = inputs.float().to(device), labels.long().to(device)
if args.model == 'one':
edge_pred, packet_state = edge_model(inputs, packet_state)
packet_state = torch.autograd.Variable(packet_state, requires_grad=False)
elif args.model == 'cnn':
edge_pred = edge_model(inputs)
edge_opt.zero_grad()
edge_loss = criterion(edge_pred, labels)
edge_loss.backward()
edge_opt.step()
edge_loss = edge_loss.item()
if data_idx % 100 == 0:
print('[%4d] loss: %.3f' % (data_idx, edge_loss))
# break
# test
if cr < 4:
continue
edge_model.eval()
total_loss = 0.0
cnt = 0
step_acc = 0.0
with torch.no_grad():
if args.model == 'one':
packet_state = torch.zeros(args.batch_size, model.STATE_DIM).to(device)
for i, (inputs, labels) in enumerate(testloader):
inputs, labels = inputs.float().to(device), labels.long().to(device)
if args.model == 'one':
outputs, packet_state = edge_model(inputs, packet_state)
packet_state = torch.autograd.Variable(packet_state, requires_grad=False)
elif args.model == 'cnn':
outputs = edge_model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
cnt += inputs.shape[0]
corr_sum = torch.sum(preds == labels.data)
step_acc += corr_sum.double()
running_loss = loss.item() * inputs.shape[0]
total_loss += running_loss
if i % 200 == 0:
print('test [%4d] loss: %.3f' % (i, loss.item()))
# break
edge_accuracy = (step_acc / cnt).item()
edge_accuracy_list.append(edge_accuracy)
print("edge[%2d/%2d] acc: %.4f" % (edge_index, len(edges), edge_accuracy))
edge_model.to('cpu')
if cr < 4:
continue
edge_accuracy_avg = sum(edge_accuracy_list) / len(edge_accuracy_list)
torch.save(edges[0].state_dict(), os.path.join(args.weight_save_path, 'edge_%d_%.4f.pth' % (cr, edge_accuracy_avg)))
def start_train():
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('device:', device)
args = add_args(argparse.ArgumentParser())
# make weight folder
os.makedirs(args.weight_save_path, exist_ok=True)
    # for reproducibility
seed = 0
np.random.seed(seed)
torch.manual_seed(seed)
print("Loading data...")
if args.model == 'one':
train_data_set, data_idx_map, net_data_count, test_data_set = dataset.GetCanDataset(args.n_nets, args.fold_num, args.packet_num, "./dataset/Mixed_dataset.csv", "./dataset/Mixed_dataset_1.txt")
sampler = dataset.BatchIntervalSampler(len(test_data_set), args.batch_size)
testloader = torch.utils.data.DataLoader(test_data_set, batch_size=args.batch_size, sampler=sampler,
shuffle=False, num_workers=2, drop_last=True)
elif args.model == 'cnn':
train_data_set, data_idx_map, net_data_count, test_data_set = dataset.GetCanDatasetCNN(args.n_nets, args.fold_num, "./dataset/Mixed_dataset.csv", "./dataset/Mixed_dataset_CNN8.txt")
testloader = torch.utils.data.DataLoader(test_data_set, batch_size=args.batch_size,
shuffle=False, num_workers=2)
if args.model == 'one':
fed_model = model.OneNet(8, args.packet_num)
edges = [model.OneNet(8, args.packet_num) for _ in range(args.n_nets)]
elif args.model == 'cnn':
fed_model = model.CnnNet()
edges = [model.CnnNet() for _ in range(args.n_nets)]
if args.comm_type == "fedavg":
start_fedavg(fed_model, args,
train_data_set,
data_idx_map,
net_data_count,
testloader,
edges,
device)
elif args.comm_type == "fedprox":
start_fedprox(fed_model, args,
train_data_set,
data_idx_map,
testloader,
device)
elif args.comm_type == "fedtwa":
start_fedtwa(fed_model, args,
train_data_set,
data_idx_map,
net_data_count,
testloader,
edges,
device)
elif args.comm_type == "feddw":
local_test_set = copy.deepcopy(test_data_set)
        # in the paper (MNIST): train 60,000 / test 10,000, local test subset 1,000 (10% of the test set)
        # here (CAN): train ~1,400,000 / test 300,000, local test subset 15,000 (5% of the test set) for speed
local_test_idx = [idx for idx in range(0, len(local_test_set) // 20)]
local_test_set.set_idx_map(local_test_idx)
sampler = dataset.BatchIntervalSampler(len(local_test_set), args.batch_size)
local_test_loader = torch.utils.data.DataLoader(local_test_set, batch_size=args.batch_size, sampler=sampler,
shuffle=False, num_workers=2, drop_last=True)
start_feddw(fed_model, args,
train_data_set,
data_idx_map,
net_data_count,
testloader,
local_test_loader,
edges,
device)
elif args.comm_type == "edge":
start_only_edge(args,
train_data_set,
data_idx_map,
testloader,
edges,
device)
if __name__ == "__main__":
start_train()
```
#### File: Jihunn-Kim/khu_capstone_1/model.py
```python
import torch.nn as nn
import torch
import const
import densenet
STATE_DIM = 8 * 32
class OneNet(nn.Module):
def __init__(self, input_dim, packet_num):
super(OneNet, self).__init__()
IN_DIM = input_dim * packet_num # byte
FEATURE_DIM = 32
# transform the given packet into a tensor which is in a good feature space
self.feature_layer = nn.Sequential(
nn.Linear(IN_DIM, 32),
nn.ReLU(),
nn.Linear(32, FEATURE_DIM),
nn.ReLU()
)
# generates the current state 's'
self.f = nn.Sequential(
nn.Linear(STATE_DIM + FEATURE_DIM, STATE_DIM),
nn.ReLU(),
nn.Linear(STATE_DIM, STATE_DIM),
nn.ReLU()
)
# check whether the given packet is malicious
self.g = nn.Sequential(
nn.Linear(STATE_DIM + FEATURE_DIM, 64),
nn.ReLU(),
nn.Linear(64, 64),
nn.ReLU(),
nn.Linear(64, 2),
)
def forward(self, x, s):
x = self.feature_layer(x)
x = torch.cat((x, s), 1)
s2 = self.f(x)
x2 = self.g(x)
return x2, s2
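# Usage sketch (illustrative only, not part of the original training code): OneNet is
# stateful, so callers thread the packet state tensor through successive forward calls.
def _onenet_usage_example(batch_size=4, packet_num=1):  # hypothetical helper
    net = OneNet(input_dim=8, packet_num=packet_num)
    state = torch.zeros(batch_size, STATE_DIM)
    packets = torch.randn(batch_size, 8 * packet_num)
    logits, state = net(packets, state)  # logits: (batch, 2), state: (batch, STATE_DIM)
    return logits, state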
class CnnNet(nn.Module):
def __init__(self):
super(CnnNet, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(1, 2, 3, padding=1),
nn.ReLU(True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.conv2 = nn.Sequential(
nn.Conv2d(2, 4, 3, padding=1),
nn.ReLU(True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.conv3 = nn.Sequential(
nn.Conv2d(4, 8, 3, padding=1),
nn.ReLU(True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.fc4 = nn.Linear(8, 2)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = torch.flatten(x, 1)
x = self.fc4(x)
return x
class DenseNet(nn.Module):
def __init__(self):
        super(DenseNet, self).__init__()
cnn_model = densenet.DenseNet(num_classes=2)
self.features = nn.Sequential(
cnn_model
)
def forward(self, x):
x = self.features(x)
return x
```
#### File: khu_capstone_1/quantization/model.py
```python
import torch.nn as nn
import torch.nn.functional as F
import torch
from torch.quantization import QuantStub, DeQuantStub
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.quant = QuantStub()
self.dequant = DeQuantStub()
self.conv1 = nn.Sequential(
nn.Conv2d(1, 8, 3),
nn.ReLU(True),
)
self.conv2 = nn.Sequential(
nn.Conv2d(8, 8, 3),
nn.ReLU(True),
)
self.conv3 = nn.Sequential(
nn.Conv2d(8, 8, 3),
nn.ReLU(True),
)
self.fc4 = nn.Linear(8 * 23 * 23, 2)
def forward(self, x):
x = self.quant(x)
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = torch.flatten(x, 1)
x = self.fc4(x)
x = self.dequant(x)
return x
def fuse_model(self):
for m in self.modules():
if type(m) == nn.Sequential:
torch.quantization.fuse_modules(m, ['0', '1'], inplace=True)
```
#### File: khu_capstone_1/quantization/tensorRT_test.py
```python
import tensorrt as trt
import pycuda.driver as cuda
import numpy as np
import torch
import pycuda.autoinit
import dataset
import model
import time
# print(dir(trt))
tensorrt_file_name = 'bert.plan'
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
trt_runtime = trt.Runtime(TRT_LOGGER)
with open(tensorrt_file_name, 'rb') as f:
engine_data = f.read()
engine = trt_runtime.deserialize_cuda_engine(engine_data)
context = engine.create_execution_context()
# class HostDeviceMem(object):
# def __init__(self, host_mem, device_mem):
# self.host = host_mem
# self.device = device_mem
# def __str__(self):
# return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)
# def __repr__(self):
# return self.__str__()
# inputs, outputs, bindings, stream = [], [], [], []
# for binding in engine:
# size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
# dtype = trt.nptype(engine.get_binding_dtype(binding))
# host_mem = cuda.pagelocked_empty(size, dtype)
# device_mem = cuda.mem_alloc(host_mem.nbytes)
# bindings.append(int(device_mem))
# if engine.binding_is_input(binding):
# inputs.append( HostDeviceMem(host_mem, device_mem) )
# else:
# outputs.append(HostDeviceMem(host_mem, device_mem))
# input_ids = np.ones([1, 1, 29, 29])
# numpy_array_input = [input_ids]
# hosts = [input.host for input in inputs]
# trt_types = [trt.int32]
# for numpy_array, host, trt_types in zip(numpy_array_input, hosts, trt_types):
# numpy_array = np.asarray(numpy_array).ravel()
# np.copyto(host, numpy_array)
# def do_inference(context, bindings, inputs, outputs, stream):
# [cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
# context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
# [cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
# stream.synchronize()
# return [out.host for out in outputs]
# trt_outputs = do_inference(
# context=context,
# bindings=bindings,
# inputs=inputs,
# outputs=outputs,
# stream=stream)
def infer(context, input_img, output_size, batch_size):
# Load engine
# engine = context.get_engine()
# assert(engine.get_nb_bindings() == 2)
# Convert input data to float32
input_img = input_img.astype(np.float32)
# Create host buffer to receive data
output = np.empty(output_size, dtype = np.float32)
# Allocate device memory
d_input = cuda.mem_alloc(batch_size * input_img.size * input_img.dtype.itemsize)
d_output = cuda.mem_alloc(batch_size * output.size * output.dtype.itemsize)
bindings = [int(d_input), int(d_output)]
stream = cuda.Stream()
# Transfer input data to device
cuda.memcpy_htod_async(d_input, input_img, stream)
# Execute model
context.execute_async(batch_size, bindings, stream.handle, None)
# Transfer predictions back
cuda.memcpy_dtoh_async(output, d_output, stream)
# Synchronize threads
stream.synchronize()
# Return predictions
return output
# kwargs = {"./dataset/DoS_dataset.csv" : './DoS_dataset.txt'}
# train_data_set, data_idx_map, net_class_count, net_data_count, test_data_set = dataset.GetCanDatasetUsingTxtKwarg(100, 0, **kwargs)
# testloader = torch.utils.data.DataLoader(test_data_set, batch_size=256,
# shuffle=False, num_workers=2)
check_time = time.time()
cnt = 0
temp = np.ones([256, 1, 29, 29])
for idx in range(100):
# for i, (inputs, labels) in enumerate(testloader):
trt_outputs = infer(context, temp, (256, 2), 256)
print(trt_outputs.shape)
# print(trt_outputs)
# print(np.argmax(trt_outputs, axis=0))
# cnt += 1
# if cnt == 100:
# break
print(time.time() - check_time)
tensorrt_file_name = 'bert_int.plan'
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
trt_runtime = trt.Runtime(TRT_LOGGER)
with open(tensorrt_file_name, 'rb') as f:
engine_data = f.read()
engine = trt_runtime.deserialize_cuda_engine(engine_data)
context = engine.create_execution_context()
check_time = time.time()
cnt = 0
temp = np.ones([256, 1, 29, 29])
for idx in range(100):
# for i, (inputs, labels) in enumerate(testloader):
trt_outputs = infer(context, temp, (256, 2), 256)
print(trt_outputs.shape)
# print(trt_outputs)
# print(np.argmax(trt_outputs, axis=0))
# cnt += 1
# if cnt == 100:
# break
print(time.time() - check_time)
test_model = model.Net().cuda()
check_time = time.time()
cnt = 0
temp = torch.randn(256, 1, 29, 29).cuda()
for idx in range(100):
# for i, (inputs, labels) in enumerate(testloader):
# inputs = inputs.float().cuda()
normal_outputs = test_model(temp)
# print(normal_outputs)
print(normal_outputs.shape)
cnt += 1
if cnt == 100:
break
print(time.time() - check_time)
import tensorrt as trt
import numpy as np
import pycuda.autoinit
import pycuda.driver as cuda
import time
model_path = "bert.onnx"
input_size = 32
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
# def build_engine(model_path):
# with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, trt.OnnxParser(network, TRT_LOGGER) as parser:
# builder.max_workspace_size = 1<<20
# builder.max_batch_size = 1
# with open(model_path, "rb") as f:
# parser.parse(f.read())
# engine = builder.build_cuda_engine(network)
# return engine
def alloc_buf(engine):
# host cpu mem
h_in_size = trt.volume(engine.get_binding_shape(0))
h_out_size = trt.volume(engine.get_binding_shape(1))
h_in_dtype = trt.nptype(engine.get_binding_dtype(0))
h_out_dtype = trt.nptype(engine.get_binding_dtype(1))
in_cpu = cuda.pagelocked_empty(h_in_size, h_in_dtype)
out_cpu = cuda.pagelocked_empty(h_out_size, h_out_dtype)
# allocate gpu mem
in_gpu = cuda.mem_alloc(in_cpu.nbytes)
out_gpu = cuda.mem_alloc(out_cpu.nbytes)
stream = cuda.Stream()
return in_cpu, out_cpu, in_gpu, out_gpu, stream
def inference(engine, context, inputs, out_cpu, in_gpu, out_gpu, stream):
# async version
# with engine.create_execution_context() as context: # cost time to initialize
# cuda.memcpy_htod_async(in_gpu, inputs, stream)
# context.execute_async(1, [int(in_gpu), int(out_gpu)], stream.handle, None)
# cuda.memcpy_dtoh_async(out_cpu, out_gpu, stream)
# stream.synchronize()
# sync version
cuda.memcpy_htod(in_gpu, inputs)
context.execute(1, [int(in_gpu), int(out_gpu)])
cuda.memcpy_dtoh(out_cpu, out_gpu)
return out_cpu
if __name__ == "__main__":
inputs = np.random.random((1, 1, 29, 29)).astype(np.float32)
tensorrt_file_name = '/content/drive/My Drive/capstone1/CAN/bert.plan'
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
trt_runtime = trt.Runtime(TRT_LOGGER)
with open(tensorrt_file_name, 'rb') as f:
engine_data = f.read()
engine = trt_runtime.deserialize_cuda_engine(engine_data)
# engine = build_engine(model_path)
context = engine.create_execution_context()
for _ in range(10):
t1 = time.time()
in_cpu, out_cpu, in_gpu, out_gpu, stream = alloc_buf(engine)
res = inference(engine, context, inputs.reshape(-1), out_cpu, in_gpu, out_gpu, stream)
print(res)
print("cost time: ", time.time()-t1)
``` |
{
"source": "jihunroh/ProjectEuler-Python",
"score": 3
} |
#### File: ProjectEuler-Python/ProjectEulerCommons/Factors.py
```python
from math import sqrt
from ProjectEulerCommons.Base import prod
from ProjectEulerCommons.PrimeNumbers import generate_prime_below
def factorize(n):
FactorsList = [1, n]
    for divisor in range(2, int(sqrt(n)) + 1):
        if n % divisor == 0:
            FactorsList.append(divisor)
            if n // divisor != divisor:
                FactorsList.append(n // divisor)
FactorsList.sort()
return FactorsList
def prime_factorize(n):
prime_factor_dict = {}
while n % 2 == 0:
try:
prime_factor_dict[2] += 1
except KeyError:
prime_factor_dict[2] = 1
        n = n // 2  # integer division keeps n exact for large inputs
divisor = 3
while not n == 1:
if n % divisor == 0:
try:
prime_factor_dict[divisor] += 1
except KeyError:
prime_factor_dict[divisor] = 1
            n = n // divisor
else:
divisor += 2
return prime_factor_dict
def get_LCM(number_list):
factors, result = {}, 1
prime_factors_dict_list = [prime_factorize(x) for x in number_list]
for prime_factors_dict in prime_factors_dict_list:
for factor in prime_factors_dict.keys():
factors[factor] = max([prime_factors_dict2.get(factor, 0) for prime_factors_dict2 in prime_factors_dict_list])
for factor, power in factors.items():
result *= factor**power
return result
def get_GCD(numberlist):
return int(prod(numberlist) / get_LCM(numberlist))
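# Usage sketch (not part of the original module): the LCM keeps the highest power of
# every prime across the inputs, and the product/LCM identity used by get_GCD only
# holds for exactly two numbers.
def _lcm_gcd_example():  # illustration only
    assert get_LCM([4, 6]) == 12  # 2**2 * 3
    assert get_GCD([4, 6]) == 2   # gcd(a, b) * lcm(a, b) == a * b for two numbers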
```
#### File: ProjectEuler-Python/ProjectEulerCommons/PalindromicNumbers.py
```python
def is_palindromic(n):
n = str(n)
length = len(n)
for i in range(0, length):
if not n[i] == n[length - i - 1]:
return False
return True
```
#### File: jihunroh/ProjectEuler-Python/ProjectEuler.Problem.018.py
```python
from ProjectEulerCommons.Base import *
triangle = [list(map(int, line.split(' '))) + [0] * (15 - 1 - i) for i, line in enumerate(
"""75
95 64
17 47 82
18 35 87 10
20 04 82 47 65
19 01 23 75 03 34
88 02 77 73 07 63 67
99 65 04 28 06 16 70 92
41 41 26 56 83 40 80 70 33
41 48 72 33 47 32 37 16 94 29
53 71 44 65 25 43 91 52 97 51 14
70 11 33 28 77 73 17 78 39 68 17 57
91 71 52 38 17 14 91 43 58 50 27 29 48
63 66 04 68 89 53 67 30 73 16 69 87 40 31
04 62 98 27 23 09 70 98 73 93 38 53 60 04 23""".splitlines())]
def reduce_to_local_maximum_path(triangle):
if len(triangle) == 1:
return triangle[0][0]
else:
        return reduce_to_local_maximum_path(triangle[0:len(triangle) - 2] + [[step + max(triangle[-1][i:i+2]) for i, step in enumerate(triangle[-2]) if step != 0]])
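# Worked illustration (not part of the original solution): each step folds the bottom
# row into the row above, keeping the larger of the two children, so the zero-padded
# triangle [[3, 0, 0], [7, 4, 0], [2, 4, 6]] reduces along the path 3 + 7 + 4 = 14.
assert reduce_to_local_maximum_path([[3, 0, 0], [7, 4, 0], [2, 4, 6]]) == 14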
Answer(
reduce_to_local_maximum_path(triangle)
)
"""
------------------------------------------------
ProjectEuler.Problem.018.py
The Answer is: 1074
Time Elasped: 0.005049943923950195sec
------------------------------------------------
"""
```
#### File: jihunroh/ProjectEuler-Python/ProjectEuler.Problem.028.py
```python
from ProjectEulerCommons.Base import *
def leftup(n): # 1, 7, 21, 43
return 4 * n ** 2 - 6 * n + 3
def leftdown(n): # 1, 5, 17, 37
return 4 * n ** 2 - 8 * n + 5
def rightup(n): # 1, 9, 25, 49
return 4 * n ** 2 - 4 * n + 1
def rightdown(n): # 1, 3, 13, 31
return 4 * n ** 2 - 10 * n + 7
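# Sanity check (illustrative, not in the original file): for the second ring of the
# spiral (the 3x3 layer, n = 2) the four corner values are 3, 5, 7 and 9.
assert (rightdown(2), leftdown(2), leftup(2), rightup(2)) == (3, 5, 7, 9)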
Answer(
1 + sum([sum([leftup(i), leftdown(i), rightup(i), rightdown(i)]) for i in range(2, int(1001 / 2) + 2)])
)
"""
------------------------------------------------
ProjectEuler.Problem.028.py
The Answer is: 669171001
Time Elasped: 0.0069446563720703125sec
------------------------------------------------
"""
```
#### File: jihunroh/ProjectEuler-Python/ProjectEuler.Problem.037.py
```python
from ProjectEulerCommons.Base import *
from ProjectEulerCommons.PrimeNumbers import generate_prime, is_prime
def is_truncatable_prime(n):
    if n in [2, 3, 5, 7]:  # single-digit primes are excluded by the problem statement
        return False
    truncations = [int(str(n)[i:]) for i in range(1, len(str(n)))] + [int(str(n)[:j]) for j in range(1, len(str(n)))]
    for truncated in truncations:
        if not is_prime(truncated):
            return False
return True
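# Illustrative check (not part of the original solution): 3797 remains prime under
# every truncation from either side (797, 97, 7 and 379, 37, 3), so it qualifies.
assert is_truncatable_prime(3797)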
Answer(
sum([num for num in islice(filter(is_truncatable_prime, generate_prime()), 11)])
)
"""
------------------------------------------------
ProjectEuler.Problem.037.py
The Answer is: 748317
Time Elasped: 7.705394268035889sec
------------------------------------------------
"""
```
#### File: jihunroh/ProjectEuler-Python/ProjectEuler.Problem.055.py
```python
from ProjectEulerCommons.Base import *
from ProjectEulerCommons.PalindromicNumbers import is_palindromic
def is_Lychrel_number(n):
num_processed = n
for count_process in range(1, 50):
num_processed = num_processed + int(''.join(reversed(str(num_processed))))
if is_palindromic(num_processed):
return False
return True
Answer(
quantify(range(10000 + 1), is_Lychrel_number)
)
"""
------------------------------------------------
ProjectEuler.Problem.055.py
The Answer is: 249
Time Elasped: 0.10667896270751953sec
------------------------------------------------
"""
```
#### File: jihunroh/ProjectEuler-Python/ProjectEuler.Problem.067.py
```python
from ProjectEulerCommons.Base import *
with open('ProjectEuler.Problem.067.triangle.txt', 'r') as f:
triangle = [list(map(int, line.split(' '))) + [0] * (15 - 1 - i) for i, line in enumerate([line.replace('\n', '') for line in f.readlines()])]
def reduce_to_local_maximum_path(triangle):
if len(triangle) == 1:
return triangle[0][0]
else:
        return reduce_to_local_maximum_path(triangle[0:len(triangle) - 2] + [[step + max(triangle[-1][i:i+2]) for i, step in enumerate(triangle[-2]) if step != 0]])
Answer(
reduce_to_local_maximum_path(triangle)
)
"""
------------------------------------------------
ProjectEuler.Problem.067.py
The Answer is: 7273
Time Elasped: 0.009425640106201172sec
------------------------------------------------
"""
```
#### File: jihunroh/ProjectEuler-Python/ProjectEuler.Problem.089.py
```python
from ProjectEulerCommons.Base import *
def shorten_roman(roman):
shorten_dic = {
'VIIII': 'IX', #9
'LXXXX': 'XC', #90
'DCCCC': 'CM', #900
'IIII' : 'IV', #4
'XXXX' : 'XL', #40
'CCCC' : 'CD' #400
}
for key, value in shorten_dic.items():
roman = roman.replace(key, value)
return roman
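# Example (illustrative, not from the original file): 49 written greedily as
# 'XXXXVIIII' collapses to the minimal form 'XLIX', saving five characters.
assert shorten_roman('XXXXVIIII') == 'XLIX'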
with open('ProjectEuler.Problem.089.roman.txt', 'r') as f:
roman_num_list = [line.replace('\n', '') for line in f.readlines()]
Answer(
sum(map(len, roman_num_list)) - sum(map(len, map(shorten_roman, roman_num_list)))
)
"""
------------------------------------------------
ProjectEuler.Problem.089.py
The Answer is: 743
Time Elasped: 0.014960527420043945sec
------------------------------------------------
"""
``` |
{
"source": "jihwahn1018/ovirt-engine",
"score": 2
} |
#### File: pythonlib/ovirt_engine/java.py
```python
import gettext
import os
import subprocess
from . import base
from . import config
def _(m):
return gettext.dgettext(message=m, domain='ovirt-engine')
class Java(base.Base):
def __init__(self, component=None):
super(Java, self).__init__()
self._component = component if component else 'engine'
def getJavaHome(self):
p = subprocess.Popen(
args=(
os.path.join(
config.ENGINE_USR,
'bin',
'java-home',
),
'--component=%s' % self._component,
),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
)
stdout, stderr = p.communicate()
stdout = stdout.decode('utf-8', 'replace').splitlines()
stderr = stderr.decode('utf-8', 'replace').splitlines()
if p.returncode != 0:
raise RuntimeError(
_(
'Cannot get JAVA_HOME{error} make sure supported '
'JRE is installed'
).format(
error='(%s)' % stderr if stderr else '',
)
)
javaHome = stdout[0]
self.logger.debug('JAVA_HOME: %s', javaHome)
return javaHome
# vim: expandtab tabstop=4 shiftwidth=4
```
#### File: ovirt_engine_setup/engine/vdcoption.py
```python
import gettext
from otopi import util
def _(m):
return gettext.dgettext(message=m, domain='ovirt-engine-setup')
@util.export
class VdcOption():
def __init__(
self,
statement,
):
self._statement = statement
def getVdcOptionVersions(
self,
name,
type=str,
ownConnection=False,
):
result = self._statement.execute(
statement="""
select version, option_value
from vdc_options
where option_name = %(name)s
""",
args=dict(
name=name,
),
ownConnection=ownConnection,
)
if len(result) == 0:
raise RuntimeError(
_('Cannot locate application option {name}').format(
name=name,
)
)
return dict([
(
r['version'],
(
r['option_value']
if type != bool
else r['option_value'].lower() not in ('false', '0')
)
) for r in result
])
def getVdcOption(
self,
name,
version='general',
type=str,
ownConnection=False,
):
return self.getVdcOptionVersions(
name=name,
type=type,
ownConnection=ownConnection,
)[version]
def updateVdcOptions(
self,
options,
ownConnection=False,
):
for option in options:
name = option['name']
value = option['value']
version = option.get('version', 'general')
if option.get('encrypt', False):
                # AFAICT there aren't any more users of this function that
                # ask to encrypt. The only ones I know of were:
                # AdminPassword - the engine admin password
                # LocalAdminPassword - for Windows guests' admin password
                # in plugins/ovirt-engine-setup/ovirt-engine/config/options.py
                # Both were removed 5 years ago.
raise RuntimeError(_(
'encrypting vdc options is not supported'
))
if isinstance(value, bool):
value = 'true' if value else 'false'
res = self._statement.execute(
statement="""
select count(*) as count
from vdc_options
where
option_name=%(name)s and
version=%(version)s
""",
args=dict(
name=name,
version=version,
),
ownConnection=ownConnection,
)
if res[0]['count'] == 0:
self._statement.execute(
statement="""
select fn_db_add_config_value (
%(name)s,
%(value)s,
%(version)s
)
""",
args=dict(
name=name,
version=version,
value=value,
),
ownConnection=ownConnection,
)
else:
self._statement.execute(
statement="""
select fn_db_update_config_value (
%(name)s,
%(value)s,
%(version)s
)
""",
args=dict(
name=name,
version=version,
value=value,
),
ownConnection=ownConnection,
)
# vim: expandtab tabstop=4 shiftwidth=4
```
#### File: cinderlib/core/misc.py
```python
from otopi import plugin
from otopi import util
from ovirt_engine_setup import constants as osetupcons
from ovirt_engine_setup.cinderlib import constants as oclcons
@util.export
class Plugin(plugin.PluginBase):
"""Misc plugin."""
def __init__(self, context):
super(Plugin, self).__init__(context=context)
@plugin.event(
stage=plugin.Stages.STAGE_BOOT,
before=(
osetupcons.Stages.SECRETS_FILTERED_FROM_SETUP_ATTRS_MODULES,
),
)
def _boot(self):
self.environment[
osetupcons.CoreEnv.SETUP_ATTRS_MODULES
].append(
oclcons,
)
# vim: expandtab tabstop=4 shiftwidth=4
```
#### File: ovirt-engine/db/dbmsupgrade.py
```python
import gettext
from otopi import constants as otopicons
from otopi import plugin
from otopi import util
from ovirt_engine_setup.engine import constants as oenginecons
from ovirt_engine_setup.engine_common import constants as oengcommcons
from ovirt_engine_setup.engine_common import database
from ovirt_engine_setup.engine_common import postgres
def _(m):
return gettext.dgettext(message=m, domain='ovirt-engine-setup')
@util.export
class Plugin(plugin.PluginBase):
"""Local Postgres upgrade plugin."""
def __init__(self, context):
super(Plugin, self).__init__(context=context)
self._upgrade_approved = False
self._upgrade_approved_inplace = False
self._upgrade_approved_cleanupold = False
@plugin.event(
stage=plugin.Stages.STAGE_CUSTOMIZATION,
condition=lambda self: (
self.environment[
oenginecons.CoreEnv.ENABLE
] and self.environment[
oenginecons.EngineDBEnv.NEED_DBMSUPGRADE
]
),
name=oengcommcons.Stages.DB_CUST_UPGRADEDBMS_ENGINE,
before=(
oengcommcons.Stages.DIALOG_TITLES_E_DATABASE,
),
after=(
oengcommcons.Stages.DB_CONNECTION_CUSTOMIZATION,
oengcommcons.Stages.DIALOG_TITLES_S_DATABASE,
),
)
def _customization(self):
dbovirtutils = database.OvirtUtils(
plugin=self,
dbenvkeys=oenginecons.Const.ENGINE_DB_ENV_KEYS,
)
(
self._upgrade_approved,
self._upgrade_approved_inplace,
self._upgrade_approved_cleanupold
) = dbovirtutils.DBMSUpgradeCustomizationHelper('engine')
@plugin.event(
stage=plugin.Stages.STAGE_EARLY_MISC,
condition=lambda self: self._upgrade_approved,
name=oengcommcons.Stages.DB_UPGRADEDBMS_ENGINE,
)
def _updateDBMS(self):
self.logger.info(_('Upgrading PostgreSQL'))
self.environment[otopicons.CoreEnv.MAIN_TRANSACTION].append(
postgres.DBMSUpgradeTransaction(
parent=self,
inplace=self._upgrade_approved_inplace,
cleanupold=self._upgrade_approved_cleanupold,
upgrade_from=self.environment[
oengcommcons.ProvisioningEnv.OLD_POSTGRES_SERVICE
],
)
)
# vim: expandtab tabstop=4 shiftwidth=4
```
#### File: ovirt-engine/system/selinux.py
```python
import gettext
import re
from os import listdir
from os.path import isfile
from os.path import join
from otopi import plugin
from otopi import util
from ovirt_engine_setup import constants as osetupcons
from ovirt_engine_setup.engine import constants as oenginecons
def _(m):
return gettext.dgettext(message=m, domain='ovirt-engine-setup')
@util.export
class Plugin(plugin.PluginBase):
"""
SELinux configuration plugin.
"""
def __init__(self, context):
super(Plugin, self).__init__(context=context)
self._enabled = True
@plugin.event(
stage=plugin.Stages.STAGE_INIT,
)
def _init(self):
self.environment[osetupcons.SystemEnv.SELINUX_CONTEXTS] = []
self.environment[osetupcons.SystemEnv.SELINUX_RESTORE_PATHS] = []
self.environment[osetupcons.SystemEnv.SELINUX_BOOLEANS] = []
self.environment[osetupcons.SystemEnv.SELINUX_PORTS] = []
@plugin.event(
stage=plugin.Stages.STAGE_SETUP,
condition=lambda self: self._enabled,
)
def _setup(self):
self.command.detect('selinuxenabled')
self.command.detect('semanage')
self.command.detect('semodule')
self.command.detect('restorecon')
@plugin.event(
stage=plugin.Stages.STAGE_VALIDATION,
condition=lambda self: self._enabled,
)
def _validation(self):
if (
self.environment[osetupcons.CoreEnv.DEVELOPER_MODE] or
self.command.get('selinuxenabled', optional=True) is None
):
self._enabled = False
else:
rc, stdout, stderr = self.execute(
(
self.command.get('selinuxenabled'),
),
raiseOnError=False,
)
self._enabled = rc == 0
@plugin.event(
stage=plugin.Stages.STAGE_MISC,
condition=lambda self: self._enabled,
name=osetupcons.Stages.SETUP_SELINUX,
priority=plugin.Stages.PRIORITY_LOW,
)
def _misc(self):
selinux_dir = oenginecons.FileLocations.ANSIBLE_RUNNER_SERVICE_SELINUX
for f in listdir(selinux_dir):
file_path = join(selinux_dir, f)
if isfile(file_path):
                self.logger.info(
                    _('Installing SELinux module {f}').format(f=file_path)
                )
                rc, stdout, stderr = self.execute(
                    (
                        self.command.get('semodule'),
                        '-i', file_path
                    )
                )
                if rc != 0:
                    self.logger.error(
                        _('Failed to install SELinux module {f}').format(f=f)
                    )
for entry in self.environment[osetupcons.SystemEnv.SELINUX_PORTS]:
rc, stdout, stderr = self.execute(
(self.command.get('semanage'), 'port', '-l')
)
if not any(
re.match(
'{t}.*{p}'.format(t=entry['type'], p=entry['port']),
line
) for line in stdout
):
rc, stdout, stderr = self.execute(
(
self.command.get('semanage'),
'port',
'-a',
'-t', entry['type'],
'-p', entry['protocol'],
entry['port']
),
)
for entry in self.environment[osetupcons.SystemEnv.SELINUX_CONTEXTS]:
rc, stdout, stderr = self.execute(
(self.command.get('semanage'), 'fcontext', '-C', '-l')
)
if not any(
re.match(
'{p}.*{t}'.format(p=entry['pattern'], t=entry['type']),
line
) for line in stdout
):
rc, stdout, stderr = self.execute(
(
self.command.get('semanage'),
'fcontext',
'-a',
'-t', entry['type'],
entry['pattern']
)
)
for path in self.environment[
osetupcons.SystemEnv.SELINUX_RESTORE_PATHS
]:
rc, stdout, stderr = self.execute(
(
self.command.get('restorecon'),
'-r',
path
)
)
if rc != 0:
self.logger.error(
_('Failed to refresh SELINUX context for {path}').format(
path=path
)
)
for entry in self.environment[osetupcons.SystemEnv.SELINUX_BOOLEANS]:
rc, stdout, stderr = self.execute(
(
self.command.get('semanage'),
'boolean',
'--modify',
'--{state}'.format(state=entry['state']),
entry['boolean']
)
)
if rc != 0:
self.logger.error(
_(
'Failed to modify selinux boolean {boolean}, please '
'make sure it is set to {state}.'
).format(
boolean=entry['boolean'],
state=entry['state'],
)
)
# vim: expandtab tabstop=4 shiftwidth=4
``` |
{
"source": "jihwan1ua/CMPUT404_LAB9",
"score": 2
} |
#### File: iguana/client/views.py
```python
from django.shortcuts import render
from django.template import RequestContext
def example_view(request):
context = RequestContext(request)
return render(request, 'client/index.html', context)
``` |
{
"source": "Jihwankim818/NumbPie6-9",
"score": 3
} |
#### File: NumbPie6-9/floodsystem/analysis.py
```python
import numpy as np
from matplotlib.dates import date2num
def polyfit(dates, levels, p):
x = date2num(dates)
d0 = x[0]
z = x - d0
p_coeff = np.polyfit(z,levels,p)
poly = np.poly1d(p_coeff)
return poly, d0
```
#### File: NumbPie6-9/floodsystem/plot.py
```python
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
import numpy as np
from .analysis import polyfit
from matplotlib.dates import date2num
def plot_water_levels(station, dates, levels):
plt.plot(dates, levels)
plt.plot(dates, [station.typical_range[0]]*len(dates))
plt.plot(dates, [station.typical_range[1]]*len(dates))
plt.xlabel('date')
plt.ylabel('water level (m)')
plt.xticks(rotation=45)
plt.title(station.name)
plt.tight_layout()
plt.show()
def plot_water_level_with_fit(station, dates, levels, p):
poly, d0 = polyfit(dates, levels, p)
    dates_num = date2num(dates)
plt.plot(dates, levels, '-')
plt.plot(dates, [station.typical_range[0]]*len(dates))
plt.plot(dates, [station.typical_range[1]]*len(dates))
    plt.plot(dates, poly(dates_num - d0))
plt.ylabel('water level (m)')
plt.xticks(rotation=45)
plt.title(station.name)
plt.tight_layout()
plt.show()
```
#### File: Jihwankim818/NumbPie6-9/Task1B.py
```python
from floodsystem.stationdata import build_station_list
from floodsystem.geo import stations_by_distance
def run():
"""Requirements for Task 1B"""
#define Stations
stations = build_station_list()
#Generate list
x = stations_by_distance(stations, (52.2053, 0.1218))
#Print closest 10 and furthest 10
print(x[:10])
print("------------")
print(x[-10:])
if __name__ == "__main__":
print("*** Task 1B: CUED Part IA Flood Warning System ***")
run()
```
#### File: Jihwankim818/NumbPie6-9/test_1C.py
```python
from os import stat
from floodsystem.stationdata import build_station_list
from floodsystem.geo import stations_within_radius
stations = build_station_list()
def test_stations_within_radius():
x = stations_within_radius(stations, (52.2053, 0.1218), 10)
assert len(x) == 11
``` |
{
"source": "Jihwan-Kimm/Autoware_On_Embedded",
"score": 2
} |
#### File: topic_tools/test/test_relay_stealth.py
```python
import rospy
import unittest
from std_msgs.msg import String
class TestRelayStealth(unittest.TestCase):
def out_callback(self, msg):
self.out_msg_count += 1
def monitor_callback(self, msg):
self.monitor_msg_count += 1
def test_stealth_relay(self):
self.out_msg_count = 0
self.monitor_msg_count = 0
sub_out = rospy.Subscriber("/relay_stealth/output", String,
self.out_callback, queue_size=1)
for i in range(5):
if sub_out.get_num_connections() == 0:
rospy.sleep(1)
self.assertTrue(sub_out.get_num_connections() > 0)
rospy.sleep(5)
self.assertEqual(self.out_msg_count, 0)
sub_monitor = rospy.Subscriber("/original_topic/relay", String,
self.monitor_callback, queue_size=1)
rospy.sleep(5)
self.assertGreater(self.monitor_msg_count, 0)
self.assertGreater(self.out_msg_count, 0)
cnt = self.out_msg_count
sub_monitor.unregister()
rospy.sleep(3)
self.assertLess(abs(cnt - self.out_msg_count), 30)
if __name__ == '__main__':
import rostest
rospy.init_node("test_relay_stealth")
rostest.rosrun("topic_tools", "test_relay_stealth", TestRelayStealth)
```
#### File: svl/lgsvl/utils.py
```python
from .geometry import Vector, Transform
import math
import inspect
def accepts(*types):
def check_accepts(f):
assert len(types) + 1 == f.__code__.co_argcount
def new_f(*args, **kwargs):
names = inspect.getfullargspec(f)[0]
it = zip(args[1:], types, names[1:])
for (a, t, n) in it:
if not isinstance(a, t):
raise TypeError("Argument '{}' should have '{}' type".format(n, t))
return f(*args, **kwargs)
new_f.__name__ = f.__name__
return new_f
return check_accepts
class ObjectState:
def __init__(self, transform=None, velocity=None, angular_velocity=None):
if transform is None:
transform = Transform()
if velocity is None:
velocity = Vector()
if angular_velocity is None:
angular_velocity = Vector()
self.transform = transform
self.velocity = velocity
self.angular_velocity = angular_velocity
@property
def position(self):
return self.transform.position
@property
def rotation(self):
return self.transform.rotation
@property
def speed(self):
return math.sqrt(
self.velocity.x * self.velocity.x
+ self.velocity.y * self.velocity.y
+ self.velocity.z * self.velocity.z
)
@staticmethod
def from_json(j):
return ObjectState(
Transform.from_json(j["transform"]),
Vector.from_json(j["velocity"]),
Vector.from_json(j["angular_velocity"]),
)
def to_json(self):
return {
"transform": self.transform.to_json(),
"velocity": self.velocity.to_json(),
"angular_velocity": self.angular_velocity.to_json(),
}
def __repr__(self):
return str(
{
"transform": str(self.transform),
"velocity": str(self.velocity),
"angular_velocity": str(self.angular_velocity),
}
)
def transform_to_matrix(tr):
px = tr.position.x
py = tr.position.y
pz = tr.position.z
ax = tr.rotation.x * math.pi / 180.0
ay = tr.rotation.y * math.pi / 180.0
az = tr.rotation.z * math.pi / 180.0
sx, cx = math.sin(ax), math.cos(ax)
sy, cy = math.sin(ay), math.cos(ay)
sz, cz = math.sin(az), math.cos(az)
# Unity uses left-handed coordinate system, Rz * Rx * Ry order
return [
[sx * sy * sz + cy * cz, cx * sz, sx * cy * sz - sy * cz, 0.0],
[sx * sy * cz - cy * sz, cx * cz, sy * sz + sx * cy * cz, 0.0],
[cx * sy, -sx, cx * cy, 0.0],
[px, py, pz, 1.0],
]
def transform_to_forward(tr):
ax = tr.rotation.x * math.pi / 180.0
sx, cx = math.sin(ax), math.cos(ax)
ay = tr.rotation.y * math.pi / 180.0
sy, cy = math.sin(ay), math.cos(ay)
return Vector(cx * sy, -sx, cx * cy)
def transform_to_up(tr):
ax = tr.rotation.x * math.pi / 180.0
ay = tr.rotation.y * math.pi / 180.0
az = tr.rotation.z * math.pi / 180.0
sx, cx = math.sin(ax), math.cos(ax)
sy, cy = math.sin(ay), math.cos(ay)
sz, cz = math.sin(az), math.cos(az)
return Vector(sx * sy * cz - cy * sz, cx * cz, sy * sz + sx * cy * cz)
def transform_to_right(tr):
ax = tr.rotation.x * math.pi / 180.0
ay = tr.rotation.y * math.pi / 180.0
az = tr.rotation.z * math.pi / 180.0
sx, cx = math.sin(ax), math.cos(ax)
sy, cy = math.sin(ay), math.cos(ay)
sz, cz = math.sin(az), math.cos(az)
return Vector(sx * sy * sz + cy * cz, cx * sz, sx * cy * sz - sy * cz)
def vector_dot(a, b):
return a.x * b.x + a.y * b.y + a.z * b.z
# this works only with transformation matrices (no scaling, no projection)
def matrix_inverse(m):
x = Vector(m[0][0], m[0][1], m[0][2])
y = Vector(m[1][0], m[1][1], m[1][2])
z = Vector(m[2][0], m[2][1], m[2][2])
v = Vector(m[3][0], m[3][1], m[3][2])
a = -vector_dot(v, x)
b = -vector_dot(v, y)
c = -vector_dot(v, z)
return [
[x.x, y.x, z.x, 0.0],
[x.y, y.y, z.y, 0.0],
[x.z, y.z, z.z, 0.0],
[a, b, c, 1.0],
]
def matrix_multiply(a, b):
r = [[0, 0, 0, 0] for t in range(4)]
for i in range(4):
for j in range(4):
for k in range(4):
r[i][j] += a[i][k] * b[k][j]
return r
def vector_multiply(v, m):
tmp = [None] * 3
for i in range(3):
tmp[i] = v.x * m[0][i] + v.y * m[1][i] + v.z * m[2][i] + m[3][i]
return Vector(tmp[0], tmp[1], tmp[2])
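# Self-check sketch (not part of the upstream SDK): matrix_inverse only handles rigid
# transforms, so composing a matrix with its inverse must give the identity, and
# vector_multiply applied to the origin must return the translation part.
def _check_matrix_helpers():  # hypothetical helper, illustration only
    m = [
        [1.0, 0.0, 0.0, 0.0],
        [0.0, 1.0, 0.0, 0.0],
        [0.0, 0.0, 1.0, 0.0],
        [1.0, 2.0, 3.0, 1.0],
    ]
    ident = matrix_multiply(m, matrix_inverse(m))
    assert all(
        abs(ident[i][j] - (1.0 if i == j else 0.0)) < 1e-9
        for i in range(4)
        for j in range(4)
    )
    v = vector_multiply(Vector(0.0, 0.0, 0.0), m)
    assert (v.x, v.y, v.z) == (1.0, 2.0, 3.0)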
``` |
{
"source": "jihwanK/personal_projects",
"score": 3
} |
#### File: non-overlapping/streaming/real_learning.py
```python
import psycopg2
# offline learning
# with open('output.tsv', 'r') as ostream:
# output = ostream.readlines()
# for line in output:
# tracklet_id, cctv_id, timestamp, grid_list, avg_size_grid, std_size_grid, avg_speed_grid, std_speed_grid = line.split()
def push_raw_to_entry_exit_log(fname):
with open(fname, 'r') as ostream:
lines = ostream.readlines()
prev_tracklet_id = -99
prev_speeds_avg = [0,0]
for line in lines:
tracklet_id, cctv_id, timestamp, grid_list, avg_size_by_grid, std_size_by_grid, avg_speed_by_grid, std_speed_by_grid = line.split()
# if int(cctv_id) % 2 == 0:
# cctv_id = 19216800000200
# else:
# cctv_id = 19216800000201
passed_grids = grid_list.split(',')
speeds_avg = avg_speed_by_grid.split(',')
sizes_avg = avg_size_by_grid.split(',')
sql = '''
INSERT INTO EXIT_ENTRY_LOG (timestamp, cctv_id, tracklet_id, grid_id, speed, size, exit_entry)
VALUES
(%(timestamp)s, %(cctv_id)s, %(tracklet_id)s, %(grid_id)s, %(speed)s, %(size)s, %(exit_entry)s)'''
if prev_tracklet_id != tracklet_id:
######################
# for the last frame #
######################
if int(prev_tracklet_id) > 0:
# if the speed from the parser comes out to be -1
# we will choose the second to last one
if float(prev_speeds_avg[-1]) < 0 and len(prev_speeds_avg) == 1:
exit_speed = prev_prev_speeds_avg[-1]
elif float(prev_speeds_avg[-1]) < 0 and len(prev_speeds_avg) > 1:
exit_speed = prev_speeds_avg[len(prev_speeds_avg)-2]
elif float(prev_speeds_avg[-1]) < 0 and prev_prev_tracklet_id != prev_tracklet_id:
exit_speed = prev_speeds_avg[len(prev_speeds_avg)-2]
else:
exit_speed = prev_speeds_avg[-1]
exit_timestamp = int(prev_timestamp) + 500
exit_grid = prev_passed_grids[-1]
# exit_speed = prev_speeds_avg[-1]
exit_size = prev_sizes_avg[-1]
exit_data = {
'timestamp': exit_timestamp,
'cctv_id': prev_cctv_id,
'tracklet_id': prev_tracklet_id,
'grid_id': exit_grid,
'speed': exit_speed,
'size': exit_size,
'exit_entry': 'exit',
}
cur.execute(sql, exit_data)
#######################
# for the first frame #
#######################
entry_timestamp = timestamp
entry_grid = passed_grids[0]
entry_speed = speeds_avg[0]
entry_size = sizes_avg[0]
entry_data = {
'timestamp': entry_timestamp,
'cctv_id': cctv_id,
'tracklet_id': tracklet_id,
'grid_id': entry_grid,
'speed': entry_speed,
'size': entry_size,
'exit_entry': 'entry',
}
cur.execute(sql, entry_data)
prev_prev_speeds_avg = prev_speeds_avg
prev_prev_tracklet_id = prev_tracklet_id
prev_tracklet_id = tracklet_id
prev_cctv_id = cctv_id
prev_timestamp = timestamp
prev_passed_grids = passed_grids
prev_speeds_avg = speeds_avg
prev_sizes_avg = sizes_avg
conn.commit()
# need to convert push_to_link() into [one single SQL]
def push_to_link_sql():
# join the table on cctv_id which is linking; can get this info from LINK_INIT
linked_cctv_info_sql = 'SELECT * FROM LINK_INIT'
cur.execute(linked_cctv_info_sql)
linked_cctv_infos = cur.fetchall()
for linked_cctv_info in linked_cctv_infos:
sql = '''
INSERT INTO LINK (log_a_id, log_b_id, timegap)
SELECT cctv_a.log_id, cctv_b.log_id, abs(cctv_a.timestamp-cctv_b.timestamp)
FROM EXIT_ENTRY_LOG AS cctv_a
INNER JOIN EXIT_ENTRY_LOG AS cctv_b ON (cctv_a.cctv_id=%(cctv_a_id)s AND cctv_b.cctv_id=%(cctv_b_id)s)
WHERE
(abs(cctv_a.timestamp-cctv_b.timestamp) <= 5500) AND
(
(cctv_a.exit_entry='entry' AND cctv_b.exit_entry='exit' AND cctv_a.timestamp > cctv_b.timestamp) OR
(cctv_b.exit_entry='entry' AND cctv_a.exit_entry='exit' AND cctv_a.timestamp < cctv_b.timestamp)
)
'''
data = {
'cctv_a_id': linked_cctv_info[0],
'cctv_b_id': linked_cctv_info[1],
}
cur.execute(sql, data)
conn.commit()
def push_to_link():
fetch_exit_log = 'SELECT * FROM EXIT_ENTRY_LOG WHERE exit_entry=\'exit\' ORDER BY timestamp'
cur.execute(fetch_exit_log)
exits = cur.fetchall() # returns list of tuples
fetch_entry_log = 'SELECT * FROM EXIT_ENTRY_LOG WHERE exit_entry=\'entry\' ORDER BY timestamp'
cur.execute(fetch_entry_log)
entries = cur.fetchall()
for exit_line in exits:
connected_cctv_info = 'SELECT * FROM LINK_INIT WHERE cctv_a_id=%(cctv)s OR cctv_b_id=%(cctv)s'
cctv = {
'cctv': exit_line[2],
}
cur.execute(connected_cctv_info, cctv)
link_inits = cur.fetchall()
for link_init in link_inits:
# exit_line[2] -> cctv_id
if link_init[0] == exit_line[2]:
# check link_init[0] (cctv_a) whether it's within time boundary (link_init[2], link_init[3])
# link_init[2] -> lower_boundary
# link_init[3] -> upper_boundary
for entry_line in entries:
if (link_init[1] == entry_line[2]) and (link_init[2] <= (entry_line[1] - exit_line[1]) <= link_init[3]):
# valid link
insert_sql = 'INSERT INTO LINK VALUES (%(log_a_id)s, %(log_b_id)s, %(timegap)s)'
if entry_line[0] < exit_line[0]:
insert_data = {
'timegap': entry_line[1] - exit_line[1],
'log_a_id': entry_line[0],
'log_b_id': exit_line[0],
}
else:
insert_data = {
'timegap': entry_line[1] - exit_line[1],
'log_a_id': exit_line[0],
'log_b_id': entry_line[0],
}
cur.execute(insert_sql, insert_data)
elif link_init[1] == exit_line[2]:
# check link_init[1] (cctv_b) whether it's within time boundary (link_init[2], link_init[3])
for entry_line in entries:
if (link_init[0] == entry_line[2]) and (link_init[2] <= (entry_line[1] - exit_line[1]) <= link_init[3]):
# valid link
insert_sql = 'INSERT INTO LINK VALUES (%(log_a_id)s, %(log_b_id)s, %(timegap)s)'
if entry_line[0] < exit_line[0]:
insert_data = {
'timegap': entry_line[1] - exit_line[1],
'log_a_id': entry_line[0],
'log_b_id': exit_line[0],
}
else:
insert_data = {
'timegap': entry_line[1] - exit_line[1],
'log_a_id': exit_line[0],
'log_b_id': entry_line[0],
}
cur.execute(insert_sql, insert_data)
conn.commit()
def tryit():
sql = '''
INSERT INTO grid_mapping_table (cctv_a_id, grid_a_id, cctv_b_id, grid_b_id,
time_lower_bound, time_upper_bound, timegap_avg, timegap_std, count,
speed_variation_rate, size_variation_rate)
SELECT *
FROM
(SELECT s1.cctv_a_id, s1.grid_a_id, s1.cctv_b_id, s1.grid_b_id,
(s1.timegap_avg-s1.timegap_std), (s1.timegap_avg+s1.timegap_std), s1.timegap_avg, s1.timegap_std, cnt,
GREATEST(0, s3.speed_avg/s2.speed_avg), s3.size_avg/s2.size_avg
FROM GRID_INFO AS s2
INNER JOIN
(SELECT e1.cctv_id AS cctv_a_id, e1.grid_id AS grid_a_id, e2.cctv_id AS cctv_b_id, e2.grid_id AS grid_b_id,
avg(abs(e1.timestamp-e2.timestamp)) AS timegap_avg, COALESCE(stddev(abs(e1.timestamp-e2.timestamp)), 0) AS timegap_std, count(*) AS cnt
FROM ((LINK
INNER JOIN EXIT_ENTRY_LOG AS e1 ON LINK.log_a_id = e1.log_id)
INNER JOIN EXIT_ENTRY_LOG AS e2 ON LINK.log_b_id = e2.log_id)
GROUP BY e1.cctv_id, e1.grid_id, e2.cctv_id, e2.grid_id) AS s1
ON s2.cctv_id=s1.cctv_a_id and s2.grid_id=s1.grid_a_id
INNER JOIN GRID_INFO AS s3 ON s3.cctv_id=s1.cctv_b_id and s3.grid_id=s1.grid_b_id) AS r
'''
cur.execute(sql)
conn.commit()
def push_link_info():
link_info_sql = '''
SELECT e1.cctv_id, e1.grid_id, e2.cctv_id, e2.grid_id,
avg(abs(e1.timestamp-e2.timestamp)), stddev(abs(e1.timestamp-e2.timestamp)), count(*)
FROM ((LINK
INNER JOIN EXIT_ENTRY_LOG AS e1 ON LINK.log_a_id = e1.log_id)
INNER JOIN EXIT_ENTRY_LOG AS e2 ON LINK.log_b_id = e2.log_id)
GROUP BY e1.cctv_id, e1.grid_id, e2.cctv_id, e2.grid_id'''
cur.execute(link_info_sql)
infos = cur.fetchall()
for info in infos:
push_sql = '''
INSERT INTO grid_mapping_table (cctv_a_id, grid_a_id, cctv_b_id, grid_b_id,
time_lower_bound, time_upper_bound, timegap_avg, timegap_std, count,
speed_variation_rate, size_variation_rate)
VALUES (%(cctv_a_id)s, %(grid_a_id)s, %(cctv_b_id)s, %(grid_b_id)s,
%(time_lower_bound)s, %(time_upper_bound)s, %(timegap_avg)s, %(timegap_std)s, %(count)s,
%(speed_variation_rate)s, %(size_variation_rate)s)'''
if info[5] == 0:
std = 500
elif info[5] is None:
std = 500
else:
std = info[5]
grid_info = 'SELECT size_avg, speed_avg FROM GRID_INFO WHERE cctv_id=%(cctv_id)s and grid_id=%(grid_id)s'
data_ca = {
'cctv_id': info[0],
'grid_id': info[1],
}
data_cb = {
'cctv_id': info[2],
'grid_id': info[3],
}
cur.execute(grid_info, data_ca)
avg_ca = cur.fetchone()
cur.execute(grid_info, data_cb)
avg_cb = cur.fetchone()
size_rate = avg_cb[0] / avg_ca[0]
# if the value of the speed is -1, set the speed_rate to -1 to make sure it is anomaly
if avg_ca[1] == -1 or avg_cb[1] == -1:
speed_rate = -1
else:
speed_rate = avg_cb[1] / avg_ca[1]
push_data = {
'cctv_a_id': info[0],
'grid_a_id': info[1],
'cctv_b_id': info[2],
'grid_b_id': info[3],
'time_lower_bound': float(info[4]) - 2*float(std),
'time_upper_bound': float(info[4]) + 2*float(std),
'timegap_avg': info[4],
'timegap_std': std,
'count': info[6],
'speed_variation_rate': speed_rate,
'size_variation_rate': size_rate,
}
cur.execute(push_sql, push_data)
conn.commit()
def push_grid_info():
# only for valid link
grid_info_sql = '''
INSERT INTO GRID_INFO (cctv_id, grid_id, size_avg, size_std, speed_avg, speed_std, count)
SELECT e.cctv_id, e.grid_id, avg(e.size), COALESCE(stddev(e.size), 0), avg(e.speed), COALESCE(stddev(e.speed), 0), count(*)
FROM LINK AS l INNER JOIN EXIT_ENTRY_LOG AS e ON l.log_a_id = e.log_id OR l.log_b_id = e.log_id
GROUP BY e.cctv_id, e.grid_id'''
cur.execute(grid_info_sql)
conn.commit()
def delete_all_table():
sql = '''
DELETE FROM EXIT_ENTRY_LOG;
ALTER SEQUENCE exit_entry_log_log_id_seq RESTART ;
DELETE FROM grid_mapping_table;
ALTER SEQUENCE link_info_link_info_id_seq RESTART ;
DELETE FROM GRID_INFO;
ALTER SEQUENCE grid_info_grid_info_id_seq RESTART ;
DELETE FROM LINK;'''
cur.execute(sql)
conn.commit()
def start():
global conn
global cur
conn = psycopg2.connect(host="localhost", database="mct_streaming", user="mct", password="password")
cur = conn.cursor()
def finish():
cur.close()
conn.close()
start()
delete_all_table()
push_raw_to_entry_exit_log('../logs/final/first_cctv1.tsv')
push_raw_to_entry_exit_log('../logs/final/first_cctv2.tsv')
# push_to_link()
push_to_link_sql()
push_grid_info()
push_link_info()
# tryit()
finish()
```
#### File: non-overlapping/streaming/real_online_streaming.py
```python
import psycopg2
##############
# log schema #
##############
# [0]: timestamp
# [1]: cctv_id
# [2]: tracklet
# [3]: grid
# [4]: size
# [5]: speed
def start():
global conn
global cur
global entry_log_buffer
global exit_log_buffer
global buffer
global candidates
global true_link
entry_log_buffer = []
exit_log_buffer = []
buffer = dict()
candidates = dict()
true_link = list()
# conn = psycopg2.connect(host="localhost", database="mct", user="mct", password="password")
conn = psycopg2.connect(host="localhost", database="mct_streaming", user="mct", password="password")
cur = conn.cursor()
def finish():
cur.close()
conn.close()
def push_the_raw_log(log, exit_entry):
sql = '''INSERT INTO exit_entry_log (timestamp, cctv_id, tracklet_id, grid_id, speed, size, exit_entry)
VALUES (%(timestamp)s, %(cctv_id)s, %(tracklet_id)s, %(grid_id)s, %(speed)s, %(size)s, %(exit_entry)s)
'''
data = {
'timestamp': log[0],
'cctv_id': log[1],
'tracklet_id': log[2],
'grid_id': log[3],
'speed': log[5],
'size': log[4],
'exit_entry': exit_entry,
}
cur.execute(sql, data)
conn.commit()
def get_grid_score(cctv_a, grid_a, cctv_b, grid_b):
sql = '''
SELECT T.cnt/B.cnt AS rate
FROM
(SELECT cctv_a_id, grid_a_id, sum(count) AS cnt
FROM grid_mapping_table
GROUP BY (cctv_a_id, grid_a_id)) AS B,
(SELECT cctv_a_id, grid_a_id, cctv_b_id, grid_b_id, count AS cnt
FROM grid_mapping_table
WHERE (cctv_a_id=%(cctv_a)s AND grid_a_id=%(grid_a)s AND cctv_b_id=%(cctv_b)s AND grid_b_id=%(grid_b)s)
OR (cctv_a_id=%(cctv_b)s AND grid_a_id=%(grid_b)s AND cctv_b_id=%(cctv_a)s AND grid_b_id=%(grid_a)s)) AS T
WHERE B.cctv_a_id=T.cctv_a_id AND B.grid_a_id=T.grid_a_id
'''
data = {
'cctv_a': cctv_a,
'grid_a': grid_a,
'cctv_b': cctv_b,
'grid_b': grid_b,
}
cur.execute(sql, data)
result = cur.fetchone()
if result is None:
return 0
else:
return float(result[0])
def get_the_size_and_speed_variation_rate(cctv_a, grid_a, cctv_b, grid_b):
sql = '''
SELECT speed_variation_rate, size_variation_rate FROM grid_mapping_table
WHERE (cctv_a_id=%(cctv_a)s AND grid_a_id=%(grid_a)s AND cctv_b_id=%(cctv_b)s AND grid_b_id=%(grid_b)s)
OR (cctv_a_id=%(cctv_b)s AND grid_a_id=%(grid_b)s AND cctv_b_id=%(cctv_a)s AND grid_b_id=%(grid_a)s)
'''
data = {
'cctv_a': cctv_a,
'grid_a': grid_a,
'cctv_b': cctv_b,
'grid_b': grid_b,
}
cur.execute(sql, data)
result = cur.fetchone()
if result is None:
return (0, 0)
else:
return result
def get_connected_cctv_list(cctv_id):
sql = '''
SELECT cctv_a_id AS cctv_id FROM link_init WHERE cctv_b_id=%(cctv_id)s
UNION
SELECT cctv_b_id AS cctv_id FROM link_init WHERE cctv_a_id=%(cctv_id)s
'''
data = {
'cctv_id': cctv_id,
}
cur.execute(sql, data)
results = cur.fetchall()
return results
def get_timegaps(cctv_a, grid_a, cctv_b, grid_b):
sql = '''
SELECT time_lower_bound, time_upper_bound FROM grid_mapping_table
WHERE (cctv_a_id=%(cctv_a)s AND grid_a_id=%(grid_a)s AND cctv_b_id=%(cctv_b)s AND grid_b_id=%(grid_b)s)
OR (cctv_a_id=%(cctv_b)s AND grid_a_id=%(grid_b)s AND cctv_b_id=%(cctv_a)s AND grid_b_id=%(grid_a)s)
'''
data = {
'cctv_a': cctv_a,
'grid_a': grid_a,
'cctv_b': cctv_b,
'grid_b': grid_b,
}
cur.execute(sql, data)
result = cur.fetchone()
if result is None:
return (0, 0)
else:
return result
def get_final_score(speed_rate, size_rate, grid_score, exit_log, entry_log):
# grid score
final_score = grid_score
# rate = b / a
if exit_log[1] < entry_log[1]:
a = exit_log
b = entry_log
else:
a = entry_log
b = exit_log
# size
estimated_size = a[4] * size_rate
final_score *= 1 - (abs(estimated_size - b[4]) / b[4])
# speed, for now, is not so accurate to use
# speed
# estimated_speed = a[5] * speed_rate
# final_score *= 1 - (abs(estimated_speed - b[5]) / b[5])
return final_score
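# Worked example for get_final_score (hypothetical numbers): grid_score=0.6,
# a[4]=1.0 (size on the lower-numbered cctv), size_rate=1.2 -> estimated_size=1.2;
# with b[4]=1.0 the penalty is |1.2-1.0|/1.0 = 0.2, so final_score = 0.6 * (1 - 0.2) = 0.48.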
def algorithm(line):
unit_time = 1_000
segmented_log = line.split('\t')
    # segmented_log[1]: object class
    # 1: human
    # 100: head
if segmented_log[1] == '1':
key = (segmented_log[2], segmented_log[0]) # buffer_key = (cctv_id, tracklet_id)
grids = list(map(int, segmented_log[4].split(',')))
sizes = list(map(float, segmented_log[5].split(',')))
speeds = list(map(float, segmented_log[9].split(',')))
log = ([segmented_log[3],], segmented_log[2], segmented_log[0], grids, sizes, speeds)
##########################
##########################
# INSERT THE EXIT LOG
##########################
##########################
# insert new 'exit log'
# if the value of speed is -1, because of the default setting,
# find the earlier one and check whether it's -1 and if so, do the same thing again
for buffer_log in buffer.values():
# compares the current_time (segmented_log[3]) to buffer_log timestamp
if (int(segmented_log[3]) - int(buffer_log[0][-1])) > unit_time:
buffer.pop( (buffer_log[1], buffer_log[2]) )
if buffer_log[5][-1] == -1:
if buffer_log[5][len(buffer_log[5])-2] == -1:
speed = buffer_log[5][len(buffer_log[5])-3]
else:
speed = buffer_log[5][len(buffer_log[5])-2]
else:
speed = buffer_log[5][-1]
exit_log = (int(buffer_log[0][-1])+500, int(buffer_log[1]), int(buffer_log[2]), buffer_log[3][-1], buffer_log[4][-1], speed)
exit_log_buffer.append(exit_log)
push_the_raw_log(exit_log, 'exit')
                break  # necessary: buffer was popped above, so stop iterating to avoid mutating the dict mid-iteration
##########################
##########################
# INSERT THE ENTRY LOG AND UPDATE THE LOG
##########################
##########################
# if the key, (cctv_id, tracklet_id), is not in the buffer's keylist
# insert new 'entry log'
if key not in buffer.keys():
entry_log = (int(segmented_log[3]), int(segmented_log[2]), int(segmented_log[0]), grids[0], sizes[0], speeds[0])
buffer[key] = log
entry_log_buffer.append(entry_log)
push_the_raw_log(entry_log, 'entry')
# unless
# update the 'existing log'
else:
timestamp, cctv_id, tracklet_id, grid_list, size_list, speed_list = buffer[key]
timestamp.extend(log[0])
grid_list.extend(log[3])
size_list.extend(log[4])
speed_list.extend(log[5])
updated_log = (timestamp, cctv_id, tracklet_id, grid_list, size_list, speed_list)
buffer[key] = updated_log
if len(exit_log_buffer) > 0:
make_candidates(exit_log_buffer, entry_log_buffer, log[0])
##########################
# LINK THE LINKABLE LOGS
##########################
def make_candidates(exit_log_buffer, entry_log_buffer, current_log):
wait_for_evaluation = 1_000
for exit_log in exit_log_buffer:
for entry_log in entry_log_buffer:
print("true_link", true_link)
print("pair", (exit_log[1], exit_log[2], entry_log[1], entry_log[2]) )
if (exit_log[1], exit_log[2], entry_log[1], entry_log[2]) in true_link:
try:
exit_log_buffer.remove(exit_log)
entry_log_buffer.remove(entry_log)
                except ValueError:
                    pass
break
cctv_list = get_connected_cctv_list(exit_log[1])
# can make it return True or False
if (entry_log[1],) in cctv_list:
# if there's no timegap information in the mapping table for given logs, ignore them
# if the timegap is None, get_timegaps() will return (0, 0), so that nothing can be met
lower_bound, upper_bound = get_timegaps(entry_log[1], entry_log[3], exit_log[1], exit_log[3])
# print(lower_bound, upper_bound, entry_log[0]-exit_log[0])
if (lower_bound <= (entry_log[0] - exit_log[0]) <= upper_bound):
speed_rate, size_rate = get_the_size_and_speed_variation_rate(exit_log[1], exit_log[3], entry_log[1], entry_log[3])
grid_score = get_grid_score(exit_log[1], exit_log[3], entry_log[1], entry_log[3])
final_score = get_final_score(speed_rate, size_rate, grid_score, exit_log, entry_log)
# print(final_score)
                    if final_score == 0:
                        pass
                    elif exit_log in candidates:
                        # record at most one (entry_log, score) pair per entry log
                        if entry_log not in [pair[0] for pair in candidates[exit_log]]:
                            candidates[exit_log].append((entry_log, final_score))
                    else:
                        candidates[exit_log] = [(entry_log, final_score),]
# candidate->entry->timestamp + 500ms
if (exit_log in candidates.keys()) and (int(current_log[-1]) > candidates[exit_log][0][0][0] + wait_for_evaluation):
evalute_candidate(candidates, exit_log)
result = (exit_log, candidates[exit_log][0], candidates[exit_log][1])
connet_tracklet((result[0], result[1]), result[2])
exit_log_buffer.remove(exit_log)
entry_log_buffer.remove(candidates[exit_log][0])
true_link.append((exit_log[1], exit_log[2], candidates[exit_log][0][1], candidates[exit_log][0][2]))
candidates.pop(exit_log)
#####################################
# evaluate the candidate dictionary #
#####################################
def evalute_candidate(candidates, candidate_key):
# for candidate_key in candidates.keys():
max_score = 0
connected_value = []
for candidate_value in candidates[candidate_key]:
if max_score < candidate_value[1]:
max_score = candidate_value[1]
connected_value = candidate_value
candidates[candidate_key] = connected_value
##########################
# update exit_entry_link #
##########################
def connet_tracklet(connectable_tracklet, score):
sql = '''
SELECT trajectory_id FROM exit_entry_link
WHERE (cctv_a_id=%(cctv_a)s AND tracklet_a_id=%(tracklet_a)s)
OR (cctv_b_id=%(cctv_b)s AND tracklet_b_id=%(tracklet_b)s)
OR (cctv_a_id=%(cctv_b)s AND tracklet_a_id=%(tracklet_b)s)
OR (cctv_b_id=%(cctv_a)s AND tracklet_b_id=%(tracklet_a)s)
'''
if connectable_tracklet[0][1] < connectable_tracklet[1][1]:
data = {
'cctv_a': connectable_tracklet[0][1],
'tracklet_a': connectable_tracklet[0][2],
'cctv_b': connectable_tracklet[1][1],
'tracklet_b': connectable_tracklet[1][2],
}
else:
data = {
'cctv_a': connectable_tracklet[1][1],
'tracklet_a': connectable_tracklet[1][2],
'cctv_b': connectable_tracklet[0][1],
'tracklet_b': connectable_tracklet[0][2],
}
cur.execute(sql, data)
result = cur.fetchone()
if result is not None:
trajectory = result
else:
max_id = 'SELECT max(trajectory_id) FROM exit_entry_link'
cur.execute(max_id)
max_trajectory_id = cur.fetchone()[0]
if max_trajectory_id is None:
trajectory = 1
else:
trajectory = max_trajectory_id + 1
sql_insert = '''
INSERT INTO exit_entry_link VALUES (
%(timestamp_a)s, %(cctv_a)s, %(grid_a)s, %(tracklet_a)s, %(speed_a)s, %(size_a)s,
%(timestamp_b)s, %(cctv_b)s, %(grid_b)s, %(tracklet_b)s, %(speed_b)s, %(size_b)s,
%(trajectory_id)s, %(score)s)
'''
if connectable_tracklet[0][1] < connectable_tracklet[1][1]:
data = {
'trajectory_id': trajectory,
'timestamp_a': connectable_tracklet[0][0],
'cctv_a': connectable_tracklet[0][1],
'tracklet_a': connectable_tracklet[0][2],
'grid_a': connectable_tracklet[0][3],
'speed_a': connectable_tracklet[0][5],
'size_a': connectable_tracklet[0][4],
'timestamp_b': connectable_tracklet[1][0],
'cctv_b': connectable_tracklet[1][1],
'tracklet_b': connectable_tracklet[1][2],
'grid_b': connectable_tracklet[1][3],
'speed_b': connectable_tracklet[1][5],
'size_b': connectable_tracklet[1][4],
'score': score,
}
else:
data = {
'trajectory_id': trajectory,
            'timestamp_a': connectable_tracklet[1][0],
'cctv_a': connectable_tracklet[1][1],
'tracklet_a': connectable_tracklet[1][2],
'grid_a': connectable_tracklet[1][3],
'speed_a': connectable_tracklet[1][5],
'size_a': connectable_tracklet[1][4],
            'timestamp_b': connectable_tracklet[0][0],
'cctv_b': connectable_tracklet[0][1],
'tracklet_b': connectable_tracklet[0][2],
'grid_b': connectable_tracklet[0][3],
'speed_b': connectable_tracklet[0][5],
'size_b': connectable_tracklet[0][4],
'score': score,
}
cur.execute(sql_insert, data)
conn.commit()
def main():
start()
with open('../logs/final/learning_output.txt') as r:
lines = r.readlines()
for line in lines:
algorithm(line)
finish()
main()
```
#### File: problem_set/2017/cards.py
```python
def main():
numCases = int(input(""))
# Process each case.
for loop in range(numCases):
# Get the current stack and store a reverse look up.
toks = input("").split()
n = int(toks[0])
nums = [0]*n
rev = [0]*(n+1)
for i in range(1,n+1):
nums[i-1] = int(toks[i])
            rev[nums[i-1]] = i
# Set up my bit.
mybit = bit(n+1)
for i in range(1,n+1):
mybit.add(i, nums[i-1])
# Initial values.
cur = 1
left = n*(n+1)//2
res = 0
# Now, go through queries.
for i in range(1,n+1):
# Get query range.
nxt = rev[i]
low = min(cur,nxt)
high = max(cur,nxt)
# Try both directions and do the best one.
tot = mybit.totalrange(low,high-1)
other = left - tot
res += min(tot, other)
# Bookkeeping - update all necessary variables.
left -= nums[nxt-1]
mybit.add(nxt, -nums[nxt-1])
cur = nxt
# Ta da
print(res)
# Returns the binary value of the lowest 1 bit of n.
def lowestOneBit(n,val):
if n <= 0:
return -1
while (n & val) == 0:
val <<= 1
    return val
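# e.g. lowestOneBit(12, 1) == 4  (12 is 0b1100, whose lowest set bit is 0b100)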
# Here is the Binary Index Tree/Fenwick Tree...
class bit:
def __init__(self, n):
self.cumfreq = []
size = 1
while size < n:
            size <<= 1
for i in range(size+1):
self.cumfreq.append(0)
def add(self, index, value):
cur = 1
while index < len(self.cumfreq):
self.cumfreq[index] += value
item = lowestOneBit(index, cur)
index += item
cur = item
def total(self, index):
ans = 0
cur = 1
while index > 0:
ans += self.cumfreq[index]
item = lowestOneBit(index, cur)
index -= item
cur = item
return ans
def totalrange(self, low, high):
return self.total(high) - self.total(low-1)
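# Usage sketch (hypothetical): b = bit(8); b.add(3, 5); then b.total(3) == 5
# and b.totalrange(1, 3) == 5.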
# Get it all started.
main()
```
#### File: problem_set/2017/polycake.py
```python
from math import sqrt
EPS = 1e-9
class Point:
x = 0
y = 0
    def __init__(self, x, y):
        self.x = x
        self.y = y
def dot(self, p):
return self.x * p.x + self.y * p.y
def sub(self, p):
return Point(self.x - p.x, self.y - p.y)
    def length(self):
        return sqrt(self.dot(self))
    def equals(self, p):
        return self.sub(p).length() < EPS
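# e.g. Point(3, 4).length() == 5.0 and Point(1, 2).equals(Point(1, 2)) is True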
class Polycake:
v = 0
y = 0
points = []
def __init__(self):
numCases = int(input())
for t in range(0, numCases):
self.v, self.y = map(int, input().split())
del self.points[:]
for _ in range(0, self.v):
i, j = map(int, input().split())
self.points.append(Point(i, j))
self.runCase()
def runCase(self):
lower = []
upper = []
for i in range(0, self.v):
a = self.points[i]
b = self.points[(i + 1) % self.v]
if a.y < self.y and b.y < self.y:
lower.append(a)
elif a.y > self.y and b.y > self.y:
upper.append(a)
elif a.y < self.y:
lower.append(a)
t = (self.y - a.y) / (b.y - a.y)
newX = a.x + t * (b.x - a.x)
lower.append(Point(newX, self.y))
upper.append(Point(newX, self.y))
else:
upper.append(a)
t = (self.y - a.y) / (b.y - a.y)
newX = a.x + t * (b.x - a.x)
upper.append(Point(newX, self.y))
lower.append(Point(newX, self.y))
totalLower = 0.0
for i in range(0, len(lower)):
a = lower[i]
b = lower[(i + 1) % len(lower)]
totalLower = totalLower + ((b.x - a.x) ** 2 + (b.y - a.y) ** 2) ** 0.5
totalUpper = 0.0
for i in range(0, len(upper)):
a = upper[i]
b = upper[(i + 1) % len(upper)]
totalUpper = totalUpper + ((b.x - a.x) ** 2 + (b.y - a.y) ** 2) ** 0.5
if (totalLower < totalUpper):
print("%.3f %.3f" % (totalLower, totalUpper))
else:
print("%.3f %.3f" % (totalUpper, totalLower))
Polycake()
```
#### File: dblab/sqlite/main.py
```python
import sqlite3
import sys
import time
#################################################################################################################################
# check whether the student_sql includes keywords such as "insert", "update", "delete", "drop", "alter" that can modify the database itself #
#################################################################################################################################
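# e.g. check_sql('DELETE FROM t') prints a warning and exits, while check_sql('SELECT * FROM t')
# executes the query and prints its execution time (table name 't' is only illustrative).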
def check_sql(student_sql):
tokens = student_sql.lower().split(' ')
if "insert" in tokens or "update" in tokens or "delete" in tokens or "drop" in tokens or "alter" in tokens:
print("SHOULD NOT USE insert, update, delete, drop, alter")
exit(-1)
try:
start = time.time()
cur.execute(student_sql)
conn.commit()
result_time = time.time()-start
if result_time > 5:
print('************execution time in check_sql: ' + str(result_time))
else:
print('execution time in check_sql: ' + str(result_time))
except sqlite3.Error:
return -1
##############################################
# get the name of columns from the given sql #
##############################################
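# e.g. analyze_sql_to_get_columns('SELECT a, b AS c FROM t') -> ['a', 'c']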
def analyze_sql_to_get_columns(sql):
columns = []
splited_by_from = sql.lower().split('from')[0]
splited_by_select = splited_by_from.split('select')[1]
splited_by_comma = splited_by_select.split(',')
suffix = 1
for col in splited_by_comma:
        if ' as ' in col.lower():
            splited_by_as = col.lower().split(' as ')[1].strip()
if splited_by_as in columns:
splited_by_as = splited_by_as+str(suffix)
suffix = suffix+1
columns.append(splited_by_as)
else:
ele = col.lower().strip()
if ele in columns:
ele = ele+str(suffix)
suffix = suffix+1
columns.append(ele)
return columns
#########################
# get dictionary result #
#########################
def make_dict(query_result, columns):
result = {}
for j in range(len(query_result[0])):
col_data = []
for i in range(len(query_result)):
col_data.append(query_result[i][j])
if columns[0] == '*':
result[j] = col_data
else:
result[columns[j]] = col_data
return result
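# e.g. make_dict([(1, 'x'), (2, 'y')], ['id', 'name']) -> {'id': [1, 2], 'name': ['x', 'y']}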
#########################
# get the answer result #
#########################
def get_result(sql, columns):
result = {}
# try:
# cur.execute(sql)
# except sqlite3.Error:
# return {'sqlite_error':'occurs'}
start = time.time()
cur.execute(sql)
conn.commit()
query_result = cur.fetchall()
result_time = time.time()-start
if result_time > 5:
print('************execution time in get_result: ' + str(result_time))
else:
print('execution time in get_result: ' + str(result_time))
#columns = analyze_sql_to_get_columns(sql)
result = make_dict(query_result, columns)
#conn.commit()
return result
########################################################################
# compare the results from one from instructor, the other from student #
########################################################################
def compare_results(result_instructor, result_student):
# first, check if the number of attributes is same
if len(result_instructor.keys()) != len(result_student.keys()):
print('the number of columns is different')
return False
    if len(list(result_instructor.values())[0]) != len(list(result_student.values())[0]):
print('the number of values is different')
return False
# second, if it's same, compare the values
    # iterate over copies of the key lists since both dicts are mutated (pop) below
    for key_instructor in list(result_instructor.keys()):
        before = len(result_instructor.keys())
        for key_student in list(result_student.keys()):
if result_instructor[key_instructor] == result_student[key_student]:
result_instructor.pop(key_instructor)
result_student.pop(key_student)
break
after = len(result_student.keys())
if before == after:
return False
return True
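# e.g. compare_results({'a': [1, 2]}, {'b': [1, 2]}) -> True (column names may differ, only the
# value lists are matched); note that matched columns are popped from both dicts as a side effect.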
###############################################################
###############################################################
###############################################################
def get_columns(instructor_sql):
start = time.time()
cur.execute(instructor_sql.strip())
columns = [des[0] for des in cur.description]
conn.commit()
result_time = time.time() - start
if result_time > 5:
print('************execution time in get_columns: ' + str(result_time))
else:
print('execution time in get_columns: ' + str(result_time))
return columns
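# compare_using_sql (below) wraps both queries in a NOT EXISTS anti-join: an empty result set
# means every row returned by the instructor query has a NULL-aware equal row in the student
# query, which is then reported as a correct answer.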
def compare_using_sql(instructor_sql, student_sql, columns):
column_list = ''
for ele in columns:
if ele == columns[0]:
column_list = ele
else:
column_list = column_list + ',' + ele
num_of_col = len(columns)
sql = '''SELECT * FROM ({instructor}) AS qwe
WHERE NOT EXISTS (
SELECT {columns} FROM ({student}) AS asd
WHERE
CASE
WHEN qwe.{col} IS NULL AND asd.{col} IS NULL
THEN 1
WHEN qwe.{col} IS NULL AND asd.{col} IS NOT NULL
THEN 0
WHEN qwe.{col} IS NOT NULL AND asd.{col} IS NULL
THEN 0
ELSE qwe.{col} = asd.{col}
END
'''.format(instructor=instructor_sql, columns=column_list, student=student_sql, col=columns[0])
for i in range(1, num_of_col):
if num_of_col != 1:
append = '''
AND
CASE
WHEN qwe.{col} IS NULL AND asd.{col} IS NULL
THEN 1
WHEN qwe.{col} IS NULL AND asd.{col} IS NOT NULL
THEN 0
WHEN qwe.{col} IS NOT NULL AND asd.{col} IS NULL
THEN 0
ELSE qwe.{col} = asd.{col}
END
'''.format(col=columns[i])
sql = sql + append
sql = sql + ');'
start = time.time()
try:
cur.execute(sql)
except sqlite3.Error:
return 2
res = cur.fetchall()
conn.commit()
result_time = time.time() - start
if result_time > 5:
print('************execution time in compare_using_sql: ' + str(result_time))
else:
print('execution time in compare_using_sql: ' + str(result_time))
if len(res) == 0:
print ('correct answer')
return 0
else:
print('wrong answer')
return 1
###############################################################
###############################################################
###############################################################
#################
# main function #
#################
def start(instructor_sql, student_sql):
# if instructor_sql == student_sql:
# print("correct")
# return 0
if check_sql(student_sql) == -1:
# error occurs
print("line 132")
print("error")
return 2
columns_from_instrucor = get_columns(instructor_sql)
columns_from_student = get_columns(student_sql)
if 'ORDER BY' in instructor_sql.upper():
result_instructor = get_result(instructor_sql, columns_from_instrucor)
result_student = get_result(student_sql, columns_from_student)
        if 'sqlite_error' in result_student:
# error occurs
print("line 145")
print("error")
return 2
if compare_results(result_instructor, result_student) is True:
# correct
print("correct")
return 0
else:
# wrong answer
print("wrong answer")
return 1
else:
#columns = get_columns(instructor_sql)
res = compare_using_sql(instructor_sql, student_sql, columns_from_instrucor)
if res == 0:
return 0
elif res == 1:
return 1
elif res == 2:
return 2
#####################################
# to check if this module runs well #
#####################################
def check_module():
with open('input.sql', 'r') as sql:
lines = sql.readlines()
for i in range(len(lines)):
print(str(i+1)+'th:')
start_time = time.time()
start(lines[i], lines[i])
end_time = time.time()
print('it took ' + str(end_time - start_time) + ' sec')
sql.close()
conn = sqlite3.connect('example.db')
cur = conn.cursor()
if sys.argv[1] == 'check':
check_module()
else: start(sys.argv[1], sys.argv[2])
cur.close()
conn.close()
``` |
{
"source": "jihwanlee-alphago/aqt",
"score": 2
} |
#### File: jax_legacy/jax/compute_cost_utils_test.py
```python
import logging
from absl import flags
from absl.testing import absltest
from absl.testing import parameterized
from aqt.jax_legacy.jax import compute_cost_utils
from aqt.jax_legacy.jax import flax_layers as aqt_flax_layers
from aqt.jax_legacy.jax import get_bounds
from aqt.jax_legacy.jax import hlo_utils
from aqt.jax_legacy.jax import quant_config
from aqt.jax_legacy.jax import quantization
from aqt.jax_legacy.jax.quantization import QuantOps
from aqt.jax_legacy.jax.quantization import QuantType
from flax import linen as nn
from jax import random
from jax._src.lax import convolution as lax_convolution
from jax._src.lax import lax
from jax.nn import initializers
import jax.numpy as jnp
import numpy as onp
FLAGS = flags.FLAGS
class ComputeCostUtilsTest(parameterized.TestCase):
def setUp(self):
super(ComputeCostUtilsTest, self).setUp()
self.rng_key = random.PRNGKey(0)
def compare_hlo_instructions(self, hlo_no_annotation, hlo_w_annotation):
"""Compares two HLO models to check if they only differ in metadata info."""
instrs_n = []
instrs_w = []
# gather instructions from both HLO models
for computation in hlo_no_annotation.computations:
for instr in computation.instructions:
instrs_n.append(instr)
for computation in hlo_w_annotation.computations:
for instr in computation.instructions:
instrs_w.append(instr)
self.assertEqual(len(instrs_n), len(instrs_w))
for i, _ in enumerate(instrs_n):
# check instructions with the opcode 'convolution'
# the metadata field for instrs_w and instrs_n should be different.
if (instrs_n[i].opcode == 'convolution' and
instrs_w[i].opcode == 'convolution'):
self.assertNotEqual(instrs_n[i].metadata, instrs_w[i].metadata)
# remove metadata op_type and op_name
instrs_n[i].metadata.op_type = ''
instrs_w[i].metadata.op_type = ''
instrs_n[i].metadata.op_name = ''
instrs_w[i].metadata.op_name = ''
# compare the rest of the instructions.
self.assertEqual(instrs_n[i], instrs_w[i])
class TestModelWith1Dense(nn.Module):
"""Test model with a single DenseAqt layer."""
@nn.compact
def __call__(self, inputs, hparams, num_classes, dtype=jnp.float32):
output = aqt_flax_layers.DenseAqt(
features=num_classes,
dtype=dtype,
train=False,
quant_context=quant_config.QuantContext(
update_bounds=False, collect_acts_stats=False),
paxis_name='batch',
hparams=hparams,
)(inputs, padding_mask=None)
return output
class TestModelWith1Conv(nn.Module):
"""Test model with a single ConvAqt layer."""
@nn.compact
def __call__(self,
inputs,
hparams,
kernel_size,
num_filters,
strides,
dtype=jnp.float32):
output = aqt_flax_layers.ConvAqt(
features=num_filters,
kernel_size=kernel_size,
strides=strides,
use_bias=False,
dtype=dtype,
train=False,
quant_context=quant_config.QuantContext(update_bounds=False),
paxis_name='batch',
hparams=hparams)(
inputs)
return output
class TestModelWith1DynamicMatmul(nn.Module):
"""Test model with a single dynamic matmul."""
@nn.compact
def __call__(self, lhs_act, rhs_act, lhs_prec, rhs_prec):
get_bounds_hyper = get_bounds.GetBounds.Hyper(
initial_bound=10.0,
stddev_coeff=0,
absdev_coeff=0,
mix_coeff=0,
granularity=quant_config.QuantGranularity.PER_TENSOR)
lhs_act_hparams = QuantOps.ActHParams(
input_distribution='symmetric',
bounds=get_bounds_hyper,
prec=lhs_prec,
half_shift=False)
rhs_act_hparams = QuantOps.ActHParams(
input_distribution='symmetric',
bounds=get_bounds_hyper,
prec=rhs_prec,
half_shift=False)
lhs_get_bounds_params = get_bounds.GetBounds.Params(
update_stats=False, update_bounds=False, module_name='lhs')
rhs_get_bounds_params = get_bounds.GetBounds.Params(
update_stats=False, update_bounds=False, module_name='rhs')
output = quantization.quantized_dynamic_dot_general(
lhs_act=lhs_act,
rhs_act=rhs_act,
lhs_act_hparams=lhs_act_hparams,
rhs_act_hparams=rhs_act_hparams,
dot_dimension_numbers=(((1,), (0,)), ((), ())),
quant_type=QuantType.AQT,
lhs_get_bounds_params=lhs_get_bounds_params,
rhs_get_bounds_params=rhs_get_bounds_params)
return output
@parameterized.named_parameters(
# TestModelWith1Dense
dict(
testcase_name='single_dense_layer_bfloat16',
modelclass=TestModelWith1Dense,
input_shapes=[(1, 8)],
model_kwargs={
'num_classes': 2,
'hparams': aqt_flax_layers.DenseAqt.HParams(
weight_prec=None,
quant_type=QuantType.FAKE_QUANT,
quant_act=None,
weight_quant_granularity=quant_config.QuantGranularity.PER_CHANNEL,
weight_half_shift=False
),
},
expected_compute_cost=8 * 2 * (16 * 16),
expected_compute_cost_ratio=1.0,
expected_compute_cost_linear=8 * 2 * (16),
expected_compute_cost_ratio_linear=1.0,
expected_memory_cost=8 * 2 * (16),
expected_memory_cost_ratio=1.0,
),
dict(
testcase_name='single_dense_layer_w8_a8',
modelclass=TestModelWith1Dense,
input_shapes=[(1, 8)],
model_kwargs={
'num_classes': 2,
'hparams': aqt_flax_layers.DenseAqt.HParams(
weight_prec=8,
quant_type=QuantType.FAKE_QUANT,
quant_act=QuantOps.ActHParams(
input_distribution=QuantOps.ActHParams.InputDistribution.POSITIVE,
prec=8,
bounds=1.0,
half_shift=False,
),
weight_quant_granularity=quant_config.QuantGranularity.PER_CHANNEL,
weight_half_shift=False
),
},
expected_compute_cost=8 * 2 * (8 * 8),
expected_compute_cost_ratio=0.25,
expected_compute_cost_linear=8 * 2 * (8),
expected_compute_cost_ratio_linear=0.5,
expected_memory_cost=8 * 2 * (8),
expected_memory_cost_ratio=0.5,
),
# TestModelWith1Conv
dict(
testcase_name='single_conv_layer_bfloat16',
modelclass=TestModelWith1Conv,
input_shapes=[(1, 8, 8, 3)],
model_kwargs={
'kernel_size': (3, 3),
'num_filters': 16,
'strides': (1, 1),
'hparams': aqt_flax_layers.ConvAqt.HParams(
weight_prec=None,
quant_type=QuantType.FAKE_QUANT,
quant_act=None,
weight_half_shift=False,
),
},
expected_compute_cost=(3 * 3) * (8 * 8) * 3 * 16 * (16 * 16),
expected_compute_cost_ratio=1.0,
expected_compute_cost_linear=(3 * 3) * (8 * 8) * 3 * 16 * (16),
expected_compute_cost_ratio_linear=1.0,
expected_memory_cost=(3 * 3) * 3 * 16 * (16),
expected_memory_cost_ratio=1.0,
),
dict(
testcase_name='single_conv_layer_bfloat16_strided',
modelclass=TestModelWith1Conv,
input_shapes=[(1, 8, 8, 3)],
model_kwargs={
'kernel_size': (3, 3),
'num_filters': 16,
'strides': (4, 2),
'hparams': aqt_flax_layers.ConvAqt.HParams(
weight_prec=None,
quant_type=QuantType.FAKE_QUANT,
quant_act=None,
weight_half_shift=False,
),
},
expected_compute_cost=(3 * 3) * ((8 / 4) * (8 / 2)) * 3 * 16 * (16 * 16),
expected_compute_cost_ratio=1.0,
expected_compute_cost_linear=(3 * 3) * ((8 / 4) * (8 / 2)) * 3 * 16 * (16),
expected_compute_cost_ratio_linear=1.0,
expected_memory_cost=(3 * 3) * 3 * 16 * (16),
expected_memory_cost_ratio=1.0,
),
dict(
testcase_name='single_conv_layer_bfloat16_3d',
modelclass=TestModelWith1Conv,
input_shapes=[(1, 8, 8, 8, 3)],
model_kwargs={
'kernel_size': (3, 3, 3),
'num_filters': 16,
'strides': (1, 1, 1),
'hparams': aqt_flax_layers.ConvAqt.HParams(
weight_prec=None,
quant_type=QuantType.FAKE_QUANT,
quant_act=None,
weight_half_shift=False,
),
},
expected_compute_cost=(3 * 3 * 3) * (8 * 8 * 8) * 3 * 16 * (16 * 16),
expected_compute_cost_ratio=1.0,
expected_compute_cost_linear=(3 * 3 * 3) * (8 * 8 * 8) * 3 * 16 * (16),
expected_compute_cost_ratio_linear=1.0,
expected_memory_cost=(3 * 3 * 3) * 3 * 16 * (16),
expected_memory_cost_ratio=1.0,
),
dict(
testcase_name='single_conv_layer_w4_a2',
modelclass=TestModelWith1Conv,
input_shapes=[(1, 8, 8, 3)],
model_kwargs={
'kernel_size': (3, 3),
'num_filters': 16,
'strides': (1, 1),
'hparams': aqt_flax_layers.ConvAqt.HParams(
weight_prec=4,
quant_type=QuantType.FAKE_QUANT,
quant_act=QuantOps.ActHParams(
input_distribution=QuantOps.ActHParams.InputDistribution.POSITIVE,
prec=2,
bounds=1.0,
half_shift=False,
),
weight_half_shift=False,
),
},
expected_compute_cost=(3 * 3) * (8 * 8) * 3 * 16 * (4 * 2),
expected_compute_cost_ratio=0.03125,
expected_compute_cost_linear=(3 * 3) * (8 * 8) * 3 * 16 * (4),
expected_compute_cost_ratio_linear=0.25,
expected_memory_cost=(3 * 3) * 3 * 16 * (4),
expected_memory_cost_ratio=0.25,
),
# TestModelWith1DynamicMatmul
dict(
testcase_name='single_dynamic_matmul_layer_bfloat16',
modelclass=TestModelWith1DynamicMatmul,
input_shapes=[(1, 8), (8, 1)],
model_kwargs={'lhs_prec': None,
'rhs_prec': None},
expected_compute_cost=8 * (16 * 16),
expected_compute_cost_ratio=1.0,
expected_compute_cost_linear=8 * (16),
expected_compute_cost_ratio_linear=1.0,
expected_memory_cost=0,
expected_memory_cost_ratio=1.0,
),
dict(
testcase_name='single_dynamic_matmul_layer_l8_r8',
modelclass=TestModelWith1DynamicMatmul,
input_shapes=[(1, 8), (8, 1)],
model_kwargs={'lhs_prec': 8,
'rhs_prec': 8},
expected_compute_cost=8 * (8 * 8),
expected_compute_cost_ratio=0.25,
expected_compute_cost_linear=8 * 8,
expected_compute_cost_ratio_linear=0.5,
expected_memory_cost=0,
expected_memory_cost_ratio=1.0,
),
dict(
testcase_name='single_dynamic_matmul_layer_l8_r4',
modelclass=TestModelWith1DynamicMatmul,
input_shapes=[(1, 8), (8, 1)],
model_kwargs={'lhs_prec': 8,
'rhs_prec': 4},
expected_compute_cost=8 * (8 * 4),
expected_compute_cost_ratio=0.125,
expected_compute_cost_linear=8 * (8),
expected_compute_cost_ratio_linear=0.5,
expected_memory_cost=0,
expected_memory_cost_ratio=1.0,
),
) # pylint: disable=line-too-long
def test_estimate_simple_model_cost(
self, modelclass, input_shapes, model_kwargs, expected_compute_cost,
expected_compute_cost_ratio, expected_compute_cost_linear,
expected_compute_cost_ratio_linear, expected_memory_cost,
expected_memory_cost_ratio):
module = modelclass()
input_shapes_with_type = [(sh, jnp.float32) for sh in input_shapes]
dummy_inputs = [
jnp.ones(input_shape, dtype=dtype)
for (input_shape, dtype) in input_shapes_with_type
]
init_state = module.init(random.PRNGKey(0), *dummy_inputs, **model_kwargs)
hlo_proto = hlo_utils.load_hlo_proto_from_model(module, init_state,
input_shapes,
**model_kwargs)
compute_result = compute_cost_utils.estimate_compute_cost(hlo_proto)
memory_result = compute_cost_utils.estimate_memory_cost(hlo_proto)
logging.info('compute cost result is %s', compute_result)
logging.info('memory cost result is %s', memory_result)
self.assertEqual(compute_result['compute_cost'], expected_compute_cost)
self.assertEqual(memory_result['memory_cost'], expected_memory_cost)
self.assertEqual(compute_result['compute_cost_ratio_to_bfloat16'],
expected_compute_cost_ratio)
self.assertEqual(memory_result['memory_cost_ratio_to_bfloat16'],
expected_memory_cost_ratio)
self.assertEqual(compute_result['compute_cost_linear'],
expected_compute_cost_linear)
self.assertEqual(compute_result['compute_cost_ratio_to_bfloat16_linear'],
expected_compute_cost_ratio_linear)
@parameterized.named_parameters(
# TestModelWith1Dense
dict(
testcase_name='single_dense_layer_bfloat16_batch_size',
modelclass=TestModelWith1Dense,
input_shape_per_sample=(16,),
model_kwargs={
'num_classes':
20,
'hparams':
aqt_flax_layers.DenseAqt.HParams(
weight_prec=None,
quant_act=None,
quant_type=QuantType.FAKE_QUANT,
weight_quant_granularity=quant_config.QuantGranularity
.PER_CHANNEL,
weight_half_shift=False)
},
),
# TestModelWith1Conv
dict(
testcase_name='single_conv_layer_bfloat16_batch_size',
modelclass=TestModelWith1Conv,
input_shape_per_sample=(16, 16, 3),
model_kwargs={
'kernel_size': (3, 3),
'num_filters':
16,
'strides': (2, 2),
'hparams':
aqt_flax_layers.ConvAqt.HParams(
weight_prec=None,
quant_act=None,
quant_type=QuantType.FAKE_QUANT,
weight_half_shift=False,
)
},
),
)
def test_batch_size_has_no_effect_on_cost(self, modelclass,
input_shape_per_sample,
model_kwargs):
expected_compute_cost = None
expected_memory_cost = None
batch_size_list = [32, 64, 128, 256, 512, 1024]
module = modelclass()
# Sweep over the batch size list
for batch_size in batch_size_list:
input_shape = (batch_size,) + input_shape_per_sample
init_state = module.init(
random.PRNGKey(0), jnp.ones(input_shape, jnp.float32), **model_kwargs)
hlo_proto = hlo_utils.load_hlo_proto_from_model(module, init_state,
[input_shape],
**model_kwargs)
del init_state
compute_result = compute_cost_utils.estimate_compute_cost(hlo_proto)
memory_result = compute_cost_utils.estimate_memory_cost(hlo_proto)
# Save the first cost and compare it with the rest
if expected_compute_cost is None:
expected_compute_cost = compute_result['compute_cost']
else:
self.assertEqual(compute_result['compute_cost'], expected_compute_cost)
if expected_memory_cost is None:
expected_memory_cost = memory_result['memory_cost']
else:
self.assertEqual(memory_result['memory_cost'], expected_memory_cost)
@parameterized.named_parameters(
dict(testcase_name='quant_8bit', weight_prec=8),
dict(testcase_name='quant_4bit', weight_prec=4),
)
def test_check_value_inside_and_outside_of_context_conv_general(
self, weight_prec):
original_op_name = 'conv_general_dilated'
# The 'name' in primitive should change in the context in 'flax_layers'
# if the context is enabled
self.assertEqual(original_op_name,
lax_convolution.conv_general_dilated_p.name)
with compute_cost_utils.ConvMetadataMonkeyPatch(
weight_prec=weight_prec, act_prec=None):
self.assertNotEqual(original_op_name,
lax_convolution.conv_general_dilated_p.name)
self.assertEqual(original_op_name,
lax_convolution.conv_general_dilated_p.name)
@parameterized.named_parameters(
dict(testcase_name='quant_8bit', weight_prec=8, acts_prec=8),
dict(testcase_name='quant_4bit', weight_prec=4, acts_prec=4),
)
def test_annotation_only_changes_hlo_metadata_conv(self, weight_prec,
acts_prec):
FLAGS.metadata_enabled = False
quant_act = quantization.QuantOps.ActHParams(
input_distribution=QuantOps.ActHParams.InputDistribution.SYMMETRIC,
prec=acts_prec,
bounds=1.0,
half_shift=False)
input_shape = (1, 8, 8, 3)
module_no_annotation = aqt_flax_layers.ConvAqt(
features=4,
kernel_size=(3, 3),
padding='VALID',
paxis_name='batch',
quant_context=quant_config.QuantContext(update_bounds=False),
train=False,
hparams=aqt_flax_layers.ConvAqt.HParams(
weight_prec=weight_prec,
quant_act=quant_act,
quant_type=QuantType.FAKE_QUANT,
weight_half_shift=False),
kernel_init=initializers.ones,
bias_init=initializers.ones,
dtype=jnp.float32)
init_state = module_no_annotation.init(self.rng_key,
jnp.ones(input_shape, jnp.float32))
output_no_annotation = module_no_annotation.apply(init_state,
jnp.ones(input_shape))
hlo_no_annotation = hlo_utils.load_hlo_proto_from_model(
module_no_annotation, init_state, [input_shape])
del init_state
FLAGS.metadata_enabled = True
module_w_annotation = aqt_flax_layers.ConvAqt(
features=4,
kernel_size=(3, 3),
padding='VALID',
paxis_name='batch',
quant_context=quant_config.QuantContext(update_bounds=False),
train=False,
hparams=aqt_flax_layers.ConvAqt.HParams(
weight_prec=weight_prec,
quant_act=quant_act,
quant_type=QuantType.FAKE_QUANT,
weight_half_shift=False),
kernel_init=initializers.ones,
bias_init=initializers.ones,
dtype=jnp.float32)
init_state = module_w_annotation.init(self.rng_key,
jnp.ones(input_shape, jnp.float32))
output_w_annotation = module_w_annotation.apply(init_state,
jnp.ones(input_shape))
hlo_w_annotation = hlo_utils.load_hlo_proto_from_model(
module_w_annotation, init_state, [input_shape])
del init_state
onp.testing.assert_array_equal(output_no_annotation, output_w_annotation)
self.compare_hlo_instructions(hlo_no_annotation, hlo_w_annotation)
@parameterized.named_parameters(
dict(testcase_name='quant_8bit', weight_prec=8),
dict(testcase_name='quant_4bit', weight_prec=4),
)
def test_check_value_inside_and_outside_of_context_dot_general(
self, weight_prec):
original_op_name = 'dot_general'
# The 'name' in primitive should change in the context in 'flax_layers'
# if the context is enabled.
self.assertEqual(original_op_name, lax.dot_general_p.name)
with compute_cost_utils.DotMetadataMonkeyPatch(
lhs_prec=None, rhs_prec=weight_prec, rhs_is_weight=True):
self.assertNotEqual(original_op_name, lax.dot_general_p.name)
self.assertEqual(original_op_name, lax.dot_general_p.name)
@parameterized.named_parameters(
dict(
testcase_name='quant_8bit',
weight_prec=8,
acts_prec=8,
),)
def test_annotation_only_changes_hlo_metadata_dense(self, weight_prec,
acts_prec):
FLAGS.metadata_enabled = False
quant_act = quantization.QuantOps.ActHParams(
input_distribution=QuantOps.ActHParams.InputDistribution.SYMMETRIC,
prec=acts_prec,
bounds=1.0,
half_shift=False)
input_shape = (1, 16)
module_no_annotation = aqt_flax_layers.DenseAqt(
features=4,
use_bias=False,
quant_context=quant_config.QuantContext(
update_bounds=False, collect_acts_stats=False),
paxis_name='batch',
train=False,
hparams=aqt_flax_layers.DenseAqt.HParams(
weight_prec=weight_prec,
quant_act=quant_act,
quant_type=QuantType.FAKE_QUANT,
weight_quant_granularity=quant_config.QuantGranularity.PER_CHANNEL,
weight_half_shift=False),
dtype=jnp.float32)
init_state = module_no_annotation.init(
self.rng_key, jnp.ones(input_shape, jnp.float32), padding_mask=None)
output_no_annotation = module_no_annotation.apply(
init_state, jnp.ones(input_shape), padding_mask=None)
hlo_no_annotation = hlo_utils.load_hlo_proto_from_model(
module_no_annotation, init_state, [input_shape], padding_mask=None)
del init_state
FLAGS.metadata_enabled = True
module_w_annotation = aqt_flax_layers.DenseAqt(
features=4,
use_bias=False,
paxis_name='batch',
train=False,
quant_context=quant_config.QuantContext(
update_bounds=False, collect_acts_stats=False),
dtype=jnp.float32,
hparams=aqt_flax_layers.DenseAqt.HParams(
weight_prec=weight_prec,
quant_act=quant_act,
quant_type=QuantType.FAKE_QUANT,
weight_quant_granularity=quant_config.QuantGranularity.PER_CHANNEL,
weight_half_shift=False),
)
init_state = module_w_annotation.init(
self.rng_key, jnp.ones(input_shape, jnp.float32), padding_mask=None)
output_w_annotation = module_w_annotation.apply(
init_state, jnp.ones(input_shape), padding_mask=None)
hlo_w_annotation = hlo_utils.load_hlo_proto_from_model(
module_w_annotation, init_state, [input_shape], padding_mask=None)
del init_state
onp.testing.assert_array_equal(output_no_annotation, output_w_annotation)
self.compare_hlo_instructions(hlo_no_annotation, hlo_w_annotation)
if __name__ == '__main__':
FLAGS.metadata_enabled = True # Passes quantization information to HLO
absltest.main()
```
#### File: jax_legacy/jax/get_bounds_test.py
```python
from absl.testing import absltest
from absl.testing import parameterized
from aqt.jax_legacy.jax import get_bounds
from aqt.jax_legacy.jax import quant_config
from aqt.jax_legacy.jax import test_utils
import flax
from jax import random
import jax.numpy as jnp
import numpy as onp
test_utils.configure_jax()
class GetBoundsTest(parameterized.TestCase):
def setUp(self):
super(GetBoundsTest, self).setUp()
self.rng = random.PRNGKey(0)
key1, key2 = random.split(self.rng)
self.key2 = key2
self.x = random.normal(key1, (4, 3, 2))
self.x2 = jnp.ones((4, 3, 2))
self.hyperparam = get_bounds.GetBounds.Hyper(
initial_bound=6.0,
stddev_coeff=2.0,
absdev_coeff=1.5,
mix_coeff=0.7,
reset_stats=False,
granularity=quant_config.QuantGranularity.PER_CHANNEL)
def init_model(self,
update_bounds,
update_stats=True,
reset_stats=False,
use_cams=False,
granularity=quant_config.QuantGranularity.PER_TENSOR,
ema_coeff=None):
self.hyperparam = get_bounds.GetBounds.Hyper(
initial_bound=self.hyperparam.initial_bound,
stddev_coeff=self.hyperparam.stddev_coeff,
absdev_coeff=self.hyperparam.absdev_coeff,
mix_coeff=self.hyperparam.mix_coeff,
reset_stats=reset_stats,
use_cams=use_cams,
ema_coeff=ema_coeff,
granularity=granularity)
gb_bounds_params = get_bounds.GetBounds.Params(
update_bounds=update_bounds, update_stats=update_stats)
bounds_module = get_bounds.GetBounds(hyper=self.hyperparam)
init_state = bounds_module.init(
self.key2, self.x, bounds_params=gb_bounds_params)
return bounds_module, init_state, gb_bounds_params
# TODO(shivaniagrawal): parametrize test for different values of axis
@parameterized.named_parameters(
dict(testcase_name='update_bound', update_bound=True),
dict(testcase_name='do_not_update', update_bound=False),
)
def test_get_bounds_init(self, update_bound):
_, init_state, _ = self.init_model(update_bound)
init_state_stats = init_state['get_bounds']['stats']
onp.testing.assert_array_equal(init_state_stats.n, 0)
onp.testing.assert_array_equal(init_state_stats.mean, 0)
onp.testing.assert_array_equal(init_state_stats.mean_abs, 0)
onp.testing.assert_array_equal(init_state_stats.mean_sq, 0)
onp.testing.assert_array_equal(init_state['get_bounds']['bounds'], 6.)
# TODO(shivaniagrawal): more elaborate testing here as follows:
# - run with (update_stats, update_bounds) = (False, False)
# check that neither state changed
# - run with (update_stats, update_bounds) = (True, False)
# check that stats.n increased, bound unchanged
# - run with (update_stats, update_bounds) = (False, True)
# check that stats.n unchanged but bound updated.
# - run again with (update_stats, update_bounds) = (False, True)
# check that both unchanged (update_bounds is idempotent)
# - run again with (update_stats, update_bounds) = (True, True)
# check that both changed.
@parameterized.named_parameters(
dict(
testcase_name='update_bound_reset_stats',
update_bound=True,
reset_stats=True),
dict(
testcase_name='no_update_bound_reset_stats',
update_bound=False,
reset_stats=True),
dict(
testcase_name='update_bound_no_reset_stats',
update_bound=True,
reset_stats=False),
dict(
testcase_name='no_update_bound_no_reset_stats',
update_bound=False,
reset_stats=False),
)
def test_update_stats(self, update_bound, reset_stats):
model, init_state, params = self.init_model(
update_bound,
reset_stats=reset_stats,
granularity=quant_config.QuantGranularity.PER_TENSOR)
_, state_0 = model.apply(
init_state, self.x, bounds_params=params, mutable='get_bounds')
stats_0_stats = state_0['get_bounds']['stats']
if reset_stats and update_bound:
onp.testing.assert_array_equal(stats_0_stats.n, 0)
else:
onp.testing.assert_array_equal(stats_0_stats.n, 1)
_, state = model.apply(
state_0, self.x2, bounds_params=params, mutable='get_bounds')
stats = state['get_bounds']['stats']
if reset_stats and update_bound:
onp.testing.assert_array_equal(stats.n, 0)
expected_updated_mean = 0.
onp.testing.assert_array_equal(expected_updated_mean, stats.mean)
else:
onp.testing.assert_array_equal(stats.n, 2)
expected_updated_mean = 1 / 2 * (1 + (stats_0_stats.mean))
onp.testing.assert_array_equal(expected_updated_mean, stats.mean)
@parameterized.named_parameters(
dict(
testcase_name='update_bound_reset_stats',
update_bound=True,
reset_stats=True),
dict(
testcase_name='no_update_bound_reset_stats',
update_bound=False,
reset_stats=True),
dict(
testcase_name='update_bound_no_reset_stats',
update_bound=True,
reset_stats=False),
dict(
testcase_name='no_update_bound_no_reset_stats',
update_bound=False,
reset_stats=False),
)
def test_update_stats_false(self, update_bound, reset_stats):
model, init_state, params = self.init_model(
update_bound, update_stats=False, reset_stats=reset_stats)
_, state_0 = model.apply(
init_state, self.x, bounds_params=params, mutable='get_bounds')
stats_0_stats = state_0['get_bounds']['stats']
onp.testing.assert_array_equal(stats_0_stats.n, 0)
_, state = model.apply(
state_0, self.x2, bounds_params=params, mutable='get_bounds')
onp.testing.assert_array_equal(state['get_bounds']['stats'].n, 0)
expected_updated_mean = 0.
onp.testing.assert_array_equal(expected_updated_mean,
state['get_bounds']['stats'].mean)
@parameterized.named_parameters(
dict(
testcase_name='update_bounds_true',
update_stats=False,
update_bounds=True),
dict(
testcase_name='update_stats_true',
update_stats=True,
update_bounds=False),
dict(testcase_name='both_true', update_stats=True, update_bounds=True),
)
def test_update_state_with_mutable_false_context_raises_error(
self, update_stats, update_bounds):
model, init_state, _ = self.init_model(True)
with self.assertRaises(flax.errors.ModifyScopeVariableError):
model.apply(
init_state,
self.x2,
bounds_params=get_bounds.GetBounds.Params(
update_stats=update_stats, update_bounds=update_bounds),
mutable=False)
@parameterized.named_parameters(
dict(
testcase_name='update_bound_no_ucb',
update_bound=True,
use_cams=False),
dict(
testcase_name='update_bound_with_ucb',
update_bound=True,
use_cams=True),
dict(testcase_name='do_not_update', update_bound=False, use_cams=False),
)
def test_get_bounds_update_bounds(self, update_bound, use_cams=False):
model, init_state, params = self.init_model(update_bound, use_cams=use_cams)
y, state_0 = model.apply(
init_state, self.x, bounds_params=params, mutable='get_bounds')
if not update_bound:
onp.testing.assert_array_equal(state_0['get_bounds']['bounds'], 6.)
onp.testing.assert_array_equal(y, 6.)
else:
stats_0_stats = state_0['get_bounds']['stats']
if use_cams:
expected_y = onp.abs(onp.mean(
self.x)) + self.hyperparam.stddev_coeff * onp.std(self.x)
else:
expected_y = (
self.hyperparam.stddev_coeff * self.hyperparam.mix_coeff *
jnp.sqrt(stats_0_stats.mean_sq) + self.hyperparam.absdev_coeff *
(1 - self.hyperparam.mix_coeff) * stats_0_stats.mean_abs)
onp.testing.assert_array_equal(state_0['get_bounds']['bounds'], y)
onp.testing.assert_allclose(expected_y, y)
y2, state = model.apply(
state_0, self.x2, bounds_params=params, mutable='get_bounds')
onp.testing.assert_array_equal(state['get_bounds']['bounds'], y2)
@parameterized.named_parameters(
dict(testcase_name='no_ema', ema_coeff=None),
dict(testcase_name='ema_.8', ema_coeff=0.8),
dict(testcase_name='ema_.1', ema_coeff=0.1))
def test_ema_coeff(self, ema_coeff):
x1 = jnp.array(1.0)
x2 = jnp.array(-2.0)
model, state, params = self.init_model(False, ema_coeff=ema_coeff)
_, state1 = model.apply(
state, x1, bounds_params=params, mutable='get_bounds')
_, state2 = model.apply(
state1, x2, bounds_params=params, mutable='get_bounds')
stats = state2['get_bounds']['stats']
def compute_ema_two_steps(x1, x2, alpha):
initial_value = 0.0
ema_step_1 = initial_value + alpha * (x1 - initial_value)
ema_step_2 = ema_step_1 + alpha * (x2 - ema_step_1)
return ema_step_2
if ema_coeff is None:
exp_mean = (x1 + x2) / 2
exp_mean_sq = (x1**2 + x2**2) / 2
exp_mean_abs = (jnp.abs(x1) + jnp.abs(x2)) / 2
else:
exp_mean = compute_ema_two_steps(x1, x2, ema_coeff)
exp_mean_sq = compute_ema_two_steps(x1**2, x2**2, ema_coeff)
exp_mean_abs = compute_ema_two_steps(jnp.abs(x1), jnp.abs(x2), ema_coeff)
onp.testing.assert_allclose(stats.mean, exp_mean)
onp.testing.assert_allclose(stats.mean_sq, exp_mean_sq)
onp.testing.assert_allclose(stats.mean_abs, exp_mean_abs)
print(stats)
return
if __name__ == '__main__':
absltest.main()
```
#### File: jax_legacy/utils/config_schema_utils_test.py
```python
import json
from absl.testing import absltest
from absl.testing import parameterized
from aqt.jax_legacy.utils import config_schema_utils
import ml_collections
class MakeReferenceRecursiveTest(absltest.TestCase):
def test_scalar_fields(self):
config = ml_collections.ConfigDict({'parent_field': 1})
config.child_field = config_schema_utils.make_reference(
config, 'parent_field')
# 'child_field' is a reference to 'parent_field'. Changes to
# 'parent_field' propagate to 'child_field'.
self.assertEqual(config.parent_field, 1)
self.assertEqual(config.child_field, 1)
config.parent_field = 2
self.assertEqual(config.parent_field, 2)
self.assertEqual(config.child_field, 2)
# But changes to 'child_field' to NOT propagate back up to
# 'parent_field'.
config.child_field = 3
self.assertEqual(config.parent_field, 2)
self.assertEqual(config.child_field, 3)
config.parent_field = 4
self.assertEqual(config.parent_field, 4)
# Reference is broken after 'child_field' was overridden earlier.
self.assertEqual(config.child_field, 3)
def test_nested_fields(self):
config = ml_collections.ConfigDict({'parent': {'x': 1}})
config.child = config_schema_utils.make_reference(config, 'parent')
# In this case, 'config.child.x' is a reference to 'config.parent.x', but
# note that 'config.child' is NOT a reference to 'config.parent'!
self.assertEqual(config.parent.x, 1)
self.assertEqual(config.child.x, 1)
config.parent.x = 2
self.assertEqual(config.parent.x, 2)
self.assertEqual(config.child.x, 2)
config.parent = ml_collections.ConfigDict({'x': 3})
self.assertEqual(config.parent.x, 3)
# In this case, config.parent is a new Python object unrelated to the old
# config.parent. Since config.child is a reference to the old config.parent,
# it has no connection to the new config.parent.
self.assertEqual(config.child.x, 2)
# However, this works as intended since the 'update' function assigns new
# values to existing leaf nodes, preserving the reference structure between
# parent and child internal nodes. Using this syntax is recommended for
# updating many fields at once.
config = ml_collections.ConfigDict({'parent': {'x': 1, 'y': 'hello'}})
config.child = config_schema_utils.make_reference(config, 'parent')
config.parent.update({'x': 3, 'y': 'goodbye'})
self.assertEqual(config.parent.x, 3)
self.assertEqual(config.parent.y, 'goodbye')
self.assertEqual(config.child.x, 3)
self.assertEqual(config.child.y, 'goodbye')
class SetDefaultReferenceTest(absltest.TestCase):
def test_when_child_field_is_list(self):
# Test when 'field' parameter of set_default_reference is a list
# of specific fields. We expect a new reference to be created for each
# element in the list.
parent = ml_collections.ConfigDict({'x': 1, 'y': 2, 'z': 3})
child = ml_collections.ConfigDict()
config_schema_utils.set_default_reference(child, parent, ['x', 'y'])
self.assertEqual((parent.x, parent.y), (1, 2))
self.assertEqual((child.x, child.y), (1, 2))
parent.y = 5
self.assertEqual((parent.x, parent.y), (1, 5))
self.assertEqual((child.x, child.y), (1, 5))
child.y = 10
self.assertEqual((parent.x, parent.y), (1, 5))
self.assertEqual((child.x, child.y), (1, 10))
def test_reference_to_self(self):
# Test adding a new field to a configdict which is a reference to an
# existing field in the same configdict instance.
config = ml_collections.ConfigDict({'parent': 1})
config_schema_utils.set_default_reference(
config, config, 'child', parent_field='parent')
self.assertEqual(config.child, 1)
self.assertEqual(config.parent, 1)
config.parent = 5
self.assertEqual(config.parent, 5)
self.assertEqual(config.child, 5)
config.child = 10
self.assertEqual(config.parent, 5)
self.assertEqual(config.child, 10)
class BaseConfigTest(parameterized.TestCase):
@parameterized.parameters(dict(use_auto_acts=True), dict(use_auto_acts=False))
def test_precision_propagates(self, use_auto_acts):
config = config_schema_utils.get_base_config(use_auto_acts, fp_quant=False)
# Set the global precision to 4 bits.
config.prec = 4
# Set the global half_shift flag to False
config.half_shift = False
# Test that this sets the weight and activation to 4 as well.
self.assertEqual(config.weight_prec, 4)
self.assertEqual(config.quant_act.prec, 4)
# Test that this sets the weight_half_shift and act half_shift to False
self.assertEqual(config.weight_half_shift, False)
self.assertEqual(config.quant_act.half_shift, False)
# Set the global precision to None, checks whether referencing to None
# works well.
config.prec = None
# Test that this sets the weight and activation to None as well.
self.assertIsNone(config.weight_prec, None)
self.assertIsNone(config.quant_act.prec, None)
@parameterized.parameters(dict(use_auto_acts=True), dict(use_auto_acts=False))
def test_fp_precision_propagates(self, use_auto_acts):
config = config_schema_utils.get_base_config(use_auto_acts, fp_quant=True)
config.prec.is_scaled = False
# Set the global precision to 4 bits.
config.prec.fp_spec.update({'exp_min': -3, 'exp_max': 5, 'sig_bits': 2})
expected_prec_dict = {
'is_scaled': False,
'fp_spec': {
'exp_min': -3,
'exp_max': 5,
'sig_bits': 2
}
}
# Test that this sets the weight and activation to 4 as well.
self.assertEqual(config.weight_prec.to_dict(), expected_prec_dict)
self.assertEqual(config.quant_act.prec.to_dict(), expected_prec_dict)
def test_auto_acts_parameter(self):
# If use_auto_acts is False, then the bounds should be a single scalar that
# specifies the fixed bound; 'None' by default.
config = config_schema_utils.get_base_config(
use_auto_acts=False, fp_quant=False)
self.assertIsNone(config.quant_act.bounds)
# If use_auto_acts is True, it should have the same structure as the
# GetBounds.Hyper dataclass.
config = config_schema_utils.get_base_config(
use_auto_acts=True, fp_quant=False)
self.assertIn('initial_bound', config.quant_act.bounds)
# Because the config dict is locked, it shouldn't be possible to change it
# back to fixed bounds if it was created with use_auto_acts=True.
with self.assertRaises(TypeError):
config.quant_act.bounds = 1.0
@parameterized.parameters(
dict(use_auto_acts=True, fp_quant=False),
dict(use_auto_acts=False, fp_quant=False),
dict(use_auto_acts=False, fp_quant=True))
def test_schema_matches_expected(self, use_auto_acts, fp_quant):
# This tests that the schema of the configdict returned by 'base_config',
# once all references are resolved, matches an expected schema. 'Schema'
# here means the names and structure of fields at each level of the
# configuration hierarchy. A value of 'None' in the expected schemas defined
# below indicates a real configuration would have a concrete scalar value
# there.
if fp_quant:
prec = {
'fp_spec': {
'exp_min': None,
'exp_max': None,
'sig_bits': None,
},
'is_scaled': None,
}
else:
prec = None
if use_auto_acts:
quant_act_schema = {
'bounds': {
'initial_bound': None,
'stddev_coeff': None,
'absdev_coeff': None,
'mix_coeff': None,
'reset_stats': None,
'ema_coeff': None,
'use_cams': None,
'exclude_zeros': None,
'use_mean_of_max': None,
'granularity': None
},
'input_distribution': None,
'prec': prec,
'half_shift': None,
}
else:
quant_act_schema = {
'bounds': None,
'input_distribution': None,
'prec': prec,
'half_shift': None,
}
expected_top_level_schema = {
'metadata': {
'description': None,
'hyper_str': None
},
'weight_decay': None,
'activation_bound_update_freq': None,
'activation_bound_start_step': None,
'prec': prec,
'half_shift': None,
'weight_prec': prec,
'weight_half_shift': None,
'quant_type': None,
'quant_act': quant_act_schema,
'weight_quant_granularity': None,
}
config = config_schema_utils.get_base_config(
use_auto_acts=use_auto_acts, fp_quant=fp_quant)
# This round-trip conversion from JSON forces all references to resolve to
# concrete values.
config_reified = json.loads(config.to_json())
# This test is not interested in checking the specific values of fields in
# the configuration, but only that the schema of the hierarchies
# are the same. Thus we all set the value of leaf nodes in the config to
# 'None' before checking that the actual and expected configuration
# structures are the same.
def set_leaves_to_none(config):
# We are at an intermediate node in the tree-structured input, which could
# either be in the form of a dictionary or a list of other nodes in the
# tree.
if isinstance(config, dict):
return {key: set_leaves_to_none(value) for key, value in config.items()}
elif isinstance(config, list):
return [set_leaves_to_none(value) for value in config]
# We are at a leaf node in the tree-structured input.
else:
return None
self.assertSameStructure(
set_leaves_to_none(config_reified), expected_top_level_schema)
if __name__ == '__main__':
absltest.main()
``` |
{
"source": "jihwanp/HOTR_-",
"score": 2
} |
#### File: data/evaluators/hico_eval.py
```python
import numpy as np
from collections import defaultdict
class HICOEvaluator():
def __init__(self, preds, gts, rare_triplets, non_rare_triplets, correct_mat):
self.overlap_iou = 0.5
self.max_hois = 100
self.rare_triplets = rare_triplets
self.non_rare_triplets = non_rare_triplets
self.fp = defaultdict(list)
self.tp = defaultdict(list)
self.score = defaultdict(list)
self.sum_gts = defaultdict(lambda: 0)
self.gt_triplets = []
self.preds = []
for img_preds in preds:
img_preds = {k: v.to('cpu').numpy() for k, v in img_preds.items() if k != 'hoi_recognition_time'}
bboxes = [{'bbox': bbox, 'category_id': label} for bbox, label in zip(img_preds['boxes'], img_preds['labels'])]
hoi_scores = img_preds['verb_scores']
verb_labels = np.tile(np.arange(hoi_scores.shape[1]), (hoi_scores.shape[0], 1))
subject_ids = np.tile(img_preds['sub_ids'], (hoi_scores.shape[1], 1)).T
object_ids = np.tile(img_preds['obj_ids'], (hoi_scores.shape[1], 1)).T
hoi_scores = hoi_scores.ravel()
verb_labels = verb_labels.ravel()
subject_ids = subject_ids.ravel()
object_ids = object_ids.ravel()
if len(subject_ids) > 0:
object_labels = np.array([bboxes[object_id]['category_id'] for object_id in object_ids])
masks = correct_mat[verb_labels, object_labels]
hoi_scores *= masks
hois = [{'subject_id': subject_id, 'object_id': object_id, 'category_id': category_id, 'score': score} for
subject_id, object_id, category_id, score in zip(subject_ids, object_ids, verb_labels, hoi_scores)]
hois.sort(key=lambda k: (k.get('score', 0)), reverse=True)
hois = hois[:self.max_hois]
else:
hois = []
self.preds.append({
'predictions': bboxes,
'hoi_prediction': hois
})
self.gts = []
for img_gts in gts:
img_gts = {k: v.to('cpu').numpy() for k, v in img_gts.items() if k != 'id'}
self.gts.append({
'annotations': [{'bbox': bbox, 'category_id': label} for bbox, label in zip(img_gts['boxes'], img_gts['labels'])],
'hoi_annotation': [{'subject_id': hoi[0], 'object_id': hoi[1], 'category_id': hoi[2]} for hoi in img_gts['hois']]
})
for hoi in self.gts[-1]['hoi_annotation']:
triplet = (self.gts[-1]['annotations'][hoi['subject_id']]['category_id'],
self.gts[-1]['annotations'][hoi['object_id']]['category_id'],
hoi['category_id'])
if triplet not in self.gt_triplets:
self.gt_triplets.append(triplet)
self.sum_gts[triplet] += 1
def evaluate(self):
for img_id, (img_preds, img_gts) in enumerate(zip(self.preds, self.gts)):
print(f"Evaluating Score Matrix... : [{(img_id+1):>4}/{len(self.gts):<4}]" ,flush=True, end="\r")
pred_bboxes = img_preds['predictions']
gt_bboxes = img_gts['annotations']
pred_hois = img_preds['hoi_prediction']
gt_hois = img_gts['hoi_annotation']
if len(gt_bboxes) != 0:
bbox_pairs, bbox_overlaps = self.compute_iou_mat(gt_bboxes, pred_bboxes)
self.compute_fptp(pred_hois, gt_hois, bbox_pairs, pred_bboxes, bbox_overlaps)
else:
for pred_hoi in pred_hois:
                    # use a tuple so the triplet is hashable and comparable with the tuples in self.gt_triplets
                    triplet = (pred_bboxes[pred_hoi['subject_id']]['category_id'],
                               pred_bboxes[pred_hoi['object_id']]['category_id'], pred_hoi['category_id'])
if triplet not in self.gt_triplets:
continue
self.tp[triplet].append(0)
self.fp[triplet].append(1)
self.score[triplet].append(pred_hoi['score'])
print(f"[stats] Score Matrix Generation completed!! ")
map = self.compute_map()
return map
def compute_map(self):
ap = defaultdict(lambda: 0)
rare_ap = defaultdict(lambda: 0)
non_rare_ap = defaultdict(lambda: 0)
max_recall = defaultdict(lambda: 0)
for triplet in self.gt_triplets:
sum_gts = self.sum_gts[triplet]
if sum_gts == 0:
continue
tp = np.array((self.tp[triplet]))
fp = np.array((self.fp[triplet]))
if len(tp) == 0:
ap[triplet] = 0
max_recall[triplet] = 0
if triplet in self.rare_triplets:
rare_ap[triplet] = 0
elif triplet in self.non_rare_triplets:
non_rare_ap[triplet] = 0
else:
print('Warning: triplet {} is neither in rare triplets nor in non-rare triplets'.format(triplet))
continue
score = np.array(self.score[triplet])
sort_inds = np.argsort(-score)
fp = fp[sort_inds]
tp = tp[sort_inds]
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / sum_gts
prec = tp / (fp + tp)
ap[triplet] = self.voc_ap(rec, prec)
max_recall[triplet] = np.amax(rec)
if triplet in self.rare_triplets:
rare_ap[triplet] = ap[triplet]
elif triplet in self.non_rare_triplets:
non_rare_ap[triplet] = ap[triplet]
else:
print('Warning: triplet {} is neither in rare triplets nor in non-rare triplets'.format(triplet))
m_ap = np.mean(list(ap.values())) * 100 # percentage
m_ap_rare = np.mean(list(rare_ap.values())) * 100 # percentage
m_ap_non_rare = np.mean(list(non_rare_ap.values())) * 100 # percentage
m_max_recall = np.mean(list(max_recall.values()))
return {'mAP': m_ap, 'mAP rare': m_ap_rare, 'mAP non-rare': m_ap_non_rare, 'mean max recall': m_max_recall}
def voc_ap(self, rec, prec):
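        # 11-point interpolated average precision (PASCAL VOC 2007 style):
        # for each recall threshold t in {0.0, 0.1, ..., 1.0}, take the maximum
        # precision among points with recall >= t, then average the 11 values.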
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.
return ap
def compute_fptp(self, pred_hois, gt_hois, match_pairs, pred_bboxes, bbox_overlaps):
pos_pred_ids = match_pairs.keys()
vis_tag = np.zeros(len(gt_hois))
pred_hois.sort(key=lambda k: (k.get('score', 0)), reverse=True)
if len(pred_hois) != 0:
for pred_hoi in pred_hois:
is_match = 0
if len(match_pairs) != 0 and pred_hoi['subject_id'] in pos_pred_ids and pred_hoi['object_id'] in pos_pred_ids:
pred_sub_ids = match_pairs[pred_hoi['subject_id']]
pred_obj_ids = match_pairs[pred_hoi['object_id']]
pred_sub_overlaps = bbox_overlaps[pred_hoi['subject_id']]
pred_obj_overlaps = bbox_overlaps[pred_hoi['object_id']]
pred_category_id = pred_hoi['category_id']
max_overlap = 0
max_gt_hoi = 0
for gt_hoi in gt_hois:
if gt_hoi['subject_id'] in pred_sub_ids and gt_hoi['object_id'] in pred_obj_ids \
and pred_category_id == gt_hoi['category_id']:
is_match = 1
min_overlap_gt = min(pred_sub_overlaps[pred_sub_ids.index(gt_hoi['subject_id'])],
pred_obj_overlaps[pred_obj_ids.index(gt_hoi['object_id'])])
if min_overlap_gt > max_overlap:
max_overlap = min_overlap_gt
max_gt_hoi = gt_hoi
triplet = (pred_bboxes[pred_hoi['subject_id']]['category_id'], pred_bboxes[pred_hoi['object_id']]['category_id'],
pred_hoi['category_id'])
if triplet not in self.gt_triplets:
continue
if is_match == 1 and vis_tag[gt_hois.index(max_gt_hoi)] == 0:
self.fp[triplet].append(0)
self.tp[triplet].append(1)
vis_tag[gt_hois.index(max_gt_hoi)] =1
else:
self.fp[triplet].append(1)
self.tp[triplet].append(0)
self.score[triplet].append(pred_hoi['score'])
def compute_iou_mat(self, bbox_list1, bbox_list2):
iou_mat = np.zeros((len(bbox_list1), len(bbox_list2)))
if len(bbox_list1) == 0 or len(bbox_list2) == 0:
            # return an empty pair so the caller's two-value unpacking still works
            return {}, {}
for i, bbox1 in enumerate(bbox_list1):
for j, bbox2 in enumerate(bbox_list2):
iou_i = self.compute_IOU(bbox1, bbox2)
iou_mat[i, j] = iou_i
iou_mat_ov=iou_mat.copy()
iou_mat[iou_mat>=self.overlap_iou] = 1
iou_mat[iou_mat<self.overlap_iou] = 0
match_pairs = np.nonzero(iou_mat)
match_pairs_dict = {}
match_pair_overlaps = {}
if iou_mat.max() > 0:
for i, pred_id in enumerate(match_pairs[1]):
if pred_id not in match_pairs_dict.keys():
match_pairs_dict[pred_id] = []
match_pair_overlaps[pred_id]=[]
match_pairs_dict[pred_id].append(match_pairs[0][i])
match_pair_overlaps[pred_id].append(iou_mat_ov[match_pairs[0][i],pred_id])
return match_pairs_dict, match_pair_overlaps
def compute_IOU(self, bbox1, bbox2):
if isinstance(bbox1['category_id'], str):
bbox1['category_id'] = int(bbox1['category_id'].replace('\n', ''))
if isinstance(bbox2['category_id'], str):
bbox2['category_id'] = int(bbox2['category_id'].replace('\n', ''))
if bbox1['category_id'] == bbox2['category_id']:
rec1 = bbox1['bbox']
rec2 = bbox2['bbox']
# computing area of each rectangles
S_rec1 = (rec1[2] - rec1[0]+1) * (rec1[3] - rec1[1]+1)
S_rec2 = (rec2[2] - rec2[0]+1) * (rec2[3] - rec2[1]+1)
# computing the sum_area
sum_area = S_rec1 + S_rec2
# find the each edge of intersect rectangle
left_line = max(rec1[1], rec2[1])
right_line = min(rec1[3], rec2[3])
top_line = max(rec1[0], rec2[0])
bottom_line = min(rec1[2], rec2[2])
# judge if there is an intersect
if left_line >= right_line or top_line >= bottom_line:
return 0
else:
intersect = (right_line - left_line+1) * (bottom_line - top_line+1)
return intersect / (sum_area - intersect)
else:
return 0
```
#### File: hotr/engine/trainer.py
```python
import math
import torch
import sys
import hotr.util.misc as utils
import hotr.util.logger as loggers
from typing import Iterable
import wandb
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,
data_loader: Iterable, optimizer: torch.optim.Optimizer,
device: torch.device, epoch: int, max_epoch: int, max_norm: float = 0, dataset_file: str = 'coco', log: bool = False):
model.train()
criterion.train()
metric_logger = loggers.MetricLogger(mode="train", delimiter=" ")
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
space_fmt = str(len(str(max_epoch)))
header = 'Epoch [{start_epoch: >{fill}}/{end_epoch}]'.format(start_epoch=epoch+1, end_epoch=max_epoch, fill=space_fmt)
print_freq = int(len(data_loader)/5)
print(f"\n>>> Epoch #{(epoch+1)}")
for samples, targets in metric_logger.log_every(data_loader, print_freq, header):
samples = samples.to(device)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
outputs = model(samples)
loss_dict = criterion(outputs, targets, log)
weight_dict = criterion.weight_dict
losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)
# reduce losses over all GPUs for logging purposes
loss_dict_reduced = utils.reduce_dict(loss_dict)
loss_dict_reduced_unscaled = {f'{k}_unscaled': v
for k, v in loss_dict_reduced.items()}
loss_dict_reduced_scaled = {k: v * weight_dict[k]
for k, v in loss_dict_reduced.items() if k in weight_dict}
losses_reduced_scaled = sum(loss_dict_reduced_scaled.values())
loss_value = losses_reduced_scaled.item()
if utils.get_rank() == 0 and log: wandb.log(loss_dict_reduced_scaled)
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
print(loss_dict_reduced)
sys.exit(1)
optimizer.zero_grad()
losses.backward()
if max_norm > 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
optimizer.step()
metric_logger.update(loss=loss_value, **loss_dict_reduced_scaled)
if "obj_class_error" in loss_dict:
metric_logger.update(obj_class_error=loss_dict_reduced['obj_class_error'])
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
```
#### File: metrics/vcoco/ap_agent.py
```python
import numpy as np
from hotr.metrics.utils import _compute_ap, compute_overlap
import pdb
class APAgent(object):
def __init__(self, act_name, iou_threshold=0.5):
self.act_name = act_name
self.iou_threshold = iou_threshold
self.fp = [np.zeros((0,))] * len(act_name)
self.tp = [np.zeros((0,))] * len(act_name)
self.score = [np.zeros((0,))] * len(act_name)
self.num_ann = [0] * len(act_name)
def add_data(self, box, act, cat, i_box, i_act):
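        # Shapes inferred from the code below (not documented in the original):
        # box (n_pred, 4) predicted boxes, act (n_pred, #actions) action scores,
        # cat (n_pred,) detection confidence, i_box (n_i, 4) GT person boxes,
        # i_act (n_i, #actions) GT action labels, with -1 marking invalid entries.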
for label in range(len(self.act_name)):
i_inds = (i_act[:, label] == 1)
self.num_ann[label] += i_inds.sum()
n_pred = box.shape[0]
if n_pred == 0 : return
######################
valid_i_inds = (i_act[:, 0] != -1) # (n_i, ) # both in COCO & V-COCO
overlaps = compute_overlap(box, i_box) # (n_pred, n_i)
assigned_input = np.argmax(overlaps, axis=1) # (n_pred, )
v_inds = valid_i_inds[assigned_input] # (n_pred, )
n_valid = v_inds.sum()
if n_valid == 0 : return
valid_box = box[v_inds]
valid_act = act[v_inds]
valid_cat = cat[v_inds]
######################
s = valid_act * np.expand_dims(valid_cat, axis=1) # (n_v, #act)
for label in range(len(self.act_name)):
inds = np.argsort(s[:, label])[::-1] # (n_v, )
self.score[label] = np.append(self.score[label], s[inds, label])
correct_i_inds = (i_act[:, label] == 1)
if correct_i_inds.sum() == 0:
self.tp[label] = np.append(self.tp[label], np.array([0]*n_valid))
self.fp[label] = np.append(self.fp[label], np.array([1]*n_valid))
continue
overlaps = compute_overlap(valid_box[inds], i_box) # (n_v, n_i)
assigned_input = np.argmax(overlaps, axis=1) # (n_v, )
max_overlap = overlaps[range(n_valid), assigned_input] # (n_v, )
iou_inds = (max_overlap > self.iou_threshold) & correct_i_inds[assigned_input] # (n_v, )
i_nonzero = iou_inds.nonzero()[0]
i_inds = assigned_input[i_nonzero]
i_iou = np.unique(i_inds, return_index=True)[1]
i_tp = i_nonzero[i_iou]
t = np.zeros(n_valid, dtype=np.uint8)
t[i_tp] = 1
f = 1-t
self.tp[label] = np.append(self.tp[label], t)
self.fp[label] = np.append(self.fp[label], f)
def evaluate(self):
average_precisions = dict()
for label in range(len(self.act_name)):
if self.num_ann[label] == 0:
average_precisions[label] = 0
continue
# sort by score
indices = np.argsort(-self.score[label])
self.fp[label] = self.fp[label][indices]
self.tp[label] = self.tp[label][indices]
# compute false positives and true positives
self.fp[label] = np.cumsum(self.fp[label])
self.tp[label] = np.cumsum(self.tp[label])
# compute recall and precision
recall = self.tp[label] / self.num_ann[label]
precision = self.tp[label] / np.maximum(self.tp[label] + self.fp[label], np.finfo(np.float64).eps)
# compute average precision
average_precisions[label] = _compute_ap(recall, precision) * 100
print('\n================== AP (Agent) ===================')
s, n = 0, 0
for label in range(len(self.act_name)):
label_name = "_".join(self.act_name[label].split("_")[1:])
print('{: >23}: AP = {:0.2f} (#pos = {:d})'.format(label_name, average_precisions[label], self.num_ann[label]))
s += average_precisions[label]
n += 1
mAP = s/n
print('| mAP(agent): {:0.2f}'.format(mAP))
print('----------------------------------------------------')
return mAP
``` |
{
"source": "JihYangChen/Moview",
"score": 2
} |
#### File: moviewCrawler/items/MovieNameSetItem.py
```python
import scrapy
class MovieNameSetItem(scrapy.Item):
movieNameSet = scrapy.Field()
def __repr__(self):
return ""
```
#### File: moviewCrawler/pipelines/MovieNamePipeline.py
```python
import json
class MovieNamePipeline(object):
movieNameSet = set()
def open_spider(self, spider):
pass
def process_item(self, item, spider):
self.movieNameSet = self.movieNameSet.union(item['movieNameSet'])
return item
def close_spider(self, spider):
print("MovieNamePipeline Spider is close!")
``` |
{
"source": "jihyeongeun/airport",
"score": 3
} |
#### File: jihyeongeun/airport/image_source.py
```python
import cv2
import logging
import glob
import itertools
class ImageSource(object):
def NextFrame(self):
        raise NotImplementedError
class CameraSource(ImageSource):
def __init__(self):
logging.info("Opening video capture")
self.cap = cv2.VideoCapture(0)
def NextFrame(self):
_, frame = self.cap.read()
return frame
class FileSource(ImageSource):
def __init__(self, filenames):
self.frames = [cv2.imread(f) for f in glob.glob(filenames)]
        assert self.frames, "No image files specified"
self.it = itertools.cycle(self.frames)
def NextFrame(self):
        return next(self.it).copy()
``` |
{
"source": "JiHyeonSEO/Osori-SelfDrivingWithGTA5",
"score": 3
} |
#### File: Osori-SelfDrivingWithGTA5/gtaTutorial/pygta5-2.py
```python
import numpy as np
from PIL import ImageGrab
import cv2
import time
def process_img(original_image):
processed_img = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)
processed_img = cv2.Canny(processed_img, threshold1=200, threshold2=300)
return processed_img
last_time = time.time()
while(True):
screen = np.array(ImageGrab.grab(bbox=(0, 40, 800, 640)))
new_screen = process_img(screen)
print('Loop took {} seconds' .format(time.time()-last_time))
last_time = time.time()
cv2.imshow('window', new_screen)
#cv2.imshow('window', cv2.cvtColor(screen, cv2.COLOR_BGR2RGB))
if cv2.waitKey(25) & 0xFF == ord('q'):
cv2.destroyAllWindows()
break
```
#### File: Osori-SelfDrivingWithGTA5/gtaTutorial/pygta5-3.py
```python
import numpy as np
from PIL import ImageGrab
import cv2
import time
from directkeys import ReleaseKey, PressKey, W, A, S, D
def process_img(original_image):
processed_img = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)
processed_img = cv2.Canny(processed_img, threshold1=200, threshold2=300)
return processed_img
for i in list(range(4))[::-1]:
print(i+1)
time.sleep(1)
last_time = time.time()
while(True):
screen = np.array(ImageGrab.grab(bbox=(0, 40, 800, 640)))
new_screen = process_img(screen)
## print('down')
## PressKey(W)
## time.sleep(3)
## print('up')
## PressKey(W)
print('Loop took {} seconds' .format(time.time()-last_time))
last_time = time.time()
cv2.imshow('window', new_screen)
#cv2.imshow('window', cv2.cvtColor(screen, cv2.COLOR_BGR2RGB))
if cv2.waitKey(25) & 0xFF == ord('q'):
cv2.destroyAllWindows()
break
``` |
{
"source": "JiHyuk-Byun/FOTS.PyTorch-1",
"score": 2
} |
#### File: JiHyuk-Byun/FOTS.PyTorch-1/eval.py
```python
import argparse
import json
import torch
import logging
import pathlib
import traceback
from pytorch_lightning import Trainer
from FOTS.model.model import FOTSModel
from FOTS.utils.bbox import Toolbox
import easydict
from FOTS.data_loader.data_module import ICDARDataModule
logging.basicConfig(level=logging.DEBUG, format='')
def load_model(model_path, config, with_gpu):
    # Rebuild the Lightning module from a checkpoint and prepare it for inference;
    # mirrors the checkpoint-loading logic used in main() below.
    model = FOTSModel.load_from_checkpoint(checkpoint_path=model_path,
                                           map_location='cpu', config=config)
    if with_gpu:
        model = model.to('cuda:0')
    model.eval()
    return model
def main(args:argparse.Namespace):
model_path = args.model
input_dir = args.input_dir
output_dir = args.output_dir
with_image = True if output_dir else False
with_gpu = True if torch.cuda.is_available() else False
config = json.load(open(args.config))
#with_gpu = False
config = easydict.EasyDict(config)
model = FOTSModel.load_from_checkpoint(checkpoint_path=model_path,
map_location='cpu', config=config)
    if with_gpu:
        model = model.to('cuda:0')
model.eval()
for image_fn in input_dir.glob('*.jpg'):
try:
with torch.no_grad():
                ploy, im = Toolbox.predict(image_fn, model, with_image, output_dir, with_gpu=with_gpu)
print(len(ploy))
except Exception as e:
traceback.print_exc()
if __name__ == '__main__':
logger = logging.getLogger()
parser = argparse.ArgumentParser(description='Model eval')
parser.add_argument('-m', '--model', default=None, type=pathlib.Path, required=True,
help='path to model')
parser.add_argument('-o', '--output_dir', default=None, type=pathlib.Path,
help='output dir for drawn images')
parser.add_argument('-i', '--input_dir', default=None, type=pathlib.Path, required=False,
help='dir for input images')
parser.add_argument('-c', '--config', default=None, type=str,
help='config file path (default: None)')
args = parser.parse_args()
main(args)
```
#### File: FOTS/data_loader/icdar_dataset.py
```python
import typing
import pathlib
import cv2
import numpy as np
from torch.utils.data import Dataset
import imgaug.augmentables.polys as ia_polys
import imgaug.augmentables.segmaps as ia_segmaps
import loguru
import torch
from .transforms import Transform
from ..utils.util import str_label_converter
from .datautils import check_and_validate_polys, normalize_iamge
from . import utils as data_utils
class ICDARDataset(Dataset):
def __init__(self, data_root,
transform: Transform = None,
scale: float = 0.25,
size: int = 640,
vis: bool = False,
training: bool = True):
data_root = pathlib.Path(data_root)
self.images_root = data_root / 'imgs'
self.gt_root = data_root / 'gt'
self.training = training
self.transform = transform
self.vis = vis
self.scale = scale
self.size = size
self.images, self.bboxs, self.transcripts = self.__loadGT()
def __loadGT(self):
all_bboxs = []
all_texts = []
all_images = []
for image in self.images_root.glob('*.jpg'):
# image = pathlib.Path('/data/ocr/det/icdar2015/detection/train/imgs/img_756.jpg')
# gt = pathlib.Path('/data/ocr/det/icdar2015/detection/train/gt/gt_img_756.txt')
gt = self.gt_root / image.with_name('gt_{}'.format(image.stem)).with_suffix('.txt').name
with gt.open(mode='r') as f:
bboxes = []
texts = []
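                # Each ICDAR ground-truth line is "x1,y1,x2,y2,x3,y3,x4,y4,transcript";
                # a transcript of '###' marks unreadable text and is skipped when training.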
for line in f:
text = line.strip('\ufeff').strip('\xef\xbb\xbf').strip().split(',')
x1, y1, x2, y2, x3, y3, x4, y4 = list(map(float, text[:8]))
bbox = [[x1, y1], [x2, y2], [x3, y3], [x4, y4]]
transcript = text[8]
if transcript == '###' and self.training:
continue
bboxes.append(bbox)
texts.append(transcript)
if len(bboxes) > 0:
bboxes = np.array(bboxes)
all_bboxs.append(bboxes)
all_texts.append(texts)
all_images.append(image)
return all_images, all_bboxs, all_texts
def visualize(self,
image_name: str,
image: np.ndarray,
polygons: typing.List[typing.List[float]],
score_map: np.ndarray,
training_mask: np.ndarray):
polygon_list = []
for polygon in polygons:
polygon_list.append(ia_polys.Polygon(
np.array(polygon).reshape(4, 2)
))
polygons_on_image = ia_polys.PolygonsOnImage(polygons=polygon_list, shape=image.shape)
new_image = polygons_on_image.draw_on_image(image)
cv2.imwrite(image_name + '.jpg', new_image)
score_map = ia_segmaps.SegmentationMapsOnImage(score_map.astype(dtype=np.uint8), shape=image.shape)
new_image = score_map.draw_on_image(image.astype(dtype=np.uint8))
cv2.imwrite(image_name + '_score.jpg', new_image[0])
training_mask = ia_segmaps.SegmentationMapsOnImage(training_mask.astype(dtype=np.uint8), shape=image.shape)
new_image = training_mask.draw_on_image(image.astype(dtype=np.uint8))
cv2.imwrite(image_name + '_mask.jpg', new_image[0])
def __getitem__(self, index):
try:
image_path = self.images[index]
word_b_boxes = self.bboxs[index] # num_words * 8
transcripts = self.transcripts[index]
im = cv2.imread((self.images_root / image_path).as_posix())
image_path = pathlib.Path(image_path)
num_of_words = word_b_boxes.shape[0]
text_polys = word_b_boxes
transcripts = [word for line in transcripts for word in line.split()]
if num_of_words == len(transcripts):
h, w, _ = im.shape
text_polys = check_and_validate_polys(text_polys, (h, w))
max_tries = 10
if self.transform:
while True and (max_tries != 0):
transformed_im, transformed_text_polys = self.transform(im, text_polys)
valid_text_polys = [polygon for polygon in transformed_text_polys if polygon.is_fully_within_image(image=im)]
if len(valid_text_polys) > 0:
text_polys = valid_text_polys
transcripts = [transcripts[i] for i, polygon in enumerate(text_polys) if polygon.is_fully_within_image(image=im)]
im = transformed_im
break
max_tries -= 1
if max_tries == 0:
#loguru.logger.debug('Max tries has reached.')
return self.__getitem__(np.random.randint(0, len(self)))
polys = np.stack([poly.coords for poly in text_polys])
score_map, geo_map, training_mask, rectangles, rois = data_utils.get_score_geo(im, polys,
np.ones(polys.shape[0]),
self.scale, self.size)
                # the predicted feature map is 128 x 128, so the GT needs to be downscaled by a stride of 4
image = im[:, :, ::-1].astype(np.float32) # bgr -> rgb
assert len(transcripts) == len(rectangles)
if len(transcripts) == 0:
raise RuntimeError('No text found.')
if self.vis:
self.visualize(image=image,
polygons=rectangles,
score_map=score_map,
training_mask=training_mask,
image_name=image_path.stem)
transcripts = str_label_converter.encode(transcripts)
image = normalize_iamge(image)
return image_path.as_posix(), image, score_map, geo_map, training_mask, transcripts, rectangles, rois
else:
return self.__getitem__(torch.tensor(np.random.randint(0, len(self))))
except Exception as e:
raise e
# loguru.logger.warning('Something wrong with data processing. Resample.')
# return self.__getitem__(torch.tensor(np.random.randint(0, len(self))))
def __len__(self):
return len(self.images)
```
#### File: JiHyuk-Byun/FOTS.PyTorch-1/train.py
```python
import argparse
import json
from loguru import logger
import os
import pathlib
from pytorch_lightning.trainer import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.loggers import WandbLogger
from easydict import EasyDict
from FOTS.model.model import FOTSModel
from FOTS.model.loss import *
from FOTS.model.metric import *
from FOTS.data_loader.data_module import SynthTextDataModule, ICDARDataModule
def main(config, resume: bool):
model = FOTSModel(config)
if resume:
assert pathlib.Path(config.pretrain).exists()
resume_ckpt = config.pretrain
logger.info('Resume training from: {}'.format(config.pretrain))
else:
if config.pretrain:
assert pathlib.Path(config.pretrain).exists()
logger.info('Finetune with: {}'.format(config.pretrain))
model.load_from_checkpoint(config.pretrain, config=config, map_location='cpu')
resume_ckpt = None
else:
resume_ckpt = None
if config.data_loader.dataset == 'synth800k':
data_module = SynthTextDataModule(config)
else:
data_module = ICDARDataModule(config)
data_module.setup()
root_dir = str(pathlib.Path(config.trainer.save_dir).absolute() / config.name)
checkpoint_callback = ModelCheckpoint(dirpath=root_dir + '/checkpoints', period=1)
wandb_dir = pathlib.Path(root_dir) / 'wandb'
if not wandb_dir.exists():
wandb_dir.mkdir(parents=True, exist_ok=True)
wandb_logger = WandbLogger(name=config.name,
project='FOTS',
config=config,
save_dir=root_dir)
if not config.cuda:
gpus = 0
else:
gpus = config.gpus
trainer = Trainer(
logger=wandb_logger,
callbacks=[checkpoint_callback],
max_epochs=config.trainer.epochs,
default_root_dir=root_dir,
gpus=gpus,
accelerator='ddp',
benchmark=True,
sync_batchnorm=True,
precision=config.precision,
log_gpu_memory=config.trainer.log_gpu_memory,
log_every_n_steps=config.trainer.log_every_n_steps,
overfit_batches=config.trainer.overfit_batches,
weights_summary='full',
terminate_on_nan=config.trainer.terminate_on_nan,
fast_dev_run=config.trainer.fast_dev_run,
check_val_every_n_epoch=config.trainer.check_val_every_n_epoch,
resume_from_checkpoint=resume_ckpt)
trainer.fit(model=model, datamodule=data_module)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='PyTorch Template')
parser.add_argument('-c', '--config', default=None, type=str,
help='config file path (default: None)')
parser.add_argument('-r', '--resume', action='store_true',
help='path to latest checkpoint (default: None)')
args = parser.parse_args()
config = None
if args.config is not None:
config = json.load(open(args.config))
path = os.path.join(config['trainer']['save_dir'], config['name'])
# assert not os.path.exists(path), "Path {} already exists!".format(path)
else:
if args.resume is not None:
logger.warning('Warning: --config overridden by --resume')
config = torch.load(args.resume, map_location='cpu')['config']
assert config is not None
config = EasyDict(config)
main(config, args.resume)
``` |
{
"source": "jihyunbak/exprmanager",
"score": 3
} |
#### File: exprmanager/exprmanager/exprmanager.py
```python
import os
import numpy as np
import itertools
from exprmanager import utils
class ExprManager():
''' manage the logistics between the main program
and the experiment output folder.
create a class instance for a single experiment (a folder).
'''
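    # A minimal usage sketch (hypothetical directory and parameter names, not
    # taken from the original project):
    #   em = ExprManager(base_dir='out', expr_name='demo',
    #                    varied={'alpha': [0.1, 0.2]}, config={'solver': {}})
    #   em.save_varied_params()
    #   em.save_config()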
def __init__(self, base_dir='', expr_name='test',
varied=None, config=None):
# set up expr_subdir
self.expr_name = expr_name
self.expr_dir = os.path.join(base_dir, expr_name + '/')
self.res_dir = os.path.join(self.expr_dir, 'res/')
# os.makedirs(self.res_dir, exist_ok=True) # recursive mkdir
if varied is not None:
self.varied = varied
self.config = config or dict() # if None, empty dict
self.config['expr_name'] = expr_name
def save_varied_params(self):
for param_name, param_values in self.varied.items():
self.print_parameter_list(self.expr_dir + param_name + '.tsv', param_values)
@staticmethod
def print_parameter_list(filename, param_values, delimiter='\t'):
        ''' save parameter set into a tab-separated value (tsv) file
where each row has an index and a value.
params: expecting a list of values.
'''
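        # For example, param_values=[0.1, 0.2] yields a file like:
        #   idx<TAB>val
        #   0<TAB>0.1
        #   1<TAB>0.2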
header = ['idx', 'val']
body = [[i, v] for i, v in enumerate(param_values)]
utils.write_csv(filename, body, header=header, delimiter=delimiter)
def load_parameter_list(self, param_name, delimiter='\t'):
body, _ = utils.read_csv(self.expr_dir + param_name + '.tsv',
nheader=1,
delimiter=delimiter)
# idx = [int(row[0]) for row in body] # no need
val = [float(row[1]) for row in body]
return val
def save_config(self):
config_copy = self.treat_dict_before_export(self.config)
utils.save_json(self.expr_dir + 'config.json', config_copy)
def load_config(self):
return utils.load_json(self.expr_dir + 'config.json')
def treat_dict_before_export(self, full_dict, allow_only=None):
# TODO: change to listing types that are *not* allowed
if allow_only is None:
allow_only=(int, float, str, np.ndarray, type(None))
return utils.copy_nested_dict(full_dict,
allow_only=allow_only, replace_value='(dropped)')
def set_filename(self, idx, prefix='sol', subdir=False):
if subdir:
prefix = prefix + '/' + prefix
return utils._set_filename(idx, prefix=prefix, extension='')
def save_result(self, obj, filename, return_path=False):
''' save to a BSDF file. '''
filename = utils.ensure_extension(filename, ext='.bsdf')
res_path = os.path.join(self.res_dir, filename) # full path to file
# if `filename` includes subdirectories like 'sub/dir/file', make them
os.makedirs(os.path.dirname(res_path), exist_ok=True)
utils.save_bsdf(res_path, obj)
if return_path:
return res_path
def load_result(self, filename):
''' load and return previous result if available as a file;
if the file does not exist, return None.
'''
filename = utils.ensure_extension(filename, ext='.bsdf')
res_path = os.path.join(self.res_dir, filename) # full path to file
try:
return utils.load_bsdf(res_path) # previous result
except FileNotFoundError:
return None
def export_as_dict(self, obj, filename):
''' export a copy of the __dict__ of the given object;
in this case a model or a solver,
that stores parameter values as attributes.
'''
        return self.export_dict(obj.__dict__, filename)
def export_dict(self, obj, filename):
''' export a copy of the dictionary object to a file.
obj: dict (output data)
filename: string (path to output file to be generated)
'''
out_dict = self.treat_dict_before_export(obj)
out_dict['type'] = type(obj).__name__ # make a string
self.save_result(out_dict, filename)
return out_dict
def call_with_file(self, solver, data=None, idx=None,
solver_kwargs=None, save_output=True,
prefix='sol', subdir=False,
force_recalculate=False, verbose=True):
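        ''' run `solver` on `data` and cache the result in a BSDF file named by
        `idx` and `prefix`; if a cached file already exists (and force_recalculate
        is False), load and return the stored solution instead of recomputing.
        '''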
filename = self.set_filename(idx, prefix=prefix, subdir=subdir)
# check for previously saved file
if not force_recalculate:
sol = self.load_result(filename)
if sol is not None:
if verbose:
print(' - {}: loaded from file'.format(filename))
return sol
if solver is None:
raise FileNotFoundError('{}: check filename or generate.'.format(filename))
# do the computation and save results to file
if verbose:
print(' - {}: running...'.format(filename))
solver_kwargs = solver_kwargs or self.config.get('solver', dict())
sol = solver(*data, **solver_kwargs)
if save_output:
            _ = self.export_dict(sol, filename)  # write to a BSDF file
return sol
def load_from_export(self, filename):
# TODO: load previous exports, to create a new copy of object
pass
def varied_param_iterables(self):
''' prepare for iteration over multiple parameter loops.
return iterable lists of K-tuples, where K is the param space dim.
'''
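        # Example (hypothetical parameters): with self.varied = {'a': [0.1, 0.2],
        # 'b': [1, 2, 3]}, idx_iter yields (0, 0), (0, 1), ..., (1, 2) and
        # val_iter yields the matching value tuples (0.1, 1), (0.1, 2), ..., (0.2, 3).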
# expand
val_aux = tuple(self.varied.values())
idx_aux = tuple([np.arange(0, len(vv)) for vv in val_aux])
idx_iter = itertools.product(*idx_aux)
val_iter = itertools.product(*val_aux)
return idx_iter, val_iter
def run_expr_loop(self, data_input, func_prep_data, func_solve):
''' common template for parameter space sweep
'''
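        # Expected callables (inferred from the loop below):
        #   func_prep_data(data_input) -> data
        #   func_solve(idx, prm, data) -> None, called once per parameter tuple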
# print parameter lists and solver configs
self.save_varied_params()
self.save_config()
# prepare (or generate) data for inference etc.
data = func_prep_data(data_input)
# iterate
idx_iter, prm_iter = self.varied_param_iterables()
for idx, prm in zip(idx_iter, prm_iter):
func_solve(idx, prm, data)
print('end of experiment.')
``` |
{
"source": "jihyunbak/gsc-scrape",
"score": 3
} |
#### File: gsc-scrape/gscscr/odor_data_coll.py
```python
from bs4 import BeautifulSoup
import csv
import re
from .utils_webscraper import simple_get
from . import utils
TARGET_URL = None
class OdorDataCollector:
''' code for web-scraping from the GSC website '''
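    # Minimal usage sketch (hypothetical URL and paths; target_url and list_num
    # are required by __init__):
    #   coll = OdorDataCollector(target_url='https://example.com/', list_num=1,
    #                            out_dir='data/raw/', summary_dir='data/summary/')
    #   coll.get_odor_info_from_list()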
def __init__(self, target_url=TARGET_URL, list_num=None,
out_dir='', summary_dir=None, file_prefix='', talkative=True):
# input
if target_url is None:
raise ValueError('target_url is required.')
if list_num is None:
raise ValueError('list_num is required.')
self.target_url = target_url
self.num = list_num # database list number
# output
self.set_out_dir(out_dir)
self.set_summary_dir(summary_dir)
self.file_prefix = file_prefix
self.talkative = talkative
def set_out_dir(self, out_dir):
self.out_dir = out_dir
utils.make_dir(self.out_dir)
def set_summary_dir(self, summary_dir):
if summary_dir is None:
return
self.summary_dir = summary_dir
utils.make_dir(self.summary_dir)
def set_list_dir(self):
# create molecule-level output directory
self.list_dir = self.out_dir + 'list%d/' % self.num
# --- scraping ---
def get_odor_info_from_list(self, list_only=False, test_run=False):
"""
retrieve odor information from all molecules in each list
"""
# get the list of URLs
# (always set full_list to False when using it to retrieve individual entries)
all_mol_address = self.get_list_db(full_list=False, use_existing_file=True)
if list_only:
return
if self.talkative:
print('running through all {} molecules in the list ...'.format(len(all_mol_address)))
# create DB-specific output directory
self.set_list_dir()
utils.make_dir(self.list_dir) # make when necessary
# loop over list
for idx in range(0,len(all_mol_address)):
# retrieve odor information from the specified entry
toc = self.get_odor_info_single_mol(all_mol_address, idx)
if test_run:
if self.talkative:
print('stopped by test_run option')
break # for developing
# wait for a while before making the next request (not to overload server)
utils.wait(toc*5)
def get_list_db(self, use_existing_file=True, full_list=False):
"""
retrieves product list from the GSC website
creates a csv file
"""
file_postfix = '_full' if full_list else ''
csvfilename = self._make_dbfilename(file_postfix=file_postfix)
if utils.isfile(csvfilename) and use_existing_file:
if self.talkative:
print('file already exists: ' + csvfilename)
return utils.read_csv(csvfilename)
# retrieve list from website
all_mol_address, header = self._retrieve_mol_list(additional_info=full_list)
if all_mol_address is None:
return None # could not find website
# write to a csv file
utils.write_csv(csvfilename, all_mol_address, header=header)
if self.talkative:
print('file saved to: ' + csvfilename)
return all_mol_address
def _make_dbfilename(self, file_postfix=''):
return (self.out_dir + self.file_prefix + 'list%d' + file_postfix + '.csv') % self.num
def get_odor_info_single_mol(self, all_mol_address, idx):
"""
retrieve odor-related information for a single molecule.
"""
# retrieve page from one molecule
mol_name = all_mol_address[idx][0]
if self.talkative:
print(mol_name)
mol_url = all_mol_address[idx][1]
mol_code = utils.search_string_between(mol_url, self.target_url + 'data/', '.html')
# set up output file name
outfilename = self.list_dir + 'list%d_mol%d_%s.txt' % (self.num, idx, mol_code)
if utils.isfile(outfilename):
print('list file already exists:' + outfilename)
return 0
# get website content
_, tic = utils.timer()
mol_html = simple_get(mol_url)
toc, _ = utils.timer(tic)
if self.talkative:
print('time for webpage retrieval: ' + str(toc))
# parse and find section heading "Organoleptic properties"
mol_soup = BeautifulSoup(mol_html, 'html.parser')
for sec in mol_soup.find_all(attrs={"class": "sectionclass"}):
if(sec.text[0:6]=='Organo'):
break
tab_organo = sec.find_next_sibling('table') # we want the following table
# write to file
self._write_odor_info_to_file(outfilename, tab_organo, mol_name, mol_code)
return toc
def _retrieve_mol_list(self, additional_info=True):
''' written for a specific target website format '''
# locate target webpage url
url_list = self.target_url + 'allproc-%d.html' % self.num
# retrieve webpage content
raw_html = simple_get(url_list)
if raw_html is None:
# website not found
return None, None
# parse html content
soup = BeautifulSoup(raw_html, 'html.parser')
if self.talkative:
print(soup.title.text) # check content
if additional_info:
# retrieve more information (4/17/2019)
cnt = -1 # so that first data row counts as 0
mylist = []
for row in soup.table.find_all('tr'): # the tr's
myrow1 = self._read_row_html(row)
if myrow1: # if not empty
myrow2 = [self.num, cnt] + myrow1
mylist.append(myrow2)
cnt = cnt + 1 # increment after writing!
all_mol_address = mylist[1:-1] # skip first (header) & last rows (disclaimer)
header = ['ListIdx','MolIdx','CAS','Prefix','Name','URL','MoreInfo']
return all_mol_address, header
# extract link and molecule name only (early version)
all_mol_address = [];
for link in soup.table.find_all('a'):
click_action = link.get('onclick')
mol_name = link.text
mol_url = utils.search_string_between(click_action, "openMainWindow\(\'", "\'\);")
all_mol_address.append([mol_name, mol_url])
header = ["MoleculeName", "MolURL"]
return all_mol_address, header
def _read_row_html(self, row):
'''
retreives all information from each row in list html
'''
myrow = []
for dat in row.find_all('td'):
mycol = []
if not dat.find_all('a'): # CAS number
myprefix = []
else:
myprefix = ['']
more_text = []
for item in dat.children:
# print(item)
if type(item) is type(dat): # if a tag
if item.name=='a':
link = item
click_action = link.get('onclick')
mol_name = link.text
mol_url = utils.search_string_between(click_action, "openMainWindow\(\'", "\'\);")
mycol.append(mol_name)
mycol.append(mol_url)
elif item.name=='div':
myprefix = [item.text]
else:
sn = item
sn_clean = sn.replace('\r', '').replace('\n', '') # remove newlines
more_text.append(sn_clean)
mycol.append('; '.join(more_text)) # make a single string
myrow = myrow + myprefix + mycol # concatenate
return myrow
def _write_odor_info_to_file(self, outfilename, tab_organo, mol_name, mol_code):
# -- header
ft = open(outfilename,'w')
ft.write(mol_name + '\n')
ft.write(mol_code + '\n')
ft.write('\n')
# -- odor information
for mytd in tab_organo.find_all('td'): #, class_=['qinfr2','radw5']):
if(mytd.text[0:4]=='Odor'):
if(mytd.attrs.get('class')==['demstrafrm']):
break # other-source block starts
ft.write('====================\n')
row = mytd.contents
for item in row:
if type(item) is type(mytd):
ft.write(item.get_text(strip=True, separator='\n'))
ft.write('\n')
else:
ft.write(item + '\n')
if mytd.find_all('a'):
for odortag in mytd.find_all('a'):
taglink = odortag.attrs.get('href')
result = utils.search_string_between(taglink, self.target_url + 'odor/', '.html', extract_pattern=False)
if result is None:
pass # not an odor tag
else:
ft.write('>> ' + result.group(1) + '\n')
ft.close()
if self.talkative:
print('Saved to: ' + outfilename)
# --- after scraping, local database summary ---
def get_list_summary_count(self):
_, _, cnts = self.get_list_summary()
count_mylist = [self.num, cnts['all_odors'], cnts['nonempty_odortype'], cnts['nonempty_descriptors']]
header = ['ListIdx', 'NumAllOdors', 'hasOdorType', 'hasOdorDescriptors']
return count_mylist, header
def get_list_summary(self, talkative=True):
# get list of molecules in DB
all_mol_info = self.get_list_db(full_list=True) # use full information!
# loop through files
myodorlist, header, cnts = self._merge_db_list(all_mol_info)
# write to a csv file
summaryfilename = self.summary_dir + self.file_prefix + 'out_list%d.csv' % self.num
utils.write_csv(summaryfilename, myodorlist, header=header)
if talkative:
print('file saved to: ' + summaryfilename)
cnts['all_odors'] = len(myodorlist)
return myodorlist, header, cnts
def _merge_db_list(self, all_mol_info):
self.set_list_dir()
myodorlist = []
cnt_nonempty_odortype = 0
cnt_nonempty_descriptors = 0
for idx in range(0, len(all_mol_info)):
if idx >= len(all_mol_info):
break
# retrieve molecule code
mymol = all_mol_info[idx]
if (not mymol[0]==str(self.num)) or (not mymol[1]==str(idx)):
# check indices
raise Exception('index mismatch in row idx: {}'.format(idx))
[mol_CAS, mol_prefix, mol_name, mol_url] = mymol[2:6]
mol_code = utils.search_string_between(mol_url, self.target_url + 'data/', '.html')
# set up target file name
molfilename = self.list_dir + 'list%d_mol%d_%s.txt' % (self.num, idx, mol_code)
numlines = utils.count_lines_in_file(molfilename)
if (numlines is None):
continue
# count number of molecules with non-empty odor info
mytype, mydescriptor_string = self._filter_each_mol(molfilename)
if mytype:
cnt_nonempty_odortype = cnt_nonempty_odortype + 1
if mydescriptor_string:
cnt_nonempty_descriptors = cnt_nonempty_descriptors + 1
# summarize odor information
myodorrow = [self.num, idx, mol_CAS, mol_prefix, mol_name, mytype, mydescriptor_string]
myodorlist.append(myodorrow)
header = ['ListIdx', 'MolIdx', 'CAS', 'MolPrefix', 'MolName', 'OdorType', 'OdorDescriptors']
cnts = {'nonempty_odortype': cnt_nonempty_odortype, 'nonempty_descriptors': cnt_nonempty_descriptors}
return myodorlist, header, cnts
def _filter_each_mol(self, molfilename):
'''
extracts odor descriptor words from each odor entry file
that is scraped from the web
'''
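        # Expected layout of each molecule file (written by _write_odor_info_to_file),
        # e.g. (hypothetical values):
        #   Odor Type: fruity
        #   >> apple
        #   >> pear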
with open(molfilename,'r') as f:
mydescriptors = []
mytype = ''
for line in f:
if line[0:9]=='Odor Type':
mytypefind = re.search('Odor Type: (.*)\n', line)
mytype = mytypefind[1]
elif line[0:2]=='>>':
myword = re.search('>> (.*)\n', line)
mydescriptors.append(myword[1])
if mydescriptors: # true if not empty
mydescriptors_uniq = list(set(mydescriptors)) # re-ordered
mydescriptors_string = ';'.join(mydescriptors_uniq)
else:
mydescriptors_string = ''
return mytype, mydescriptors_string
def roll_summary(self, cnt):
merged_list = []
# just read in the csv file (this is not the optimal way)
csvfilename = self.summary_dir + self.file_prefix + 'out_list%d.csv' % self.num
isheader = 1 # for the header
with open(csvfilename, newline='') as f:
reader = csv.reader(f)
# headers = next(f) # skip header
for row in reader:
if isheader:
header = row # keep header
isheader = 0
continue
[mytype, mydescriptors] = row[5:7]
if (mytype) or (mydescriptors):
row_ext = [cnt] + row
merged_list.append(row_ext)
cnt = cnt + 1
merged_header = ['OdorIdx'] + header
return merged_list, merged_header, cnt
``` |
{
"source": "jihyunbak/nwb_datajoint",
"score": 2
} |
#### File: nwb_datajoint/common/common_ephys.py
```python
import re
import warnings
import datajoint as dj
import numpy as np
import pynwb
from .common_device import Probe # noqa: F401
from .common_filter import FirFilter
from .common_interval import IntervalList # noqa: F401
# SortInterval, interval_list_intersect, interval_list_excludes_ind
from .common_nwbfile import AnalysisNwbfile, Nwbfile
from .common_region import BrainRegion # noqa: F401
from .common_session import Session # noqa: F401
from .dj_helper_fn import fetch_nwb # dj_replace
from .nwb_helper_fn import (estimate_sampling_rate, get_data_interface,
get_electrode_indices, get_nwb_file,
get_valid_intervals)
schema = dj.schema('common_ephys')
@schema
class ElectrodeGroup(dj.Imported):
definition = """
# Grouping of electrodes corresponding to a physical probe.
-> Session
electrode_group_name: varchar(80) # electrode group name from NWBFile
---
-> BrainRegion
-> Probe
description: varchar(80) # description of electrode group
target_hemisphere: enum('Right','Left')
"""
def make(self, key):
nwb_file_name = key['nwb_file_name']
nwb_file_abspath = Nwbfile.get_abs_path(nwb_file_name)
nwbf = get_nwb_file(nwb_file_abspath)
# fill in the groups
egroups = list(nwbf.electrode_groups.keys())
for eg_name in egroups:
# for each electrode group, we get the group and add an electrode group entry.
# as the ElectrodeGroup
electrode_group = nwbf.get_electrode_group(eg_name)
key['electrode_group_name'] = eg_name
# check to see if the location is listed in the region.BrainRegion schema, and if not add it
region_dict = dict()
region_dict['region_name'] = electrode_group.location
region_dict['subregion_name'] = ''
region_dict['subsubregion_name'] = ''
query = BrainRegion() & region_dict
if len(query) == 0:
# this region isn't in the list, so add it
BrainRegion().insert1(region_dict)
query = BrainRegion() & region_dict
# we also need to get the region_id for this new region or find the right region_id
region_id_dict = query.fetch1()
key['region_id'] = region_id_dict['region_id']
key['description'] = electrode_group.description
# the following should probably be a function that returns the probe devices from the file
# TODO check and replace this with
# if isinstance(electrode_group.device, ndx_franklab_novela.Probe):
# key['probe_type'] = electrode_group.device.probe_type
# else:
# key['probe_type'] = 'unknown-probe-type'
probe_re = re.compile("probe")
for d in nwbf.devices:
if probe_re.search(d):
if nwbf.devices[d] == electrode_group.device:
# this will match the entry in the device schema
key['probe_type'] = electrode_group.device.probe_type
break
if 'probe_type' not in key:
key['probe_type'] = 'unknown-probe-type'
self.insert1(key, skip_duplicates=True)
@schema
class Electrode(dj.Imported):
definition = """
-> ElectrodeGroup
electrode_id: int # the unique number for this electrode
---
-> Probe.Electrode
-> BrainRegion
name='': varchar(80) # unique label for each contact
original_reference_electrode=-1: int # the configured reference electrode for this electrode
x=NULL: float # the x coordinate of the electrode position in the brain
y=NULL: float # the y coordinate of the electrode position in the brain
z=NULL: float # the z coordinate of the electrode position in the brain
filtering: varchar(200) # description of the signal filtering
impedance=null: float # electrode impedance
bad_channel: enum("True","False") # if electrode is 'good' or 'bad' as observed during recording
x_warped=NULL: float # x coordinate of electrode position warped to common template brain
y_warped=NULL: float # y coordinate of electrode position warped to common template brain
z_warped=NULL: float # z coordinate of electrode position warped to common template brain
contacts: varchar(80) # label of electrode contacts used for a bipolar signal -- current workaround
"""
def make(self, key):
nwb_file_name = key['nwb_file_name']
nwb_file_abspath = Nwbfile.get_abs_path(nwb_file_name)
nwbf = get_nwb_file(nwb_file_abspath)
# create the table of electrodes
electrodes = nwbf.electrodes.to_dataframe()
# Below it would be better to find the mapping between
# nwbf.electrodes.colnames and the schema fields and
# where possible, assign automatically. It would also help to be
# robust to missing fields and have them
# assigned as empty if they don't exist in the nwb file in case
# people are not using our column names.
for elect in electrodes.iterrows():
key['electrode_group_name'] = elect[1].group_name
key['electrode_id'] = elect[0]
key['name'] = str(elect[0])
key['probe_type'] = elect[1].group.device.probe_type
key['probe_shank'] = elect[1].probe_shank
key['probe_electrode'] = elect[1].probe_electrode
key['bad_channel'] = 'True' if elect[1].bad_channel else 'False'
# look up the region
region_dict = dict()
region_dict['region_name'] = elect[1].group.location
region_dict['subregion_name'] = ''
region_dict['subsubregion_name'] = ''
key['region_id'] = (
BrainRegion() & region_dict).fetch1('region_id')
key['x'] = elect[1].x
key['y'] = elect[1].y
key['z'] = elect[1].z
key['x_warped'] = 0
key['y_warped'] = 0
key['z_warped'] = 0
key['contacts'] = ''
key['filtering'] = elect[1].filtering
key['impedance'] = elect[1].imp
try:
key['original_reference_electrode'] = elect[1].ref_elect_id
except Exception: # TODO: use more precise error check
key['original_reference_electrode'] = -1
self.insert1(key, skip_duplicates=True)
@schema
class Raw(dj.Imported):
definition = """
# Raw voltage timeseries data, ElectricalSeries in NWB.
-> Session
---
-> IntervalList
raw_object_id: varchar(80) # the NWB object ID for loading this object from the file
sampling_rate: float # Sampling rate calculated from data, in Hz
comments: varchar(80)
description: varchar(80)
"""
def make(self, key):
nwb_file_name = key['nwb_file_name']
nwb_file_abspath = Nwbfile.get_abs_path(nwb_file_name)
nwbf = get_nwb_file(nwb_file_abspath)
raw_interval_name = "raw data valid times"
# get the acquisition object
# TODO this assumes there is a single item in NWBFile.acquisition
try:
rawdata = nwbf.get_acquisition()
assert isinstance(rawdata, pynwb.ecephys.ElectricalSeries)
except Exception: # TODO: use more precise error check
warnings.warn(
f'WARNING: Unable to get acquisition object in: {nwb_file_abspath}')
return
print('Estimating sampling rate...')
# NOTE: Only use first 1e6 timepoints to save time
sampling_rate = estimate_sampling_rate(
np.asarray(rawdata.timestamps[:int(1e6)]), 1.5)
print(f'Estimated sampling rate: {sampling_rate}')
key['sampling_rate'] = sampling_rate
interval_dict = dict()
interval_dict['nwb_file_name'] = key['nwb_file_name']
interval_dict['interval_list_name'] = raw_interval_name
# get the list of valid times given the specified sampling rate.
interval_dict['valid_times'] = get_valid_intervals(np.asarray(rawdata.timestamps), key['sampling_rate'],
1.75, 0)
IntervalList().insert1(interval_dict, skip_duplicates=True)
# now insert each of the electrodes as an individual row, but with the same nwb_object_id
key['raw_object_id'] = rawdata.object_id
key['sampling_rate'] = sampling_rate
print(
f'Importing raw data: Estimated sampling rate:\t{key["sampling_rate"]} Hz')
print(
f' Number of valid intervals:\t{len(interval_dict["valid_times"])}')
key['interval_list_name'] = raw_interval_name
key['comments'] = rawdata.comments
key['description'] = rawdata.description
self.insert1(key, skip_duplicates=True)
def nwb_object(self, key):
# TODO return the nwb_object; FIX: this should be replaced with a fetch call. Note that we're using the raw file
# so we can modify the other one.
nwb_file_name = key['nwb_file_name']
nwb_file_abspath = Nwbfile.get_abs_path(nwb_file_name)
nwbf = get_nwb_file(nwb_file_abspath)
raw_object_id = (self & {'nwb_file_name': key['nwb_file_name']}).fetch1(
'raw_object_id')
return nwbf.objects[raw_object_id]
def fetch_nwb(self, *attrs, **kwargs):
return fetch_nwb(self, (Nwbfile, 'nwb_file_abs_path'), *attrs, **kwargs)
@schema
class SampleCount(dj.Imported):
definition = """
    # Sample count vs. timestamp timeseries
-> Session
---
sample_count_object_id: varchar(40) # the NWB object ID for loading this object from the file
"""
def make(self, key):
nwb_file_name = key['nwb_file_name']
nwb_file_abspath = Nwbfile.get_abs_path(nwb_file_name)
nwbf = get_nwb_file(nwb_file_abspath)
# get the sample count object
# TODO: change name when nwb file is changed
sample_count = get_data_interface(nwbf, 'sample_count')
if sample_count is None:
warnings.warn(
f'Unable to get sample count object in: {nwb_file_abspath}')
return
        key['sample_count_object_id'] = sample_count.object_id
        self.insert1(key)
def fetch_nwb(self, *attrs, **kwargs):
return fetch_nwb(self, (Nwbfile, 'nwb_file_abs_path'), *attrs, **kwargs)
@schema
class LFPSelection(dj.Manual):
definition = """
-> Session
"""
class LFPElectrode(dj.Part):
definition = """
-> master
-> Electrode
"""
def set_lfp_electrodes(self, nwb_file_name, electrode_list):
'''
Removes all electrodes for the specified nwb file and then adds back the electrodes in the list
:param nwb_file_name: string - the name of the nwb file for the desired session
:param electrode_list: list of electrodes to be used for LFP
:return:
'''
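        # Usage sketch (hypothetical file name and electrode ids):
        #   LFPSelection().set_lfp_electrodes('beans20190718_.nwb', [0, 4, 8])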
# remove the session and then recreate the session and Electrode list
(LFPSelection() & {'nwb_file_name': nwb_file_name}).delete()
# check to see if the user allowed the deletion
if len((LFPSelection() & {'nwb_file_name': nwb_file_name}).fetch()) == 0:
LFPSelection().insert1({'nwb_file_name': nwb_file_name})
# TO DO: do this in a better way
all_electrodes = Electrode.fetch(as_dict=True)
primary_key = Electrode.primary_key
for e in all_electrodes:
# create a dictionary so we can insert new elects
if e['electrode_id'] in electrode_list:
lfpelectdict = {k: v for k,
v in e.items() if k in primary_key}
LFPSelection().LFPElectrode.insert1(lfpelectdict, replace=True)
@schema
class LFP(dj.Imported):
definition = """
-> LFPSelection
---
-> IntervalList # the valid intervals for the data
-> FirFilter # the filter used for the data
-> AnalysisNwbfile # the name of the nwb file with the lfp data
lfp_object_id: varchar(80) # the NWB object ID for loading this object from the file
lfp_sampling_rate: float # the sampling rate, in HZ
"""
def make(self, key):
# get the NWB object with the data; FIX: change to fetch with additional infrastructure
rawdata = Raw().nwb_object(key)
sampling_rate, interval_list_name = (Raw() & key).fetch1(
'sampling_rate', 'interval_list_name')
sampling_rate = int(np.round(sampling_rate))
# TEST
# interval_list_name = '01_s1'
key['interval_list_name'] = interval_list_name
valid_times = (IntervalList() & {'nwb_file_name': key['nwb_file_name'],
'interval_list_name': interval_list_name}).fetch1('valid_times')
# target 1 KHz sampling rate
decimation = sampling_rate // 1000
# get the LFP filter that matches the raw data
filter = (FirFilter() & {'filter_name': 'LFP 0-400 Hz'} &
{'filter_sampling_rate': sampling_rate}).fetch(as_dict=True)
# there should only be one filter that matches, so we take the first of the dictionaries
key['filter_name'] = filter[0]['filter_name']
key['filter_sampling_rate'] = filter[0]['filter_sampling_rate']
filter_coeff = filter[0]['filter_coeff']
if len(filter_coeff) == 0:
print(
f'Error in LFP: no filter found with data sampling rate of {sampling_rate}')
return None
# get the list of selected LFP Channels from LFPElectrode
electrode_keys = (LFPSelection.LFPElectrode & key).fetch('KEY')
electrode_id_list = list(k['electrode_id'] for k in electrode_keys)
lfp_file_name = AnalysisNwbfile().create(key['nwb_file_name'])
lfp_file_abspath = AnalysisNwbfile().get_abs_path(lfp_file_name)
# test:
lfp_object_id = FirFilter().filter_data_nwb(lfp_file_abspath, rawdata,
filter_coeff, valid_times, electrode_id_list, decimation)
key['analysis_file_name'] = lfp_file_name
key['lfp_object_id'] = lfp_object_id
key['lfp_sampling_rate'] = sampling_rate // decimation
self.insert1(key)
def nwb_object(self, key):
# return the nwb_object.
lfp_file_name = (LFP() & {'nwb_file_name': key['nwb_file_name']}).fetch1(
'analysis_file_name')
lfp_file_abspath = AnalysisNwbfile().get_abs_path(lfp_file_name)
nwbf = get_nwb_file(lfp_file_abspath)
# get the object id
nwb_object_id = (self & {'analysis_file_name': lfp_file_name}).fetch1(
'filtered_data_object_id')
return nwbf.objects[nwb_object_id]
def fetch_nwb(self, *attrs, **kwargs):
return fetch_nwb(self, (AnalysisNwbfile, 'analysis_file_abs_path'), *attrs, **kwargs)
@schema
class LFPBandSelection(dj.Manual):
definition = """
-> LFP
-> FirFilter # the filter to use for the data
---
-> IntervalList # the set of times to be filtered
lfp_band_sampling_rate: int # the sampling rate for this band
"""
class LFPBandElectrode(dj.Part):
definition = """
-> master
-> LFPSelection.LFPElectrode # the LFP electrode to be filtered LFP
reference_elect_id = -1: int # the reference electrode to use; -1 for no reference
---
"""
def set_lfp_band_electrodes(self, nwb_file_name, electrode_list, filter_name, interval_list_name,
reference_electrode_list, lfp_band_sampling_rate):
'''
Adds an entry for each electrode in the electrode_list with the specified filter, interval_list, and
reference electrode.
Also removes any entries that have the same filter, interval list and reference electrode but are not
in the electrode_list.
:param nwb_file_name: string - the name of the nwb file for the desired session
:param electrode_list: list of LFP electrodes to be filtered
:param filter_name: the name of the filter (from the FirFilter schema)
:param interval_name: the name of the interval list (from the IntervalList schema)
:param reference_electrode_list: A single electrode id corresponding to the reference to use for all
electrodes or a list with one element per entry in the electrode_list
:param lfp_band_sampling_rate: The output sampling rate to be used for the filtered data; must be an
integer divisor of the LFP sampling rate
:return: none
'''
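        # Usage sketch (hypothetical arguments; the filter and interval list must
        # already exist in FirFilter and IntervalList, and the electrodes in
        # LFPSelection):
        #   LFPBandSelection().set_lfp_band_electrodes(
        #       'beans20190718_.nwb', electrode_list=[0, 4],
        #       filter_name='Theta 5-11 Hz', interval_list_name='01_s1',
        #       reference_electrode_list=[-1], lfp_band_sampling_rate=100)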
# Error checks on parameters
# electrode_list
available_electrodes = (LFPSelection().LFPElectrode() & {
'nwb_file_name': nwb_file_name}).fetch('electrode_id')
if not np.all(np.isin(electrode_list, available_electrodes)):
raise ValueError(
'All elements in electrode_list must be valid electrode_ids in the LFPSelection table')
# sampling rate
lfp_sampling_rate = (LFP() & {'nwb_file_name': nwb_file_name}).fetch1(
'lfp_sampling_rate')
decimation = lfp_sampling_rate // lfp_band_sampling_rate
if lfp_sampling_rate // decimation != lfp_band_sampling_rate:
raise ValueError(f'lfp_band_sampling rate {lfp_band_sampling_rate} is not an integer divisor of lfp '
f'samping rate {lfp_sampling_rate}')
# filter
if not len((FirFilter() & {'filter_name': filter_name, 'filter_sampling_rate': lfp_sampling_rate}).fetch()):
raise ValueError(
                f'filter {filter_name}, sampling rate {lfp_sampling_rate} is not in the FirFilter table')
# interval_list
        if not len((IntervalList() & {'nwb_file_name': nwb_file_name, 'interval_list_name': interval_list_name}).fetch()):
raise ValueError(f'interval list {interval_list_name} is not in the IntervalList table; the list must be '
'added before this function is called')
# reference_electrode_list
if len(reference_electrode_list) != 1 and len(reference_electrode_list) != len(electrode_list):
raise ValueError(
'reference_electrode_list must contain either 1 or len(electrode_list) elements')
# add a -1 element to the list to allow for the no reference option
available_electrodes = np.append(available_electrodes, [-1])
if not np.all(np.isin(reference_electrode_list, available_electrodes)):
raise ValueError('All elements in reference_electrode_list must be valid electrode_ids in the LFPSelection '
'table')
# make a list of all the references
ref_list = np.zeros((len(electrode_list),))
ref_list[:] = reference_electrode_list
key = dict()
key['nwb_file_name'] = nwb_file_name
key['filter_name'] = filter_name
key['filter_sampling_rate'] = lfp_sampling_rate
key['interval_list_name'] = interval_list_name
key['lfp_band_sampling_rate'] = lfp_sampling_rate // decimation
# insert an entry into the main LFPBandSelectionTable
self.insert1(key, skip_duplicates=True)
# remove the keys that are not used for the LFPBandElectrode table
key.pop('interval_list_name')
key.pop('lfp_band_sampling_rate')
# get all of the current entries and delete any that are not in the list
elect_id, ref_id = (self.LFPBandElectrode() & key).fetch(
'electrode_id', 'reference_elect_id')
for e, r in zip(elect_id, ref_id):
if not len(np.where((np.asarray(electrode_list) == e) & (ref_list == r))[0]):
key['electrode_id'] = e
key['reference_elect_id'] = r
(self.LFPBandElectrode() & key).delete()
# iterate through all of the new elements and add them
for e, r in zip(electrode_list, ref_list):
key['electrode_id'] = e
key['electrode_group_name'] = (
Electrode & {'electrode_id': e}).fetch1('electrode_group_name')
key['reference_elect_id'] = r
self.LFPBandElectrode().insert1(key, skip_duplicates=True)
@schema
class LFPBand(dj.Computed):
definition = """
-> LFPBandSelection
---
-> AnalysisNwbfile
filtered_data_object_id: varchar(80) # the NWB object ID for loading this object from the file
"""
def make(self, key):
# get the NWB object with the lfp data; FIX: change to fetch with additional infrastructure
lfp_object = (LFP() & {'nwb_file_name': key['nwb_file_name']}).fetch_nwb()[
0]['lfp']
# load all the data to speed filtering
lfp_data = np.asarray(
lfp_object.data, dtype=type(lfp_object.data[0][0]))
# lfp_timestamps = np.asarray(lfp_object.timestamps, dtype=type(lfp_object.timestamps[0]))
# get the electrodes to be filtered and their references
lfp_band_elect_id, lfp_band_ref_id = (LFPBandSelection().LFPBandElectrode() & key).fetch('electrode_id',
'reference_elect_id')
# get the indices of the electrodes to be filtered and the references
lfp_band_elect_index = get_electrode_indices(
lfp_object, lfp_band_elect_id)
lfp_band_ref_index = get_electrode_indices(lfp_object, lfp_band_ref_id)
# subtract off the references for the selected channels
for index, elect_index in enumerate(lfp_band_elect_index):
if lfp_band_ref_id[index] != -1:
# subtract the single reference channel that corresponds to this electrode
lfp_data[:, elect_index] = lfp_data[:, elect_index] - \
lfp_data[:, lfp_band_ref_index[index]]
lfp_sampling_rate = (LFP() & {'nwb_file_name': key['nwb_file_name']}).fetch1(
'lfp_sampling_rate')
interval_list_name, lfp_band_sampling_rate = (LFPBandSelection() & key).fetch1('interval_list_name',
'lfp_band_sampling_rate')
valid_times = (IntervalList() & {
'interval_list_name': interval_list_name}).fetch1('valid_times')
filter_name, filter_sampling_rate, lfp_band_sampling_rate = (LFPBandSelection() & key).fetch1(
'filter_name', 'filter_sampling_rate', 'lfp_band_sampling_rate')
decimation = int(lfp_sampling_rate) // lfp_band_sampling_rate
# get the LFP filter that matches the raw data
filter = (FirFilter() & {'filter_name': filter_name} &
{'filter_sampling_rate': filter_sampling_rate}).fetch(as_dict=True)
if len(filter) == 0:
raise ValueError(f'Filter {filter_name} with sampling_rate {filter_sampling_rate} does not exist in the '
'FirFilter table')
filter_coeff = filter[0]['filter_coeff']
if len(filter_coeff) == 0:
print(
f'Error in LFPBand: filter {filter_name} has no coefficients for sampling rate {filter_sampling_rate}')
return None
# create the analysis nwb file to store the results.
lfp_band_file_name = AnalysisNwbfile().create(key['nwb_file_name'])
lfp_band_file_abspath = AnalysisNwbfile().get_abs_path(lfp_band_file_name)
# filter the data and write it to the nwb file
filtered_data_object_id = FirFilter().filter_data_nwb(lfp_band_file_abspath, lfp_object, filter_coeff,
valid_times, lfp_band_elect_id, decimation)
key['analysis_file_name'] = lfp_band_file_name
key['filtered_data_object_id'] = filtered_data_object_id
self.insert1(key)
def fetch_nwb(self, *attrs, **kwargs):
return fetch_nwb(self, (AnalysisNwbfile, 'analysis_file_abs_path'), *attrs, **kwargs)
``` |
{
"source": "jihyunbak/ORnetwork",
"score": 3
} |
#### File: ORnetwork/Code_Py/aux.py
```python
import numpy as np
import pandas as pd
# for stat
from scipy.sparse import coo_matrix
from scipy import stats
# for io
import csv
# for plot
import matplotlib as mpl
import matplotlib.pyplot as plt
# === ds: custom data structure
class Tray:
''' empty class, to emulate Matlab's struct '''
def __init__(self):
pass
def get_attr_keys(self):
dkey = self.__dict__.keys()
return dkey
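# Minimal usage sketch for Tray (attribute names are illustrative):
# t = Tray()
# t.label = 'theta'
# t.values = [1, 2, 3]
# list(t.get_attr_keys())  # -> ['label', 'values']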
# /
# === dm: data manipulation
# --- pandas DataFrame specific
def collect_df_rows_by_index(df, idx_input, drop=True):
# should extend for the bad-index case (NaN)
idx = idx_input.astype('int')
df_new = df.iloc[idx].reset_index(drop=drop)
return df_new
def convert_data_types(df, fields, type):
for myfield in fields:
myvalue = getattr(df, myfield).astype(type)
setattr(df, myfield, myvalue)
return df
def sort_and_reset_index(intab, columns, drop=True):
''' sort by columns and reset index '''
sorttab = intab.sort_values(columns)
outtab = sorttab.reset_index(drop=drop)
return outtab
# --- other
def find_equal(listlike, targ):
idx_hit = []
for m in range(len(listlike)):
if targ == listlike[m]:
idx_hit.append(m)
return idx_hit
def find_idx(testlist_bool):
# https://stackoverflow.com/questions/364621/how-to-get-items-position-in-a-list
myidx = [i for i,x in enumerate(testlist_bool) if x == 1]
return myidx
def findby(vlist, testlist_bool):
myidx_list = find_idx(testlist_bool)
val = [vlist[i] for i in myidx_list]
return val
def isin_lists(list, testlist):
y_array = np.isin(np.array(list), np.array(testlist))
y = y_array.tolist()
return y
def normalize_by(mat, axis):
mysum = np.sum(mat, axis=axis)
newmat = np.true_divide(mat, mysum)
return newmat
def center_by(mat, axis):
mymean = np.mean(mat, axis=axis)
newmat = mat - mymean
return newmat
# /
# === stat: reusable statistics
# --- counting & probability estimation
def count_with_weight(vec, wgt=None, *args):
# v_uniq, v_cnt = np.unique(vec, return_counts=True)
if wgt is None:
wgt = np.ones(np.size(vec))
v_uniq = np.unique(vec).tolist()
v_wgtcnt = []
for vu in v_uniq:
myidx = find_idx(isin_lists(vec, vu))
mywgtcnt = sum([wgt[i] for i in myidx])
v_wgtcnt.append(mywgtcnt)
return v_uniq, v_wgtcnt
def samp_prob1(vec, wgt=None, normalize=True):
''' sampled probability for one variable with discrete values '''
v_uniq, v_cnt = count_with_weight(vec, wgt)
cnt_mat = np.matrix(v_cnt).transpose()
if normalize:
cnt_mat = normalize_by(cnt_mat, axis=None) # single dimension
return cnt_mat, v_uniq
def samp_joint_prob(v1, v2, wgt=None, normalize=True):
''' sampled joint probability for two variables v1 and v2 '''
if wgt is None:
wgt = np.ones(np.size(v1))
# use COO matrix
v1uniq, v1iinv = np.unique(v1, return_inverse=True) # renumber
v2uniq, v2iinv = np.unique(v2, return_inverse=True)
mat_shape = (len(v1uniq), len(v2uniq))
cnt_mat_sparse = coo_matrix((wgt, (v1iinv, v2iinv)), shape=mat_shape)
cnt_mat = cnt_mat_sparse.todense()
if normalize:
cnt_mat = cnt_mat / np.sum(cnt_mat) # normalize by all-entries sum
return cnt_mat, v1uniq, v2uniq
def get_joint_counts(vars, wgt, names=('v1', 'v2')):
'''
given simultaneous samples of two variables v1 and v2,
compute the joint counts and probabilities and return DataFrame objects.
each row is a distinct value of v1 (first input);
each column is a distinct value of v2 (second input).
INPUT: vars = (v1, v2) and names = (v1name, v2name) are tuples.
OUTPUT: (cnts, probs) with Tray objects cnts and probs.
'''
# unpack input
(h2, b2) = vars
(v1name, v2name) = names
# -- count matrices
# receptor code groups (marginal counts)
p_h, h2_uniq1 = samp_prob1(h2, wgt=wgt, normalize=True)
cnt_h, _ = samp_prob1(h2, wgt=wgt, normalize=False)
dat_h = np.concatenate((cnt_h.astype('int'), p_h), axis=1)
# perceptual odor categories (marginal counts)
p_b, b2_uniq1 = samp_prob1(b2, wgt=wgt, normalize=True)
cnt_b, _ = samp_prob1(b2, wgt=wgt, normalize=False)
dat_b = np.concatenate((cnt_b.astype('int'), p_b), axis=1)
# joint statistics
p_hb, _, _ = samp_joint_prob(h2, b2, wgt=wgt, normalize=True)
cnt_hb, _, _ = samp_joint_prob(h2, b2, wgt=wgt, normalize=False)
# expected joint distribution (product of marginals)
dat_p_exp = np.multiply(np.matrix(p_h), np.matrix(p_b).transpose())
# -- make DataFrame objects
names_h = [v1name + '=' + str(h) for h in h2_uniq1]
names_b = [v2name + '=' + str(b) for b in b2_uniq1]
cnt_h_df = pd.DataFrame(data=dat_h, index=names_h, columns=['cnt', 'p'])
cnt_b_df = pd.DataFrame(data=dat_b, index=names_b, columns=['cnt', 'p'])
cnt_hb_df = pd.DataFrame(data=cnt_hb.astype('int'), index=names_h, columns=names_b)
p_hb_df = pd.DataFrame(data=p_hb, index=names_h, columns=names_b)
p_exp_df = pd.DataFrame(data=dat_p_exp, index=names_h, columns=names_b)
# -- pack output and return
# raw counts
cnts = Tray()
setattr(cnts, v1name, cnt_h_df)
setattr(cnts, v2name, cnt_b_df)
cnts.joint = cnt_hb_df
# joint probabilities
probs = Tray()
probs.obs = p_hb_df
probs.exp = p_exp_df
return cnts, probs
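# Usage sketch for get_joint_counts (made-up samples):
# h = [0, 0, 1, 1, 2]   # e.g. receptor code group per sample
# b = [1, 1, 1, 2, 2]   # e.g. perceptual odor category per sample
# cnts, probs = get_joint_counts((h, b), wgt=[1, 1, 1, 1, 1], names=('h', 'b'))
# cnts.joint             # joint count DataFrame (rows 'h=...', columns 'b=...')
# probs.obs, probs.exp   # observed joint probabilities vs. product of marginals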
# --- statistical test
def chisq_to_pvals(chisq, dof):
pval_lo = stats.chi2.cdf(chisq, dof)
pval_up = 1 - stats.chi2.cdf(chisq, dof)
return (pval_lo, pval_up)
# /
# === io: file input/output
def csv_to_df(filename, delimiter=','):
'''
assuming a single header line,
read a csv file and return a pandas DataFrame
'''
dat, header = mycsvread(filename, 1, delimiter=delimiter)
df = pd.DataFrame(dat, columns=header[0])
return df
def mycsvread(filename, nheader=0, row_filter=None, \
encoding='utf-8', delimiter=','):
'''
reads from a csv file and returns a list (or two lists)
optionally reads the first n lines separately as a header (default is 0)
optionally specify the encoding (default is utf-8)
'''
# -- default is to read each row as-is
if not row_filter:
row_filter = lambda row: row # dummy function to just return the input
# -- read the file content
mylist = []
myheader = []
cnt = 0
with open(filename, 'r', newline='', encoding=encoding) as f:
reader = csv.reader(f, delimiter=delimiter)
for row in reader:
# read row as header
if(cnt < nheader):
myheader.append(row)
cnt = cnt + 1
continue
# read row as body
myrow = row_filter(row)
mylist.append(myrow)
if nheader>0:
return mylist, myheader
else:
return mylist
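# Usage sketch (hypothetical file name):
# df = csv_to_df('odorants.csv')                       # single header row -> DataFrame
# rows, header = mycsvread('odorants.csv', nheader=1)  # raw rows plus the header line(s)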
# /
# === plot: reusable plots
# --- 2D heatmap ---
def draw_heatmap(data, row_labels, col_labels, filename=None, extend='neither', **kwargs):
fig, ax = plt.subplots()
im = ax.imshow(data, **kwargs)
# tick labels
ax.set_xticks(np.arange(len(col_labels)))
ax.set_yticks(np.arange(len(row_labels)))
ax.set_xticklabels(col_labels)
ax.set_yticklabels(row_labels)
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
# colorbar
cbar = ax.figure.colorbar(im, ax=ax, extend=extend)
# TODO: also annotate values in each cell?
if not filename:
pass
else:
plt.savefig(filename)
print('figure saved to: ' + filename)
plt.show()
def draw_heatmap_df(mydf, **kwargs):
draw_heatmap(mydf.to_numpy(), mydf.index, mydf.columns, **kwargs)
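# Usage sketch for draw_heatmap (random data; extra kwargs are passed through to imshow):
# data = np.random.rand(3, 4)
# draw_heatmap(data, ['r0', 'r1', 'r2'], ['c0', 'c1', 'c2', 'c3'], cmap='viridis')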
# /
```
#### File: ORnetwork/Code_Py/labeling.py
```python
import numpy as np
import pandas as pd
# JHB custom
from Code_Py import aux, bim
Tray = aux.Tray
# /
# === data file loaders
# --- set directories
indir = 'Data/database/' # web-scraped percept data (GoodScents)
passdir = 'Data/database/' # intermediate data being passed
corrdir = 'Data/database/' # cross-list correspondence (Mainland2015 and GoodScents)
# --- functions
def load_mols_with_descriptors_goodscent(add_fixup=False, filter_corr=False):
''' load the list of molecules with odor descriptors '''
# mfilename = indir + 'goodscents_out_merged.csv'
mfilename = indir + 'odorants_with_descriptors.csv'
mtab = aux.csv_to_df(mfilename)
# optionally, add fix-up table
if add_fixup:
fixtab = fixup_mols_with_descriptors(mtab.columns, mtab.shape[0])
mtab = pd.concat([mtab, fixtab], ignore_index=True)
# then optionally, filter odorants in Mainland2015 dataset (keep Ltab order)
if filter_corr:
mtab_full = mtab
Ltab = load_odorants_Mainland15_with_goodscent_corr()
mtab = aux.collect_df_rows_by_index(mtab_full, Ltab.mtab_idx)
return mtab
def fixup_mols_with_descriptors(columns, ibase):
'''
add odor information for test molecules
that are not covered by GoodScents CAS-numbered list
'''
# add information (TODO: should replace by a text file)
addlist = [\
['benzene', 'aromatic', 'aromatic;sweet;gasoline-like'],
['5,5-Dimethyl-1,3-cyclohexanedione', '', 'odorless;weak'],
['nonanedioic acid', '', 'fatty'],
['thioglycolic acid', '', 'unpleasant;pungent;rotten'],
['1-octanethiol', 'sulfurous', 'sulfurous'],
['TMT', '', 'fox-odor'],
['(+)-menthol', 'mentholic', 'mentholic;cooling;minty'],
['androstenone', 'sweaty', 'sweaty;urinous;woody;floral'],
['banana', 'fruity', 'banana;fruity;creamy;tropical'],
['androstadienone', '', 'sweaty'],
['1-formylpiperidine', '', 'odorless'],
['3-methyl-2-hexenoic acid', '', 'sweaty'],
['butyric acid', '', 'unpleasant;rancid;penetrating;obnoxious']]
# match format to mtab
def match_format_to_mtab(fixlist, ibase, columns):
fixlist_ext = []
for i in range(len(fixlist)):
nr = ibase + i
front_matter = [nr, 0, 0, 'N/A', '']
fixlist_ext.append(front_matter + fixlist[i])
fix_df = pd.DataFrame(fixlist_ext, columns=columns)
return fix_df
# "fixed" dataset with added information
fix_df = match_format_to_mtab(addlist, ibase, columns)
return fix_df
def load_mols_with_descriptor_indices_goodscent(add_fixup=False, filter_corr=False):
# load nonzero indices
fixtag = 'fixup_' if add_fixup else ''
# ifilename = passdir + 'goodscents_' + fixtag + 'out_merged2_basis.csv'
ifilename = passdir + 'odorants_with_descriptor_indices.csv'
ilist, iheader = aux.mycsvread(ifilename, 1)
itab = pd.DataFrame(data=ilist, columns=iheader[0])
# optionally, filter odorants in Mainland2015 dataset (keep Ltab order)
if filter_corr:
Ltab = load_odorants_Mainland15_with_goodscent_corr()
itab = aux.collect_df_rows_by_index(itab, Ltab.mtab_idx)
ilist = [ilist[midx] for midx in Ltab.mtab_idx.astype('int')]
return itab, ilist
def load_descriptor_basis_goodscent(add_fixup=False):
# load basis words
fixtag = 'fixup_' if add_fixup else ''
# bfilename = passdir + 'goodscents_' + fixtag + 'descriptor_basis.tsv' # 5/29/2019 updated
bfilename = passdir + 'descriptor_basis.tsv'
bdat = aux.mycsvread(bfilename, delimiter='\t')
btab = pd.DataFrame(data=bdat, columns=['dim', 'word'])
return btab
def load_odorants_Mainland15_with_goodscent_corr():
# load Mainland2015 odorant list (with goodScents correspondence)
# lfilename = corrdir + 'Ltab_corr3.csv' # with out-of-GoodScents input
lfilename = corrdir + 'odorants_corr.csv' # with out-of-GoodScents input
Ltab = aux.csv_to_df(lfilename)
return Ltab
def load_descriptors_with_Castro13_category():
# load manually annotated Castro2013 grouping
# filename = passdir + 'descriptor_with_rcode_c13grp_manual.csv'
filename = passdir + 'descriptor_proxy_category_map.csv'
ctab = aux.csv_to_df(filename, delimiter=',')
return ctab
# --- wrapper ---
def load_descriptor_space(add_fixup=True, filter_corr=False, dataset='goodscents'):
if dataset == 'goodscents':
mtab = load_mols_with_descriptors_goodscent(add_fixup, filter_corr) # mols with descriptors
itab, ilist = load_mols_with_descriptor_indices_goodscent(add_fixup, filter_corr) # nonzero idx
btab = load_descriptor_basis_goodscent(add_fixup) # basis words
# else:
# todo
return mtab, itab, ilist, btab
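# Usage sketch (requires the Data/database/ files referenced above):
# mtab, itab, ilist, btab = load_descriptor_space(add_fixup=True, filter_corr=True)
# mtab: molecules with descriptors; itab/ilist: nonzero descriptor indices; btab: descriptor basis words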
# /
# === perceptual odor groups
def split_collect_idx(mystr, delimiter=';'):
idxlist = []
if (mystr is not None) and (len(mystr) > 0):
nb_words_list = mystr.split(delimiter)
for word in nb_words_list:
idx_word = int(word)
idxlist.append(idx_word)
return idxlist
def unpack_itab_to_type_all(itab):
ilist2 = []
for j in range(len(itab)):
il_type = split_collect_idx(itab.iloc[j].OdorTypeN)
il_words = split_collect_idx(itab.iloc[j].OdorDescriptorN)
il_all = list(set(il_type + il_words)) # union
mylist = [il_type, il_all]
ilist2.append(mylist)
return ilist2
def assign_wgroup_to_odorants(itab, ctab, Klist=None):
''' assign to each odorant a perceptual odor category index '''
# unpack index data: [[typeN], [typeN and descriptorN merged]]
ilist2 = unpack_itab_to_type_all(itab)
# unpack category indices
u = ctab.widx.astype('int') # index to Wtab
cgrp = ctab.c13grp_manual.astype('int') # manually assigned Castro2013 groups
# detect Klist if not provided
if not Klist:
Klist_full = np.sort(np.unique(cgrp)).tolist()
Klist = [k for k in Klist_full if k>0]
# make a list of descriptor categories
cglist2 = list_descriptor_categories(ilist2, cgrp, u)
# fractional assignment
odorant_cgrp = fractional_assignment(cglist2, Klist)
# add more fields to output
out = odorant_cgrp
out.list = cglist2
out.columns = Klist
out.rows = ilist2
return out
def list_descriptor_categories(ilist2, cgrp, u):
''' collect the odor category labels for the descriptors'''
def collect_list(sublist):
mycg = []
for wi in sublist:
wi1 = (wi + 1) # make 1-based indices
v2 = aux.findby(cgrp, u == wi1)
mycg = mycg + v2
if not v2: # indicates error
print(wi1)
return mycg
# loop over odorants
cglist2 = []
for j in range(len(ilist2)):
# list all associated descriptor categories
mycg = []
for sublist in ilist2[j]:
mycg_sub = collect_list(sublist)
mycg.append(mycg_sub)
cglist2.append(mycg)
return cglist2
def fractional_assignment(cglist2, Klist):
''' fractional assignment '''
# local function
def count_hits(clist):
mymat = np.equal(np.matrix(clist).transpose(), np.array(Klist)).astype('int')
myvec = np.sum(mymat, axis=0)
return myvec
# loop
vecs_cnt = []
vecs_norm = []
inds_best = []
for j in range(len(cglist2)):
# odor type
myvec_type = count_hits(cglist2[j][0])
# odor descriptors
myvec_words = count_hits(cglist2[j][1])
# vote
myvec = myvec_words + 0.5 * myvec_type # let OdorType break ties
myvecsum = np.sum(myvec)
if myvecsum > 0:
myvec_norm = list(myvec / myvecsum)
idx_max = np.argmax(myvec) + 1 # make 1-based
else:
myvec_norm = myvec # all zeros
idx_max = 0 # keep 0
vecs_cnt.append(myvec)
vecs_norm.append(myvec_norm)
inds_best.append(idx_max)
odorant_cgrp = Tray()
odorant_cgrp.vecs_cnt = vecs_cnt
odorant_cgrp.vecs_norm = vecs_norm
odorant_cgrp.best = inds_best
return odorant_cgrp
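# Illustration of the voting rule above (made-up inputs, Klist = [1, 2]):
# cglist2 = [[[1], [1, 1, 2]],   # odor type votes 1; descriptors vote 1, 1, 2 -> category 1 wins
#            [[2], [2]]]         # everything votes 2
# fractional_assignment(cglist2, [1, 2]).best  # -> [1, 2]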
# /
``` |
{
"source": "jihyunbak/rec_to_nwb",
"score": 2
} |
#### File: builder/originators/mda_invalid_time_originator.py
```python
import os
import logging.config
from rec_to_nwb.processing.nwb.components.mda.time.invalid.fl_mda_invalid_time_manager import FlMdaInvalidTimeManager
from rec_to_nwb.processing.nwb.components.mda.time.invalid.mda_invalid_time_injector import MdaInvalidTimeInjector
path = os.path.dirname(os.path.abspath(__file__))
logging.config.fileConfig(fname=str(path) + '/../../../logging.conf', disable_existing_loggers=False)
logger = logging.getLogger(__name__)
class MdaInvalidTimeOriginator:
def __init__(self, header, metadata):
self.fl_mda_invalid_time_manager = FlMdaInvalidTimeManager(
sampling_rate=float(header.configuration.hardware_configuration.sampling_rate),
metadata=metadata
)
self.mda_invalid_time_injector = MdaInvalidTimeInjector()
def make(self, nwb_content):
logger.info('MDA invalid times: Building')
mda_invalid_times = self.fl_mda_invalid_time_manager.get_fl_mda_invalid_times(nwb_content)
logger.info('MDA invalid times: Injecting')
self.mda_invalid_time_injector.inject_all(mda_invalid_times, nwb_content)
```
#### File: builder/originators/pos_invalid_originator.py
```python
import os
import logging.config
from rec_to_nwb.processing.nwb.components.position.time.invalid.fl_pos_invalid_time_manager import \
FlPosInvalidTimeManager
from rec_to_nwb.processing.nwb.components.position.time.invalid.pos_invalid_time_injector import PosInvalidTimeInjector
path = os.path.dirname(os.path.abspath(__file__))
logging.config.fileConfig(fname=str(path) + '/../../../logging.conf', disable_existing_loggers=False)
logger = logging.getLogger(__name__)
class PosInvalidTimeOriginator:
def __init__(self, metadata):
self.fl_pos_invalid_time_manager = FlPosInvalidTimeManager(metadata)
self.pos_invalid_time_injector = PosInvalidTimeInjector()
def make(self, nwb_content):
logger.info('POS invalid times: Building')
pos_invalid_times = self.fl_pos_invalid_time_manager.get_fl_pos_invalid_times(nwb_content)
logger.info('POS invalid times: Injecting')
self.pos_invalid_time_injector.inject_all(pos_invalid_times, nwb_content)
```
#### File: builder/originators/shanks_originator.py
```python
import os
import logging.config
from rec_to_nwb.processing.nwb.components.device.probe.shanks.fl_shank_manager import FlShankManager
from rec_to_nwb.processing.nwb.components.device.probe.shanks.shank_creator import ShankCreator
path = os.path.dirname(os.path.abspath(__file__))
logging.config.fileConfig(fname=str(path) + '/../../../logging.conf', disable_existing_loggers=False)
logger = logging.getLogger(__name__)
class ShanksOriginator:
def __init__(self, probes, metadata):
self.fl_shank_manager = FlShankManager(probes, metadata['electrode groups'])
self.shank_creator = ShankCreator()
def make(self, shanks_electrodes_dict):
logger.info('Probes-Shanks: Building')
fl_shanks_dict = self.fl_shank_manager.get_fl_shanks_dict(shanks_electrodes_dict)
logger.info('Probes-Shanks: Creating')
shanks_dict = {}
for probe_type, fl_shanks in fl_shanks_dict.items():
shanks_dict[probe_type] = [self.shank_creator.create(fl_shank) for fl_shank in fl_shanks]
return shanks_dict
```
#### File: header/module/channel.py
```python
class Channel:
def __init__(self, element):
self.tree = element
self.tag = self.tree.tag
self.id = self.tree.get('id')
self.bit = self.tree.get('bit')
self.data_type = self.tree.get('dataType')
self.start_byte = self.tree.get('startByte')
self.input = self.tree.get('input')
```
#### File: header/module/device.py
```python
from .channel import Channel
class Device:
def __init__(self, element):
self.tree = element
self.channels = \
[Channel(channel_element) for channel_element in self.tree.findall('Channel')]
self.tag = self.tree.tag
self.name = self.tree.get('name')
self.num_bytes = self.tree.get('numBytes')
self.available = self.tree.get('available')
self.packet_order_preference = self.tree.get('packetOrderPreference')
```
#### File: header/module/hardware_configuration.py
```python
from .device import Device
class HardwareConfiguration:
def __init__(self, element):
self.tree = element
self.devices = [Device(device_element) for device_element in self.tree.findall('Device')]
self.tag = self.tree.tag
self.sampling_rate = self.tree.get('samplingRate')
self.num_channels = self.tree.get('numChannels')
```
#### File: header/module/spike_configuration.py
```python
from .spike_n_trode import SpikeNTrode
class SpikeConfiguration:
def __init__(self, element):
self.tree = element
self.spike_n_trodes = [SpikeNTrode(spike_n_trode_element) for spike_n_trode_element
in self.tree.findall('SpikeNTrode')]
self.tag = self.tree.tag
self.categories = self.tree.get('categories')
```
#### File: processing/metadata/metadata_manager.py
```python
from rec_to_nwb.processing.metadata.metadata_extractor import MetadataExtractor
from rec_to_nwb.processing.nwb.components.device.probe.fl_probe_extractor import FlProbesExtractor
from rec_to_nwb.processing.tools.beartype.beartype import beartype
from rec_to_nwb.processing.validation.metadata_validator import MetadataValidator
from rec_to_nwb.processing.validation.validation_registrator import ValidationRegistrator
class MetadataManager:
"""
Args:
metadata_path (string): path to the .yml file with metadata describing the experiment
probes_paths (list of strings): list of paths to the .yml files describing the probes used in the experiment
"""
@beartype
def __init__(self, metadata_path: str, probes_paths: list):
self.__validate(metadata_path, probes_paths)
self.probes_paths = probes_paths
self.metadata_path = metadata_path
self.fl_probes_extractor = FlProbesExtractor()
self.metadata_extractor = MetadataExtractor()
self.metadata = self.__get_metadata(metadata_path)
self.probes = self.__get_probes(probes_paths)
@staticmethod
def __validate(metadata_path, probes_paths):
validation_registrator = ValidationRegistrator()
validation_registrator.register(MetadataValidator(metadata_path, probes_paths))
validation_registrator.validate()
def __get_metadata(self, metadata_path):
return self.metadata_extractor.extract_metadata(metadata_path)
def __get_probes(self, probes_paths):
return self.fl_probes_extractor.extract_probes_metadata(probes_paths)
def __str__(self):
metadata_info = 'Experimenter: ' + self.metadata['experimenter name'] + \
'\nDescription: ' + self.metadata['experiment description'] + \
'\nSession Id: ' + self.metadata['session_id'] + \
'\nSubject: ' + self.metadata['subject']['description']
probe_types = list(map(lambda probe: probe['probe_type'], self.probes))
probe_types_info = '\n\nAvailable probe types: ' + str(probe_types)
return 'Experiment Info:\n' + metadata_info + probe_types_info
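# Usage sketch (illustrative paths):
# nwb_metadata = MetadataManager(
#     metadata_path='path/to/experiment_metadata.yml',
#     probes_paths=['path/to/probe1.yml', 'path/to/probe2.yml'])
# print(nwb_metadata)  # experimenter, description, session id, subject and available probe types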
```
#### File: components/associated_files/fl_associated_file.py
```python
from rec_to_nwb.processing.tools.beartype.beartype import beartype
class FlAssociatedFile:
@beartype
def __init__(self, name: str, description: str, content: str, task_epochs: str):
self.name = name
self.description = description
self.content = content
self.task_epochs = task_epochs
```
#### File: components/associated_files/fl_associated_files_builder.py
```python
from rec_to_nwb.processing.nwb.components.associated_files.fl_associated_file import FlAssociatedFile
class FlAssociatedFilesBuilder:
@staticmethod
def build(name, description, content, task_epochs):
return FlAssociatedFile(name, description, content, task_epochs)
```
#### File: device/probe/fl_probe_manager.py
```python
from rec_to_nwb.processing.nwb.components.device.probe.fl_probe_builder import FlProbeBuilder
from rec_to_nwb.processing.tools.beartype.beartype import beartype
from rec_to_nwb.processing.tools.filter_probe_by_type import filter_probe_by_type
class FlProbeManager:
@beartype
def __init__(self, probes_metadata: list):
self.probes_metadata = probes_metadata
self.fl_probe_builder = FlProbeBuilder()
self.probe_id = -1
@beartype
def get_fl_probes(self, shanks_dict: dict, probes_valid_map: set):
fl_probes = []
for probe_type in probes_valid_map:
probe_metadata = filter_probe_by_type(self.probes_metadata, probe_type)
fl_probes.append(self._build_single_probe(
probe_metadata=probe_metadata,
shanks=shanks_dict[probe_type])
)
return fl_probes
@beartype
def _build_single_probe(self, probe_metadata: dict, shanks: list):
self.probe_id += 1
return self.fl_probe_builder.build(
probe_id=self.probe_id,
name='probe ' + str(self.probe_id),
probe_type=probe_metadata['probe_type'],
units=probe_metadata['units'],
probe_description=probe_metadata['probe_description'],
contact_side_numbering=probe_metadata['contact_side_numbering'],
contact_size=float(probe_metadata['contact_size']),
shanks=shanks
)
```
#### File: probe/shanks_electrodes/fl_shanks_electrode.py
```python
class FlShanksElectrode:
def __init__(self, shanks_electrode_id, rel_x, rel_y, rel_z):
self.shanks_electrode_id = shanks_electrode_id
self.rel_x = rel_x
self.rel_y = rel_y
self.rel_z = rel_z
```
#### File: probe/shanks_electrodes/shanks_electrode_creator.py
```python
from ndx_franklab_novela.probe import ShanksElectrode
from rec_to_nwb.processing.nwb.components.device.probe.shanks_electrodes.fl_shanks_electrode import FlShanksElectrode
from rec_to_nwb.processing.tools.beartype.beartype import beartype
from rec_to_nwb.processing.tools.validate_parameters import validate_parameters_not_none
class ShanksElectrodeCreator:
@classmethod
@beartype
def create(cls, fl_shanks_electrode: FlShanksElectrode) -> ShanksElectrode:
validate_parameters_not_none(__name__, fl_shanks_electrode.shanks_electrode_id,
fl_shanks_electrode.rel_x, fl_shanks_electrode.rel_y, fl_shanks_electrode.rel_z)
return ShanksElectrode(
name=str(fl_shanks_electrode.shanks_electrode_id),
rel_x=float(fl_shanks_electrode.rel_x),
rel_y=float(fl_shanks_electrode.rel_y),
rel_z=float(fl_shanks_electrode.rel_z),
)
```
#### File: probe/shanks/fl_shank.py
```python
class FlShank:
def __init__(self, shank_id, shanks_electrodes):
self.shank_id = shank_id
self.shanks_electrodes = shanks_electrodes
```
#### File: components/dio/dio_extractor.py
```python
import logging.config
import os
# import numpy as np
from rec_to_binaries.read_binaries import readTrodesExtractedDataFile
from rec_to_nwb.processing.time.continuous_time_extractor import ContinuousTimeExtractor
from rec_to_nwb.processing.time.timestamp_converter import TimestampConverter
path = os.path.dirname(os.path.abspath(__file__))
logging.config.fileConfig(fname=str(path) + '/../../../../logging.conf', disable_existing_loggers=False)
logger = logging.getLogger(__name__)
class DioExtractor:
@staticmethod
def extract_dio_for_single_dataset(filtered_files, continuous_time_file,
convert_timestamps=True):
single_dataset_data = {}
continuous_time = ContinuousTimeExtractor.get_continuous_time_array_file(continuous_time_file)
for dio_sensor in filtered_files:
try:
dio_data = readTrodesExtractedDataFile(filtered_files[dio_sensor])
# dio_data['data'] is a labeled array with 'time' and 'state' columns. 'time' corresponds to sample count
single_dataset_data[dio_sensor] = DioExtractor.__get_dio_time_series(
dio_data, continuous_time, convert_timestamps)
# keys, values = DioExtractor.__get_dio_time_series(dio_data, continuous_time_dict
# single_dataset_data[dio_sensor] = ([keys, values])
except KeyError as error:
message = "there is no " + str(dio_sensor) + ", error: "
logger.exception(message + str(error))
except TypeError as error:
message = "there is no data for event " + str(dio_sensor) + ", error: "
logger.exception(message + str(error))
return single_dataset_data
@staticmethod
def __get_dio_time_series(dio_data, continuous_time, convert_timestamps=True):
dio_state = dio_data['data']['state']
time_counts = dio_data['data']['time'] # time sample counts
if not convert_timestamps:
return [time_counts, dio_state]
converted_timestamps = TimestampConverter.convert_timestamps(continuous_time, time_counts)
#values = np.asarray(dio_data['state'], dtype='bool')
# values = [bool(recorded_event[1]) for recorded_event in dio_data['data']]
# keys = [recorded_event[0] for recorded_event in dio_data['data']]
# keys = DioExtractor.__convert_keys(continuoues_time_dict, keys)
# return keys, values
return [converted_timestamps, dio_state]
# @staticmethod
# def __convert_keys(continuous_time_array, keys):
# converted_timestamps = TimestampConverter.convert_timestamps(continuous_time_array, keys)
# return converted_timestamps
```
#### File: components/dio/dio_files.py
```python
import os
class DioFiles:
def __init__(self, directories, dio_metadata):
self.directories = directories
self.dio_metadata = dio_metadata
def get_files(self):
multiple_datasets_dio_files = [self.__get_dict(dataset) for dataset in self.directories]
filtered_datasets_dio_files = self.__filter_files(multiple_datasets_dio_files, self.dio_metadata)
return filtered_datasets_dio_files
@classmethod
def __filter_files(cls, multiple_datasets_dio_files, dio_metadata):
return [{dio_file: single_dataset[dio_file] for dio_file in single_dataset
if dio_file in [dio_event['description'] for dio_event in dio_metadata]}
for single_dataset in multiple_datasets_dio_files]
@classmethod
def __get_dict(cls, directory):
dio_dict = {}
files = os.listdir(directory)
files.sort()
for file in files:
if file.endswith('.dat'):
split_filename = file.split('.')
dio_dict[split_filename[-2].split('_')[1]] = directory + '/' + file
return dio_dict
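# Example of the key derivation above: a file named '20190718_beans_01_s1.dio_Din1.dat'
# splits on '.' into [..., 'dio_Din1', 'dat']; the key is 'Din1' (the part of 'dio_Din1'
# after its underscore) and the value is the full path to the file.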
```
#### File: components/electrode_group/fl_nwb_electrode_group_manager.py
```python
from ndx_franklab_novela.probe import Probe
from rec_to_nwb.processing.nwb.components.electrode_group.fl_nwb_electrode_group_builder import FlNwbElectrodeGroupBuilder
from rec_to_nwb.processing.tools.beartype.beartype import beartype
class FlNwbElectrodeGroupManager:
"""Manage ElectrodeGroup data and call FlNwbElectrodeGroupBuilder to create list of FlNwbElectrodeGroupBuilder.
Args:
electrode_groups_metadata (list): list that contains electrode group metadata dicts
Methods:
get_fl_nwb_electrode_groups()
"""
@beartype
def __init__(self, electrode_groups_metadata: list):
self.electrode_groups_metadata = electrode_groups_metadata
@beartype
def get_fl_nwb_electrode_groups(self, probes: list, electrode_groups_valid_map: set):
"""Manage ElectrodeGroup data and call FlNwbElectrodeGroupBuilder to create list of FlNwbElectrodeGroupBuilder.
Args:
probes (list): list of existing probes
electrode_groups_valid_map (set): Set of electrode groups ids that are not corrupted
Returns:
list: electrode group objects built by FlNwbElectrodeGroupBuilder
"""
fl_nwb_electrode_groups = []
for electrode_group_metadata in self.electrode_groups_metadata:
if electrode_group_metadata['id'] in electrode_groups_valid_map:
probe = self.__get_probe_by_type(probes, electrode_group_metadata['device_type'])
fl_nwb_electrode_groups.append(
FlNwbElectrodeGroupBuilder.build(
metadata=electrode_group_metadata,
device=probe
)
)
return fl_nwb_electrode_groups
@staticmethod
@beartype
def __get_probe_by_type(probes: list, probe_type: str) -> Probe:
for probe in probes:
if probe_type == probe.probe_type:
return probe
```
#### File: components/epochs/fl_epochs_extractor.py
```python
from rec_to_binaries.read_binaries import readTrodesExtractedDataFile
class FlEpochsExtractor:
def __init__(self, continuous_time_files):
self.continuous_time_files = continuous_time_files
def extract_epochs(self):
session_start_times = []
session_end_times = []
for continuous_time_file in self.continuous_time_files:
continuous_time_data = self.__read_continuous_time_file(continuous_time_file)
session_start_times.append(float(continuous_time_data['data'][0][1]) / 1E9)
session_end_times.append(float(continuous_time_data['data'][-1][1]) / 1E9)
return session_start_times, session_end_times
def __read_continuous_time_file(self, continuous_time_file):
return readTrodesExtractedDataFile(continuous_time_file)
```
#### File: components/epochs/fl_epochs.py
```python
from rec_to_nwb.processing.tools.validate_parameters import validate_parameters_equal_length
class FlEpochs:
def __init__(self, session_start_times, session_end_times, tags):
validate_parameters_equal_length(__name__, session_start_times, session_end_times, tags)
self.session_start_times = session_start_times
self.session_end_times = session_end_times
self.tags = tags
```
#### File: components/mda/mda_content.py
```python
class MdaContent:
def __init__(self, mda_data, mda_timestamps):
self.mda_data = mda_data
self.mda_timestamps = mda_timestamps
```
#### File: time/valid/fl_mda_valid_time_builder.py
```python
from rec_to_nwb.processing.nwb.components.mda.time.valid.fl_mda_valid_time import FlMdaValidTime
from rec_to_nwb.processing.tools.beartype.beartype import beartype
class FlMdaValidTimeBuilder:
@staticmethod
@beartype
def build(start_time: float, stop_time: float):
return FlMdaValidTime(
start_time=start_time,
stop_time=stop_time
)
```
#### File: time/valid/fl_mda_valid_time_manager.py
```python
import numpy as np
from pynwb import NWBFile
from rec_to_nwb.processing.exceptions.missing_data_exception import MissingDataException
from rec_to_nwb.processing.nwb.components.mda.time.valid.fl_mda_valid_time_builder import FlMdaValidTimeBuilder
from rec_to_nwb.processing.tools.beartype.beartype import beartype
from rec_to_nwb.processing.tools.get_times_period_multiplier import get_times_period_multiplier
class FlMdaValidTimeManager:
"""" Manage MDA data and call FLMdaValidTimeBuilder to create list of FLMdaValidTime objects.
Args:
sampling_rate (float): Sampling rate of MDA data
metadata (dict): Project metadata
Methods:
get_fl_mda_valid_times()
"""
@beartype
def __init__(self, sampling_rate: float, metadata: dict):
self.sampling_rate = sampling_rate
self.period_multiplier = get_times_period_multiplier(metadata)
@beartype
def get_fl_mda_valid_times(self, nwb_content: NWBFile, gaps_margin: float = 0.000001) -> list:
""" Manage MDA data and call FlMdaValidTimeBuilder for every valid gap.
Args:
nwb_content (NWBFile): NWBFile object with MDA timestamps inside
gaps_margin (float): Error margin for valid gaps
Raises:
MissingDataException: If timestamps are empty
Returns:
list of FlMdaValidTime objects
"""
timestamps = self.__get_mda_timestamps(nwb_content)
period = 1 / self.sampling_rate
valid_times = self.__get_mda_valid_times(timestamps, period, gaps_margin)
return self.__build_mda_valid_times(valid_times)
@staticmethod
def __get_mda_timestamps(nwb_content):
try:
timestamps = np.array(
nwb_content.acquisition['e-series'].timestamps)
except KeyError:
raise MissingDataException('MDA timestamps are not found')
if timestamps.any():
return timestamps
raise MissingDataException('MDA timestamps are not found')
def __get_mda_valid_times(self, timestamps, period, gaps_margin):
min_valid_len = 3 * gaps_margin
timestamps = timestamps[~np.isnan(timestamps)]
gaps = np.diff(timestamps) > period * self.period_multiplier
gap_indexes = np.asarray(np.where(gaps))
gap_start = np.insert(gap_indexes + 1, 0, 0)
gap_end = np.append(gap_indexes, np.asarray(len(timestamps) - 1))
valid_indices = np.vstack([gap_start, gap_end]).transpose()
valid_times = timestamps[valid_indices]
valid_times[:, 0] = valid_times[:, 0] + gaps_margin
valid_times[:, 1] = valid_times[:, 1] - gaps_margin
valid_intervals = [valid_time > min_valid_len for valid_time in valid_times[:, 1] - valid_times[:, 0]]
return valid_times[valid_intervals, :]
@staticmethod
def __build_mda_valid_times(valid_times):
return [FlMdaValidTimeBuilder.build(gap[0], gap[1]) for gap in valid_times]
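# Illustration of the gap logic above (made-up numbers): with sampling_rate = 10.0 Hz
# (period = 0.1 s) and period_multiplier = 1.5, timestamps [0.0, 0.1, 0.2, 1.0, 1.1]
# contain one gap (1.0 - 0.2 = 0.8 > 0.15 s), so two valid intervals are built,
# roughly [0.0, 0.2] and [1.0, 1.1], each shrunk at both ends by gaps_margin.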
```
#### File: components/position/fl_position.py
```python
class FlPosition:
def __init__(self, position_data, column_labels, timestamps, conversion):
self.position_data = position_data
self.column_labels = column_labels
self.timestamps = timestamps
self.conversion = conversion
```
#### File: components/position/pos_timestamp_manager.py
```python
import logging.config
import os
import pandas as pd
from rec_to_binaries.read_binaries import readTrodesExtractedDataFile
from rec_to_nwb.processing.nwb.common.timestamps_manager import TimestampManager
path = os.path.dirname(os.path.abspath(__file__))
logging.config.fileConfig(fname=str(path) + '/../../../../logging.conf', disable_existing_loggers=False)
logger = logging.getLogger(__name__)
class PosTimestampManager(TimestampManager):
def __init__(self, directories, continuous_time_directories,
convert_timestamps=True):
TimestampManager.__init__(self, directories, continuous_time_directories)
self.convert_timestamps = convert_timestamps
# override
def _get_timestamps(self, dataset_id):
pos_online = readTrodesExtractedDataFile(self.directories[dataset_id][0])
position = pd.DataFrame(pos_online['data'])
return position.time.to_numpy(dtype='int64')
def retrieve_real_timestamps(self, dataset_id):
return TimestampManager.retrieve_real_timestamps(self, dataset_id,
convert_timestamps=self.convert_timestamps)
```
#### File: time/invalid/fl_pos_invalid_time_builder.py
```python
from rec_to_nwb.processing.nwb.components.position.time.invalid.fl_pos_invalid_time import FlPosInvalidTime
from rec_to_nwb.processing.tools.beartype.beartype import beartype
class FlPosInvalidTimeBuilder:
@staticmethod
@beartype
def build(start_time: float, stop_time: float):
return FlPosInvalidTime(
start_time=start_time,
stop_time=stop_time
)
```
#### File: time/invalid/pos_invalid_time_injector.py
```python
from pynwb.epoch import TimeIntervals
class PosInvalidTimeInjector:
def inject_all(self, valid_times, nwb_content):
intervals = TimeIntervals(
name='pos_invalid_times',
description='Invalid times based on pos timestamps',
)
for single_interval in valid_times:
self.inject(single_interval, intervals)
nwb_content.add_time_intervals(intervals)
@staticmethod
def inject(single_interval, intervals):
intervals.add_interval(
single_interval.start_time,
single_interval.stop_time
)
```
#### File: components/sample_count_timestamp_corespondence/sample_count_timestamp_corespondence_builder.py
```python
from pynwb import TimeSeries
class SampleCountTimestampCorespondenceBuilder:
def __init__(self, data):
self.data = data
def build(self):
return TimeSeries(name="sample_count",
description="acquisition system sample count",
data=self.data[:, 0],
timestamps=self.data[:, 1],
unit='int64'
)
```
#### File: video_files/camera_sample_frame_counts/camera_sample_frame_counts.py
```python
class CameraSampleFrameCounts:
def __init__(self, frame_count, timestamps):
self.frame_count = frame_count
self.timestamps = timestamps
```
#### File: components/video_files/video_files_injector.py
```python
from pynwb import NWBFile
from pynwb.behavior import BehavioralEvents
from rec_to_nwb.processing.tools.beartype.beartype import beartype
class VideoFilesInjector:
@staticmethod
@beartype
def inject_all(nwb_content: NWBFile, image_series_list: list):
video = BehavioralEvents(name='video')
for image_series in image_series_list:
VideoFilesInjector.__add_single_image_series(video, image_series)
nwb_content.processing['video_files'].add(video)
@staticmethod
def __add_single_image_series(video, image_series):
video.add_timeseries(image_series)
return video
```
#### File: processing/tools/data_scanner.py
```python
import fnmatch
import os
from rec_to_nwb.processing.exceptions.missing_data_exception import MissingDataException
from rec_to_nwb.processing.metadata.metadata_manager import MetadataManager
from rec_to_nwb.processing.tools.beartype.beartype import beartype
from rec_to_nwb.processing.tools.dataset import Dataset
from rec_to_nwb.processing.tools.file_sorter import FileSorter
class DataScanner:
@beartype
def __init__(self, data_path: str, animal_name: str, nwb_metadata: MetadataManager):
self.data_path = data_path
self.animal_name = animal_name
self.nwb_metadata = nwb_metadata
self.data = None
@beartype
def get_all_epochs(self, date: str) -> list:
all_datasets = []
directories = os.listdir(self.data_path + '/' + self.animal_name + '/preprocessing/' + date)
FileSorter.sort_filenames(directories)
for directory in directories:
if directory.startswith(date):
dataset_name = (directory.split('_')[2] + '_' + directory.split('_')[3]).split('.')[0]
if not dataset_name in all_datasets:
all_datasets.append(dataset_name)
return all_datasets
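# get_all_epochs example: a directory named '20190718_beans_01_s1.mda' yields the
# epoch name '01_s1' (tokens 2 and 3 of the '_'-split name, with the extension stripped).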
@beartype
def get_all_data_from_dataset(self, date: str) -> list:
self.__check_if_path_exists(self.data_path + '/' + self.animal_name + '/preprocessing/' + date)
return os.listdir(self.data_path + '/' + self.animal_name + '/preprocessing/' + date)
@beartype
def extract_data_from_date_folder(self, date: str):
self.data = {self.animal_name: self.__extract_experiments(self.data_path, self.animal_name, [date])}
@beartype
def extract_data_from_dates_folders(self, dates: list):
self.data = {self.animal_name: self.__extract_experiments(self.data_path, self.animal_name, dates)}
def extract_data_from_all_dates_folders(self):
self.data = {self.animal_name: self.__extract_experiments(self.data_path, self.animal_name, None)}
def __extract_experiments(self, data_path, animal_name, dates):
preprocessing_path = data_path + animal_name + '/preprocessing'
if not dates:
dates = FileSorter.sort_filenames(os.listdir(preprocessing_path))
return {date: self.__extract_datasets(preprocessing_path + '/' + date) for date in dates}
@staticmethod
def __extract_datasets(date_path):
existing_datasets = set()
datasets = {}
directories = FileSorter.sort_filenames(os.listdir(date_path))
for directory in directories:
dir_split = directory.split('_')
if dir_split[0].isdigit():
dir_last_part = dir_split.pop().split('.')
dataset_name = dir_split.pop() + '_' + dir_last_part[0]
if not (dataset_name in existing_datasets):
datasets[dataset_name] = Dataset(dataset_name)
existing_datasets.add(dataset_name)
for dataset in datasets.values():
if dataset_name == dataset.name:
dataset.add_data_to_dataset(date_path + '/' + directory + '/', dir_last_part.pop())
return datasets
@beartype
def get_all_animals(self) -> list:
return list(self.data.keys())
@beartype
def get_all_experiment_dates(self, animal: str) -> list:
return list(self.data[animal].keys())
@beartype
def get_all_datasets(self, animal: str, date: str) -> list:
return list(self.data[animal][date].keys())
@beartype
def get_mda_timestamps(self, animal: str, date: str, dataset: str):
for file in self.data[animal][date][dataset].get_all_data_from_dataset('mda'):
if file.endswith('timestamps.mda'):
return self.data[animal][date][dataset].get_data_path_from_dataset('mda') + file
return None
@staticmethod
@beartype
def get_probes_from_directory(path: str):
probes = []
files = FileSorter.sort_filenames(os.listdir(path))
for probe_file in files:
if fnmatch.fnmatch(probe_file, "probe*.yml"):
probes.append(path + '/' + probe_file)
return probes
def __check_if_path_exists(self, path):
if not (os.path.exists(path)):
raise MissingDataException('missing ' + path + ' directory')
```
#### File: processing/tools/get_times_period_multiplier.py
```python
from rec_to_nwb.processing.tools.beartype.beartype import beartype
def get_times_period_multiplier(metadata):
times_period_multiplier = metadata.get('times_period_multiplier', '1.5')
return return_validated_period(times_period_multiplier)
@beartype
def return_validated_period(period: (int, float, str)) -> float:
return float(period)
```
#### File: processing/validation/ntrode_validator.py
```python
import logging.config
import os
from rec_to_nwb.processing.exceptions.invalid_header_exception import InvalidHeaderException
from rec_to_nwb.processing.exceptions.invalid_metadata_exception import InvalidMetadataException
from rec_to_nwb.processing.header.module.header import Header
from rec_to_nwb.processing.tools.beartype.beartype import beartype
from rec_to_nwb.processing.tools.count_electrodes_in_ntrode import count_electrodes_in_ntrode
from rec_to_nwb.processing.tools.count_electrodes_in_probe import count_electrodes_in_probe
from rec_to_nwb.processing.tools.filter_probe_by_type import filter_probe_by_type
from rec_to_nwb.processing.validation.ntrode_validation_summary import NTrodeValidationSummary
from rec_to_nwb.processing.validation.validator import Validator
path = os.path.dirname(os.path.abspath(__file__))
logging.config.fileConfig(fname=str(path) + '/../../logging.conf', disable_existing_loggers=False)
logger = logging.getLogger(__name__)
class NTrodeValidator(Validator):
@beartype
def __init__(self, metadata: dict, header: Header, probes_metadata: list):
self.metadata = metadata
self.header = header
self.probes_metadata = probes_metadata
def create_summary(self):
ntrodes = self.metadata['ntrode electrode group channel map']
if len(ntrodes) == 0:
raise InvalidMetadataException("There are no ntrodes defined in metadata.yml file.")
if self.header is None or \
self.header.configuration.spike_configuration is None or \
self.header.configuration.spike_configuration.spike_n_trodes is None:
raise InvalidHeaderException("Rec header does not contain spike_n_trodes data")
spike_ntrodes = self.header.configuration.spike_configuration.spike_n_trodes
ntrodes_num = len(ntrodes)
spike_ntrodes_num = len(spike_ntrodes)
self.validate_ntrode_metadata_with_probe_metadata(self.metadata, self.probes_metadata)
return NTrodeValidationSummary(ntrodes_num, spike_ntrodes_num)
@staticmethod
def validate_ntrode_metadata_with_probe_metadata(metadata, probes_metadata):
for electrode_group in metadata['electrode groups']:
probe_metadata = filter_probe_by_type(probes_metadata, electrode_group['device_type'])
electrodes_in_probe = count_electrodes_in_probe(probe_metadata)
electrodes_in_group = count_electrodes_in_ntrode(
metadata['ntrode electrode group channel map'],
electrode_group['id']
)
if electrodes_in_probe != electrodes_in_group:
raise InvalidMetadataException(
'Ntrode definition in metadata is not compatible with probe schema.' +
'Probe_type: ' + str(electrode_group['device_type']) +
' electrodes in this probe_type: ' + str(electrodes_in_probe) +
'. Ntrode_metadata for electrode_group of id: ' + str(electrode_group['id']) +
' electrodes in this electrode_group: ' + str(electrodes_in_group)
)
```
#### File: processing/validation/validation_registrator.py
```python
from rec_to_nwb.processing.exceptions.invalid_input_exception import InvalidInputException
from rec_to_nwb.processing.validation.validator import Validator
class ValidationRegistrator(Validator):
def __init__(self):
self.validators = []
def register(self, validator):
if isinstance(validator, Validator):
self.validators.append(validator)
def validate(self):
for validator in self.validators:
result = validator.create_summary()
if not result.is_valid:
raise InvalidInputException("Validation: " + str(type(validator)) + "has failed!")
```
#### File: processing/validation/xml_files_validation.py
```python
import os
from rec_to_nwb.processing.exceptions.missing_data_exception import MissingDataException
from rec_to_nwb.processing.validation.validation_registrator import ValidationRegistrator
from rec_to_nwb.processing.validation.xml_files_validation_summary import XmlFilesValidationSummary
class XmlFilesValidator(ValidationRegistrator):
def __init__(self, path):
self.path = path
def create_summary(self):
if not os.path.exists(self.path):
raise MissingDataException('xml file ' + self.path + ' does not exist!')
return XmlFilesValidationSummary()
```
#### File: e2etests/dio/test_dioManager.py
```python
import os
import unittest
from unittest.mock import Mock, patch
import pandas as pd
import numpy as np
from pandas import array
from rec_to_nwb.processing.nwb.components.dio.dio_extractor import DioExtractor
from rec_to_nwb.processing.nwb.components.dio.dio_manager import DioManager
path = os.path.dirname(os.path.abspath(__file__))
@unittest.skip('Need preprocessed .dat files')
class TestDioManager(unittest.TestCase):
@staticmethod
def fake_extract_dio_for_single_dataset(*args, **kwargs):
return {
'Din1': [
array(
[0.01919192, 0.07474747, 0.08989899, 0.05454545, 0.06767677,
0.03232323, 0.05050505, 0.04141414, 0.05555555, 0.02323232,
0.02121212, 0.03131313, 0.04040404, 0.06666667, 0.04848485]
),
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0]
],
'Din2': [
array(
[0.01919192]
),
[0]
]
}
@classmethod
@patch.object(DioExtractor, 'extract_dio_for_single_dataset', new=fake_extract_dio_for_single_dataset)
def setUpClass(cls):
dio_metadata = [
{'name': 'Din1', 'description': 'Poke1'},
{'name': 'Din2', 'description': 'Poke2'}
]
dio_files = [
{
'Din1': path + '/../../test_data/beans/preprocessing/20190718/20190718_beans_01_s1.DIO//20190718_beans_01_s1.dio_Din1.dat',
'Din2': path + '/../../test_data/beans/preprocessing/20190718/20190718_beans_01_s1.DIO//20190718_beans_01_s1.dio_Din2.dat',
},
{
'Din1': path + '/../../test_data/beans/preprocessing/20190718/20190718_beans_01_s1.DIO//20190718_beans_01_s1.dio_Din1.dat',
'Din2': path + '/../../test_data/beans/preprocessing/20190718/20190718_beans_01_s1.DIO//20190718_beans_01_s1.dio_Din2.dat',
},
]
cls.dio_manager = DioManager(
dio_files=dio_files,
dio_metadata=dio_metadata,
continuous_time_files='mocked'
)
cls.din_1_array = pd.array(
[1367266, 9599570, 9603169, 9610303, 9612481,
9619154, 9619802, 9627552, 9641056, 9643239,
9644490, 9644629, 9645544, 9645721, 9646074]
)
cls.din_1_list = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0]
cls.din_2_array = pd.array([1367266])
cls.din_2_list = [0]
cls.dio_manager.dio_extractor = Mock(spec=DioExtractor)
cls.dio_manager.dio_extractor.extract_dio_for_single_dataset.return_value = {
'Din1': [cls.din_1_array, cls.din_1_list],
'Din2': [cls.din_2_array, cls.din_2_list],
}
cls.dio = cls.dio_manager.get_dio()
def test_get_dio_returnCorrectType_true(self):
self.assertIsInstance(self.dio, dict)
self.assertIsInstance(self.dio['Din1'][0], np.ndarray)
self.assertIsInstance(self.dio['Din1'][1], list)
self.assertIsInstance(self.dio['Din2'][0], np.ndarray)
self.assertIsInstance(self.dio['Din2'][1], list)
def test_get_dio_returnCorrectValue_true(self):
self.assertEqual(
self.dio,
{
'Din1': [
array([
1367266, 9599570, 9603169, 9610303, 9612481, 9619154, 9619802,
9627552, 9641056, 9643239, 9644490, 9644629, 9645544, 9645721,
9646074, 1367266, 9599570, 9603169, 9610303, 9612481, 9619154,
9619802, 9627552, 9641056, 9643239, 9644490, 9644629, 9645544,
9645721, 9646074
]),
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0]],
'Din2': [
array([1367266, 1367266]),
[0, 0]
]
}
)
self.assertEqual(
self.dio['Din1'][0],
array([
1367266, 9599570, 9603169, 9610303, 9612481, 9619154, 9619802,
9627552, 9641056, 9643239, 9644490, 9644629, 9645544, 9645721,
9646074, 1367266, 9599570, 9603169, 9610303, 9612481, 9619154,
9619802, 9627552, 9641056, 9643239, 9644490, 9644629, 9645544,
9645721, 9646074
])
)
self.assertEqual(
self.dio['Din1'][1],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0]
)
self.assertEqual(
self.dio['Din2'][0],
array([1367266, 1367266])
)
self.assertEqual(
self.dio['Din2'][1],
[0, 0]
)
def test_get_dio_returnCorrectShape_true(self):
self.assertEqual(len(self.dio), 2)
self.assertEqual(len(self.dio['Din1']), 2)
self.assertEqual(self.dio['Din1'][0].shape, (30,))
self.assertEqual(len(self.dio['Din1'][1]), 30)
self.assertEqual(len(self.dio['Din2']), 2)
self.assertEqual(self.dio['Din2'][0].shape, (2,))
self.assertEqual(len(self.dio['Din2'][0]), 2)
self.assertEqual(len(self.dio['Din2'][1]), 2)
```
#### File: test/e2etests/test_headerComparator.py
```python
import os
import unittest
from rec_to_nwb.processing.header.header_checker.header_comparator import HeaderComparator
path = os.path.dirname(os.path.abspath(__file__))
class TestHeaderComparator(unittest.TestCase):
@unittest.skip('test needs to create files locally; not working on travis')
class TestHeaderReader(unittest.TestCase):
def setUp(self):
with open(path + '/../processing/res/test_xmls/test1.xml', 'w') as xml_1:
xml_1.write('<string_string_string/>')
with open(path + '/../processing//res/test_xmls/test2.xml', 'w') as xml_2:
xml_2.write('<random_test_strings/>')
with open(path + '/../processing//res/test_xmls/test3.xml', 'w') as xml_3:
xml_3.write('<some_content/>')
self.header_comparator = HeaderComparator(
[path + '/../processing/res/test_xmls/test1.xml',
path + '/../processing/res/test_xmls/test2.xml',
path + '/../processing/res/test_xmls/test3.xml']
)
def test_comparing_headers(self):
headers_difference = self.header_comparator.compare()
self.assertNotEqual([], headers_difference)
```
#### File: test/e2etests/test_positionIntegration.py
```python
import os
import unittest
from pynwb.behavior import Position
from rec_to_nwb.processing.nwb.components.iterator.multi_thread_data_iterator import MultiThreadDataIterator
from rec_to_nwb.processing.nwb.components.position.fl_position_manager import FlPositionManager
from rec_to_nwb.processing.nwb.components.position.position_creator import PositionCreator
from rec_to_nwb.processing.tools.dataset import Dataset
path = os.path.dirname(os.path.abspath(__file__))
@unittest.skip("test requires continuoustime.dat file and can't be used on travis")
class TestPositionIntegration(unittest.TestCase):
@staticmethod
def create_test_dataset():
dataset = Dataset('test_dataset')
dataset.add_data_to_dataset(path + '/../processing/res/pos_test/', 'pos')
dataset.add_data_to_dataset(path + '/../test_data/beans/preprocessing/20190718/20190718_beans_01_s1.time/',
'time')
return dataset
def test_position_extractor_reading_data_successfully(self):
dataset = self.create_test_dataset()
fl_position_manager = FlPositionManager(
datasets=[dataset],
metadata={
'cameras': [
{'id': '0', 'meters_per_pixel': '0.02'},
{'id': '1', 'meters_per_pixel': '0.03'},
{'id': '2', 'meters_per_pixel': '0.5'},
],
'tasks': [
{"task_name": "Sleep", "task_description": "The animal sleeps in a small empty box.",
'camera_id': ['0'], 'task_epochs': ['1', '3', '5']}
]
},
dataset_names=['01_s1']
)
position_creator = PositionCreator()
fl_positions = fl_position_manager.get_fl_positions()
positions = position_creator.create_all(fl_positions)
self.assertIsInstance(positions, Position)
self.assertEqual(positions.spatial_series['series_0'].conversion, 0.02)
self.assertIsInstance(positions.spatial_series['series_0'].data, MultiThreadDataIterator)
self.assertEqual(positions.spatial_series['series_0'].data.shape, [32658, 4])
self.assertEqual(positions.spatial_series['series_0'].timestamps_unit, 'seconds')
self.assertEqual(positions.spatial_series['series_0'].unit, 'meters')
```
#### File: test/e2etests/test_rawToNwbGeneration.py
```python
import os
import unittest
from testfixtures import should_raise
from rec_to_nwb.processing.builder.raw_to_nwb_builder import RawToNWBBuilder
from rec_to_nwb.processing.metadata.metadata_manager import MetadataManager
path = os.path.dirname(os.path.abspath(__file__))
_DEFAULT_TRODES_REC_EXPORT_ARGS = ('-reconfig', 'C:/Users/wmery/PycharmProjects/rec_to_nwb/rec_to_nwb/test/test_data/KF2/raw/20170120/kf2_reconfig.xml')
<EMAIL>("Super heavy RAW to NWB Generation")
class TestRawToNWBGeneration(unittest.TestCase):
def setUp(self):
self.metadata = MetadataManager(
'C:/Users/wmery/PycharmProjects/rec_to_nwb/rec_to_nwb/test/test_data/KF2/raw/20170120/kibbles20170216_metadata.yml',
[
'C:/Users/wmery/PycharmProjects/rec_to_nwb/rec_to_nwb/test/test_data/KF2/raw/20170120/64c-3s6mm6cm-20um-40um-sl.yml',
'C:/Users/wmery/PycharmProjects/rec_to_nwb/rec_to_nwb/test/test_data/KF2/raw/20170120/64c-4s6mm6cm-20um-40um-dl.yml'
])
self.builder = RawToNWBBuilder(
animal_name='KF2',
data_path=str(path) + '/../test_data/',
dates=['20170120'],
nwb_metadata=self.metadata,
output_path='',
video_path=str(path) + '/../test_data',
extract_spikes=False,
extract_mda=True,
extract_lfps=False,
extract_analog=True,
extract_dio=True,
overwrite=True,
trodes_rec_export_args=_DEFAULT_TRODES_REC_EXPORT_ARGS
)
def test_from_raw_to_nwb_generation(self):
self.builder.build_nwb(
process_mda_valid_time=False,
process_mda_invalid_time=False,
process_pos_valid_time=False,
process_pos_invalid_time=False
)
self.assertTrue(os.path.exists('beans20190718.nwb'), 'NWBFile did not build')
@should_raise(TypeError)
def test_raw_to_nwb_builder_failed_due_to_none_parameters(self):
RawToNWBBuilder(
animal_name='beans',
data_path=str(path) + '/../test_data/',
dates=['20190718'],
nwb_metadata=None,
)
@should_raise(TypeError)
def test_raw_to_nwb_builder_failed_due_to_incorrect_type_parameters(self):
RawToNWBBuilder(
animal_name=11111,
data_path=str(path) + '/../test_data/',
dates=['20190718'],
nwb_metadata=self.metadata,
output_path='',
extract_spikes=False,
extract_mda=True,
extract_lfps=False,
extract_analog=True,
extract_dio=True,
overwrite=True,
trodes_rec_export_args=_DEFAULT_TRODES_REC_EXPORT_ARGS
)
# def tearDown(self):
# self.builder.cleanup()
```
#### File: device/camera/test_flCameraDeviceManager.py
```python
from unittest import TestCase
from testfixtures import should_raise
from rec_to_nwb.processing.exceptions.none_param_exception import NoneParamException
from rec_to_nwb.processing.nwb.components.device.camera.fl_camera_device import FlCameraDevice
from rec_to_nwb.processing.nwb.components.device.camera.fl_camera_device_manager import FlCameraDeviceManager
class TestFlCameraDeviceManager(TestCase):
def test_fl_camera_device_manager_created_successfully(self):
metadata = {
'cameras': [
{'id': 0, 'meters_per_pixel': 0.02, 'manufacturer': 'novela', 'model': 'gt500', 'lens': '500dpt', 'camera_name': 'john'},
{'id': 1, 'meters_per_pixel': 3, 'manufacturer': 'novela', 'model': 'gt400', 'lens': '400dpt', 'camera_name': 'josh'},
{'id': 2, 'meters_per_pixel': '0.05', 'manufacturer': 'novela', 'model': 'gt300', 'lens': '300dpt', 'camera_name': 'joe'},
]
}
fl_camera_device_manager = FlCameraDeviceManager(metadata)
fl_camera_devices = fl_camera_device_manager.get_fl_device_manager()
self.assertIsInstance(fl_camera_devices, list)
self.assertIsInstance(fl_camera_devices[0], FlCameraDevice)
self.assertEqual(fl_camera_devices[0].name, 'camera_device 0')
self.assertEqual(fl_camera_devices[0].meters_per_pixel, 0.02)
self.assertEqual(fl_camera_devices[1].name, 'camera_device 1')
self.assertEqual(fl_camera_devices[1].meters_per_pixel, 3.0)
self.assertEqual(fl_camera_devices[2].name, 'camera_device 2')
self.assertEqual(fl_camera_devices[2].meters_per_pixel, 0.05)
@should_raise(NoneParamException)
def test_fl_camera_device_manager_failed_due_to_none_key_in_metadata(self):
metadata = {
'cameras': [
{'id': 0, 'meters_per_pixel': 0.02},
{'id': 1, 'meters_per_pixel': 3},
{'id': 2},
]
}
fl_camera_device_manager = FlCameraDeviceManager(metadata)
fl_camera_device_manager.get_fl_device_manager()
@should_raise(TypeError)
def test_fl_camera_device_manager_failed_init_due_to_none_param(self):
FlCameraDeviceManager(None)
```
#### File: processing/dio/test_dioBuilder.py
```python
from unittest import TestCase
from rec_to_nwb.processing.nwb.components.dio.dio_builder import DioBuilder
class TestDioBuilder(TestCase):
def setUp(self):
self.data = {
'Din1': [[1, 1, 1, 1], [11, 11, 11, 11]],
'Din2': [[2, 2, 2], [22, 22, 22]]}
self.metadata = [
{'name': 'poke1', 'description': 'Din1'},
{'name': 'poke2', 'description': 'Din2'}]
def test_build(self):
dio_builder = DioBuilder(data=self.data,
dio_metadata=self.metadata,
unit='um'
)
behavioral_events = dio_builder.build()
self.assertEqual(2, len(behavioral_events.time_series))
self.assertEqual([11, 11, 11, 11], behavioral_events.time_series['poke1'].data)
self.assertEqual('Din2', behavioral_events.time_series['poke2'].description)
```
#### File: processing/electrode_group/test_electrodeGroupFactory.py
```python
from unittest import TestCase
from unittest.mock import Mock
from ndx_franklab_novela.probe import Probe
from pynwb.device import Device
from testfixtures import should_raise
from rec_to_nwb.processing.exceptions.none_param_exception import NoneParamException
from rec_to_nwb.processing.nwb.components.electrode_group.electrode_group_factory import ElectrodeGroupFactory
from rec_to_nwb.processing.nwb.components.electrode_group.fl_electrode_group import FlElectrodeGroup
from rec_to_nwb.processing.nwb.components.electrode_group.fl_nwb_electrode_group import FlNwbElectrodeGroup
class TestElectrodeGroupFactory(TestCase):
def test_electrode_group_factory_create_ElectrodeGroup_successfully(self):
mock_probe = Mock(spec=Probe)
mock_device = Mock(spec=Device)
mock_fl_electrode_group_1 = Mock(spec=FlElectrodeGroup)
mock_fl_electrode_group_1.name = '0'
mock_fl_electrode_group_1.description = 'ElectrodeGroup 1'
mock_fl_electrode_group_1.location = 'mPFC'
mock_fl_electrode_group_1.device = mock_probe
mock_fl_electrode_group_2 = Mock(spec=FlElectrodeGroup)
mock_fl_electrode_group_2.name = '1'
mock_fl_electrode_group_2.description = 'ElectrodeGroup 2'
mock_fl_electrode_group_2.location = 'mPFC'
mock_fl_electrode_group_2.device = mock_device
electrode_group_1 = ElectrodeGroupFactory.create_electrode_group(mock_fl_electrode_group_1)
electrode_group_2 = ElectrodeGroupFactory.create_electrode_group(mock_fl_electrode_group_2)
self.assertIsNotNone(electrode_group_1)
self.assertIsNotNone(electrode_group_2)
self.assertEqual(electrode_group_1.name, "0")
self.assertEqual(electrode_group_1.description, 'ElectrodeGroup 1')
self.assertEqual(electrode_group_1.location, 'mPFC')
self.assertEqual(electrode_group_1.device, mock_probe)
self.assertEqual(electrode_group_2.name, '1')
self.assertEqual(electrode_group_2.description, 'ElectrodeGroup 2')
self.assertEqual(electrode_group_2.location, 'mPFC')
self.assertEqual(electrode_group_2.device, mock_device)
def test_electrode_group_factory_create_NwbElectrodeGroup_successfully(self):
mock_probe = Mock(spec=Probe)
mock_device = Mock(spec=Device)
mock_fl_nwb_electrode_group_1 = Mock(spec=FlNwbElectrodeGroup)
mock_fl_nwb_electrode_group_1.name = '0'
mock_fl_nwb_electrode_group_1.description = 'ElectrodeGroup 1'
mock_fl_nwb_electrode_group_1.location = 'mPFC'
mock_fl_nwb_electrode_group_1.device = mock_probe
mock_fl_nwb_electrode_group_1.targeted_location = 'Sample location'
mock_fl_nwb_electrode_group_1.targeted_x = 0.0
mock_fl_nwb_electrode_group_1.targeted_y = 0.0
mock_fl_nwb_electrode_group_1.targeted_z = 0.0
mock_fl_nwb_electrode_group_1.units = 'um'
mock_fl_nwb_electrode_group_2 = Mock(spec=FlNwbElectrodeGroup)
mock_fl_nwb_electrode_group_2.name = '1'
mock_fl_nwb_electrode_group_2.description = 'ElectrodeGroup 2'
mock_fl_nwb_electrode_group_2.location = 'mPFC'
mock_fl_nwb_electrode_group_2.device = mock_device
mock_fl_nwb_electrode_group_2.targeted_location = 'Sample location'
mock_fl_nwb_electrode_group_2.targeted_x = 0.0
mock_fl_nwb_electrode_group_2.targeted_y = 0.0
mock_fl_nwb_electrode_group_2.targeted_z = 0.0
mock_fl_nwb_electrode_group_2.units = 'mm'
nwb_electrode_group_1 = ElectrodeGroupFactory.create_nwb_electrode_group(mock_fl_nwb_electrode_group_1)
nwb_electrode_group_2 = ElectrodeGroupFactory.create_nwb_electrode_group(mock_fl_nwb_electrode_group_2)
self.assertIsNotNone(nwb_electrode_group_1)
self.assertIsNotNone(nwb_electrode_group_2)
self.assertEqual(nwb_electrode_group_1.name, "0")
self.assertEqual(nwb_electrode_group_1.description, 'ElectrodeGroup 1')
self.assertEqual(nwb_electrode_group_1.location, 'mPFC')
self.assertEqual(nwb_electrode_group_1.device, mock_probe)
self.assertEqual(nwb_electrode_group_1.targeted_location, 'Sample location')
self.assertEqual(nwb_electrode_group_1.targeted_x, 0.0)
self.assertEqual(nwb_electrode_group_1.targeted_y, 0.0)
self.assertEqual(nwb_electrode_group_1.targeted_z, 0.0)
self.assertEqual(nwb_electrode_group_1.units, 'um')
self.assertEqual(nwb_electrode_group_2.name, '1')
self.assertEqual(nwb_electrode_group_2.description, 'ElectrodeGroup 2')
self.assertEqual(nwb_electrode_group_2.location, 'mPFC')
self.assertEqual(nwb_electrode_group_2.device, mock_device)
self.assertEqual(nwb_electrode_group_2.targeted_location, 'Sample location')
self.assertEqual(nwb_electrode_group_2.targeted_x, 0.0)
self.assertEqual(nwb_electrode_group_2.targeted_y, 0.0)
self.assertEqual(nwb_electrode_group_2.targeted_z, 0.0)
self.assertEqual(nwb_electrode_group_2.units, 'mm')
@should_raise(TypeError)
def test_electrode_group_factory_failed_creating_ElectrodeGroup_due_to_lack_of_FLElectrodeGroup(self):
ElectrodeGroupFactory.create_electrode_group(None)
@should_raise(NoneParamException)
def test_electrode_group_factory_failed_creating_ElectrodeGroup_due_to_lack_of_FlElectrodeGroup_attr(self):
mock_fl_electrode_group_1 = Mock(spec=FlElectrodeGroup)
mock_fl_electrode_group_1.name = 'ElectrodeGroup 1'
        mock_fl_electrode_group_1.description = 'sample description 1'
mock_fl_electrode_group_1.location = 'sample location 1'
mock_fl_electrode_group_1.device = None
ElectrodeGroupFactory.create_electrode_group(mock_fl_electrode_group_1)
@should_raise(TypeError)
def test_electrode_group_factory_failed_creating_NwbElectrodeGroup_due_to_lack_of_FLNwbElectrodeGroup(self):
ElectrodeGroupFactory.create_nwb_electrode_group(None)
@should_raise(NoneParamException)
def test_electrode_group_factory_failed_creating_NwbElectrodeGroup_due_to_lack_of_FlNwbElectrodeGroup_attr(self):
mock_fl_nwb_electrode_group_1 = Mock(spec=FlNwbElectrodeGroup)
mock_fl_nwb_electrode_group_1.name = 'ElectrodeGroup 1'
        mock_fl_nwb_electrode_group_1.description = 'sample description 1'
mock_fl_nwb_electrode_group_1.location = 'sample location 1'
mock_fl_nwb_electrode_group_1.device = None
mock_fl_nwb_electrode_group_1.targeted_location = None
mock_fl_nwb_electrode_group_1.targeted_x = None
mock_fl_nwb_electrode_group_1.targeted_y = None
mock_fl_nwb_electrode_group_1.targeted_z = None
mock_fl_nwb_electrode_group_1.units = None
ElectrodeGroupFactory.create_nwb_electrode_group(mock_fl_nwb_electrode_group_1)
```
#### File: time/valid/test_flMdaValidTimeManager.py
```python
from unittest import TestCase
from unittest.mock import MagicMock
import numpy as np
from pynwb import NWBFile
from testfixtures import should_raise
from rec_to_nwb.processing.exceptions.missing_data_exception import MissingDataException
from rec_to_nwb.processing.nwb.components.mda.time.valid.fl_mda_valid_time_manager import FlMdaValidTimeManager
class TestMdaValidTimeManager(TestCase):
def test_fl_mda_valid_time_manager_not_initialized_due_to_None_param(self):
with self.assertRaises(TypeError):
FlMdaValidTimeManager(None)
def test_fl_mda_valid_time_manager_get_fl_mda_valid_times_with_gap_in_middle(self):
sampling_rate = 1.0
gaps_margin = 0.0001
mock_array = np.ndarray(dtype='float', shape=[10,])
array = [1, 2, 3, 4, 5, 7, 9, 10, 11, 12]
for i, number in enumerate(array):
mock_array[i] = number
mock_nwb = MagicMock(spec=NWBFile)
mock_nwb.acquisition['e-series'].timestamps = mock_array
mock_metadata = {}
fl_mda_valid_time_manager = FlMdaValidTimeManager(sampling_rate, mock_metadata)
fl_mda_valid_times = fl_mda_valid_time_manager.get_fl_mda_valid_times(
nwb_content=mock_nwb,
gaps_margin=gaps_margin
)
self.assertEqual(len(fl_mda_valid_times), 2)
self.assertEqual(round(fl_mda_valid_times[0].start_time, 4), 1.0001)
self.assertEqual(round(fl_mda_valid_times[0].stop_time, 4), 4.9999)
self.assertEqual(round(fl_mda_valid_times[1].start_time, 4), 9.0001)
self.assertEqual(round(fl_mda_valid_times[1].stop_time, 4), 11.9999)
def test_fl_mda_valid_time_manager_get_fl_mda_valid_times_without_gap(self):
sampling_rate = 1.0
gaps_margin = 0.0001
mock_array = np.ndarray(dtype='float', shape=[10,])
array = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for i, number in enumerate(array):
mock_array[i] = number
mock_nwb = MagicMock(spec=NWBFile)
mock_nwb.acquisition['e-series'].timestamps = mock_array
mock_metadata = {'times_period_multiplier': 1.5}
fl_mda_valid_time_manager = FlMdaValidTimeManager(sampling_rate, mock_metadata)
fl_mda_valid_times = fl_mda_valid_time_manager.get_fl_mda_valid_times(
nwb_content=mock_nwb,
gaps_margin=gaps_margin
)
self.assertEqual(len(fl_mda_valid_times), 1)
self.assertEqual(round(fl_mda_valid_times[0].start_time, 4), 1.0001)
self.assertEqual(round(fl_mda_valid_times[0].stop_time, 4), 9.9999)
def test_fl_mda_valid_time_manager_get_fl_mda_valid_times_with_gap_at_start(self):
sampling_rate = 1.0
gaps_margin = 0.0001
mock_array = np.ndarray(dtype='float', shape=[10,])
array = [1, 3, 5, 6, 7, 8, 9, 10, 11, 12]
for i, number in enumerate(array):
mock_array[i] = number
mock_nwb = MagicMock(spec=NWBFile)
mock_nwb.acquisition['e-series'].timestamps = mock_array
mock_metadata = {'times_period_multiplier': 1.5}
fl_mda_valid_time_manager = FlMdaValidTimeManager(sampling_rate, mock_metadata)
fl_mda_valid_times = fl_mda_valid_time_manager.get_fl_mda_valid_times(
nwb_content=mock_nwb,
gaps_margin=gaps_margin
)
self.assertEqual(len(fl_mda_valid_times), 1)
self.assertEqual(round(fl_mda_valid_times[0].start_time, 4), 5.0001)
self.assertEqual(round(fl_mda_valid_times[0].stop_time, 4), 11.9999)
def test_fl_mda_valid_time_manager_get_fl_mda_valid_times_with_gap_at_end(self):
sampling_rate = 1.0
gaps_margin = 0.0001
mock_array = np.ndarray(dtype='float', shape=[10, ])
array = [1, 2, 3, 4, 5, 6, 7, 8, 10, 12]
for i, number in enumerate(array):
mock_array[i] = number
mock_nwb = MagicMock(spec=NWBFile)
mock_nwb.acquisition['e-series'].timestamps = mock_array
mock_metadata = {'times_period_multiplier': 1.5}
fl_mda_valid_time_manager = FlMdaValidTimeManager(sampling_rate, mock_metadata)
fl_mda_valid_times = fl_mda_valid_time_manager.get_fl_mda_valid_times(
nwb_content=mock_nwb,
gaps_margin=gaps_margin
)
self.assertEqual(len(fl_mda_valid_times), 1)
self.assertEqual(round(fl_mda_valid_times[0].start_time, 4), 1.0001)
self.assertEqual(round(fl_mda_valid_times[0].stop_time, 4), 7.9999)
@should_raise(TypeError)
def test_fl_mda_valid_time_manager_get_fl_mda_valid_times_failed_due_to_None_param(self):
gaps_margin = 0.0001
sampling_rate = 1.0
mock_metadata = {'times_period_multiplier': 1.5}
fl_mda_valid_time_manager = FlMdaValidTimeManager(sampling_rate, mock_metadata)
fl_mda_valid_time_manager.get_fl_mda_valid_times(
nwb_content=None,
gaps_margin=gaps_margin
)
@should_raise(MissingDataException)
def test_fl_mda_valid_time_manager_get_fl_mda_valid_times_failed_due_to_lack_of_timestamps(self):
gaps_margin = 0.0001
sampling_rate = 1.0
mock_nwb = MagicMock(spec=NWBFile)
mock_nwb.acquisition['e-series'].timestamps = None
mock_metadata = {'times_period_multiplier': 1.5}
fl_mda_valid_time_manager = FlMdaValidTimeManager(sampling_rate, mock_metadata)
fl_mda_valid_time_manager.get_fl_mda_valid_times(
nwb_content=mock_nwb,
gaps_margin=gaps_margin
)
```
#### File: test/processing/test_headerView.py
```python
import os
from unittest import TestCase
from rec_to_nwb.processing.header.module import header
path = os.path.dirname(os.path.abspath(__file__))
class TestHeaderInterface(TestCase):
@classmethod
def setUpClass(cls):
cls.header = header.Header(path + '/res/fl_lab_sample_header.xml')
def test_configuration_tag(self):
configuration = self.header.configuration
self.assertIsNotNone(configuration)
self.assertEqual('Configuration', configuration.tag)
def test_module_configuration_tag(self):
module_configuration = self.header.configuration.module_configuration
self.assertIsNotNone(module_configuration)
self.assertEqual('ModuleConfiguration', module_configuration.tag)
self.assertEqual(2, len(module_configuration.single_module_configurations))
def test_single_module_configuration_tag(self):
single_module_configurations = self.header.configuration.module_configuration.single_module_configurations
self.assertIsNotNone(single_module_configurations)
self.assertEqual('SingleModuleConfiguration', single_module_configurations[1].tag)
self.assertEqual('1', single_module_configurations[0].send_trodes_config)
self.assertEqual('./stateScript', single_module_configurations[1].module_name)
self.assertEqual('1', single_module_configurations[1].send_network_info)
def test_argument_tag(self):
arguments = self.header.configuration.module_configuration.single_module_configurations[1].arguments
self.assertIsNotNone(arguments)
self.assertEqual('Argument', arguments[1].tag)
self.assertEqual('-suppressUpdates', arguments[0].flag)
self.assertEqual('4', arguments[1].value)
def test_global_configuration_tag(self):
global_configuration = self.header.configuration.global_configuration
self.assertIsNotNone(global_configuration)
self.assertEqual('GlobalConfiguration', global_configuration.tag)
self.assertEqual('00401 00003', global_configuration.headstage_serial)
self.assertEqual('0', global_configuration.headstage_smart_ref_on)
self.assertEqual('0', global_configuration.realtime_mode)
self.assertEqual('0', global_configuration.headstage_auto_settle_on)
self.assertEqual('38699433', global_configuration.timestamp_at_creation)
self.assertEqual('2.2', global_configuration.controller_firmware_version)
self.assertEqual('65535 65535', global_configuration.controller_serial)
self.assertEqual('1', global_configuration.save_displayed_chan_only)
self.assertEqual('3.9', global_configuration.headstage_firmware_version)
self.assertEqual('5.9.8', global_configuration.qt_version)
self.assertEqual('May 16 2019', global_configuration.compile_date)
self.assertEqual('10:32:19', global_configuration.compile_time)
self.assertEqual('myAnimal', global_configuration.file_prefix)
self.assertEqual('0', global_configuration.headstage_gyro_sensor_on)
self.assertEqual('0', global_configuration.headstage_mag_sensor_on)
self.assertEqual('1.8.2', global_configuration.trodes_version)
self.assertEqual('0', global_configuration.headstage_accel_sensor_on)
self.assertEqual('heads/Release_1.8.2-0-g9a3e37c', global_configuration.commit_head)
self.assertEqual('', global_configuration.file_path)
self.assertEqual('1563323368633', global_configuration.system_time_at_creation)
def test_hardware_configuration_tag(self):
hardware_configuration = self.header.configuration.hardware_configuration
self.assertIsNotNone(hardware_configuration)
self.assertEqual('HardwareConfiguration', hardware_configuration.tag)
self.assertEqual('30000', hardware_configuration.sampling_rate)
self.assertEqual('128', hardware_configuration.num_channels)
def test_device_tag(self):
devices = self.header.configuration.hardware_configuration.devices
self.assertIsNotNone(devices)
self.assertEqual('Device', devices[0].tag)
self.assertEqual('1', devices[0].num_bytes)
self.assertEqual('1', devices[1].available)
self.assertEqual('MCU_IO', devices[0].name)
self.assertEqual('10', devices[0].packet_order_preference)
def test_channel_tag(self):
channels = self.header.configuration.hardware_configuration.devices[0].channels
self.assertIsNotNone(channels)
self.assertEqual('Channel', channels[1].tag)
self.assertEqual('MCU_Din1', channels[0].id)
self.assertEqual('0', channels[0].bit)
self.assertEqual('0', channels[0].start_byte)
self.assertEqual('1', channels[0].input)
self.assertEqual('digital', channels[0].data_type)
def test_stream_display_tag(self):
stream_display = self.header.configuration.stream_display
self.assertIsNotNone(stream_display)
self.assertEqual('StreamDisplay', stream_display.tag)
self.assertEqual('#030303', stream_display.background_color)
self.assertEqual('2', stream_display.columns)
self.assertEqual('2', stream_display.pages)
def test_spike_configuration_tag(self):
spike_configuration = self.header.configuration.spike_configuration
self.assertIsNotNone(spike_configuration)
self.assertEqual('SpikeConfiguration', spike_configuration.tag)
self.assertEqual('', spike_configuration.categories)
def test_aux_display_configuration_tag(self):
aux_display_configuration = self.header.configuration.aux_display_configuration
self.assertIsNotNone(aux_display_configuration)
self.assertEqual('AuxDisplayConfiguration', aux_display_configuration.tag)
def test_disp_channel_tag(self):
disp_channels = self.header.configuration.aux_display_configuration.disp_channels
self.assertIsNotNone(disp_channels)
self.assertEqual('DispChannel', disp_channels[1].tag)
self.assertEqual('1', disp_channels[0].analyze)
self.assertEqual('2', disp_channels[1].max_disp)
self.assertEqual('#aaaaaa', disp_channels[1].color)
self.assertEqual('Din2', disp_channels[1].id)
self.assertEqual('ECU', disp_channels[0].device)
def test_spike_n_trode_tag(self):
spike_n_trode = self.header.configuration.spike_configuration.spike_n_trodes
self.assertIsNotNone(spike_n_trode)
self.assertEqual('SpikeNTrode', spike_n_trode[0].tag)
self.assertEqual('300', spike_n_trode[1].low_filter)
self.assertEqual('1', spike_n_trode[1].lfp_chan)
self.assertEqual('1', spike_n_trode[1].lfp_filter_on)
self.assertEqual('0', spike_n_trode[1].ref_group)
self.assertEqual('0', spike_n_trode[1].group_ref_on)
self.assertEqual('400', spike_n_trode[1].lfp_high_filter)
self.assertEqual('6000', spike_n_trode[1].hight_filter)
self.assertEqual('#24c600', spike_n_trode[1].color)
self.assertEqual('1', spike_n_trode[1].ref_chan)
self.assertEqual('2', spike_n_trode[1].id)
self.assertEqual('0', spike_n_trode[1].lfp_ref_on)
self.assertEqual('1', spike_n_trode[1].filter_on)
self.assertEqual('0', spike_n_trode[1].ref_on)
self.assertEqual('0', spike_n_trode[1].module_data_on)
self.assertEqual('1', spike_n_trode[1].ref_n_trode_id)
def test_spike_channel_tag(self):
spike_channels = \
self.header.configuration.spike_configuration.spike_n_trodes[1].spike_channels
self.assertIsNotNone(spike_channels)
self.assertEqual('SpikeChannel', spike_channels[0].tag)
self.assertEqual('178', spike_channels[0].hw_chan)
self.assertEqual('225', spike_channels[0].max_disp)
self.assertEqual('10', spike_channels[0].thresh)
self.assertEqual('1', spike_channels[0].trigger_on)
```
#### File: processing/tools/test_beartype.py
```python
from unittest import TestCase
import typing
import pytest
from testfixtures import should_raise
from rec_to_nwb.processing.tools.beartype.beartype import beartype
class TestBeartype(TestCase):
def test_beartype_noop(self) -> None:
"""
Test bear typing of a function with no function annotations, reducing to
_no_ type checking.
"""
# Unannotated function to be type checked.
@beartype
def khorne(gork, mork):
return gork + mork
# Call this function and assert the expected return value.
assert khorne('WAAAGH!', '!HGAAAW') == 'WAAAGH!!HGAAAW'
# ....................{ TESTS ~ pass : param }....................
def test_beartype_pass_param_keyword_and_positional(self) -> None:
"""
Test bear typing of a function call successfully passed both annotated
positional and keyword parameters.
"""
# Function to be type checked.
@beartype
def slaanesh(daemonette: str, keeper_of_secrets: str) -> str:
return daemonette + keeper_of_secrets
# Call this function with both positional and keyword arguments and assert
# the expected return value.
assert slaanesh(
'Seeker of Decadence', keeper_of_secrets="N'Kari") == (
"Seeker of DecadenceN'Kari")
def test_beartype_pass_param_keyword_only(self) -> None:
"""
Test bear typing of a function call successfully passed an annotated
keyword-only parameter following an `*` or `*args` parameter.
"""
# Function to be type checked.
@beartype
def changer_of_ways(sky_shark: str, *, chaos_spawn: str) -> str:
return sky_shark + chaos_spawn
# Call this function with keyword arguments and assert the expected return
# value.
assert changer_of_ways(
'Screamers', chaos_spawn="Mith'an'driarkh") == (
"ScreamersMith'an'driarkh")
def test_beartype_pass_param_tuple(self) -> None:
"""
Test bear typing of a function call successfully passed a parameter
annotated as a tuple.
"""
# Function to be type checked.
@beartype
def genestealer(tyranid: str, hive_fleet: (str, int)) -> str:
return tyranid + str(hive_fleet)
# Call this function with each of the two types listed in the above tuple.
assert genestealer(
'Norn-Queen', hive_fleet='Behemoth') == 'Norn-QueenBehemoth'
assert genestealer(
'Carnifex', hive_fleet=0xDEADBEEF) == 'Carnifex3735928559'
def test_type_check_pass_param_custom(self) -> None:
"""
Test bear typing of a function call successfully passed a parameter
annotated as a user-defined rather than builtin type.
"""
# User-defined type.
class CustomTestStr(str):
pass
# Function to be type checked.
@beartype
def hrud(gugann: str, delphic_plague: CustomTestStr) -> str:
return gugann + delphic_plague
# Call this function with each of the two types listed in the above tuple.
assert hrud(
'Troglydium hruddi', delphic_plague=CustomTestStr('Delphic Sink')) == (
'Troglydium hruddiDelphic Sink')
def test_type_check_pass_typing_module(self) -> None:
"""
Test bear typing of a function call successfully passed a parameter
annotated with an abstract type from the typing module.
"""
MyMap = typing.Mapping
@beartype
def function(par: MyMap, ameter: MyMap) -> MyMap:
result = par.copy()
result.update(ameter)
return result
assert function({1:1}, {2:2}) == {1:1, 2:2}
def test_type_check_pass_parameterized_typing_module(self) -> None:
"""
Test bear typing of a function call successfully passed a parameter
        annotated with a parameterized abstract type from the typing module.
"""
MyMap = typing.Mapping
@beartype
def function(par: MyMap, ameter: MyMap) -> MyMap:
result = par.copy()
result.update(ameter)
return result
assert function({1:1}, {2:2}) == {1:1, 2:2}
# ....................{ TESTS ~ pass : return }....................
def test_type_check_pass_return_none(self) -> None:
"""
Test bear typing of a function call successfully returning `None` and
annotated as such.
"""
# Function to be type checked.
@beartype
def xenos(interex: str, diasporex: str) -> None:
interex + diasporex
# Call this function and assert no value to be returned.
assert xenos(
'Luna Wolves', diasporex='Iron Hands Legion') is None
# ....................{ TESTS ~ fail }....................
def test_beartype_fail_keyword_unknown(self) -> None:
"""
Test bear typing of an annotated function call passed an unrecognized
keyword parameter.
"""
# Annotated function to be type checked.
@beartype
def tau(kroot: str, vespid: str) -> str:
return kroot + vespid
# Call this function with an unrecognized keyword parameter and assert the
# expected exception.
with pytest.raises(TypeError) as exception:
tau(kroot='Greater Good', nicassar='Dhow')
# For readability, this should be a "TypeError" synopsizing the exact issue
# raised by the Python interpreter on calling the original function rather
# than a "TypeError" failing to synopsize the exact issue raised by the
# wrapper type-checking the original function. Since the function
# annotations defined above guarantee that the exception message of the
# latter will be suffixed by "not a str", ensure this is *NOT* the case.
assert not str(exception.value).endswith('not a str')
def test_beartype_fail_param_name(self) -> None:
"""
Test bear typing of a function accepting a parameter name reserved for
use by the `@beartype` decorator.
"""
# Define a function accepting a reserved parameter name and assert the
# expected exception.
@beartype
@should_raise(NameError)
def jokaero(weaponsmith: str, __beartype_func: str) -> str:
return weaponsmith + __beartype_func
# ....................{ TESTS ~ fail : type }....................
def test_beartype_fail_param_type(self) -> None:
"""
Test bear typing of an annotated function call failing a parameter type
check.
"""
# Annotated function to be type checked.
@beartype
def eldar(isha: str, asuryan: (str, int)) -> str:
return isha + asuryan
# Call this function with an invalid type and assert the expected exception.
with pytest.raises(TypeError):
eldar('Mother of the Eldar', 100.100)
def test_beartype_fail_return_type(self) -> None:
"""
Test bear typing of an annotated function call failing a return type
check.
"""
# Annotated function to be type checked.
@beartype
def necron(star_god: str, old_one: str) -> str:
return 60e6
# Call this function and assert the expected exception.
with pytest.raises(TypeError):
necron("C'tan", 'Elder Thing')
# ....................{ TESTS ~ fail : annotation }....................
def test_beartype_fail_annotation_param(self) -> None:
"""
Test bear typing of a function with an unsupported parameter annotation.
"""
# Assert the expected exception from attempting to type check a function
# with a parameter annotation that is *NOT* a type.
with pytest.raises(TypeError):
@beartype
def nurgle(nurgling: str, great_unclean_one: 'Bringer of Poxes') -> str:
return nurgling + great_unclean_one
def test_beartype_fail_annotation_return(self) -> None:
"""
Test bear typing of a function with an unsupported return annotation.
"""
# Assert the expected exception from attempting to type check a function
# with a return annotation that is *NOT* a type.
with pytest.raises(TypeError):
@beartype
def tzeentch(disc: str, lord_of_change: str) -> 'Player of Games':
return disc + lord_of_change
```
#### File: processing/validators/test_taskValidator.py
```python
from unittest import TestCase
from testfixtures import should_raise
from rec_to_nwb.processing.exceptions.invalid_metadata_exception import InvalidMetadataException
from rec_to_nwb.processing.validation.task_validator import TaskValidator
class TestTaskValidator(TestCase):
def test_task_validator_existing_tasks_valid(self):
tasks = [{'task_name': 'task1'}, {'task_name': 'task2'}]
task_validator = TaskValidator(tasks)
result = task_validator.create_summary()
self.assertTrue(result.is_valid)
@should_raise(InvalidMetadataException)
def test_task_validator_empty_tasks_failed(self):
tasks = []
task_validator = TaskValidator(tasks)
result = task_validator.create_summary()
self.assertFalse(result.is_valid)
``` |
{
"source": "jihyunbak/spyglass",
"score": 3
} |
#### File: nwb_datajoint/common/common_backup.py
```python
import numpy as np
import datajoint as dj
schema = dj.schema('common_backup')
@schema
class SpikeSortingBackUp(dj.Manual):
definition = """
nwb_file_name: varchar(500)
sort_group_id: int
sort_interval_name: varchar(500)
filter_parameter_set_name: varchar(500)
sorter_name: varchar(500)
spikesorter_parameter_set_name: varchar(500)
---
sorting_id: varchar(500)
analysis_file_name: varchar(1000)
time_of_sort: int # in Unix time, to the nearest second
units_object_id: varchar(100)
"""
def insert_from_backup(self, backup_file):
"""backup file lives in /common/backup_keys/
Parameters
----------
backup_file : str
path to npy pickle file containing keys
"""
backup_keys = np.load(backup_file, allow_pickle=True)
self.insert(backup_keys, skip_duplicates=True)
@schema
class CuratedSpikeSortingBackUp(dj.Manual):
definition = """
nwb_file_name: varchar(500)
sort_group_id: int
sort_interval_name: varchar(500)
filter_parameter_set_name: varchar(500)
sorting_id: varchar(500)
---
analysis_file_name: varchar(1000)
units_object_id: varchar(100)
"""
def insert_from_backup(self, backup_file):
backup_keys = np.load(backup_file, allow_pickle=True)
self.insert(backup_keys, skip_duplicates=True)
```
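These backup tables are plain `dj.Manual` tables, so restoring comes down to pointing `insert_from_backup` at the pickled key file. A minimal usage sketch; the file names are hypothetical and only illustrate the `.npy`-of-keys format described in the docstring:

```python
from nwb_datajoint.common.common_backup import (
    SpikeSortingBackUp, CuratedSpikeSortingBackUp)

# Each backup file is assumed to be a pickled numpy array of row dicts
# whose keys match the table definitions above.
SpikeSortingBackUp().insert_from_backup(
    '/common/backup_keys/spikesorting_backup_keys.npy')
CuratedSpikeSortingBackUp().insert_from_backup(
    '/common/backup_keys/curated_spikesorting_backup_keys.npy')
```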
#### File: nwb_datajoint/common/common_session.py
```python
import datajoint as dj
from .common_device import CameraDevice, DataAcquisitionDevice, Probe
from .common_lab import Institution, Lab, LabMember
from .common_nwbfile import Nwbfile
from .common_subject import Subject
from .nwb_helper_fn import get_nwb_file
schema = dj.schema('common_session')
# TODO: figure out what to do about ExperimenterList
@schema
class Session(dj.Imported):
definition = """
# Table for holding experimental sessions.
-> Nwbfile
---
-> [nullable] Subject
-> [nullable] Institution
-> [nullable] Lab
session_id = NULL: varchar(200)
session_description: varchar(2000)
session_start_time: datetime
timestamps_reference_time: datetime
experiment_description = NULL: varchar(2000)
"""
def make(self, key):
# These imports must go here to avoid cyclic dependencies
# from .common_task import Task, TaskEpoch
from .common_interval import IntervalList
# from .common_ephys import Unit
nwb_file_name = key['nwb_file_name']
nwb_file_abspath = Nwbfile.get_abs_path(nwb_file_name)
nwbf = get_nwb_file(nwb_file_abspath)
# certain data are not associated with a single NWB file / session because they may apply to
# multiple sessions. these data go into dj.Manual tables
# e.g., a lab member may be associated with multiple experiments, so the lab member table should not
# be dependent on (contain a primary key for) a session
print('Institution...')
Institution().insert_from_nwbfile(nwbf)
print('Lab...')
Lab().insert_from_nwbfile(nwbf)
print('LabMember...')
LabMember().insert_from_nwbfile(nwbf)
print('Subject...')
Subject().insert_from_nwbfile(nwbf)
print('DataAcquisitionDevice...')
DataAcquisitionDevice().insert_from_nwbfile(nwbf)
print('CameraDevice...')
CameraDevice().insert_from_nwbfile(nwbf)
print('Probe...')
Probe().insert_from_nwbfile(nwbf)
if nwbf.subject is not None:
subject_id = nwbf.subject.subject_id
else:
subject_id = None
Session().insert1({
'nwb_file_name': nwb_file_name,
'subject_id': subject_id,
'institution_name': nwbf.institution,
'lab_name': nwbf.lab,
'session_id': nwbf.session_id,
'session_description': nwbf.session_description,
'session_start_time': nwbf.session_start_time,
'timestamps_reference_time': nwbf.timestamps_reference_time,
'experiment_description': nwbf.experiment_description
}, skip_duplicates=True)
print('Skipping Apparatus for now...')
# Apparatus().insert_from_nwbfile(nwbf)
# interval lists depend on Session (as a primary key) but users may want to add these manually so this is
# a manual table that is also populated from NWB files
print('IntervalList...')
IntervalList().insert_from_nwbfile(nwbf, nwb_file_name=nwb_file_name)
# print('Unit...')
# Unit().insert_from_nwbfile(nwbf, nwb_file_name=nwb_file_name)
@schema
class ExperimenterList(dj.Imported):
definition = """
-> Session
"""
class Experimenter(dj.Part):
definition = """
-> ExperimenterList
-> LabMember
"""
def make(self, key):
nwb_file_name = key['nwb_file_name']
nwb_file_abspath = Nwbfile().get_abs_path(nwb_file_name)
self.insert1({'nwb_file_name': nwb_file_name}, skip_duplicates=True) # TODO is this necessary??
nwbf = get_nwb_file(nwb_file_abspath)
if nwbf.experimenter is None:
return
for name in nwbf.experimenter:
LabMember().insert_from_name(name)
key = dict()
key['nwb_file_name'] = nwb_file_name
key['lab_member_name'] = name
self.Experimenter().insert1(key)
@schema
class SessionGroup(dj.Manual):
definition = """
session_group_name: varchar(200)
---
session_group_description: varchar(2000)
"""
@staticmethod
def add_group(session_group_name: str, session_group_description):
SessionGroup.insert1({
'session_group_name': session_group_name,
'session_group_description': session_group_description
})
@staticmethod
def update_session_group_description(session_group_name: str, session_group_description):
SessionGroup.update1({
'session_group_name': session_group_name,
'session_group_description': session_group_description
})
@staticmethod
def add_session_to_group(nwb_file_name: str, session_group_name: str):
SessionGroupSession.insert1({
'session_group_name': session_group_name,
'nwb_file_name': nwb_file_name
})
@staticmethod
def remove_session_from_group(nwb_file_name: str, session_group_name: str):
query = {'session_group_name': session_group_name, 'nwb_file_name': nwb_file_name}
(SessionGroupSession & query).delete()
@staticmethod
def delete_group(session_group_name: str):
query = {'session_group_name': session_group_name}
(SessionGroup & query).delete()
@staticmethod
def get_group_sessions(session_group_name: str):
results = (SessionGroupSession & {'session_group_name': session_group_name}).fetch(as_dict=True)
return [
{'nwb_file_name': result['nwb_file_name']}
for result in results
]
# The reason this is not implemented as a dj.Part is that
# datajoint prohibits deleting from a subtable without
# also deleting the parent table.
# See: https://docs.datajoint.org/python/computation/03-master-part.html
@schema
class SessionGroupSession(dj.Manual):
definition = """
-> SessionGroup
-> Session
"""
```
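The static methods on `SessionGroup` form a small convenience API over the two Manual tables. A minimal sketch, assuming an NWB file named `example20200101_.nwb` (hypothetical) has already been inserted into `Session`:

```python
from nwb_datajoint.common.common_session import SessionGroup

# Create a group, attach an existing session, then list the group's members.
SessionGroup.add_group('pilot_sessions', 'Sessions used for the pilot analysis')
SessionGroup.add_session_to_group('example20200101_.nwb', 'pilot_sessions')
print(SessionGroup.get_group_sessions('pilot_sessions'))
# -> [{'nwb_file_name': 'example20200101_.nwb'}]
```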
#### File: nwb_datajoint/decoding/get_unit_waveforms.py
```python
import spikeextractors as se
import numpy as np
def _extract_snippet_from_traces(
traces,
start_frame,
end_frame,
channel_indices
):
if (0 <= start_frame) and (end_frame <= traces.shape[1]):
x = traces[:, start_frame:end_frame]
else:
# handle edge cases
x = np.zeros((traces.shape[0], end_frame - start_frame), dtype=traces.dtype)
i1 = int(max(0, start_frame))
i2 = int(min(traces.shape[1], end_frame))
x[:, (i1 - start_frame):(i2 - start_frame)] = traces[:, i1:i2]
if channel_indices is not None:
x = x[channel_indices, :]
return x
def _get_unit_waveforms_for_chunk(
recording,
sorting,
frame_offset,
unit_ids,
snippet_len,
channel_ids_by_unit
):
# chunks are chosen small enough so that all traces can be loaded into memory
print('Retrieving traces for chunk')
traces = recording.get_traces()
print('Collecting waveforms for chunk')
unit_waveforms = []
for unit_id in unit_ids:
times0 = sorting.get_unit_spike_train(unit_id=unit_id)
if channel_ids_by_unit is not None:
channel_ids = channel_ids_by_unit[unit_id]
all_channel_ids = recording.get_channel_ids()
channel_indices = [
np.array(all_channel_ids).tolist().index(ch_id)
for ch_id in channel_ids
]
len_channel_indices = len(channel_indices)
else:
channel_indices = None
len_channel_indices = traces.shape[0]
# num_channels x len_of_one_snippet
snippets = [
_extract_snippet_from_traces(
traces,
start_frame=frame_offset + int(t) - snippet_len[0],
end_frame=frame_offset + int(t) + snippet_len[1],
channel_indices=channel_indices
)
for t in times0
]
if len(snippets) > 0:
unit_waveforms.append(
# len(times0) x num_channels_in_nbhd[unit_id] x len_of_one_snippet
np.stack(snippets)
)
else:
unit_waveforms.append(
np.zeros((0, len_channel_indices, snippet_len[0] + snippet_len[1]), dtype=traces.dtype)
)
return unit_waveforms
def _divide_recording_into_time_chunks(num_frames, chunk_size, padding_size):
chunks = []
ii = 0
while ii < num_frames:
ii2 = int(min(ii + chunk_size, num_frames))
chunks.append(dict(
istart=ii,
iend=ii2,
istart_with_padding=int(max(0, ii - padding_size)),
iend_with_padding=int(min(num_frames, ii2 + padding_size))
))
ii = ii2
return chunks
def get_unit_waveforms(
recording,
sorting,
unit_ids,
channel_ids_by_unit,
snippet_len
):
if not isinstance(snippet_len, list) and not isinstance(snippet_len, tuple):
b = int(snippet_len / 2)
a = int(snippet_len) - b
snippet_len = [a, b]
num_channels = recording.get_num_channels()
num_frames = recording.get_num_frames()
num_bytes_per_chunk = 1000 * 1000 * 1000 # ? how to choose this
num_bytes_per_frame = num_channels * 2
chunk_size = num_bytes_per_chunk / num_bytes_per_frame
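    # chunk_size is a frame count: ~1 GB of traces per chunk, given 2-byte (int16) samples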
padding_size = 100 + snippet_len[0] + snippet_len[1] # a bit excess padding
chunks = _divide_recording_into_time_chunks(
num_frames=num_frames,
chunk_size=chunk_size,
padding_size=padding_size
)
all_unit_waveforms = [[] for ii in range(len(unit_ids))]
for ii, chunk in enumerate(chunks):
# chunk: {istart, iend, istart_with_padding, iend_with_padding} # include padding
print(f'Processing chunk {ii + 1} of {len(chunks)}; chunk-range: {chunk["istart_with_padding"]} {chunk["iend_with_padding"]}; num-frames: {num_frames}')
recording_chunk = se.SubRecordingExtractor(
parent_recording=recording,
start_frame=chunk['istart_with_padding'],
end_frame=chunk['iend_with_padding']
)
# note that the efficiency of this operation may need improvement (really depends on sorting extractor implementation)
sorting_chunk = se.SubSortingExtractor(
parent_sorting=sorting,
start_frame=chunk['istart'],
end_frame=chunk['iend']
)
print(f'Getting unit waveforms for chunk {ii + 1} of {len(chunks)}')
# num_events_in_chunk x num_channels_in_nbhd[unit_id] x len_of_one_snippet
unit_waveforms = _get_unit_waveforms_for_chunk(
recording=recording_chunk,
sorting=sorting_chunk,
frame_offset=chunk['istart'] - chunk['istart_with_padding'], # just the padding size (except 0 for first chunk)
unit_ids=unit_ids,
snippet_len=snippet_len,
channel_ids_by_unit=channel_ids_by_unit
)
for i_unit, x in enumerate(unit_waveforms):
all_unit_waveforms[i_unit].append(x)
# concatenate the results over the chunks
unit_waveforms = [
# tot_num_events_for_unit x num_channels_in_nbhd[unit_id] x len_of_one_snippet
np.concatenate(all_unit_waveforms[i_unit], axis=0)
for i_unit in range(len(unit_ids))
]
return unit_waveforms
```
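A rough, self-contained sketch of calling `get_unit_waveforms` on a tiny synthetic recording, assuming the old `spikeextractors` NumPy extractors (`NumpyRecordingExtractor`, `NumpySortingExtractor`) behave as expected; the shapes in the trailing comment follow from the 20+20-sample snippet length:

```python
import numpy as np
import spikeextractors as se
from nwb_datajoint.decoding.get_unit_waveforms import get_unit_waveforms

# 4 channels, 1 s at 30 kHz of fake int16 traces
traces = np.random.randint(-100, 100, size=(4, 30000), dtype=np.int16)
recording = se.NumpyRecordingExtractor(timeseries=traces, sampling_frequency=30000)

sorting = se.NumpySortingExtractor()
sorting.set_times_labels(times=np.array([1000, 5000, 12000]),
                         labels=np.array([1, 1, 2]))
sorting.set_sampling_frequency(30000)

waveforms = get_unit_waveforms(
    recording, sorting,
    unit_ids=[1, 2],
    channel_ids_by_unit={1: [0, 1], 2: [2, 3]},  # per-unit channel neighborhoods
    snippet_len=(20, 20),                        # samples before/after each spike
)
# waveforms[0].shape == (2, 2, 40), waveforms[1].shape == (1, 2, 40)
```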
#### File: nwb_datajoint/figurl_views/SpikeSortingRecordingView.py
```python
from typing import List, Union
import datajoint as dj
import sortingview as sv
from sortingview.SpikeSortingView import create_raw_traces_plot
from sortingview.SpikeSortingView.Figure import Figure
import numpy as np
import kachery_client as kc
import os
import spikeinterface as si
from ..spikesorting.spikesorting_recording import SpikeSortingRecording
schema = dj.schema('figurl_view_spike_sorting_recording')
@schema
class SpikeSortingRecordingView(dj.Computed):
definition = """
# Schema for storing figurl views of spike sorting recordings
-> SpikeSortingRecording
---
figurl: varchar(10000)
"""
def make(self, key):
# Get the SpikeSortingRecording row
rec = (SpikeSortingRecording & key).fetch1()
nwb_file_name = rec['nwb_file_name']
sort_group_id = rec['sort_group_id']
sort_interval_name = rec['sort_interval_name']
recording_path = rec['recording_path']
# Load the SI recording extractor
print('Loading recording')
recording: si.BaseRecording = si.load_extractor(recording_path)
# Raw traces (sample)
# Extract the first 1 second of traces
print('Extracting traces')
        traces: np.ndarray = recording.get_traces(
start_frame=0,
end_frame=int(recording.get_sampling_frequency() * 1)
).astype(np.float32)
f1 = create_raw_traces_plot(
traces=traces,
start_time_sec=0,
sampling_frequency=recording.get_sampling_frequency(),
label='Raw traces (sample)'
)
# Electrode geometry
print('Electrode geometry')
f2 = create_electrode_geometry(recording)
label = f'{nwb_file_name}:{sort_group_id}:{sort_interval_name}'
print(label)
# Mountain layout
F = create_mountain_layout(
figures=[f1, f2],
label=label
)
# Insert row into table
key2 = dict(key, **{
'figurl': F.url()
})
self.insert1(key2)
def create_electrode_geometry(recording: si.BaseRecording):
channel_locations = {
str(channel_id): location.astype(np.float32)
for location, channel_id in zip(
recording.get_channel_locations(),
recording.get_channel_ids()
)
}
data = {
'type': 'ElectrodeGeometry',
'channelLocations': channel_locations
}
return Figure(data=data, label='Electrode geometry')
def create_mountain_layout(figures: List[Figure], label: Union[str, None]=None, sorting_curation_uri: Union[str, None]=None) -> Figure:
if label is None:
label = 'SpikeSortingView'
data = {
'type': 'MountainLayout',
'views': [
{
'type': fig0.data['type'],
'label': fig0.label,
'figureDataSha1': _upload_data_and_return_sha1(fig0.get_serialized_figure_data())
}
for fig0 in figures
]
}
if sorting_curation_uri is not None:
data['sortingCurationUri'] = sorting_curation_uri
return Figure(data=data, label=label)
def _upload_data_and_return_sha1(data):
data_uri = kc.store_json(data)
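    # kachery URIs are assumed to look like 'sha1://<hash>[/<label>]',
    # so the hash is the third '/'-separated field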
data_hash = data_uri.split('/')[2]
kc.upload_file(data_uri, channel=os.environ['FIGURL_CHANNEL'], single_chunk=True)
return data_hash
```
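`SpikeSortingRecordingView` is a `dj.Computed` table, so rows are generated with the usual DataJoint populate call; a minimal sketch, with the import path taken from the file location above:

```python
from nwb_datajoint.figurl_views.SpikeSortingRecordingView import SpikeSortingRecordingView

# Build a figurl view for every SpikeSortingRecording row that lacks one.
SpikeSortingRecordingView.populate()
# Inspect the generated links
print(SpikeSortingRecordingView.fetch('figurl'))
```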
#### File: nwb_datajoint/spikesorting/sortingview.py
```python
import json
from pathlib import Path
import datajoint as dj
import numpy as np
import sortingview as sv
import spikeinterface as si
from .spikesorting_recording import SpikeSortingRecording
from .spikesorting_curation import Curation
schema = dj.schema('spikesorting_sortingview')
@schema
class SortingviewWorkspaceSelection(dj.Manual):
definition = """
-> Curation
"""
@schema
class SortingviewWorkspace(dj.Computed):
definition = """
-> SortingviewWorkspaceSelection
---
workspace_uri: varchar(1000)
sortingview_recording_id: varchar(30)
sortingview_sorting_id: varchar(30)
channel = 'franklab2' : varchar(80) # the name of the kachery channel for data sharing
"""
def make(self, key: dict):
# Load recording, wrap it as old spikeextractors recording extractor,
# then save as h5 (sortingview requires this format)
recording = Curation.get_recording_extractor(key)
old_recording = si.create_extractor_from_new_recording(recording)
recording_path = (SpikeSortingRecording & key).fetch1('recording_path')
h5_recording = sv.LabboxEphysRecordingExtractor.store_recording_link_h5(old_recording,
str(Path(
recording_path) / 'recording.h5'),
dtype='int16')
workspace_name = SpikeSortingRecording._get_recording_name(key)
workspace = sv.create_workspace(label=workspace_name)
key['workspace_uri'] = workspace.uri
key['sortingview_recording_id'] = workspace.add_recording(recording=h5_recording,
label=workspace_name)
sorting = Curation.get_curated_sorting_extractor(key)
sorting = si.create_extractor_from_new_sorting(sorting)
h5_sorting = sv.LabboxEphysSortingExtractor.store_sorting_link_h5(
sorting, str(Path(recording_path) / 'sorting.h5'))
workspace_uri = (self & key).fetch1('workspace_uri')
workspace = sv.load_workspace(workspace_uri)
key['sortingview_sorting_id'] = workspace.add_sorting(recording_id=workspace.recording_ids[0],
sorting=h5_sorting)
# add metrics to the sorting if they exist
metrics = (Curation & key).fetch1('metrics')
self.add_metrics_to_sorting(
key, metrics, key['sortingview_sorting_id'])
self.insert1(key)
def remove_sorting_from_workspace(self, key):
return NotImplementedError
def add_metrics_to_sorting(self, key: dict, metrics: dict,
sortingview_sorting_id: str = None):
"""Adds a metrics to the specified sorting.
Parameters
----------
key : dict
metrics : dict
Quality metrics.
Key: name of quality metric
Value: another dict in which key: unit ID (must be str),
value: metric value (float)
sortingview_sorting_id : str, optional
if not specified, just uses the first sorting ID of the workspace
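        Example
        -------
        A hypothetical metrics dict of the expected shape::

            {'snr': {'1': 4.2, '2': 7.9},
             'isi_violation': {'1': 0.01, '2': 0.0}}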
"""
# check that the unit ids are str
for metric_name in metrics:
unit_ids = metrics[metric_name].keys()
        assert all(isinstance(unit_id, str) for unit_id in unit_ids)
# the metrics must be in this form to be added to sortingview
external_metrics = [{'name': metric_name,
'label': metric_name,
'tooltip': metric_name,
'data': metric} for metric_name, metric in metrics.items()]
workspace_uri = (self & key).fetch1('workspace_uri')
workspace = sv.load_workspace(workspace_uri)
if sortingview_sorting_id is None:
print(
'sortingview sorting ID not specified, using the first sorting in the workspace...')
sortingview_sorting_id = workspace.sorting_ids[0]
workspace.set_unit_metrics_for_sorting(sorting_id=sortingview_sorting_id,
metrics=external_metrics)
def set_snippet_len(self, key: dict, snippet_len: int):
"""Sets the snippet length of a workspace specified by the key
Parameters
----------
key : dict
defines a workspace
"""
workspace_uri = (self & key).fetch1('workspace_uri')
workspace = sv.load_workspace(workspace_uri)
workspace.set_snippet_len(snippet_len)
def url(self, key, sortingview_sorting_id: str = None):
"""Generate a URL for visualizing the sorting on the browser
Parameters
----------
key : dict
defines a workspace
sortingview_sorting_id : str, optional
sortingview sorting ID to visualize; if None then chooses the first one
Returns
-------
url : str
"""
workspace_uri = (self & key).fetch1('workspace_uri')
workspace = sv.load_workspace(workspace_uri)
recording_id = workspace.recording_ids[0]
if sortingview_sorting_id is None:
sortingview_sorting_id = workspace.sorting_ids[0]
url = workspace.spikesortingview(recording_id=recording_id,
sorting_id=sortingview_sorting_id,
label=workspace.label,
include_curation=True)
return url
def insert_manual_curation(self, key: dict, description=''):
"""Based on information in key for an AutoCurationSorting, loads the curated sorting from sortingview,
saves it (with labels and the optional description, and inserts it to CuratedSorting
Assumes that the workspace corresponding to the recording and (original) sorting exists
Parameters
----------
key : dict
primary key of AutomaticCuration
description: str
optional description of curated sort
"""
workspace_uri = (SortingviewWorkspace & key).fetch1('workspace_uri')
workspace = sv.load_workspace(workspace_uri=workspace_uri)
sortingview_sorting_id = (SortingviewWorkspace & key).fetch1(
'sortingview_sorting_id')
manually_curated_sorting = workspace.get_curated_sorting_extractor(
sorting_id=sortingview_sorting_id)
# get the labels and remove the non-primary merged units
labels = workspace.get_sorting_curation(
sorting_id=sortingview_sorting_id)
# turn labels to list of str, only including accepted units.
unit_labels = labels['labelsByUnit']
unit_ids = [unit_id for unit_id in unit_labels]
# load the metrics
from .spikesorting_curation import QualityMetrics
metrics_path = (QualityMetrics & {'nwb_file_name': key['nwb_file_name'],
'recording_id': key['recording_id'],
'waveform_params_name': key['waveform_params_name'],
'metric_params_name': key['metric_params_name'],
'sorting_id': key['parent_sorting_id']}).fetch1('quality_metrics_path')
with open(metrics_path) as f:
quality_metrics = json.load(f)
# remove non-primary merged units
clusters_merged = bool(labels['mergeGroups'])
if clusters_merged:
for m in labels['mergeGroups']:
for merged_unit in m[1:]:
unit_ids.remove(merged_unit)
del labels['labelsByUnit'][merged_unit]
# if we've merged we have to recompute metrics
metrics = None
else:
metrics = quality_metrics
# insert this curation into the Table
return self.insert_curation(key, parent_curation_id=key['parent_curation_id'], labels=labels['labelsByUnit'],
merge_groups=labels['mergeGroups'], metrics=metrics, description='manually curated')
```
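A rough end-to-end sketch of how the workspace table above might be driven; the restriction on `Curation` is illustrative and assumes such a row already exists:

```python
from nwb_datajoint.spikesorting.spikesorting_curation import Curation
from nwb_datajoint.spikesorting.sortingview import (
    SortingviewWorkspaceSelection, SortingviewWorkspace)

# Pick the primary key of some existing curated sorting (restriction is illustrative).
key = (Curation & {'nwb_file_name': 'example20200101_.nwb'}).fetch1('KEY')

SortingviewWorkspaceSelection.insert1(key, skip_duplicates=True)
SortingviewWorkspace.populate()             # builds and registers the workspace
print(SortingviewWorkspace().url(key))      # figurl link for manual curation
```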
#### File: spyglass/tests/conftest.py
```python
import datajoint as dj
import pathlib
import os
import shutil
import sys
import tempfile
from .datajoint._config import DATAJOINT_SERVER_PORT
from .datajoint._datajoint_server import run_datajoint_server, kill_datajoint_server
thisdir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(thisdir)
global __PROCESS
__PROCESS = None
def pytest_addoption(parser):
parser.addoption('--current', action='store_true', dest="current",
default=False, help="run only tests marked as current")
def pytest_configure(config):
config.addinivalue_line(
"markers", "current: for convenience -- mark one test as current"
)
markexpr_list = []
if config.option.current:
markexpr_list.append('current')
if len(markexpr_list) > 0:
markexpr = ' and '.join(markexpr_list)
setattr(config.option, 'markexpr', markexpr)
_set_env()
# note that in this configuration, every test will use the same datajoint server
# this may create conflicts and dependencies between tests
# it may be better but significantly slower to start a new server for every test
# but the server needs to be started before tests are collected because datajoint runs when the source
# files are loaded, not when the tests are run. one solution might be to restart the server after every test
global __PROCESS
__PROCESS = run_datajoint_server()
def pytest_unconfigure(config):
if __PROCESS:
print('Terminating datajoint compute resource process')
__PROCESS.terminate()
# TODO handle ResourceWarning: subprocess X is still running
# __PROCESS.join()
kill_datajoint_server()
shutil.rmtree(os.environ['NWB_DATAJOINT_BASE_DIR'])
def _set_env():
"""Set environment variables."""
print('Setting datajoint and kachery environment variables.')
nwb_datajoint_base_dir = pathlib.Path(tempfile.mkdtemp())
spike_sorting_storage_dir = nwb_datajoint_base_dir / 'spikesorting'
kachery_storage_dir = nwb_datajoint_base_dir / 'kachery-storage'
tmp_dir = nwb_datajoint_base_dir / 'tmp'
os.environ['NWB_DATAJOINT_BASE_DIR'] = str(nwb_datajoint_base_dir)
print('NWB_DATAJOINT_BASE_DIR set to', nwb_datajoint_base_dir)
os.environ['DJ_SUPPORT_FILEPATH_MANAGEMENT'] = 'TRUE'
os.environ['SPIKE_SORTING_STORAGE_DIR'] = str(spike_sorting_storage_dir)
# export KACHERY_DAEMON_HOST=...
# export KACHERY_DAEMON_PORT=...
os.environ['KACHERY_TEMP_DIR'] = str(tmp_dir)
os.environ['NWB_DATAJOINT_TEMP_DIR'] = str(tmp_dir)
os.environ['KACHERY_STORAGE_DIR'] = str(kachery_storage_dir)
# os.environ['FIGURL_CHANNEL'] = 'franklab2'
os.mkdir(spike_sorting_storage_dir)
os.mkdir(tmp_dir)
os.mkdir(kachery_storage_dir)
raw_dir = nwb_datajoint_base_dir / 'raw'
analysis_dir = nwb_datajoint_base_dir / 'analysis'
os.mkdir(raw_dir)
os.mkdir(analysis_dir)
dj.config['database.host'] = 'localhost'
dj.config['database.port'] = DATAJOINT_SERVER_PORT
dj.config['database.user'] = 'root'
dj.config['database.password'] = '<PASSWORD>'
dj.config['stores'] = {
'raw': {
'protocol': 'file',
'location': str(raw_dir),
'stage': str(raw_dir)
},
'analysis': {
'protocol': 'file',
'location': str(analysis_dir),
'stage': str(analysis_dir)
}
}
```
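The `--current` option and `current` marker registered above are a convenience for running a single test while iterating; a sketch of how a test might opt in (test name hypothetical):

```python
import pytest

@pytest.mark.current
def test_thing_i_am_debugging():
    assert 1 + 1 == 2

# Run only the marked test(s) with:  pytest --current
```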
#### File: tests/data_import/test_insert_sessions.py
```python
import datetime
import datajoint as dj
from hdmf.backends.warnings import BrokenLinkWarning
import pathlib
import pynwb
import pytest
import shutil
import os
from nwb_datajoint.data_import.insert_sessions import copy_nwb_link_raw_ephys
@pytest.fixture()
def new_nwbfile_raw_file_name(tmp_path):
nwbfile = pynwb.NWBFile(
session_description='session_description',
identifier='identifier',
session_start_time=datetime.datetime.now(datetime.timezone.utc),
)
device = nwbfile.create_device('dev1')
group = nwbfile.create_electrode_group('tetrode1', 'tetrode description', 'tetrode location', device)
nwbfile.add_electrode(id=1, x=1.0, y=2.0, z=3.0, imp=-1.0, location='CA1', filtering='none',
group=group, group_name='tetrode1')
region = nwbfile.create_electrode_table_region(region=[0], description='electrode 1')
es = pynwb.ecephys.ElectricalSeries(
name='test_ts',
data=[1, 2, 3],
timestamps=[1., 2., 3.],
electrodes=region,
)
nwbfile.add_acquisition(es)
nwb_datajoint_base_dir = tmp_path / 'nwb-data'
os.environ['NWB_DATAJOINT_BASE_DIR'] = str(nwb_datajoint_base_dir)
os.mkdir(os.environ['NWB_DATAJOINT_BASE_DIR'])
raw_dir = nwb_datajoint_base_dir / 'raw'
os.mkdir(raw_dir)
dj.config['stores'] = {
'raw': {
'protocol': 'file',
'location': str(raw_dir),
'stage': str(raw_dir)
},
}
file_name = 'raw.nwb'
file_path = raw_dir / file_name
with pynwb.NWBHDF5IO(str(file_path), mode='w') as io:
io.write(nwbfile)
return file_name
@pytest.fixture()
def new_nwbfile_no_ephys_file_name():
return 'raw_no_ephys.nwb'
@pytest.fixture()
def moved_nwbfile_no_ephys_file_path(tmp_path, new_nwbfile_no_ephys_file_name):
return tmp_path / new_nwbfile_no_ephys_file_name
def test_copy_nwb(new_nwbfile_raw_file_name, new_nwbfile_no_ephys_file_name, moved_nwbfile_no_ephys_file_path):
copy_nwb_link_raw_ephys(new_nwbfile_raw_file_name, new_nwbfile_no_ephys_file_name)
# new file should not have ephys data
base_dir = pathlib.Path(os.getenv('NWB_DATAJOINT_BASE_DIR', None))
new_nwbfile_raw_file_name_abspath = base_dir / 'raw' / new_nwbfile_raw_file_name
out_nwb_file_abspath = base_dir / 'raw' / new_nwbfile_no_ephys_file_name
with pynwb.NWBHDF5IO(path=str(out_nwb_file_abspath), mode='r') as io:
nwbfile = io.read()
assert 'test_ts' in nwbfile.acquisition # this still exists but should be a link now
assert nwbfile.acquisition['test_ts'].data.file.filename == str(new_nwbfile_raw_file_name_abspath)
# test readability after moving the linking raw file (paths are stored as relative paths in NWB)
# so this should break the link (moving the linked-to file should also break the link)
shutil.move(out_nwb_file_abspath, moved_nwbfile_no_ephys_file_path)
with pynwb.NWBHDF5IO(path=str(moved_nwbfile_no_ephys_file_path), mode='r') as io:
with pytest.warns(BrokenLinkWarning):
nwbfile = io.read() # should raise BrokenLinkWarning
assert 'test_ts' not in nwbfile.acquisition
```
#### File: tests/datajoint/_datajoint_server.py
```python
import hither2 as hi
import kachery_client as kc
import multiprocessing
import os
from pymysql.err import OperationalError
import traceback
import time
from ._config import DATAJOINT_SERVER_PORT
DOCKER_IMAGE_NAME = 'datajoint-server-pytest'
def run_service_datajoint_server():
# The following cleanup is needed because we terminate this compute resource process
# See: https://pytest-cov.readthedocs.io/en/latest/subprocess-support.html
from pytest_cov.embed import cleanup_on_sigterm
cleanup_on_sigterm()
os.environ['RUNNING_PYTEST'] = 'TRUE'
with hi.ConsoleCapture(label='[datajoint-server]'):
ss = kc.ShellScript(f"""
#!/bin/bash
set -ex
docker kill {DOCKER_IMAGE_NAME} > /dev/null 2>&1 || true
docker rm {DOCKER_IMAGE_NAME} > /dev/null 2>&1 || true
exec docker run --name {DOCKER_IMAGE_NAME} -e MYSQL_ROOT_PASSWORD=<PASSWORD> -p {DATAJOINT_SERVER_PORT}:3306 datajoint/mysql
""", redirect_output_to_stdout=True) # noqa: E501
ss.start()
ss.wait()
def run_datajoint_server():
print('Starting datajoint server')
ss_pull = kc.ShellScript("""
#!/bin/bash
set -ex
exec docker pull datajoint/mysql
""")
ss_pull.start()
ss_pull.wait()
process = multiprocessing.Process(target=run_service_datajoint_server, kwargs=dict())
process.start()
try:
_wait_for_datajoint_server_to_start()
except Exception:
kill_datajoint_server()
raise
return process
# yield process
# process.terminate()
# kill_datajoint_server()
def kill_datajoint_server():
print('Terminating datajoint server')
ss2 = kc.ShellScript(f"""
#!/bin/bash
set -ex
docker kill {DOCKER_IMAGE_NAME} || true
docker rm {DOCKER_IMAGE_NAME}
""")
ss2.start()
ss2.wait()
def _wait_for_datajoint_server_to_start():
time.sleep(15) # it takes a while to start the server
timer = time.time()
print('Waiting for DataJoint server to start. Time', timer)
while True:
try:
from nwb_datajoint.common import Session # noqa: F401
return
except OperationalError as e: # e.g. Connection Error
print('DataJoint server not yet started. Time', time.time())
print(e)
except Exception:
print('Failed to import Session. Time', time.time())
print(traceback.format_exc())
current_time = time.time()
elapsed = current_time - timer
if elapsed > 300:
raise Exception('Timeout while waiting for datajoint server to start and '
'import Session to succeed. Time', current_time)
time.sleep(5)
``` |
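`_wait_for_datajoint_server_to_start` retries an import until it succeeds or a five-minute budget is exhausted. The snippet below is a minimal, dependency-free sketch of that wait-with-timeout pattern, assuming only a caller-supplied `check()` callable; the names and timings are illustrative and not taken from the module above.
```python
import time

def wait_until(check, timeout_s=300.0, poll_interval_s=5.0, initial_delay_s=0.0):
    """Call check() repeatedly until it returns True or timeout_s elapses."""
    if initial_delay_s:
        time.sleep(initial_delay_s)  # e.g. give a container a head start before polling
    deadline = time.monotonic() + timeout_s
    while True:
        try:
            if check():
                return True
        except Exception as exc:  # a booting server may refuse connections; keep polling
            print('not ready yet:', exc)
        if time.monotonic() >= deadline:
            raise TimeoutError(f'condition not met within {timeout_s} seconds')
        time.sleep(poll_interval_s)

# Usage sketch: wait_until(lambda: try_connect(), timeout_s=300, poll_interval_s=5)
```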
{
"source": "jihyungSong/identity",
"score": 2
} |
#### File: identity/info/domain_info.py
```python
import functools
import json
from spaceone.api.core.v1 import handler_pb2
from spaceone.api.identity.v1 import domain_pb2
from spaceone.core.pygrpc.message_type import *
from spaceone.core import utils
from spaceone.identity.model.domain_model import Domain
__all__ = ['DomainInfo', 'DomainsInfo', 'DomainPublicKeyInfo']
def DomainInfo(domain_vo: Domain, minimal=False):
info = {
'domain_id': domain_vo.domain_id,
'name': domain_vo.name,
'state': domain_vo.state
}
if not minimal:
info.update({
'plugin_info': PluginInfo(domain_vo.plugin_info),
'config': change_struct_type(domain_vo.config),
'created_at': utils.datetime_to_iso8601(domain_vo.created_at),
'deleted_at': utils.datetime_to_iso8601(domain_vo.deleted_at),
'tags': change_struct_type(utils.tags_to_dict(domain_vo.tags))
})
return domain_pb2.DomainInfo(**info)
def DomainsInfo(domain_vos, total_count, **kwargs):
results = list(map(functools.partial(DomainInfo, **kwargs), domain_vos))
return domain_pb2.DomainsInfo(results=results, total_count=total_count)
def PluginInfo(plugin_info):
if plugin_info:
info = {
'plugin_id': plugin_info.plugin_id,
'version': plugin_info.version,
'options': change_struct_type(plugin_info.options),
'metadata': change_struct_type(plugin_info.metadata),
'secret_id': plugin_info.secret_id,
'upgrade_mode': plugin_info.upgrade_mode
}
return domain_pb2.PluginInfo(**info)
return None
def DomainPublicKeyInfo(public_key, domain_id):
info = {
'public_key': json.dumps(public_key).__str__(),
'domain_id': domain_id
}
return handler_pb2.AuthenticationResponse(**info)
```
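`DomainInfo` returns a compact message for list views and only fills in the heavier fields when `minimal=False`. A plain-Python sketch of that two-level serialization pattern follows; the field names are illustrative and no spaceone or protobuf types are involved.
```python
def serialize_item(item: dict, minimal: bool = False) -> dict:
    # Identifying fields are always present so list/table views stay cheap.
    info = {'id': item['id'], 'name': item['name'], 'state': item.get('state')}
    if not minimal:
        # Detail views add the optional, heavier fields.
        info.update({
            'config': item.get('config', {}),
            'created_at': item.get('created_at'),
            'tags': item.get('tags', {}),
        })
    return info

items = [{'id': 'domain-1', 'name': 'dev', 'state': 'ENABLED', 'tags': {'team': 'a'}}]
print([serialize_item(i, minimal=True) for i in items])  # compact rows
print(serialize_item(items[0]))                          # full detail
```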
#### File: identity/info/role_binding_info.py
```python
import functools
from spaceone.api.identity.v1 import role_binding_pb2
from spaceone.core.pygrpc.message_type import *
from spaceone.core import utils
from spaceone.identity.model.role_binding_model import RoleBinding
from spaceone.identity.info.role_info import RoleInfo
from spaceone.identity.info.project_info import ProjectInfo
from spaceone.identity.info.project_group_info import ProjectGroupInfo
__all__ = ['RoleBindingInfo', 'RoleBindingsInfo']
def RoleBindingInfo(role_binding_vo: RoleBinding, minimal=False):
info = {
'role_binding_id': role_binding_vo.role_binding_id,
'resource_type': role_binding_vo.resource_type,
'resource_id': role_binding_vo.resource_id,
'role_info': RoleInfo(role_binding_vo.role, minimal=True) if role_binding_vo.role else None
}
if not minimal:
info.update({
'project_info': ProjectInfo(role_binding_vo.project, minimal=True) if role_binding_vo.project else None,
'project_group_info': ProjectGroupInfo(role_binding_vo.project_group, minimal=True) if role_binding_vo.project_group else None,
'labels': change_list_value_type(role_binding_vo.labels),
'tags': change_struct_type(utils.tags_to_dict(role_binding_vo.tags)),
'domain_id': role_binding_vo.domain_id,
'created_at': utils.datetime_to_iso8601(role_binding_vo.created_at)
})
if not role_binding_vo.project_id and role_binding_vo.project:
role_binding_vo.update({'project_id': role_binding_vo.project.project_id})
if not role_binding_vo.project_group_id and role_binding_vo.project_group:
role_binding_vo.update({'project_group_id': role_binding_vo.project_group.project_group_id})
return role_binding_pb2.RoleBindingInfo(**info)
def RoleBindingsInfo(role_binding_vos, total_count, **kwargs):
results = list(map(functools.partial(RoleBindingInfo, **kwargs), role_binding_vos))
return role_binding_pb2.RoleBindingsInfo(results=results, total_count=total_count)
```
#### File: identity/manager/service_account_manager.py
```python
import logging
from spaceone.core.error import *
from spaceone.core.manager import BaseManager
from spaceone.core.connector.space_connector import SpaceConnector
from spaceone.identity.model.service_account_model import ServiceAccount
_LOGGER = logging.getLogger(__name__)
class ServiceAccountManager(BaseManager):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.service_account_model: ServiceAccount = self.locator.get_model('ServiceAccount')
def create_service_account(self, params):
def _rollback(service_account_vo):
_LOGGER.info(f'[create_service_account._rollback] '
f'Create service_account : {service_account_vo.name} '
f'({service_account_vo.service_account_id})')
service_account_vo.delete()
service_account_vo: ServiceAccount = self.service_account_model.create(params)
self.transaction.add_rollback(_rollback, service_account_vo)
return service_account_vo
def update_service_account(self, params):
service_account_vo: ServiceAccount = self.get_service_account(params['service_account_id'],
params['domain_id'])
return self.update_service_account_by_vo(params, service_account_vo)
def update_service_account_by_vo(self, params, service_account_vo):
def _rollback(old_data):
_LOGGER.info(f'[update_service_account._rollback] Revert Data : '
f'{old_data["service_account_id"]}')
service_account_vo.update(old_data)
self.transaction.add_rollback(_rollback, service_account_vo.to_dict())
return service_account_vo.update(params)
def delete_service_account(self, service_account_id, domain_id):
service_account_vo: ServiceAccount = self.get_service_account(service_account_id, domain_id)
service_account_vo.delete()
def get_service_account(self, service_account_id, domain_id, only=None):
return self.service_account_model.get(service_account_id=service_account_id, domain_id=domain_id, only=only)
def list_service_accounts(self, query={}):
return self.service_account_model.query(**query)
def stat_service_accounts(self, query):
return self.service_account_model.stat(**query)
def update_secret_project(self, service_account_id, project_id, domain_id):
secret_connector: SpaceConnector = self.locator.get_connector('SpaceConnector', service='secret')
response = self._list_secrets(secret_connector, service_account_id, domain_id)
secrets = response.get('results', [])
for secret_info in secrets:
secret_connector.dispatch('Secret.update', {
'secret_id': secret_info['secret_id'],
'project_id': project_id,
'domain_id': domain_id
})
def release_secret_project(self, service_account_id, domain_id):
secret_connector: SpaceConnector = self.locator.get_connector('SpaceConnector', service='secret')
response = self._list_secrets(secret_connector, service_account_id, domain_id)
secrets = response.get('results', [])
for secret_info in secrets:
secret_connector.dispatch('Secret.update', {
'secret_id': secret_info['secret_id'],
'release_project': True,
'domain_id': domain_id
})
def delete_service_account_secrets(self, service_account_id, domain_id):
secret_connector: SpaceConnector = self.locator.get_connector('SpaceConnector', service='secret')
response = self._list_secrets(secret_connector, service_account_id, domain_id)
for secret_info in response.get('results', []):
secret_connector.dispatch('Secret.delete', {
'secret_id': secret_info['secret_id'],
'domain_id': domain_id
})
def check_service_account_secrets(self, service_account_id, domain_id):
secret_connector: SpaceConnector = self.locator.get_connector('SpaceConnector', service='secret')
response = self._list_secrets(secret_connector, service_account_id, domain_id)
total_count = response.get('total_count', 0)
if total_count > 0:
raise ERROR_EXIST_RESOURCE(parent='ServiceAccount', child='Secret')
@staticmethod
def _list_secrets(secret_connector, service_account_id, domain_id):
return secret_connector.dispatch('Secret.list', {
'service_account_id': service_account_id,
'domain_id': domain_id
})
```
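`create_service_account` registers a compensating delete with the transaction so a failure later in the request can undo the write. The class below is a minimal stand-alone sketch of that rollback-stack idea, not the spaceone implementation; all names are illustrative.
```python
class SimpleTransaction:
    """Collect compensating actions and replay them in reverse order on failure."""
    def __init__(self):
        self._rollbacks = []

    def add_rollback(self, fn, *args, **kwargs):
        self._rollbacks.append((fn, args, kwargs))

    def rollback(self):
        while self._rollbacks:
            fn, args, kwargs = self._rollbacks.pop()  # LIFO: undo the newest work first
            fn(*args, **kwargs)

tx = SimpleTransaction()
created = {'service_account_id': 'sa-123'}
tx.add_rollback(lambda vo: print('deleting', vo['service_account_id']), created)
try:
    raise RuntimeError('a later step failed')
except RuntimeError:
    tx.rollback()  # prints: deleting sa-123
```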
#### File: identity/service/api_key_service.py
```python
from spaceone.core.service import *
from spaceone.identity.manager import APIKeyManager, UserManager
@authentication_handler(exclude=['get'])
@authorization_handler(exclude=['get'])
@mutation_handler
@event_handler
class APIKeyService(BaseService):
def __init__(self, metadata):
super().__init__(metadata)
self.api_key_mgr: APIKeyManager = self.locator.get_manager('APIKeyManager')
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['user_id', 'domain_id'])
def create(self, params):
""" Create api key
Args:
params (dict): {
'user_id': 'str',
'domain_id': 'str'
}
Returns:
api_key_vo (object)
"""
user_id = params['user_id']
domain_id = params['domain_id']
# Check that the user exists.
user_mgr: UserManager = self.locator.get_manager('UserManager')
user_vo = user_mgr.get_user(user_id=user_id, domain_id=domain_id)
return self.api_key_mgr.create_api_key(user_vo, domain_id)
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['api_key_id', 'domain_id'])
def enable(self, params):
""" Enable api key
Args:
params (dict): {
'api_key_id': 'str',
'domain_id': 'str'
}
Returns:
api_key_vo (object)
"""
return self.api_key_mgr.enable_api_key(params['api_key_id'], params['domain_id'])
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['api_key_id', 'domain_id'])
def disable(self, params):
""" Disable api key
Args:
params (dict): {
'api_key_id': 'str',
'domain_id': 'str'
}
Returns:
api_key_vo (object)
"""
return self.api_key_mgr.disable_api_key(params['api_key_id'], params['domain_id'])
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['api_key_id', 'domain_id'])
def delete(self, params):
""" Delete api key
Args:
params (dict): {
'api_key_id': 'str',
'domain_id': 'str'
}
Returns:
None
"""
self.api_key_mgr.delete_api_key(params['api_key_id'], params['domain_id'])
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['api_key_id', 'domain_id'])
def get(self, params):
""" Get api key
Args:
params (dict): {
'api_key_id': 'str',
'domain_id': 'str',
'only': 'list'
}
Returns:
api_key_vo (object)
"""
return self.api_key_mgr.get_api_key(params['api_key_id'], params['domain_id'], params.get('only'))
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['domain_id'])
@append_query_filter(['api_key_id', 'state', 'user_id', 'domain_id'])
@append_keyword_filter(['api_key_id', 'user_id'])
def list(self, params):
""" List api keys
Args:
params (dict): {
'api_key_id': 'str',
'state': 'str',
'user_id': 'str',
'domain_id': 'str',
'query': 'dict (spaceone.api.core.v1.Query)'
}
Returns:
results (list): 'list of api_key_vo'
total_count (int)
"""
return self.api_key_mgr.list_api_keys(params.get('query', {}))
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['query', 'domain_id'])
@append_query_filter(['domain_id'])
@append_keyword_filter(['api_key_id', 'user_id'])
def stat(self, params):
"""
Args:
params (dict): {
'domain_id': 'str',
'query': 'dict (spaceone.api.core.v1.StatisticsQuery)'
}
Returns:
values (list): 'list of statistics data'
total_count (int)
"""
query = params.get('query', {})
return self.api_key_mgr.stat_api_keys(query)
```
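Every handler above is guarded by decorators such as `@check_required`, which validate `params` before the method body runs. A small self-contained sketch of that decorator pattern follows; it mimics the behaviour, not the spaceone implementation.
```python
import functools

def check_required(keys):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(self, params, *args, **kwargs):
            missing = [k for k in keys if k not in params]
            if missing:
                raise ValueError(f'missing required parameter(s): {missing}')
            return func(self, params, *args, **kwargs)
        return wrapper
    return decorator

class DemoService:
    @check_required(['api_key_id', 'domain_id'])
    def get(self, params):
        return f"api key {params['api_key_id']} in {params['domain_id']}"

print(DemoService().get({'api_key_id': 'ak-1', 'domain_id': 'dom-1'}))
```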
#### File: identity/service/provider_service.py
```python
from spaceone.core import cache
from spaceone.core.service import *
from spaceone.core import utils
from spaceone.identity.manager.provider_manager import ProviderManager
@authentication_handler
@authorization_handler
@mutation_handler
@event_handler
class ProviderService(BaseService):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.provider_mgr: ProviderManager = self.locator.get_manager('ProviderManager')
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['provider', 'name', 'domain_id'])
def create(self, params):
"""
Args:
params (dict): {
'provider': 'str',
'name': 'str',
'template': 'dict',
'metadata': 'dict',
'capability': 'dict',
'tags': 'dict',
'domain_id': 'str'
}
Returns:
provider_vo (object)
"""
# TODO: validate a template data
# TODO: validate a capability data
if 'tags' in params:
params['tags'] = utils.dict_to_tags(params['tags'])
return self.provider_mgr.create_provider(params)
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['provider', 'domain_id'])
def update(self, params):
"""
Args:
params (dict): {
'provider': 'str',
'name': 'str',
'template': 'dict',
'metadata': 'dict',
'capability': 'dict',
'tags': 'list',
'domain_id': 'str'
}
Returns:
provider_vo (object)
"""
# TODO: validate a template data
# TODO: validate a capability data
if 'tags' in params:
params['tags'] = utils.dict_to_tags(params['tags'])
return self.provider_mgr.update_provider(params)
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['provider', 'domain_id'])
def delete(self, params):
"""
Args:
params (dict): {
'provider': 'str',
'domain_id': 'str'
}
Returns:
None
"""
self.provider_mgr.delete_provider(params['provider'])
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['provider', 'domain_id'])
def get(self, params):
"""
Args:
params (dict): {
'provider': 'str',
'only': 'list',
'domain_id': 'str'
}
Returns:
provider_vo (object)
"""
self._create_default_provider()
return self.provider_mgr.get_provider(params['provider'], params.get('only'))
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['domain_id'])
@append_query_filter(['provider', 'name'])
@change_tag_filter('tags')
@append_keyword_filter(['provider', 'name'])
def list(self, params):
"""
Args:
params (dict): {
'query': 'dict (spaceone.api.core.v1.Query)',
'provider': 'str',
'name': 'str',
'domain_id': 'str'
}
Returns:
results (list): 'list of provider_vo'
total_count (int)
"""
self._create_default_provider()
return self.provider_mgr.list_providers(params.get('query', {}))
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['query', 'domain_id'])
@change_tag_filter('tags')
@append_keyword_filter(['provider', 'name'])
def stat(self, params):
"""
Args:
params (dict): {
'query': 'dict (spaceone.api.core.v1.StatisticsQuery)',
'domain_id': 'str'
}
Returns:
values (list): 'list of statistics data'
total_count (int)
"""
query = params.get('query', {})
return self.provider_mgr.stat_providers(query)
@cache.cacheable(key='provider:default:init', expire=300)
def _create_default_provider(self):
provider_vos, total_count = self.provider_mgr.list_providers()
installed_providers = [provider_vo.provider for provider_vo in provider_vos]
self.provider_mgr.create_default_providers(installed_providers)
return True
``` |
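`_create_default_provider` runs on every `get`/`list` call but is throttled by `@cache.cacheable`, so the "create the defaults if they are missing" check only does real work once per expiry window. A rough in-process sketch of that TTL-guarded initializer follows; it is illustrative and much simpler than the shared cache spaceone uses.
```python
import time

def ttl_once(expire_s):
    """Run the wrapped function at most once per expire_s seconds per argument tuple."""
    def decorator(func):
        last_run = {}
        def wrapper(*args):
            now = time.monotonic()
            if now - last_run.get(args, float('-inf')) >= expire_s:
                last_run[args] = now
                return func(*args)
            return None  # skipped: still inside the TTL window
        return wrapper
    return decorator

@ttl_once(expire_s=300)
def ensure_default_providers(domain_id):
    print('ensuring default providers exist for', domain_id)
    return True

ensure_default_providers('dom-1')  # runs
ensure_default_providers('dom-1')  # skipped for the next five minutes
```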
{
"source": "jihyungSong/notification",
"score": 2
} |
#### File: notification/service/project_channel_service.py
```python
from jsonschema import validate
from spaceone.core import utils
from spaceone.core.service import *
from spaceone.notification.error import *
from spaceone.notification.lib.schedule import *
from spaceone.notification.lib.schema import *
from spaceone.notification.manager import IdentityManager
from spaceone.notification.manager import ProtocolManager
from spaceone.notification.manager import ProjectChannelManager
from spaceone.notification.manager import SecretManager
from spaceone.notification.model import ProjectChannel
@authentication_handler
@authorization_handler
@mutation_handler
@event_handler
class ProjectChannelService(BaseService):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.project_channel_mgr: ProjectChannelManager = self.locator.get_manager('ProjectChannelManager')
self.identity_mgr: IdentityManager = self.locator.get_manager('IdentityManager')
self.protocol_mgr: ProtocolManager = self.locator.get_manager('ProtocolManager')
self.secret_mgr: SecretManager = self.locator.get_manager('SecretManager')
@transaction(append_meta={'authorization.scope': 'PROJECT'})
@check_required(['protocol_id', 'name', 'data', 'project_id', 'domain_id'])
def create(self, params):
""" Create Project Channel
Args:
params (dict): {
'protocol_id': 'str',
'name': 'str',
'data': 'dict',
'is_subscribe': 'bool',
'subscriptions': 'list',
'notification_level': 'str',
'is_scheduled': 'bool',
'schedule': 'dict',
'project_id': 'str',
'tags': 'dict',
'domain_id': 'str'
}
Returns:
project_channel_vo (object)
"""
protocol_id = params['protocol_id']
domain_id = params['domain_id']
data = params['data']
project_id = params['project_id']
is_subscribe = params.get('is_subscribe', False)
is_scheduled = params.get('is_scheduled', False)
if not is_subscribe:
params['subscriptions'] = []
if is_scheduled:
validate_schedule(params.get('schedule', {}))
else:
params['schedule'] = None
self.identity_mgr.get_resource(project_id, 'identity.Project', domain_id)
protocol_vo = self.protocol_mgr.get_protocol(protocol_id, domain_id)
if protocol_vo.state == 'DISABLED':
raise ERROR_PROTOCOL_DISABLED()
metadata = protocol_vo.plugin_info.metadata
schema = metadata.get('data', {}).get('schema')
if schema:
validate_json_schema(data, schema)
if metadata['data_type'] == 'SECRET':
new_secret_parameters = {
'name': utils.generate_id('project-ch', 4),
'secret_type': 'CREDENTIALS',
'data': data,
'project_id': project_id,
'domain_id': domain_id
}
project_channel_secret = self.secret_mgr.create_secret(new_secret_parameters)
params.update({
'secret_id': project_channel_secret['secret_id'],
'data': {}
})
# Create Project Channel
return self.project_channel_mgr.create_project_channel(params)
@transaction(append_meta={'authorization.scope': 'PROJECT'})
@check_required(['project_channel_id', 'domain_id'])
def update(self, params):
""" Update project channel
Args:
params (dict): {
'project_channel_id': 'str',
'name': 'str',
'data': 'dict',
'notification_level': 'str',
'tags': 'dict',
'domain_id': 'str'
}
Returns:
project_channel_vo (object)
"""
project_channel_id = params['project_channel_id']
domain_id = params['domain_id']
project_channel_vo: ProjectChannel = self.project_channel_mgr.get_project_channel(project_channel_id, domain_id)
if 'data' in params:
protocol_vo = self.protocol_mgr.get_protocol(project_channel_vo.protocol_id, domain_id)
metadata = protocol_vo.plugin_info.metadata
schema = metadata.get('data', {}).get('schema')
if schema:
validate_json_schema(params['data'], schema)
if project_channel_vo.secret_id:
secret_params = {
'secret_id': project_channel_vo.secret_id,
'data': params['data'],
'domain_id': domain_id
}
self.secret_mgr.update_secret_data(secret_params)
params['data'] = {}
return self.project_channel_mgr.update_project_channel_by_vo(params, project_channel_vo)
@transaction(append_meta={'authorization.scope': 'PROJECT'})
@check_required(['project_channel_id', 'domain_id'])
def set_schedule(self, params):
""" Set schedule for Project Channel
Args:
params (dict): {
'project_channel_id': 'str',
'is_scheduled': bool,
'schedule': dict,
'domain_id': 'str'
}
Returns:
project_channel_vo (object)
"""
project_channel_vo = self.project_channel_mgr.get_project_channel(params['project_channel_id'],
params['domain_id'])
is_scheduled = params.get('is_scheduled', False)
if is_scheduled:
validate_schedule(params.get('schedule', {}))
else:
params.update({
'is_scheduled': False,
'schedule': None
})
return self.project_channel_mgr.update_project_channel_by_vo(params, project_channel_vo)
@transaction(append_meta={'authorization.scope': 'PROJECT'})
@check_required(['project_channel_id', 'domain_id'])
def set_subscription(self, params):
""" Set subscriptions for Project Channel
Args:
params (dict): {
'project_channel_id': 'str',
'is_subscribe': bool,
'subscriptions': list,
'domain_id': 'str'
}
Returns:
project_channel_vo (object)
"""
if not params.get('is_subscribe', False):
params.update({
'is_subscribe': False,
'subscriptions': []
})
return self.project_channel_mgr.update_project_channel(params)
@transaction(append_meta={'authorization.scope': 'PROJECT'})
@check_required(['project_channel_id', 'domain_id'])
def delete(self, params):
""" Delete project channel
Args:
params (dict): {
'project_channel_id': 'str',
'domain_id': 'str'
}
Returns:
None
"""
project_channel_id = params['project_channel_id']
domain_id = params['domain_id']
project_channel_vo = self.project_channel_mgr.get_project_channel(project_channel_id, domain_id)
if secret_id := project_channel_vo.secret_id:
self.secret_mgr.delete_secret({'secret_id': secret_id, 'domain_id': domain_id})
self.project_channel_mgr.delete_project_channel_by_vo(project_channel_vo)
@transaction(append_meta={'authorization.scope': 'PROJECT'})
@check_required(['project_channel_id', 'domain_id'])
def enable(self, params):
""" Enable project channel
Args:
params (dict): {
'project_channel_id': 'str',
'domain_id': 'str'
}
Returns:
project_channel_vo (object)
"""
return self.project_channel_mgr.enable_project_channel(params['project_channel_id'], params['domain_id'])
@transaction(append_meta={'authorization.scope': 'PROJECT'})
@check_required(['project_channel_id', 'domain_id'])
def disable(self, params):
""" Disable project channel
Args:
params (dict): {
'project_channel_id': 'str',
'domain_id': 'str'
}
Returns:
project_channel_vo (object)
"""
return self.project_channel_mgr.disable_project_channel(params['project_channel_id'], params['domain_id'])
@transaction(append_meta={'authorization.scope': 'PROJECT'})
@check_required(['project_channel_id', 'domain_id'])
def get(self, params):
""" Get Project Channel
Args:
params (dict): {
'project_channel_id': 'str',
'domain_id': 'str',
'only': 'list'
}
Returns:
project_channel_vo (object)
"""
return self.project_channel_mgr.get_project_channel(params['project_channel_id'],
params['domain_id'],
params.get('only'))
@transaction(append_meta={
'authorization.scope': 'PROJECT',
'mutation.append_parameter': {'user_projects': 'authorization.projects'}
})
@check_required(['domain_id'])
@append_query_filter(['project_channel_id', 'name', 'state', 'secret_id', 'is_subscribe', 'is_scheduled',
'notification_level', 'protocol_id', 'project_id', 'user_projects', 'domain_id'])
@change_tag_filter('tags')
@append_keyword_filter(['project_channel_id'])
def list(self, params):
""" List Project Channels
Args:
params (dict): {
'project_channel_id': 'str',
'name': 'str',
'state': 'str',
'secret_id': 'str',
'is_subscribe': 'bool',
'is_scheduled': 'bool',
'notification_level': 'str',
'protocol_id': 'str',
'project_id': 'str',
'query': 'dict (spaceone.api.core.v1.Query)',
'domain_id': 'str'
}
Returns:
results (list): 'list of project_channel_vo'
total_count (int)
"""
query = params.get('query', {})
return self.project_channel_mgr.list_project_channels(query)
@transaction(append_meta={
'authorization.scope': 'PROJECT',
'mutation.append_parameter': {'user_projects': 'authorization.projects'}
})
@check_required(['query', 'domain_id'])
@append_query_filter(['domain_id', 'user_projects'])
@change_tag_filter('tags')
@append_keyword_filter(['project_channel_id', 'name'])
def stat(self, params):
"""
Args:
params (dict): {
'query': 'dict (spaceone.api.core.v1.StatisticsQuery)'
}
Returns:
values (list): 'list of statistics data'
total_count (int)
"""
query = params.get('query', {})
return self.project_channel_mgr.stat_project_channels(query)
```
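When the protocol metadata marks channel data as `SECRET`, `create` stores the payload through the secret service and keeps only the returned `secret_id`, blanking `data` before the channel record is written. The sketch below mirrors that hand-off; `create_secret` here is a stand-in stub, not the real SecretManager call.
```python
import uuid

def create_secret(secret_params):
    # Stand-in for the external secret service; returns only an identifier.
    return {'secret_id': 'secret-' + uuid.uuid4().hex[:8]}

def prepare_channel_params(params, data_type):
    if data_type == 'SECRET':
        secret = create_secret({
            'secret_type': 'CREDENTIALS',
            'data': params['data'],
            'domain_id': params['domain_id'],
        })
        # Keep only the reference; the sensitive payload never lands in the channel record.
        params.update({'secret_id': secret['secret_id'], 'data': {}})
    return params

print(prepare_channel_params(
    {'name': 'slack-ch', 'data': {'token': 'xoxb-example'}, 'domain_id': 'dom-1'},
    data_type='SECRET',
))
```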
#### File: notification/service/protocol_service.py
```python
import logging
from spaceone.core import cache
from spaceone.core import utils
from spaceone.core import config
from spaceone.core.service import *
from spaceone.notification.error import *
from spaceone.notification.manager import RepositoryManager
from spaceone.notification.manager import ProtocolManager
from spaceone.notification.manager import PluginManager
from spaceone.notification.manager import ProjectChannelManager
from spaceone.notification.manager import UserChannelManager
from spaceone.notification.manager import SecretManager
from spaceone.notification.model import Protocol
from spaceone.notification.conf.protocol_conf import *
_LOGGER = logging.getLogger(__name__)
@authentication_handler
@authorization_handler
@mutation_handler
@event_handler
class ProtocolService(BaseService):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.protocol_mgr: ProtocolManager = self.locator.get_manager('ProtocolManager')
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['name', 'plugin_info', 'domain_id'])
def create(self, params):
""" Create Protocol
Args:
params (dict): {
'name': 'str',
'plugin_info': 'dict',
'tags': 'dict',
'domain_id': 'str',
'upgrade_mode': 'str'
}
Returns:
protocol_vo (object)
"""
domain_id = params['domain_id']
plugin_info = params['plugin_info']
self._check_plugin_info(plugin_info)
_plugin = self._get_plugin(plugin_info, domain_id)
plugin_capability = _plugin.get('capability', {})
if 'supported_schema' in plugin_capability:
params['capability'] = {'supported_schema': plugin_capability['supported_schema']}
else:
raise ERROR_WRONG_PLUGIN_SETTINGS(key='capability.supported_schema')
_LOGGER.debug(f'[create] capability: {params["capability"]}')
_LOGGER.debug(f'[create] name: {params["name"]}')
plugin_metadata, endpoint_info = self._init_plugin(plugin_info, domain_id)
request_plugin = {
'plugin_id': plugin_info['plugin_id'],
'options': plugin_info.get('options', {}),
'metadata': plugin_metadata
}
if version := endpoint_info.get('updated_version', plugin_info.get('version')):
request_plugin.update({
'version': version
})
if 'secret_data' in plugin_info:
secret_mgr: SecretManager = self.locator.get_manager('SecretManager')
secret_params = {
'name': utils.generate_id('secret-noti-proto', 4),
'secret_type': 'CREDENTIALS',
'data': plugin_info['secret_data'],
'schema': plugin_info['schema'],
'domain_id': domain_id
}
protocol_secret = secret_mgr.create_secret(secret_params)
request_plugin.update({
'secret_id': protocol_secret['secret_id'],
'schema': plugin_info['schema']
})
params['plugin_info'] = request_plugin
protocol_vo: Protocol = self.protocol_mgr.create_protocol(params)
return protocol_vo
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['protocol_id', 'domain_id'])
def update(self, params):
""" Update protocol
Args:
params (dict): {
'protocol_id': 'str',
'name': 'str',
'tags': 'dict',
'domain_id': 'str'
}
Returns:
protocol_vo (object)
"""
domain_id = params['domain_id']
protocol_id = params['protocol_id']
protocol_vo = self.protocol_mgr.get_protocol(protocol_id, domain_id)
if protocol_vo.protocol_type == 'INTERNAL':
raise ERROR_NOT_ALLOWED_UPDATE_PROTOCOL_TYPE(protocol_id=protocol_id)
return self.protocol_mgr.update_protocol_by_vo(params, protocol_vo)
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['protocol_id', 'domain_id'])
def update_plugin(self, params):
""" Update protocol plugin
Args:
params (dict): {
'protocol_id': 'str',
'version': 'str',
'options': 'dict',
'domain_id': 'str'
}
Returns:
protocol_vo (object)
"""
protocol_id = params['protocol_id']
domain_id = params['domain_id']
protocol_vo = self.protocol_mgr.get_protocol(protocol_id, domain_id)
plugin_info = protocol_vo.plugin_info.to_dict()
if plugin_info['upgrade_mode'] == 'AUTO':
plugin_metadata, endpoint_info = self._init_plugin(plugin_info, domain_id)
plugin_info['metadata'] = plugin_metadata
if version := endpoint_info.get('updated_version'):
plugin_info['version'] = version
else:
if version := params.get('version'):
# Update plugin_version
plugin_id = plugin_info['plugin_id']
repo_mgr = self.locator.get_manager('RepositoryManager')
repo_mgr.check_plugin_version(plugin_id, version, domain_id)
plugin_info['version'] = version
plugin_info['metadata'], _endpoint_info = self._init_plugin(plugin_info, domain_id)  # _init_plugin returns (metadata, endpoint_info)
if options := params.get('options', {}):
# Overwrite
plugin_info['options'] = options
params = {
'protocol_id': protocol_id,
'domain_id': domain_id,
'plugin_info': plugin_info
}
_LOGGER.debug(f'[update_plugin] {plugin_info}')
return self.protocol_mgr.update_protocol_by_vo(params, protocol_vo)
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['protocol_id', 'domain_id'])
def delete(self, params):
""" Delete protocol
Args:
params (dict): {
'protocol_id': 'str',
'domain_id': 'str'
}
Returns:
None
"""
protocol_id = params['protocol_id']
domain_id = params['domain_id']
protocol_vo: Protocol = self.protocol_mgr.get_protocol(protocol_id, domain_id)
self.check_existed_channel_using_protocol(protocol_vo)
if secret_id := protocol_vo.plugin_info.secret_id:
secret_mgr: SecretManager = self.locator.get_manager('SecretManager')
secret_mgr.delete_secret({'secret_id': secret_id, 'domain_id': domain_id})
return self.protocol_mgr.delete_protocol_by_vo(protocol_vo)
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['protocol_id', 'domain_id'])
def enable(self, params):
""" Enable protocol
Args:
params (dict): {
'protocol_id': 'str',
'domain_id': 'str'
}
Returns:
protocol_vo (object)
"""
return self.protocol_mgr.enable_protocol(params['protocol_id'], params['domain_id'])
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['protocol_id', 'domain_id'])
def disable(self, params):
""" Disable protocol
Args:
params (dict): {
'protocol_id': 'str',
'domain_id': 'str'
}
Returns:
protocol_vo (object)
"""
return self.protocol_mgr.disable_protocol(params['protocol_id'], params['domain_id'])
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['protocol_id', 'domain_id'])
def get(self, params):
""" Disable domain
Args:
params (dict): {
'domain_id': 'str',
'only': 'list'
}
Returns:
domain_vo (object)
"""
protocol_id = params['protocol_id']
domain_id = params['domain_id']
# Create default protocols if they do not exist yet
self._create_default_protocol(domain_id)
self._initialize_protocols(domain_id)
return self.protocol_mgr.get_protocol(protocol_id, domain_id, params.get('only'))
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['domain_id'])
@append_query_filter(['protocol_id', 'name', 'state', 'protocol_type', 'domain_id'])
@change_tag_filter('tags')
@append_keyword_filter(['protocol_id'])
def list(self, params):
""" List protocol
Args:
params (dict): {
'protocol_id': 'str',
'name': 'str',
'state': 'str',
'protocol_type': 'str',
'query': 'dict (spaceone.api.core.v1.Query)',
'domain_id': 'str'
}
Returns:
results (list): 'list of protocol_vo'
total_count (int)
"""
domain_id = params['domain_id']
query = params.get('query', {})
# Create default protocols if they do not exist yet
self._create_default_protocol(domain_id)
self._initialize_protocols(domain_id)
return self.protocol_mgr.list_protocols(query)
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['query', 'domain_id'])
@append_query_filter(['domain_id'])
@change_tag_filter('tags')
@append_keyword_filter(['protocol_id', 'name'])
def stat(self, params):
"""
Args:
params (dict): {
'query': 'dict (spaceone.api.core.v1.StatisticsQuery)'
}
Returns:
values (list): 'list of statistics data'
total_count (int)
"""
query = params.get('query', {})
return self.protocol_mgr.stat_protocols(query)
def _get_plugin(self, plugin_info, domain_id):
plugin_id = plugin_info['plugin_id']
repo_mgr: RepositoryManager = self.locator.get_manager('RepositoryManager')
plugin_info = repo_mgr.get_plugin(plugin_id, domain_id)
if version := plugin_info.get('version'):
repo_mgr.check_plugin_version(plugin_id, version, domain_id)
return plugin_info
def _init_plugin(self, plugin_info, domain_id):
options = plugin_info.get('options', {})
plugin_mgr: PluginManager = self.locator.get_manager('PluginManager')
endpoint_info = plugin_mgr.initialize(plugin_info, domain_id)
metadata = plugin_mgr.init_plugin(options)
return metadata, endpoint_info
def check_existed_channel_using_protocol(self, protocol_vo):
project_channel_mgr: ProjectChannelManager = self.locator.get_manager('ProjectChannelManager')
user_channel_mgr: UserChannelManager = self.locator.get_manager('UserChannelManager')
query = {'filter': [{'k': 'protocol_id', 'v': protocol_vo.protocol_id, 'o': 'eq'}]}
project_channel_vos, prj_ch_total_count = project_channel_mgr.list_project_channels(query)
user_channel_vos, user_ch_total_count = user_channel_mgr.list_user_channels(query)
if prj_ch_total_count > 0 or user_ch_total_count > 0:
raise ERROR_DELETE_PROJECT_EXITED_CHANNEL(protocol_id=protocol_vo.protocol_id)
@staticmethod
def _check_plugin_info(plugin_info_params):
if 'plugin_id' not in plugin_info_params:
raise ERROR_REQUIRED_PARAMETER(key='plugin_info.plugin_id')
if 'secret_data' in plugin_info_params and 'schema' not in plugin_info_params:
raise ERROR_REQUIRED_PARAMETER(key='plugin_info.schema')
if 'upgrade_mode' in plugin_info_params and plugin_info_params['upgrade_mode'] == 'MANUAL':
if 'version' not in plugin_info_params:
raise ERROR_REQUIRED_PARAMETER(key='plugin_info.version')
@cache.cacheable(key='default-protocol:{domain_id}', expire=300)
def _create_default_protocol(self, domain_id):
_LOGGER.debug(f'[_create_default_protocol] domain_id: {domain_id}')
query = {'filter': [{'k': 'domain_id', 'v': domain_id, 'o': 'eq'}]}
protocol_vos, total_count = self.protocol_mgr.list_protocols(query)
installed_protocol_names = [protocol_vo.name for protocol_vo in protocol_vos]
_LOGGER.debug(f'[_create_default_protocol] Installed Plugins : {installed_protocol_names}')
for default_protocol in DEFAULT_INTERNAL_PROTOCOLS:
if default_protocol['name'] not in installed_protocol_names:
_LOGGER.debug(f'Create default protocol: {default_protocol["name"]}')
default_protocol['domain_id'] = domain_id
self.protocol_mgr.create_protocol(default_protocol)
return True
@cache.cacheable(key='init-protocol:{domain_id}', expire=300)
def _initialize_protocols(self, domain_id):
_LOGGER.debug(f'[_initialize_protocol] domain_id: {domain_id}')
query = {'filter': [{'k': 'domain_id', 'v': domain_id, 'o': 'eq'}]}
protocol_vos, total_count = self.protocol_mgr.list_protocols(query)
installed_protocol_ids = [protocol_vo.plugin_info.plugin_id for protocol_vo in protocol_vos]
_LOGGER.debug(f'[_initialize_protocol] Installed Plugins : {installed_protocol_ids}')
global_conf = config.get_global()
for _protocol in global_conf.get('INSTALLED_PROTOCOL_PLUGINS', []):
if _protocol['plugin_info']['plugin_id'] not in installed_protocol_ids:
try:
_LOGGER.debug(f'[_initialize_protocol] Create init protocol: {_protocol["plugin_info"]["plugin_id"]}')
_protocol['domain_id'] = domain_id
self.create(_protocol)
except Exception as e:
_LOGGER.error(f'[_initialize_protocol] {e}')
return True
``` |
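`update_plugin` lets the plugin endpoint report a new version when `upgrade_mode` is `AUTO`, while `MANUAL` only moves when the caller passes one explicitly. The function below condenses that decision; it is a sketch of the branching logic only, with the repository and endpoint calls left out.
```python
def resolve_plugin_version(plugin_info, requested_version=None, updated_version=None):
    """Pick the version a protocol should run, mirroring the AUTO/MANUAL split above."""
    if plugin_info.get('upgrade_mode') == 'AUTO':
        # AUTO: trust whatever the plugin endpoint reports, else keep the current pin.
        return updated_version or plugin_info.get('version')
    # MANUAL: only change versions when one is explicitly requested.
    return requested_version or plugin_info.get('version')

print(resolve_plugin_version({'upgrade_mode': 'AUTO', 'version': '1.0'}, updated_version='1.2'))      # 1.2
print(resolve_plugin_version({'upgrade_mode': 'MANUAL', 'version': '1.0'}))                           # 1.0
print(resolve_plugin_version({'upgrade_mode': 'MANUAL', 'version': '1.0'}, requested_version='1.1'))  # 1.1
```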
{
"source": "jihyungSong/plugin-alibaba-cloud-ecs",
"score": 2
} |
#### File: manager/ecs/disk_manager.py
```python
from spaceone.core.manager import BaseManager
from spaceone.inventory.model.disk import Disk
class DiskManager(BaseManager):
def __init__(self, params, ecs_connector=None):
self.params = params
self.ecs_connector = ecs_connector
def get_disk_info(self, instance_id, disks):
"""
disk_data = {
"device_index": 0,
"device": "",
"disk_type": "all" || "system" || "data",
"size": 100,
"tags": {
"disk_id": "",
"disk_name": "",
"encrypted": true | false,
"iops_read": 0,
"iops_write": 0,
"performance_level": "",
"disk_charge_type": "PrePaid" || "PostPaid"
}
}
"""
if disks is None:
return None
disks_data_list = []
index = 0
matched_disks = self.get_matched_disks(instance_id, disks)
for matched_disk in matched_disks:
disk_data = {
"device": matched_disk.get("Device"),
"device_index": index,
"device_type": matched_disk.get("Type"),
"size": matched_disk.get("Size"),
"tags": {
"disk_id": matched_disk.get("DiskId"),
"disk_name": matched_disk.get("DiskName"),
"encrypted": matched_disk.get("Encrypted"),
"performance_level": matched_disk.get("PerformanceLevel"),
"disk_charge_type": matched_disk.get("DiskChargeType"),
"serial_number": matched_disk.get("SerialNumber", None),
},
}
if "Iops" in matched_disk:
disk_data["tags"].update(
{
"iops_read": matched_disk.get("IOPSRead"),
"iops_write": matched_disk.get("IOPSWrite"),
}
)
disks_data_list.append(Disk(disk_data, strict=False))
index += 1
return disks_data_list
@staticmethod
def get_matched_disks(instance_id, disks):
matched_disks = []
for disk in disks:
if instance_id == disk.get("InstanceId"):
matched_disks.append(disk)
return matched_disks
@staticmethod
def get_volumes_from_ids(volume_ids, volumes):
return [volume for volume in volumes if volume["VolumeId"] in volume_ids]
@staticmethod
def get_device(volume):
attachments = volume.get("Attachments", [])
for attachment in attachments:
return attachment.get("Device")
return ""
```
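`get_disk_info` filters a pre-fetched list of every disk by `InstanceId` instead of calling the API per instance. When many instances are processed, grouping the disks once is a cheap alternative to rescanning the list each time; the records below are made up for illustration.
```python
from collections import defaultdict

disks = [
    {'DiskId': 'd-1', 'InstanceId': 'i-aaa', 'Device': '/dev/xvda', 'Size': 40},
    {'DiskId': 'd-2', 'InstanceId': 'i-bbb', 'Device': '/dev/xvda', 'Size': 80},
    {'DiskId': 'd-3', 'InstanceId': 'i-aaa', 'Device': '/dev/xvdb', 'Size': 200},
]

# Group once, then do constant-time lookups per instance.
disks_by_instance = defaultdict(list)
for disk in disks:
    disks_by_instance[disk['InstanceId']].append(disk)

print([d['DiskId'] for d in disks_by_instance['i-aaa']])  # ['d-1', 'd-3']
```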
#### File: manager/ecs/scaling_group_manager.py
```python
from spaceone.core.manager import BaseManager
from spaceone.inventory.model.scaling_group import ScalingGroup
class ScalingGroupManager(BaseManager):
def __init__(self, params, ecs_connector=None):
self.params = params
self.ecs_connector = ecs_connector
def get_scaling_info(self, instance_id, scaling_groups, scaling_instances):
"""
data.scaling_group = {
'id': '',
'name': '',
'active_scaling_configuration_id': '',
'launch_template': {
'id': '',
'version': ''
}
}
"""
if scaling_instances is None or scaling_groups is None:
return None
matched_scaling_group = self.get_matched_scaling_group(
instance_id, scaling_groups, scaling_instances
)
if matched_scaling_group:
scaling_group_data = {
"id": matched_scaling_group.get("ScalingGroupId", ""),
"name": matched_scaling_group.get("ScalingGroupName", ""),
}
if "LaunchTemplateId" in matched_scaling_group:
scaling_group_data.update(
{
"launch_template": {
"id": matched_scaling_group.get("LaunchTemplateId", ""),
"version": matched_scaling_group.get(
"LaunchTemplateVersion", ""
),
}
}
)
if "ActiveScalingConfigurationId" in matched_scaling_group:
scaling_group_data.update(
{
"active_scaling_configuration_id": matched_scaling_group.get(
"ActiveScalingConfigurationId"
)
}
)
return ScalingGroup(scaling_group_data, strict=False)
else:
return None
@staticmethod
def get_matched_scaling_group(instance_id, scaling_groups, scaling_instances):
matched_scaling_group_id = None
for scaling_instance in scaling_instances:
if instance_id == scaling_instance.get("InstanceId"):
matched_scaling_group_id = scaling_instance.get("ScalingGroupId", "")
for scaling_group in scaling_groups:
if matched_scaling_group_id == scaling_group.get("ScalingGroupId", ""):
return scaling_group
``` |
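`get_matched_scaling_group` is a two-step join: find the scaling-instance record for the ECS instance, then resolve its `ScalingGroupId` against the scaling-group list. A compact restatement with made-up records:
```python
scaling_instances = [{'InstanceId': 'i-aaa', 'ScalingGroupId': 'asg-1'}]
scaling_groups = [{'ScalingGroupId': 'asg-1', 'ScalingGroupName': 'web-tier'}]

def find_scaling_group(instance_id, groups, instances):
    # Step 1: instance -> scaling group id. Step 2: id -> group record.
    group_id = next((si['ScalingGroupId'] for si in instances
                     if si.get('InstanceId') == instance_id), None)
    return next((g for g in groups if g.get('ScalingGroupId') == group_id), None)

print(find_scaling_group('i-aaa', scaling_groups, scaling_instances))  # the 'web-tier' group
```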
{
"source": "jihyungSong/plugin-aws-health",
"score": 2
} |
#### File: monitoring/connector/health_connector.py
```python
import boto3
import re
import logging
import sys
from botocore.exceptions import ClientError
from multiprocessing import Pool
from datetime import datetime
from spaceone.core import utils
from spaceone.core.transaction import Transaction
from spaceone.core.error import *
from spaceone.core.connector import BaseConnector
from spaceone.monitoring.error import *
__all__ = ["HealthConnector"]
_LOGGER = logging.getLogger(__name__)
DEFAULT_REGION = 'us-east-1'
NUMBER_OF_CONCURRENT = 1
class HealthConnector(BaseConnector):
def __init__(self, transaction, config):
super().__init__(transaction, config)
def create_session(self, schema, options, secret_data):
""" Verify health Session
"""
create_session(schema, secret_data, options)
def collect_info(self, schema, query, options, secret_data, start, end, resource, sort, limit=200):
"""
Args:
query (dict): example
{
'instance_id': ['i-123', 'i-2222', ...]
'instance_type': 'm4.xlarge',
'region_name': ['aaaa']
}
resource: arn:aws:ec2:<REGION>:<ACCOUNT_ID>:instance/<instance-id>
If there is region_name in query, this indicates searching only these regions
"""
(query, resource_ids, region_name) = self._check_query(query)
post_filter_cache = False if len(region_name) > 0 else True
try:
(resource_ids, regions) = _parse_arn(resource)
print(resource_ids)
print(regions)
except Exception as e:
_LOGGER.error(f'[collect_info] fail to parse arn:{e}')
params = []
region_name_list = [] # For filter_cache
# health is global API
regions = [DEFAULT_REGION]
for region in regions:
params.append({
'schema': schema,
'region_name': region,
'query': query,
'options': options,
'resource_ids': resource_ids,
'secret_data': secret_data,
'start': start,
'end': end,
'sort': sort,
'limit': limit
})
with Pool(NUMBER_OF_CONCURRENT) as pool:
result = pool.map(discover_health, params)
no_result = True
for resources in result:
(collected_resources, region_name) = resources
if len(collected_resources) > 0:
region_name_list.append(region_name)
try:
no_result = False
response = _prepare_response_schema()
response['result'] = {'logs': collected_resources}
yield response
except Exception as e:
_LOGGER.error(f'[collect_info] skip return {resource}, {e}')
if no_result:
response = _prepare_response_schema()
response['result'] = {'logs': []}
yield response
@staticmethod
def _check_query(query):
resource_ids = []
filters = []
region_name = []
for key, value in query.items():
if key == 'instance_id' and isinstance(value, list):
resource_ids = value
elif key == 'region_name' and isinstance(value, list):
region_name.extend(value)
else:
if not isinstance(value, list):
value = [value]
if len(value) > 0:
filters.append({'Name': key, 'Values': value})
return (filters, resource_ids, region_name)
#######################
# AWS Boto3 session
#######################
def create_session(schema, secret_data: dict, options={}):
_check_secret_data(secret_data)
aws_access_key_id = secret_data['aws_access_key_id']
aws_secret_access_key = secret_data['aws_secret_access_key']
role_arn = secret_data.get('role_arn')
if schema:
return getattr(sys.modules[__name__], f'_create_session_{schema}')(aws_access_key_id,
aws_secret_access_key,
role_arn)
else:
raise ERROR_REQUIRED_CREDENTIAL_SCHEMA()
def _check_secret_data(secret_data):
if 'aws_access_key_id' not in secret_data:
raise ERROR_REQUIRED_PARAMETER(key='secret.aws_access_key_id')
if 'aws_secret_access_key' not in secret_data:
raise ERROR_REQUIRED_PARAMETER(key='secret.aws_secret_access_key')
def _create_session_aws_access_key(aws_access_key_id, aws_secret_access_key, role_arn=None):
return boto3.Session(aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key)
def _create_session_aws_assume_role(aws_access_key_id, aws_secret_access_key, role_arn):
session = _create_session_aws_access_key(aws_access_key_id, aws_secret_access_key)
sts = session.client('sts')
assume_role_object = sts.assume_role(RoleArn=role_arn, RoleSessionName=utils.generate_id('AssumeRoleSession'))
credentials = assume_role_object['Credentials']
return boto3.Session(aws_access_key_id=credentials['AccessKeyId'],
aws_secret_access_key=credentials['SecretAccessKey'],
aws_session_token=credentials['SessionToken'])
def _set_connect(schema, secret_data, region_name, service="health"):
"""
"""
session = create_session(schema, secret_data)
return session.client(service, region_name=region_name)
def discover_health(params):
"""
Args: params (dict): {
'schema': 'str,
'region_name': 'str',
'query': 'dict',
'options': 'dict',
'resource_ids': 'list'
'secret_data': 'dict',
'start': 'datetime',
'end': 'datetime',
'sort': 'dict',
'limit': 'int'
}
Returns: Resources, region_name
"""
print(f'[discover_health] {params["region_name"]}')
client = _set_connect(params['schema'], params['secret_data'], params['region_name'])
try:
resources = _lookup_events(client, params)
return resources
except ERROR_SUBSCRIPTION_REQUIRED as e:
raise ERROR_SUBSCRIPTION_REQUIRED()
except Exception as e:
_LOGGER.error(f'[discover_health] skip region: {params["region_name"]}, {e}')
return [], params['region_name']
def _lookup_events(client, params):
events = []
resource_list = []
event_query = {}
filter_query = {}
region_name = params['region_name']
options = params.get('options', {})
all_events = options.get('all_events', False)
if 'eventStatusCodes' in options:
filter_query.update({'eventStatusCodes': options['eventStatusCodes']})
else:
filter_query.update({'eventStatusCodes': ['open', 'upcoming']})
filter_query.update({'startTimes': [{'from': params['start'], 'to': params['end']}]})
# Paginator config
limit = params.get('limit')
print(f'limit: {limit}')
page_size = limit if limit < 50 else 50
event_query.update({'filter': filter_query, 'PaginationConfig': {'MaxItems': limit, 'PageSize': page_size}})
try:
print(event_query)
paginator = client.get_paginator('describe_events')
response_iterator = paginator.paginate(**event_query)
for response in response_iterator:
events.extend(response['events'])
if len(events) == 0:
# Fast return if No resources
print("No events")
return events, region_name
except ClientError as e:
print('=' * 30)
print(e.response['Error']['Code'])
print('=' * 30)
if e.response['Error']['Code'] == 'SubscriptionRequiredException':
raise ERROR_SUBSCRIPTION_REQUIRED()
else:
print(e)
except Exception as e:
print(f'[_lookup_events] Fail to lookup health events: {e}')
return resource_list, region_name
# Find Events
for event in events:
try:
result = _parse_health_event(event)
entity_count = _find_affected_entities(client, result['arn'])
if entity_count > 0:
result['count'] = entity_count
result['reference'] = _add_reference(result['arn'])
resource_list.append(result)
elif all_events is True:
result['count'] = 0
result['reference'] = _add_reference(result['arn'])
resource_list.append(result)
except Exception as e:
print(f'[_lookup_events] error {e}')
return resource_list, region_name
def _add_reference(eventArn):
reference = {
'resource_id': eventArn,
'external_link': f'https://phd.aws.amazon.com/phd/home#/event-log?eventID={eventArn}&eventTab=details&layout=vertical'
}
return reference
def _find_affected_entities(client, eventArn):
filter_query = {
'eventArns': [eventArn]
}
try:
resp = client.describe_entity_aggregates(**filter_query)
if 'entityAggregates' in resp:
return resp['entityAggregates'][0].get('count', 0)
return 0
except Exception as e:
_LOGGER.debug(f'[_find_affected_entities] failed {e}')
return 0
def _parse_health_event(health_event):
""" Parse health Event
Args: healthEvent (raw data)
Returns: dict
"""
result = {}
wanted_items = ['service', 'arn', 'eventTypeCode', 'eventTypeCategory', 'region', 'availabilityZone', 'startTime', 'endTime',
'lastUpdatedTime', 'statusCode']
for item in wanted_items:
if item in health_event:
if isinstance(health_event[item], datetime):
result[item] = health_event[item].isoformat()
else:
result[item] = health_event[item]
#print(f'parse cloud trail event: {result}')
return result
def _parse_arn(arn):
"""
ec2) arn:aws:ec2:<REGION>:<ACCOUNT_ID>:instance/<instance-id>
arn:partition:service:region:account-id:resource-id
arn:partition:service:region:account-id:resource-type/resource-id
arn:partition:service:region:account-id:resource-type:resource-id
Returns: resource_list, [regions]
"""
p = (r"(?P<arn>arn):"
r"(?P<partition>aws|aws-cn|aws-us-gov):"
r"(?P<service>[A-Za-z0-9_\-]*):"
r"(?P<region>[A-Za-z0-9_\-]*):"
r"(?P<account>[A-Za-z0-9_\-]*):"
r"(?P<resources>[A-Za-z0-9_\-:/]*)")
r = re.compile(p)
match = r.match(arn)
if match:
d = match.groupdict()
else:
return (None, None)
region = d.get('region', None)
resource_id = None
resources = d.get('resources', None)
if resources:
items = re.split('/|:', resources)
if len(items) == 1:
resource_id = items[0]
elif len(items) == 2:
resource_type = items[0]
resource_id = items[1]
else:
print(f'ERROR parsing: {resources}')
return [resource_id], [region]
def _prepare_response_schema() -> dict:
return {
'resource_type': 'monitoring.Log',
'actions': [
{
'method': 'process'
}],
'result': {}
}
if __name__ == "__main__":
import os
aki = os.environ.get('AWS_ACCESS_KEY_ID', "<YOUR_AWS_ACCESS_KEY_ID>")
sak = os.environ.get('AWS_SECRET_ACCESS_KEY', "<YOUR_AWS_SECRET_ACCESS_KEY>")
secret_data = {
# 'region_name': 'ap-northeast-2',
'aws_access_key_id': aki,
'aws_secret_access_key': sak
}
conn = HealthConnector(Transaction(), secret_data)
#opts = conn.verify({}, secret_data)
#print(opts)
options = {'eventStatusCodes': ['open', 'upcoming', 'closed'],
'all_events': True}
query = {}
#query = {'region_name': ['ap-northeast-2', 'us-east-1']}
#query = {}
from datetime import datetime, timedelta
end = datetime.utcnow()
start = end - timedelta(days=7)
ec2_arn = 'arn:aws:ec2:ap-northeast-2:072548720675:instance/i-08c5592e084b24e20'
sort = ""
limit = 50
resource_stream = conn.collect_info(schema='aws_access_key', query=query, options=options, secret_data=secret_data,
start=start, end=end, resource=ec2_arn, sort=sort, limit=limit)
import pprint
for resource in resource_stream:
pprint.pprint(resource)
```
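`_parse_arn` splits an ARN with a named-group regex and then breaks the trailing resource segment on `/` or `:`. A worked example using the same pattern on the EC2 ARN from the `__main__` block:
```python
import re

ARN_PATTERN = re.compile(
    r"(?P<arn>arn):"
    r"(?P<partition>aws|aws-cn|aws-us-gov):"
    r"(?P<service>[A-Za-z0-9_\-]*):"
    r"(?P<region>[A-Za-z0-9_\-]*):"
    r"(?P<account>[A-Za-z0-9_\-]*):"
    r"(?P<resources>[A-Za-z0-9_\-:/]*)"
)

arn = 'arn:aws:ec2:ap-northeast-2:072548720675:instance/i-08c5592e084b24e20'
parts = ARN_PATTERN.match(arn).groupdict()
resource_type, resource_id = re.split('/|:', parts['resources'])
print(parts['service'], parts['region'], resource_type, resource_id)
# ec2 ap-northeast-2 instance i-08c5592e084b24e20
```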
#### File: monitoring/info/log_info.py
```python
__all__ = ['PluginLogsResponse']
from spaceone.api.core.v1 import plugin_pb2
from spaceone.api.monitoring.plugin import log_pb2
from spaceone.core.pygrpc.message_type import *
def PluginAction(action_data):
info = {
'method': action_data['method'],
}
if 'options' in action_data:
info.update({'options': change_struct_type(action_data['options'])})
return plugin_pb2.PluginAction(**info)
def LogsInfo(result):
info = {'logs': change_list_value_type(result['logs'])}
return log_pb2.LogsInfo(**info)
def PluginLogsResponse(resource_dict):
result = {
'resource_type': resource_dict['resource_type'],
'result': LogsInfo(resource_dict['result'])
}
if resource_dict.get('actions'):
result['actions'] = list(map(PluginAction, resource_dict['actions']))
return log_pb2.PluginLogsResponse(**result)
```
#### File: monitoring/service/monitoring_service.py
```python
import logging
from datetime import datetime
from datetime import timedelta
from spaceone.core.error import *
from spaceone.core.service import *
from spaceone.monitoring.error import *
_LOGGER = logging.getLogger(__name__)
DEFAULT_SCHEMA = 'aws_access_key'
FILTER_FORMAT = [
]
NUM_OF_LIMIT = 30
@authentication_handler
class MonitoringService(BaseService):
def __init__(self, metadata):
super().__init__(metadata)
@transaction
@check_required(['options', 'secret_data', 'filter', 'start', 'end'])
@change_timestamp_value(['start', 'end'], timestamp_format='iso8601')
def list_resources(self, params):
""" Get quick list of resources
Args:
params (dict) {
'schema': 'str',
'options': 'dict',
'secret_data': 'dict',
'filter': 'dict',
'resource': 'str',
'start': 'timestamp',
'end': 'timestamp',
'sort': 'dict',
'limit': 'int'
}
Returns: list of resources
"""
manager = self.locator.get_manager('MonitoringManager')
schema = params.get('schema', DEFAULT_SCHEMA)
options = params['options']
secret_data = params['secret_data']
filters = params['filter']
resource = params.get('resource', None)
start = params.get('start', datetime.utcnow() - timedelta(days=1))
end = params.get('end', datetime.utcnow())
sort = params.get('sort', None)
limit = params.get('limit', NUM_OF_LIMIT)
if options == {}:
options = {'eventStatusCodes':['open', 'upcoming', 'closed'],
'all_events': True}
if start > end:
start = end
return manager.list_resources(schema, options, secret_data, filters, resource, start, end, sort, limit)
``` |
{
"source": "jihyungSong/plugin-aws-personal-health-dashboard",
"score": 2
} |
#### File: inventory/connector/personal_health_dashboard.py
```python
import logging
from botocore.exceptions import ClientError
from spaceone.inventory.libs.connector import AWSConnector
from spaceone.inventory.error.custom import *
__all__ = ['PersonalHealthDashboardConnector']
_LOGGER = logging.getLogger(__name__)
class PersonalHealthDashboardConnector(AWSConnector):
service = 'health'
def __init__(self, **kwargs):
super().__init__(**kwargs)
def describe_events(self, **query):
query = self.generate_query(is_paginate=True, **query)
try:
paginator = self.client.get_paginator('describe_events')
response_iterator = paginator.paginate(**query)
events = []
for response in response_iterator:
events.extend(response['events'])
return events
except ClientError as e:
if e.response['Error']['Code'] == 'SubscriptionRequiredException':
raise ERROR_SUBSCRIPTION_REQUIRED()
else:
print(e)
except Exception as e:
print(f'Fail to describe events: {e}')
return []
def describe_event_details(self, event_arns):
try:
response = self.client.describe_event_details(eventArns=event_arns)
return response.get('successfulSet', [])
except ClientError as e:
if e.response['Error']['Code'] == 'SubscriptionRequiredException':
raise ERROR_SUBSCRIPTION_REQUIRED()
else:
print(e)
except Exception as e:
print(f'Fail to describe event details: {e}')
return []
def describe_entity_aggregates(self, event_arn, **query):
query = self.generate_query(is_paginate=True, **query)
try:
response = self.client.describe_entity_aggregates(eventArns=event_arn, **query)
return response.get('entityAggregates', [])  # the Health API returns aggregates under 'entityAggregates'
except ClientError as e:
if e.response['Error']['Code'] == 'SubscriptionRequiredException':
raise ERROR_SUBSCRIPTION_REQUIRED()
else:
print(e)
except Exception as e:
print(f'Fail to describe entity aggregates: {e}')
return []
def describe_affected_entities(self, **query):
query = self.generate_query(is_paginate=True, **query)
try:
paginator = self.client.get_paginator('describe_affected_entities')
response_iterator = paginator.paginate(**query)
entities = []
for response in response_iterator:
entities.extend(response['entities'])
return entities
except ClientError as e:
if e.response['Error']['Code'] == 'SubscriptionRequiredException':
raise ERROR_SUBSCRIPTION_REQUIRED()
else:
print(e)
except Exception as e:
print(f'Fail to describe affected entities: {e}')
return []
```
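`describe_events` wraps boto3's paginator so callers always receive the fully accumulated `events` list. The stand-alone sketch below shows the same call pattern; it needs AWS credentials and a support plan that enables the Health API, and the filter values are only examples.
```python
import boto3

def list_open_health_events(region_name='us-east-1'):
    client = boto3.client('health', region_name=region_name)  # the Health API is served from us-east-1
    paginator = client.get_paginator('describe_events')
    events = []
    for page in paginator.paginate(filter={'eventStatusCodes': ['open', 'upcoming']}):
        events.extend(page['events'])
    return events

# events = list_open_health_events()
# print(len(events))
```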
#### File: model/personal_health_dashboard/data.py
```python
from schematics import Model
from schematics.types import ModelType, ListType, StringType, DateTimeType, BooleanType, IntType
class Tags(Model):
key = StringType()
value = StringType()
class AffectedResource(Model):
entity_arn = StringType(deserialize_from='entityArn')
event_arn = StringType(deserialize_from='eventArn')
entity_value = StringType()
entity_type = StringType(choices=('account', 'resource'))
entity_url = StringType(deserialize_from='entityUrl', serialize_when_none=False)
aws_account_id = StringType(deserialize_from='awsAccountId')
last_update_time = DateTimeType(deserialize_from='lastUpdatedTime')
status_code = StringType(deserialize_from='statusCode', choices=('IMPAIRED', 'UNIMPAIRED', 'UNKNOWN'))
tags = ListType(ModelType(Tags), default=[])
class Event(Model):
arn = StringType()
service = StringType(serialize_when_none=False)
status_code = StringType(deserialize_from='statusCode', choices=('open', 'closed', 'upcoming'))
event_scope_code = StringType(deserialize_from='eventScopeCode', choices=('PUBLIC', 'ACCOUNT_SPECIFIC', 'NONE'))
event_type_code = StringType(deserialize_from="eventTypeCode")
event_title = StringType(default='')
event_type_category = StringType(deserialize_from="eventTypeCategory",
choices=('issue', 'accountNotification', 'scheduledChange', 'investigation'))
availability_zone = StringType(deserialize_from='availabilityZone', serialize_when_none=False)
start_time = DateTimeType(deserialize_from="startTime")
last_update_time = DateTimeType(deserialize_from="lastUpdatedTime")
end_time = DateTimeType(deserialize_from="endTime")
affected_resources = ListType(ModelType(AffectedResource), default=[])
affected_resource_display = StringType(default='-')
affected_resources_count = IntType(default=0)
has_affected_resources = BooleanType(default=False)
description = StringType(default='')
region = StringType()
account_id = StringType(default="")
def reference(self):
return {
"resource_id": self.arn,
"external_link": f"https://phd.aws.amazon.com/phd/home#/event-log?eventID={self.arn}&eventTab=details"
}
``` |
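The models above lean on schematics' `deserialize_from` to map the camelCase AWS payload onto snake_case fields, with `strict=False` tolerating keys the model does not declare. A minimal round-trip with a cut-down model (fields chosen purely for illustration):
```python
from schematics import Model
from schematics.types import StringType

class MiniEvent(Model):
    arn = StringType()
    status_code = StringType(deserialize_from='statusCode', choices=('open', 'closed', 'upcoming'))
    event_type_code = StringType(deserialize_from='eventTypeCode')

raw = {
    'arn': 'arn:aws:health:us-east-1::event/EC2/AWS_EC2_EXAMPLE/abc123',
    'statusCode': 'open',
    'eventTypeCode': 'AWS_EC2_OPERATIONAL_ISSUE',
}
event = MiniEvent(raw, strict=False)  # strict=False tolerates undeclared keys in the raw dict
print(event.to_primitive()['status_code'])  # 'open'
```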
{
"source": "jihyungSong/plugin-aws-price-info",
"score": 2
} |
#### File: inventory/manager/pricing_manager.py
```python
import json
from spaceone.inventory.libs.manager import AWSManager
from spaceone.inventory.connector.pricing import PricingConnector
from spaceone.inventory.model.pricing.cloud_service_type import CLOUD_SERVICE_TYPES
class PricingManager(AWSManager):
conn = None
def __init__(self, transaction=None, **kwargs):
super().__init__(transaction=transaction)
self.conn: PricingConnector = self.locator.get_connector('PricingConnector', **kwargs)
self.conn.set_client()
def list_service_codes(self):
services = self.conn.describe_services()
return [service.get('ServiceCode') for service in services if service.get('ServiceCode')]
def list_products(self, service_code):
for product in self.conn.get_products(service_code):
yield json.loads(product)
@staticmethod
def collect_cloud_service_types():
for cloud_service_type in CLOUD_SERVICE_TYPES:
yield cloud_service_type
``` |
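The connector internals are not shown here, but `list_service_codes` and `list_products` presumably map onto the Pricing API calls below; a hedged sketch with a bare boto3 client:
```python
# Rough boto3 equivalent of the two manager methods above (an assumption,
# since PricingConnector itself is not shown). The Pricing API lives in us-east-1.
import json
import boto3

pricing = boto3.client('pricing', region_name='us-east-1')
service_codes = [s['ServiceCode'] for s in pricing.describe_services()['Services']]
for raw in pricing.get_products(ServiceCode='AmazonEC2', MaxResults=1)['PriceList']:
    product = json.loads(raw)   # each PriceList entry is a JSON string
    print(product['product']['productFamily'])
```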
{
"source": "jihyungSong/plugin-azure-cloud-services",
"score": 2
} |
#### File: inventory/connector/cosmos_db.py
```python
import logging
from spaceone.inventory.libs.connector import AzureConnector
from spaceone.inventory.error.custom import *
__all__ = ['CosmosDBConnector']
_LOGGER = logging.getLogger(__name__)
class CosmosDBConnector(AzureConnector):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.set_connect(kwargs.get('secret_data'))
def list_all_cosmos_db_accounts(self):
return self.cosmosdb_client.database_accounts.list()
def list_keys(self, account_name, resource_group_name):
        return self.cosmosdb_client.database_accounts.list_keys(account_name=account_name, resource_group_name=resource_group_name)
def list_sql_resources(self, account_name, resource_group_name):
return self.cosmosdb_client.sql_resources.list_sql_databases(account_name=account_name, resource_group_name=resource_group_name)
```
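The connector appears to be a thin wrapper over `azure-mgmt-cosmosdb`; a minimal sketch of the same calls with the SDK directly, using placeholder service-principal values:
```python
# Direct SDK calls the connector above appears to wrap (placeholders for
# credentials; requires azure-identity and azure-mgmt-cosmosdb).
from azure.identity import ClientSecretCredential
from azure.mgmt.cosmosdb import CosmosDBManagementClient

credential = ClientSecretCredential(tenant_id='<tenant>', client_id='<client>',
                                    client_secret='<secret>')
client = CosmosDBManagementClient(credential, subscription_id='<subscription>')
for account in client.database_accounts.list():
    print(account.name, account.location)
```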
#### File: inventory/connector/postgresql_server.py
```python
import logging
from spaceone.inventory.libs.connector import AzureConnector
from spaceone.inventory.error.custom import *
__all__ = ['PostgreSQLServerConnector']
_LOGGER = logging.getLogger(__name__)
class PostgreSQLServerConnector(AzureConnector):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.set_connect(kwargs.get('secret_data'))
def list_servers(self):
return self.postgre_sql_client.servers.list()
def list_firewall_rules_by_server(self, resource_group_name, server_name):
return self.postgre_sql_client.firewall_rules.list_by_server(resource_group_name=resource_group_name, server_name=server_name)
def list_virtual_network_rules_by_server(self, resource_group_name, server_name):
return self.postgre_sql_client.virtual_network_rules.list_by_server(resource_group_name=resource_group_name,
server_name=server_name)
def list_replicas_by_server(self, resource_group_name, server_name):
return self.postgre_sql_client.replicas.list_by_server(resource_group_name=resource_group_name, server_name=server_name)
def list_server_administrators(self, resource_group_name, server_name):
return self.postgre_sql_client.server_administrators.list(resource_group_name=resource_group_name, server_name=server_name)
```
#### File: inventory/manager/public_ip_address_manager.py
```python
from spaceone.inventory.libs.manager import AzureManager
from spaceone.inventory.libs.schema.base import ReferenceModel
from spaceone.inventory.connector.public_ip_address import PublicIPAddressConnector
from spaceone.inventory.model.publicipaddress.cloud_service import *
from spaceone.inventory.model.publicipaddress.cloud_service_type import CLOUD_SERVICE_TYPES
from spaceone.inventory.model.publicipaddress.data import *
import json
import time
import ipaddress
import logging
_LOGGER = logging.getLogger(__name__)
class PublicIPAddressManager(AzureManager):
connector_name = 'PublicIPAddressConnector'
cloud_service_types = CLOUD_SERVICE_TYPES
def collect_cloud_service(self, params):
"""
Args:
params (dict):
- 'options' : 'dict'
- 'schema' : 'str'
- 'secret_data' : 'dict'
- 'filter' : 'dict'
- 'zones' : 'list'
- 'subscription_info' : 'dict'
Response:
CloudServiceResponse (list) : dictionary of azure public ip address data resource information
ErrorResourceResponse (list) : list of error resource information
"""
_LOGGER.debug("** Public IP Address START **")
start_time = time.time()
subscription_info = params['subscription_info']
        public_ip_address_conn: PublicIPAddressConnector = self.locator.get_connector(self.connector_name, **params)
public_ip_address_responses = []
error_responses = []
public_ip_addresses_list = public_ip_address_conn.list_all_public_ip_addresses()
for public_ip_address in public_ip_addresses_list:
public_ip_address_id = ''
try:
public_ip_address_dict = self.convert_nested_dictionary(self, public_ip_address)
public_ip_address_id = public_ip_address_dict['id']
                # update public_ip_address_dict
public_ip_address_dict.update({
'resource_group': self.get_resource_group_from_id(public_ip_address_id),
# parse resource_group from ID
'subscription_id': subscription_info['subscription_id'],
'subscription_name': subscription_info['subscription_name'],
})
if public_ip_address_dict.get('ip_configuration') is not None:
associated_to = public_ip_address_dict['ip_configuration']['id'].split('/')[8]
if associated_to:
public_ip_address_dict.update({
'associated_to': associated_to
})
public_ip_address_data = PublicIPAddress(public_ip_address_dict, strict=False)
public_ip_address_resource = PublicIPAddressResource({
'data': public_ip_address_data,
'region_code': public_ip_address_data.location,
'reference': ReferenceModel(public_ip_address_data.reference()),
'name': public_ip_address_data.name,
'account': public_ip_address_data.subscription_id,
'instance_type': public_ip_address_data.sku.name
})
                # Must call set_region_code() for region collection
self.set_region_code(public_ip_address_data['location'])
_LOGGER.debug(f'[PUBLIC IP ADDRESS INFO] {public_ip_address_resource.to_primitive()}')
public_ip_address_responses.append(PublicIPAddressResponse({'resource': public_ip_address_resource}))
except Exception as e:
_LOGGER.error(f'[list_instances] {public_ip_address_id} {e}', exc_info=True)
error_resource_response = self.generate_resource_error_response(e, 'Network', 'PublicIPAddress', public_ip_address_id)
error_responses.append(error_resource_response)
_LOGGER.debug(f'** Public IP Address Finished {time.time() - start_time} Seconds **')
return public_ip_address_responses, error_responses
```
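Both `resource_group` and `associated_to` are derived by splitting Azure resource IDs; a self-contained sketch with a made-up ID shows why index 4 is the resource group and index 8 is the attached resource name, assuming the standard ID layout:
```python
# Standard Azure resource-ID layout:
# /subscriptions/<sub>/resourceGroups/<rg>/providers/<namespace>/<type>/<name>/...
ip_config_id = ('/subscriptions/0000/resourceGroups/my-rg/providers/'
                'Microsoft.Network/networkInterfaces/my-nic/ipConfigurations/ipconfig1')
parts = ip_config_id.split('/')
resource_group = parts[4]   # 'my-rg'  -- what get_resource_group_from_id presumably extracts
associated_to = parts[8]    # 'my-nic' -- the NIC this public IP is attached to
print(resource_group, associated_to)
```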
#### File: model/disk/data.py
```python
from schematics import Model
from schematics.types import ModelType, ListType, StringType, FloatType, DateTimeType, IntType, BooleanType
class Sku(Model):
name = StringType(choices=('Standard_LRS', 'Premium_LRS', 'StandardSSD_LRS', 'UltraSSD_LRS'),
serialize_when_none=False)
tier = StringType(choices=('Premium', 'Standard'), serialize_when_none=False)
class ImageDiskReference(Model):
id = StringType(serialize_when_none=False)
lun = IntType(serialize_when_none=False)
class CreationData(Model):
creation_option = StringType(choices=('Attach', 'Copy', 'Empty', 'FromImage', 'Import', 'Restore', 'Upload'), serialize_when_none=False)
gallery_image_reference = ModelType(ImageDiskReference, serialize_when_none=False)
image_reference = ModelType(ImageDiskReference, serialize_when_none=False)
logical_sector_size = IntType(serialize_when_none=False)
source_resource_id = StringType(serialize_when_none=False)
source_unique_id = StringType(serialize_when_none=False)
source_uri = StringType(serialize_when_none=False)
storage_account_id = StringType(serialize_when_none=False)
upload_size_bytes = IntType(serialize_when_none=False)
class SourceVault(Model):
id = StringType(serialize_when_none=False)
class DiskEncryptionKey(Model):
source_vault = ModelType(SourceVault, serialize_when_none=False)
secret_url = StringType(serialize_when_none=False)
class KeyEncryptionKey(Model):
source_vault = ModelType(SourceVault)
key_url = StringType()
class EncryptionSettingsCollection(Model):
disk_encryption_key = ModelType(DiskEncryptionKey, serialize_when_none=False)
key_encryption_key = ModelType(KeyEncryptionKey, serialize_when_none=False)
class Encryption(Model):
disk_encryption_set_id = StringType(default='', serialize_when_none=False)
type = StringType(choices=('EncryptionAtRestWithCustomerKey', 'EncryptionAtRestWithPlatformAndCustomerKeys',
'EncryptionAtRestWithPlatformKey'),
default='EncryptionAtRestWithPlatformKey', serialize_when_none=False)
class ShareInfoElement(Model):
vm_uri = StringType(serialize_when_none=False)
class Tags(Model):
key = StringType()
value = StringType()
class Disk(Model):
name = StringType()
id = StringType()
type = StringType()
resource_group = StringType()
location = StringType()
managed_by = StringType(default='')
managed_by_extended = ListType(StringType, serialize_when_none=False)
max_shares = IntType(serialize_when_none=False, default=0)
sku = ModelType(Sku)
zones = ListType(StringType(), serialize_when_none=False)
disk_size_gb = IntType()
disk_iops_read_write = IntType()
disk_iops_read_only = BooleanType(serialize_when_none=False)
disk_size_bytes = IntType()
size = IntType() # disk size for statistics
encryption_settings_collection = ModelType(EncryptionSettingsCollection, serialize_when_none=False)
encryption = ModelType(Encryption)
hyper_v_generation = StringType(serialize_when_none=False)
time_created = DateTimeType()
creation_data = ModelType(CreationData)
os_type = StringType(serialize_when_none=False)
provisioning_state = StringType(choices=('Failed', 'Succeeded'), serialize_when_none=False)
share_info = ModelType(ShareInfoElement, serialize_when_none=False)
unique_id = StringType()
disk_m_bps_read_write = IntType()
subscription_id = StringType()
subscription_name = StringType()
disk_m_bps_read_only = BooleanType(serialize_when_none=False)
disk_state = StringType(choices=('ActiveSAS', 'ActiveUpload', 'Attached', 'ReadyToUpload', 'Reserved', 'Unattached'))
network_access_policy = StringType(choices=('AllowAll', 'AllowPrivate', 'DenyAll'), serialize_when_none=False)
network_access_policy_display = StringType()
tier_display = StringType(default='')
tags = ListType(ModelType(Tags), default=[])
def reference(self):
return {
"resource_id": self.id,
"external_link": f"https://portal.azure.com/#@.onmicrosoft.com/resource{self.id}/overview",
}
```
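Azure returns `tags` as a plain dict, while the `Disk` model above stores them as a list of `Tags(key, value)`; a hedged sketch of the conversion a manager would need before instantiating the model (the manager code itself is not shown here):
```python
# Converting dict-style Azure tags into the list shape the Disk model expects.
raw_tags = {'env': 'prod', 'owner': 'team-a'}
disk = Disk({
    'name': 'my-disk',
    'id': '/subscriptions/0000/resourceGroups/my-rg/providers/Microsoft.Compute/disks/my-disk',
    'disk_size_gb': 128,
    'tags': [{'key': k, 'value': v} for k, v in raw_tags.items()],
}, strict=False)
print(disk.tags[0].key, disk.tags[0].value)   # env prod
```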
#### File: model/networksecuritygroup/data.py
```python
from schematics import Model
from schematics.types import ModelType, ListType, StringType, IntType, BooleanType, NumberType, DateTimeType, \
TimestampType, UTCDateTimeType, TimedeltaType, FloatType
class Tags(Model):
key = StringType(serialize_when_none=False)
value = StringType(serialize_when_none=False)
class SubResource(Model):
id = StringType()
class ExtendedLocation(Model):
name = StringType(serialize_when_none=False)
type = StringType(serialize_when_none=False)
class ApplicationSecurityGroup(Model):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
location = ModelType(ExtendedLocation, serialize_when_none=False)
name = StringType(serialize_when_none=False)
provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
resource_guid = StringType(serialize_when_none=False)
tags = ModelType(Tags, serialize_when_none=False)
type = StringType(serialize_when_none=False)
class SecurityRule(Model):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
access = StringType(choices=('Allow', 'Deny'), serialize_when_none=False)
description = StringType(serialize_when_none=False)
destination_address_prefix = StringType(serialize_when_none=False)
destination_address_prefixes = ListType(StringType, serialize_when_none=False)
destination_application_security_groups = ListType(ModelType(ApplicationSecurityGroup), serialize_when_none=False)
destination_port_range = StringType(serialize_when_none=False)
destination_port_ranges = ListType(StringType, serialize_when_none=False)
direction = StringType(choices=('Inbound', 'Outbound'), serialize_when_none=False)
priority = IntType(serialize_when_none=False)
protocol = StringType(choices=('*', 'Ah', 'Esp', 'Icmp', 'Tcp', 'Udp'), serialize_when_none=False)
provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
source_address_prefix = StringType(serialize_when_none=False)
source_address_prefixes = ListType(StringType, serialize_when_none=False)
source_application_security_groups = ListType(ModelType(ApplicationSecurityGroup), serialize_when_none=False)
source_port_range = StringType(serialize_when_none=False)
source_port_ranges = ListType(StringType, serialize_when_none=False)
class TrafficAnalyticsConfigurationProperties(Model):
enabled = BooleanType(serialize_when_none=False)
traffic_analytics_interval = IntType(serialize_when_none=False)
workspace_id = StringType(serialize_when_none=False)
workspace_region = StringType(serialize_when_none=False)
workspace_resource_id = StringType(serialize_when_none=False)
class TrafficAnalyticsProperties(Model):
network_watcher_flow_analytics_configuration = ModelType(TrafficAnalyticsConfigurationProperties,
serialize_when_none=False)
class FlowLogFormatType(Model):
json = StringType(serialize_when_none=False)
class FlowLogFormatParameters(Model):
type = ModelType(FlowLogFormatType, serialize_when_none=False)
version = IntType(serialize_when_none=False)
class RetentionPolicyParameters(Model):
days = IntType(serialize_when_none=False)
enabled = BooleanType(serialize_when_none=False)
class FlowLog(Model):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
location = ModelType(ExtendedLocation, serialize_when_none=False)
name = StringType(serialize_when_none=False)
enable = BooleanType(serialize_when_none=False)
flow_analytics_configuration = ModelType(TrafficAnalyticsProperties, serialize_when_none=False)
format = ModelType(FlowLogFormatParameters, serialize_when_none=False)
provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
retention_policy = ModelType(RetentionPolicyParameters, serialize_when_none=False)
storage_id = StringType(serialize_when_none=False)
target_resource_guid = StringType(serialize_when_none=False)
target_resource_id = StringType(serialize_when_none=False)
tags = ModelType(Tags, serialize_when_none=False)
type = StringType(serialize_when_none=False)
class NetworkInterfaceDnsSettings(Model):
applied_dns_servers = ListType(StringType, serialize_when_none=False)
dns_servers = ListType(StringType, serialize_when_none=False)
internal_dns_name_label = StringType(serialize_when_none=False)
internal_domain_name_suffix = StringType(serialize_when_none=False)
internal_fqdn = StringType(serialize_when_none=False)
class NetworkInterfaceIPConfigurationPrivateLinkConnectionProperties(Model):
fqdns = ListType(StringType, serialize_when_none=False)
group_id = StringType(serialize_when_none=False)
required_member_name = StringType(serialize_when_none=False)
class PublicIPAddressSku(Model):
name = StringType(choices=('Basic', 'Standard'), serialize_when_none=False)
tier = StringType(choices=('Global', 'Regional'), serialize_when_none=False)
class IpTag(Model):
ip_tag_type = StringType(serialize_when_none=False)
tag = StringType(serialize_when_none=False)
class DdosSettings(Model):
ddos_custom_policy = ModelType(SubResource, serialize_when_none=False)
protected_ip = BooleanType(serialize_when_none=False)
protection_coverage = StringType(choices=('Basic', 'Standard'), serialize_when_none=False)
class PublicIPAddressDnsSettings(Model):
domain_name_label = StringType(serialize_when_none=False)
fqdn = StringType(serialize_when_none=False)
reverse_fqdn = StringType(serialize_when_none=False)
class IPConfiguration(Model):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
private_ip_address = StringType(serialize_when_none=False)
private_ip_allocation_method = StringType(choices=('Dynamic', 'Static'), serialize_when_none=False)
provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
public_ip_address = StringType(serialize_when_none=False) # Change to Public IP Address's ID
subnet = StringType(serialize_when_none=False)
class NatGatewaySku(Model):
name = StringType(choices=('Standard', None), serialize_when_none=False)
class NatGateway(Model):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
location = ModelType(ExtendedLocation, serialize_when_none=False)
idle_timeout_in_minutes = IntType(serialize_when_none=False)
provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
public_ip_addresses = ListType(ModelType(SubResource), serialize_when_none=False)
public_ip_prefixes = ListType(ModelType(SubResource), serialize_when_none=False)
resource_guid = StringType(serialize_when_none=False)
subnets = ListType(ModelType(SubResource), serialize_when_none=False)
sku = ModelType(NatGatewaySku, serialize_when_none=False)
tags = ModelType(Tags, serialize_when_none=False)
type = StringType(serialize_when_none=False)
zones = ListType(StringType, serialize_when_none=False)
class PublicIPAddress(Model):
etag = StringType(serialize_when_none=False)
extended_location = ModelType(ExtendedLocation, serialize_when_none=False)
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
location = ModelType(ExtendedLocation, serialize_when_none=False)
ddos_settings = ModelType(DdosSettings, serialize_when_none=False)
dns_settings = ModelType(PublicIPAddressDnsSettings, serialize_when_none=False)
idle_timeout_in_minutes = IntType(serialize_when_none=False)
ip_address = StringType(serialize_when_none=False)
ip_configuration = ModelType(IPConfiguration, serialize_when_none=False)
ip_tags = ListType(ModelType(IpTag), serialize_when_none=False)
# linked_public_ip_address = ModelType(PublicIPAddress, serialize_when_none=False)
migration_phase = StringType(choices=('Abort', 'Commit', 'Committed', 'None', 'Prepare'), serialize_when_none=False)
nat_gateway = ModelType(NatGateway, serialize_when_none=False)
provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
public_ip_address_version = StringType(choices=('IPv4', 'IPv6'), serialize_when_none=False)
public_ip_allocation_method = StringType(choices=('Dynamic', 'Static'), serialize_when_none=False)
public_ip_prefix = ModelType(SubResource, serialize_when_none=False)
resource_guid = StringType(serialize_when_none=False)
sku = ModelType(PublicIPAddressSku, serialize_when_none=False)
tags = ModelType(Tags, serialize_when_none=False)
type = StringType(serialize_when_none=False)
zones = ListType(StringType, serialize_when_none=False)
class NetworkInterfaceIPConfiguration(Model): # ip configuration in a network interface
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
application_security_groups = ListType(ModelType(ApplicationSecurityGroup), serialize_when_none=False)
primary = BooleanType(serialize_when_none=False)
private_ip_address = StringType(serialize_when_none=False)
private_ip_address_version = StringType(choices=('IPv4', 'IPv6'), serialize_when_none=False)
private_ip_allocation_method = StringType(choices=('Dynamic', 'Static'), serialize_when_none=False)
private_link_connection_properties = ModelType(NetworkInterfaceIPConfigurationPrivateLinkConnectionProperties,
serialize_when_none=False)
provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
public_ip_address = ModelType(PublicIPAddress, serialize_when_none=False)
subnet = StringType(serialize_when_none=False) # Change to Subnet ID
virtual_network_taps = ListType(ModelType(SubResource), serialize_when_none=False)
class NetworkSecurityGroup(Model):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
location = ModelType(ExtendedLocation, serialize_when_none=False)
name = StringType(default='-', serialize_when_none=False)
default_security_rules = ListType(ModelType(SecurityRule), serialize_when_none=False)
flow_logs = ListType(ModelType(FlowLog), serialize_when_none=False)
network_interfaces = StringType(serialize_when_none=False) # Change to Network interfaces' Id
provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
resource_guid = StringType(serialize_when_none=False)
security_rules = ListType(ModelType(SecurityRule), serialize_when_none=False)
subnets = ListType(StringType, serialize_when_none=False) # Change to Subnet IDs
tags = ModelType(Tags, serialize_when_none=False)
type = StringType(serialize_when_none=False)
class PrivateLinkServiceConnectionState(Model):
actions_required = StringType(serialize_when_none=False)
description = StringType(serialize_when_none=False)
status = StringType(serialize_when_none=False)
class PrivateLinkServiceConnection(Model):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
group_ids = ListType(StringType, serialize_when_none=False)
private_link_service_connection_state = ModelType(PrivateLinkServiceConnectionState, serialize_when_none=False)
private_link_service_id = StringType(serialize_when_none=False)
provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
request_message = StringType(serialize_when_none=False)
type = StringType(serialize_when_none=False)
class CustomDnsConfigPropertiesFormat(Model):
fqdn = StringType(serialize_when_none=False)
ip_addresses = ListType(StringType, serialize_when_none=False)
class PrivateEndpointRef(Model):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
location = ModelType(ExtendedLocation, serialize_when_none=False)
extended_location = ModelType(ExtendedLocation, serialize_when_none=False)
name = StringType(serialize_when_none=False)
custom_dns_configs = ListType(ModelType(CustomDnsConfigPropertiesFormat), serialize_when_none=False)
manual_private_link_service_connections = ListType(ModelType(PrivateLinkServiceConnection),
serialize_when_none=False)
network_interfaces = ListType(StringType(), serialize_when_none=False) # Change to network interfaces id
private_link_service_connections = ListType(ModelType(PrivateLinkServiceConnection), serialize_when_none=False)
provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
subnet = StringType(serialize_when_none=False) # Change to subnet ID
tags = ModelType(Tags, serialize_when_none=False)
type = StringType(serialize_when_none=False)
class AutoApproval(Model):
subscriptions = ListType(StringType, serialize_when_none=False)
class PrivateLinkServiceIpConfiguration(Model):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
primary = BooleanType(serialize_when_none=False)
private_ip_address = StringType(serialize_when_none=False)
private_ip_address_version = StringType(choices=('IPv4', 'IPv6'), serialize_when_none=False)
private_ip_allocation_method = StringType(choices=('Dynamic', 'Static'), serialize_when_none=False)
provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
subnet = StringType(serialize_when_none=False) # Change to Subnet ID
type = StringType(serialize_when_none=False)
class InboundNatPool(Model):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
backend_port = IntType(serialize_when_none=False)
enable_floating_ip = BooleanType(serialize_when_none=False)
enable_tcp_reset = BooleanType(serialize_when_none=False)
frontend_ip_configuration = ModelType(SubResource, serialize_when_none=False)
frontend_port_range_end = IntType(serialize_when_none=False)
frontend_port_range_start = IntType(serialize_when_none=False)
idle_timeout_in_minutes = IntType(serialize_when_none=False)
protocol = StringType(choices=('All', 'Tcp', 'Udp'), serialize_when_none=False)
provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
type = StringType(serialize_when_none=False)
class ApplicationSecurityGroupRef(Model):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
location = ModelType(ExtendedLocation, serialize_when_none=False)
name = StringType(serialize_when_none=False)
provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
resource_guid = StringType(serialize_when_none=False)
tags = ModelType(Tags, serialize_when_none=False)
type = StringType(serialize_when_none=False)
class NetworkInterfaceIPConfigurationRef(Model): # ip configuration in a network interface
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
application_security_groups = ListType(ModelType(ApplicationSecurityGroupRef), serialize_when_none=False)
primary = BooleanType(serialize_when_none=False)
private_ip_address = StringType(serialize_when_none=False)
private_ip_address_version = StringType(choices=('IPv4', 'IPv6'), serialize_when_none=False)
private_ip_allocation_method = StringType(choices=('Dynamic', 'Static'), serialize_when_none=False)
private_link_connection_properties = ModelType(NetworkInterfaceIPConfigurationPrivateLinkConnectionProperties,
serialize_when_none=False)
provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
public_ip_address = StringType(default='', serialize_when_none=False) # Change Public IP Address to id
subnet = StringType(default='', serialize_when_none=False) # Change Subnet to id
virtual_network_taps = ListType(ModelType(SubResource), serialize_when_none=False)
class InboundNatRule(Model):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
backend_ip_configurations = ListType(ModelType(NetworkInterfaceIPConfigurationRef), serialize_when_none=False)
target_virtual_machine = ListType(StringType, serialize_when_none=False)
backend_port = IntType(serialize_when_none=False)
enable_floating_ip = BooleanType(serialize_when_none=False)
enable_tcp_reset = BooleanType(serialize_when_none=False)
frontend_ip_configuration = ModelType(SubResource, serialize_when_none=False)
frontend_ip_configuration_display = StringType(serialize_when_none=False)
frontend_port = IntType(serialize_when_none=False)
port_mapping_display = StringType(serialize_when_none=False)
idle_timeout_in_minutes = IntType(serialize_when_none=False)
protocol = StringType(choices=('All', 'Tcp', 'Udp'), serialize_when_none=False)
provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
type = StringType(serialize_when_none=False)
class LoadBalancingRule(Model):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
backend_address_pool = ModelType(SubResource, serialize_when_none=False)
backend_address_pool_display = StringType(serialize_when_none=False)
backend_port = IntType(serialize_when_none=False)
disable_outbound_s_nat = BooleanType(serialize_when_none=False)
enable_floating_ip = BooleanType(serialize_when_none=False)
enable_tcp_reset = BooleanType(serialize_when_none=False)
frontend_ip_configuration = ModelType(SubResource, serialize_when_none=False)
frontend_ip_configuration_display = StringType(serialize_when_none=False)
frontend_port = IntType(serialize_when_none=False)
idle_timeout_in_minutes = IntType(serialize_when_none=False)
load_distribution = StringType(choices=('Default', 'SourceIP', 'SourceIPProtocol'), serialize_when_none=False)
load_distribution_display = StringType(serialize_when_none=False)
probe = ModelType(SubResource, serialize_when_none=False)
protocol = StringType(choices=('All', 'Tcp', 'Udp'), serialize_when_none=False)
provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
type = StringType(serialize_when_none=False)
class OutboundRule(Model):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
allocated_outbound_ports = IntType(serialize_when_none=False)
backend_address_pool = ModelType(SubResource, serialize_when_none=False)
enable_tcp_reset = BooleanType(serialize_when_none=False)
frontend_ip_configurations = ListType(ModelType(SubResource), serialize_when_none=False)
idle_timeout_in_minutes = IntType(serialize_when_none=False)
protocol = StringType(choices=('All', 'Tcp', 'Udp'), serialize_when_none=False)
provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
type = StringType(serialize_when_none=False)
class FrontendIPConfiguration(Model):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
inbound_nat_pools = ListType(ModelType(InboundNatPool), serialize_when_none=False)
inbound_nat_rules = ListType(ModelType(InboundNatRule), serialize_when_none=False)
load_balancing_rules = ListType(ModelType(LoadBalancingRule), serialize_when_none=False)
outbound_rules = ListType(ModelType(OutboundRule), serialize_when_none=False)
private_ip_address = StringType(serialize_when_none=False)
private_ip_address_version = StringType(choices=('IPv4', 'IPv6'), serialize_when_none=False)
private_ip_allocation_method = StringType(choices=('Dynamic', 'Static'), serialize_when_none=False)
provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
public_ip_address = StringType(serialize_when_none=False)
public_ip_prefix = ModelType(SubResource, serialize_when_none=False)
subnet = StringType(serialize_when_none=False) # Change to Subnet ID
type = StringType(serialize_when_none=False)
zones = ListType(StringType, serialize_when_none=False)
class PrivateEndpointConnection(Model):
etag = StringType(serialize_when_none=False)
id = StringType()
name = StringType(serialize_when_none=False)
link_identifier = StringType(serialize_when_none=False)
private_endpoint = ModelType(PrivateEndpointRef)
private_link_service_connection_state = ModelType(PrivateLinkServiceConnectionState, serialize_when_none=False)
provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
type = StringType(serialize_when_none=False)
class Visibility(Model):
subscriptions = ListType(StringType, serialize_when_none=False)
class PrivateLinkService(Model):
etag = StringType(serialize_when_none=False)
id = StringType()
name = StringType(serialize_when_none=False)
extended_location = ModelType(ExtendedLocation, serialize_when_none=False)
alias = StringType(serialize_when_none=False)
auto_approval = ModelType(AutoApproval, serialize_when_none=False)
enable_proxy_protocol = BooleanType(serialize_when_none=False)
fqdns = ListType(StringType, serialize_when_none=False)
ip_configurations = ListType(ModelType(PrivateLinkServiceIpConfiguration), serialize_when_none=False)
loadBalancer_frontend_ip_configurations = ListType(ModelType(FrontendIPConfiguration), serialize_when_none=False)
network_interfaces = ListType(StringType, serialize_when_none=False) # Change to network interfaces' id
private_endpoint_connections = ListType(ModelType(PrivateEndpointConnection), serialize_when_none=False)
provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
visibility = ModelType(Visibility, serialize_when_none=False)
tags = ModelType(Tags, serialize_when_none=False)
type = StringType(serialize_when_none=False)
class NetworkInterfaceTapConfiguration(Model):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
type = StringType(serialize_when_none=False)
class NetworkInterface(Model):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
location = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
extended_location = ModelType(ExtendedLocation, serialize_when_none=False)
dns_settings = ModelType(NetworkInterfaceDnsSettings, serialize_when_none=False)
dscp_configuration = ModelType(SubResource, serialize_when_none=False)
enable_accelerated_networking = BooleanType(serialize_when_none=False)
enable_ip_forwarding = BooleanType(serialize_when_none=False)
hosted_workloads = ListType(StringType, serialize_when_none=False)
ip_configurations = ListType(ModelType(NetworkInterfaceIPConfiguration), serialize_when_none=False)
private_ip_address = StringType(serialize_when_none=False)
public_ip_address = StringType(serialize_when_none=False)
mac_address = StringType(serialize_when_none=False)
migration_phase = StringType(choices=('Abort', 'Commit', 'Committed', 'None', 'Prepare'), serialize_when_none=False)
nic_type = StringType(choices=('Elastic', 'Standard'), serialize_when_none=False)
network_security_group = ModelType(NetworkSecurityGroup, serialize_when_none=False)
primary = BooleanType(serialize_when_none=False)
private_endpoint = ModelType(PrivateEndpointRef, serialize_when_none=False)
private_link_service = ModelType(PrivateLinkService, serialize_when_none=False)
provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
resource_guid = StringType(serialize_when_none=False)
tap_configurations = ListType(ModelType(NetworkInterfaceTapConfiguration), serialize_when_none=False)
virtual_machine = ModelType(SubResource, serialize_when_none=False)
virtual_machine_display = StringType(default='-')
tags = ModelType(Tags, serialize_when_none=False)
type = StringType(serialize_when_none=False)
### Subnet Class ###
class ServiceEndpointPropertiesFormat(Model):
locations = ListType(StringType, serialize_when_none=False)
provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
service = StringType(serialize_when_none=False)
subnet = StringType(serialize_when_none=False)
class ApplicationGatewayIPConfiguration(Model):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
subnet = ModelType(SubResource, serialize_when_none=False)
type = StringType(serialize_when_none=False)
class Delegation(Model):
etag = StringType(serialize_when_none=False)
id = StringType()
name = StringType(default='-', serialize_when_none=False)
actions = ListType(StringType, serialize_when_none=False)
provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
service_name = StringType(serialize_when_none=False)
type = StringType(serialize_when_none=False)
class IPConfigurationProfile(Model):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
subnet = StringType(serialize_when_none=False) # Change to Subnet ID
type = StringType(serialize_when_none=False)
class AzureFirewallRCAction(Model):
type = StringType(choices=('Allow', 'Deny'), serialize_when_none=False)
class AzureFirewallApplicationRuleProtocol(Model):
port = IntType(serialize_when_none=False)
protocol_type = StringType(choices=('Http', 'Https', 'Mssql'), serialize_when_none=False)
class AzureFirewallApplicationRule(Model):
description = StringType(serialize_when_none=False)
fqdn_tags = ListType(StringType, serialize_when_none=False)
name = StringType(serialize_when_none=False)
protocols = ListType(ModelType(AzureFirewallApplicationRuleProtocol), serialize_when_none=False)
source_addresses = ListType(StringType, serialize_when_none=False)
source_ip_groups = ListType(StringType, serialize_when_none=False)
target_fqdns = ListType(StringType, serialize_when_none=False)
class AzureFirewallApplicationRuleCollection(Model):
etag = StringType()
id = StringType()
name = StringType(serialize_when_none=False)
action = ModelType(AzureFirewallRCAction, serialize_when_none=False)
priority = IntType(serialize_when_none=False)
provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
rules = ListType(ModelType(AzureFirewallApplicationRule), serialize_when_none=False)
class AzureFirewallPublicIPAddress(Model):
address = StringType(serialize_when_none=False)
class HubPublicIPAddresses(Model):
address = ListType(ModelType(AzureFirewallPublicIPAddress), serialize_when_none=False)
count = IntType(serialize_when_none=False)
class HubIPAddresses(Model):
private_ip_address = StringType(serialize_when_none=False)
public_ips = ModelType(HubPublicIPAddresses, serialize_when_none=False)
class AzureFirewallIPConfiguration(Model):
etag = StringType()
id = StringType()
name = StringType(serialize_when_none=False)
private_ip_address = StringType(serialize_when_none=False)
provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
public_ip_address = ModelType(SubResource, serialize_when_none=False)
subnet = ModelType(SubResource, serialize_when_none=False)
type = StringType(serialize_when_none=False)
class AzureFirewallIpGroups(Model):
change_number = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
class AzureFirewallNatRule(Model):
description = StringType(serialize_when_none=False)
destination_addresses = ListType(StringType, serialize_when_none=False)
destination_ports = ListType(StringType, serialize_when_none=False)
name = StringType(serialize_when_none=False)
protocols = ListType(StringType, serialize_when_none=False)
source_addresses = ListType(StringType, serialize_when_none=False)
source_ip_groups = ListType(StringType, serialize_when_none=False)
translated_address = StringType(serialize_when_none=False)
translated_fqdn = StringType(serialize_when_none=False)
translated_port = StringType(serialize_when_none=False)
class AzureFirewallNatRuleCollection(Model):
etag = StringType()
id = StringType()
name = StringType(serialize_when_none=False)
action = StringType(choices=('Dnat', 'Snat'), serialize_when_none=False)
priority = IntType(serialize_when_none=False)
provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
rules = ListType(ModelType(AzureFirewallNatRule), serialize_when_none=False)
class AzureFirewallNetworkRule(Model):
description = StringType(serialize_when_none=False)
destination_addresses = ListType(StringType, serialize_when_none=False)
destination_ports = ListType(StringType, serialize_when_none=False)
destination_fqdns = ListType(StringType, serialize_when_none=False)
destination_ip_groups = ListType(StringType, serialize_when_none=False)
name = StringType(serialize_when_none=False)
protocols = ListType(StringType, serialize_when_none=False)
source_addresses = ListType(StringType, serialize_when_none=False)
source_ip_groups = ListType(StringType, serialize_when_none=False)
translated_address = StringType(serialize_when_none=False)
translated_fqdn = StringType(serialize_when_none=False)
translated_port = StringType(serialize_when_none=False)
class AzureFirewallNetworkRuleCollection(Model):
etag = StringType()
id = StringType()
name = StringType(serialize_when_none=False)
action = ModelType(AzureFirewallRCAction, serialize_when_none=False)
priority = IntType(serialize_when_none=False)
provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
rules = ListType(ModelType(AzureFirewallNetworkRule), serialize_when_none=False)
class AzureFirewallSku(Model):
name = StringType(choices=('AZFW_Hub', 'AZFW_VNet'), serialize_when_none=False)
tier = StringType(choices=('Premium', 'Standard'), serialize_when_none=False)
class AzureFirewall(Model):
etag = StringType()
id = StringType()
location = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
subnet = StringType(serialize_when_none=False)
application_rule_collections = ListType(ModelType(AzureFirewallApplicationRuleCollection), serialize_when_none=False)
firewall_policy = ModelType(SubResource, serialize_when_none=False)
hub_ip_addresses = ModelType(HubIPAddresses, serialize_when_none=False)
ip_configurations = ListType(ModelType(AzureFirewallIPConfiguration), serialize_when_none=False)
ip_groups = ListType(ModelType(AzureFirewallIpGroups), serialize_when_none=False)
management_ip_configuration = ModelType(AzureFirewallIPConfiguration, serialize_when_none=False)
nat_rule_collections = ListType(ModelType(AzureFirewallNatRuleCollection), serialize_when_none=False)
network_rule_collections = ListType(ModelType(AzureFirewallNetworkRuleCollection), serialize_when_none=False)
provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
sku = ModelType(AzureFirewallSku, serialize_when_none=False)
threat_intel_mode = StringType(choices=('Alert', 'Deny', 'Off'), serialize_when_none=False)
virtual_hub = ModelType(SubResource, serialize_when_none=False)
tags = ModelType(Tags, serialize_when_none=False)
type = StringType(serialize_when_none=False)
zones = ListType(StringType, serialize_when_none=False)
class ResourceNavigationLink(Model):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
link = StringType(serialize_when_none=False)
linked_resource_type = StringType(serialize_when_none=False)
provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
type = StringType(serialize_when_none=False)
class Route(Model):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
address_prefix = StringType(serialize_when_none=False)
next_hop_ip_address = StringType(serialize_when_none=False)
next_hop_type = StringType(choices=('Internet', 'None', 'VirtualAppliance', 'VirtualNetworkGateway', 'VnetLocal'),
serialize_when_none=False)
provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
class RouteTable(Model):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
location = ModelType(ExtendedLocation, serialize_when_none=False)
disable_bgp_route_propagation = BooleanType(serialize_when_none=False)
provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
routes = ListType(ModelType(Route), serialize_when_none=False)
subnets = ListType(StringType, default=[], serialize_when_none=False)
tags = ModelType(Tags, serialize_when_none=False)
type = StringType(serialize_when_none=False)
class PrivateEndpoint(Model):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
location = ModelType(ExtendedLocation, serialize_when_none=False)
extended_location = ModelType(ExtendedLocation, serialize_when_none=False)
name = StringType(serialize_when_none=False)
custom_dns_configs = ListType(ModelType(CustomDnsConfigPropertiesFormat), serialize_when_none=False)
manual_private_link_service_connections = ListType(ModelType(PrivateLinkServiceConnection),
serialize_when_none=False)
network_interfaces = ListType(ModelType(NetworkInterface), serialize_when_none=False)
private_link_service_connections = ListType(ModelType(PrivateLinkServiceConnection), serialize_when_none=False)
provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
subnet = StringType(serialize_when_none=False)
resource_group = StringType(serialize_when_none=False)
tags = ModelType(Tags, serialize_when_none=False)
type = StringType(serialize_when_none=False)
class ServiceAssociationLink(Model):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
allow_delete = BooleanType(serialize_when_none=False)
link = StringType(serialize_when_none=False)
linked_resource_type = StringType(serialize_when_none=False)
locations = ListType(ModelType(ExtendedLocation), serialize_when_none=False)
provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
type = StringType(serialize_when_none=False)
class ServiceEndpointPolicyDefinition(Model):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
description = StringType(serialize_when_none=False)
provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
service = StringType(serialize_when_none=False)
service_resources = ListType(StringType)
class ServiceEndpointPolicy(Model):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
location = ModelType(ExtendedLocation, serialize_when_none=False)
provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
resource_guid = StringType(serialize_when_none=False)
service_endpoint_policy_definitions = ListType(ModelType(ServiceEndpointPolicyDefinition),
serialize_when_none=False)
subnets = ListType(StringType, serialize_when_none=False)
tags = ModelType(Tags, serialize_when_none=False)
type = StringType(serialize_when_none=False)
class Subnet(Model):
etag = StringType(serialize_when_none=False)
id = StringType()
virtual_network = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
address_prefix = StringType(serialize_when_none=False)
address_prefixes = ListType(StringType, serialize_when_none=False)
application_gateway_ip_configurations = ModelType(ApplicationGatewayIPConfiguration, serialize_when_none=False)
delegations = ListType(ModelType(Delegation), serialize_when_none=False)
ip_allocations = ListType(ModelType(SubResource), serialize_when_none=False)
ip_configuration_profiles = ListType(ModelType(IPConfigurationProfile), serialize_when_none=False)
ip_configurations = ListType(ModelType(IPConfiguration), serialize_when_none=False)
azure_firewall = ListType(ModelType(AzureFirewall), serialize_when_none=False)
nat_gateway = ModelType(SubResource, serialize_when_none=False)
network_security_group = ModelType(NetworkSecurityGroup, serialize_when_none=False)
private_endpoint_network_policies = StringType(choices=('Disabled', 'Enabled'), serialize_when_none=False)
private_endpoints = ListType(ModelType(PrivateEndpoint), serialize_when_none=False)
private_link_service_network_policies = StringType(choices=('Disabled', 'Enabled'), serialize_when_none=False)
provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
purpose = StringType(serialize_when_none=False)
    resource_navigation_links = ListType(ModelType(ResourceNavigationLink), serialize_when_none=False)
route_table = ModelType(RouteTable, serialize_when_none=False)
service_association_links = ListType(ModelType(ServiceAssociationLink), serialize_when_none=False)
service_endpoint_policies = ListType(ModelType(ServiceEndpointPolicy), serialize_when_none=False)
service_endpoints = ListType(ModelType(ServiceEndpointPropertiesFormat), serialize_when_none=False)
type = StringType(serialize_when_none=False)
class NetworkSecurityGroup(Model):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
location = StringType(serialize_when_none=False)
resource_group = StringType(serialize_when_none=False)
name = StringType(default='-', serialize_when_none=False)
subscription_id = StringType(serialize_when_none=False)
subscription_name = StringType(serialize_when_none=False)
default_security_rules = ListType(ModelType(SecurityRule), serialize_when_none=False)
inbound_security_rules = ListType(ModelType(SecurityRule), serialize_when_none=False)
outbound_security_rules = ListType(ModelType(SecurityRule), serialize_when_none=False)
flow_logs = ListType(ModelType(FlowLog), serialize_when_none=False)
network_interfaces = ListType(ModelType(NetworkInterface), serialize_when_none=False)
provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
resource_guid = StringType(serialize_when_none=False)
security_rules = ListType(ModelType(SecurityRule), serialize_when_none=False)
subnets = ListType(ModelType(Subnet), serialize_when_none=False)
virtual_machines_display = StringType(serialize_when_none=False)
tags = ModelType(Tags, serialize_when_none=False)
type = StringType(serialize_when_none=False)
def reference(self):
return {
"resource_id": self.id,
"external_link": f"https://portal.azure.com/#@.onmicrosoft.com/resource{self.id}/overview",
}
```
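The model keeps `inbound_security_rules` and `outbound_security_rules` next to the raw `security_rules`; a hedged sketch of splitting rules by their `direction` before building the model (field values are made up):
```python
# Partitioning security rules into the inbound/outbound lists used above.
raw_rules = [
    {'name': 'allow-ssh', 'direction': 'Inbound', 'access': 'Allow', 'priority': 100},
    {'name': 'deny-out', 'direction': 'Outbound', 'access': 'Deny', 'priority': 200},
]
nsg = NetworkSecurityGroup({
    'name': 'my-nsg',
    'security_rules': raw_rules,
    'inbound_security_rules': [r for r in raw_rules if r['direction'] == 'Inbound'],
    'outbound_security_rules': [r for r in raw_rules if r['direction'] == 'Outbound'],
}, strict=False)
print(len(nsg.inbound_security_rules), len(nsg.outbound_security_rules))   # 1 1
```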
#### File: test/manager/test_cosmos_db_manager.py
```python
import unittest
import time
import os
from datetime import datetime, timedelta
from unittest.mock import patch
from spaceone.core.unittest.result import print_data
from spaceone.core.unittest.runner import RichTestRunner
from spaceone.core import config
from spaceone.core.transaction import Transaction
from spaceone.core import utils
from spaceone.inventory.error import *
from spaceone.inventory.connector.cosmos_db import CosmosDBConnector
from spaceone.inventory.manager.cosmos_db_manager import CosmosDBManager
class TestCosmosDBManager(unittest.TestCase):
@classmethod
def setUpClass(cls):
config.init_conf(package='spaceone.inventory')
config_path = os.environ.get('TEST_CONFIG')
test_config = utils.load_yaml_from_file(config_path)
cls.schema = 'azure_client_secret'
cls.azure_credentials = test_config.get('AZURE_CREDENTIALS', {})
cls.cosmos_db_connector = CosmosDBConnector(transaction=Transaction(), config={}, secret_data=cls.azure_credentials)
cls.cosmos_db_manager = CosmosDBManager(Transaction())
super().setUpClass()
@classmethod
def tearDownClass(cls) -> None:
super().tearDownClass()
def test_collect_cloud_service(self, *args):
secret_data = self.azure_credentials
subscription_info = {
'subscription_id': '3ec64e1e-1ce8-4f2c-82a0-a7f6db0899ca',
'subscription_name': 'Azure subscription 1',
'tenant_id': '35f43e22-0c0b-4ff3-90aa-b2c04ef1054c'
}
params = {'options': {}, 'secret_data': secret_data, 'filter': {}, 'subscription_info': subscription_info}
cosmos_dbs = self.cosmos_db_manager.collect_cloud_service(params)
for cosmos_db in cosmos_dbs:
print(cosmos_db.to_primitive())
if __name__ == "__main__":
unittest.main(testRunner=RichTestRunner)
``` |
{
"source": "jihyungSong/plugin-azure-power-state",
"score": 2
} |
#### File: inventory/libs/manager.py
```python
from spaceone.core.manager import BaseManager
from spaceone.inventory.libs.connector import AzureConnector
class AzureManager(BaseManager):
connector_name = None
cloud_service_types = []
response_schema = None
collected_region_codes = []
def verify(self, options, secret_data, **kwargs):
""" Check collector's status.
"""
connector: AzureConnector = self.locator.get_connector('AzureConnector', secret_data=secret_data)
connector.verify()
def collect_power_state(self, params) -> list:
        raise NotImplementedError
def collect_resources(self, params) -> list:
return self.collect_power_state(params)
def list_all_resource_groups(self, secret_data):
connector: AzureConnector = self.locator.get_connector('AzureConnector')
connector.set_connect(secret_data)
return connector.list_resource_groups()
```
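Concrete managers only have to implement `collect_power_state`; `collect_resources` delegates to it. A minimal subclass sketch (the connector name is hypothetical, not part of this plugin):
```python
# Template-method usage of AzureManager; 'VmPowerStateConnector' is hypothetical.
from spaceone.inventory.libs.manager import AzureManager

class VmPowerStateManager(AzureManager):
    connector_name = 'VmPowerStateConnector'

    def collect_power_state(self, params) -> list:
        # A real implementation would query Azure and return resource responses.
        return []
```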
#### File: inventory/model/virtual_machine.py
```python
import logging
from schematics import Model
from schematics.types import ModelType, StringType, ListType, DictType
from spaceone.inventory.libs.schema.cloud_service import CloudServiceResource, CloudServiceResponse
_LOGGER = logging.getLogger(__name__)
class Compute(Model):
instance_id = StringType()
instance_state = StringType(choices=('STARTING', 'RUNNING', 'STOPPING', 'STOPPED', 'DEALLOCATING', 'DEALLOCATED'))
class PowerState(Model):
status = StringType(choices=('RUNNING', 'STOPPED', 'UNHEALTHY'))
class Server(Model):
compute = ModelType(Compute)
power_state = ModelType(PowerState, serialize_when_none=False)
def reference(self):
return {
"resource_id": self.compute.instance_id,
}
class VirtualMachineResource(CloudServiceResource):
cloud_service_group = StringType(default='Compute')
cloud_service_type = StringType(default='VirtualMachine')
data = ModelType(Server)
class VirtualMachineResponse(CloudServiceResponse):
match_rules = DictType(ListType(StringType), default={'1': ['reference.resource_id']})
resource_type = StringType(default='inventory.Server')
resource = ModelType(VirtualMachineResource)
``` |
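A quick sketch of the payload shape these models expect (values are made up):
```python
# Building the power-state payload defined above.
server = Server({
    'compute': {'instance_id': 'vm-0000', 'instance_state': 'RUNNING'},
    'power_state': {'status': 'RUNNING'},
})
print(server.reference())   # {'resource_id': 'vm-0000'}
```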
{
"source": "jihyungSong/plugin-azure-vm-inven-collector",
"score": 3
} |
#### File: inventory/libs/utils.py
```python
import yaml
def get_data_from_yaml(file_path):
with open(file_path) as f:
        data = yaml.load(f, Loader=yaml.FullLoader)
    return data
```
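A tiny usage example with a throwaway file; `yaml.safe_load` would be the stricter choice if the YAML comes from untrusted sources:
```python
# Example usage of the helper above.
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.yaml', delete=False) as f:
    f.write('region: koreacentral\nretries: 3\n')
print(get_data_from_yaml(f.name))   # {'region': 'koreacentral', 'retries': 3}
```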
#### File: model/metadata/metadata.py
```python
from schematics import Model
from schematics.types import ListType, ModelType, PolyModelType
from spaceone.inventory.model.metadata.metadata_dynamic_layout import BaseLayoutField, QuerySearchTableDynamicLayout
from spaceone.inventory.model.metadata.metadata_dynamic_search import BaseDynamicSearch, BaseDynamicSearchItem
from spaceone.inventory.model.metadata.metadata_dynamic_widget import BaseDynamicWidget
class MetaDataViewTable(Model):
layout = PolyModelType(BaseLayoutField)
class MetaDataViewSubData(Model):
layouts = ListType(PolyModelType(BaseLayoutField))
class MetaDataView(Model):
table = PolyModelType(MetaDataViewTable, serialize_when_none=False)
sub_data = PolyModelType(MetaDataViewSubData, serialize_when_none=False)
search = ListType(PolyModelType(BaseDynamicSearch), serialize_when_none=False)
widget = ListType(PolyModelType(BaseDynamicWidget), serialize_when_none=False)
class ServerMetadata(Model):
view = ModelType(MetaDataView)
@classmethod
def set_layouts(cls, layouts=[]):
sub_data = MetaDataViewSubData({'layouts': layouts})
return cls({'view': MetaDataView({'sub_data': sub_data})})
class CloudServiceTypeMetadata(Model):
view = ModelType(MetaDataView)
@classmethod
def set_fields(cls, name='', fields=[]):
_table = MetaDataViewTable({'layout': QuerySearchTableDynamicLayout.set_fields(name, fields)})
return cls({'view': MetaDataView({'table': _table})})
@classmethod
def set_meta(cls, name='', fields=[], search=[], widget=[]):
table_meta = MetaDataViewTable({'layout': QuerySearchTableDynamicLayout.set_fields(name, fields)})
return cls({'view': MetaDataView({'table': table_meta, 'search': search, 'widget': widget})})
```
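The classmethods above just wrap layouts and fields into the nested view structure; a minimal sketch with an empty layout list shows the resulting shape:
```python
# Shape produced by ServerMetadata.set_layouts (real callers pass layout objects).
meta = ServerMetadata.set_layouts([])
print(meta.to_primitive())   # roughly {'view': {'sub_data': {'layouts': []}}}
```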
#### File: test/api/test_collector.py
```python
import os
import unittest
import json
from spaceone.core.unittest.result import print_data
from spaceone.core.unittest.runner import RichTestRunner
from spaceone.core import config
from spaceone.core import utils
from spaceone.core.transaction import Transaction
from spaceone.tester import TestCase, print_json
class TestCollector(TestCase):
@classmethod
def setUpClass(cls):
azure_cred = os.environ.get('AZURE_CRED')
test_config = utils.load_yaml_from_file(azure_cred)
cls.schema = 'azure_client_secret'
cls.azure_credentials = test_config.get('AZURE_CREDENTIALS', {})
super().setUpClass()
def test_init(self):
v_info = self.inventory.Collector.init({'options': {}})
print_json(v_info)
def test_verify(self):
options = {
}
v_info = self.inventory.Collector.verify({'options': options, 'secret_data': self.azure_credentials})
print_json(v_info)
def test_collect(self):
options = {}
filter = {}
resource_stream = self.inventory.Collector.collect({'options': options, 'secret_data': self.azure_credentials, 'filter': filter})
# print(f'resource_stream: {resource_stream}')
for res in resource_stream:
print_json(res)
if __name__ == "__main__":
unittest.main(testRunner=RichTestRunner)
``` |
{
"source": "jihyungSong/plugin-azure-vm",
"score": 2
} |
#### File: inventory/service/collector_service.py
```python
import time
import logging
import concurrent.futures
from spaceone.core.service import *
from spaceone.inventory.manager.collector_manager import CollectorManager
_LOGGER = logging.getLogger(__name__)
FILTER_FORMAT = [
{
'key': 'project_id',
'name': 'Project ID',
'type': 'str',
'resource_type': 'SERVER',
'search_key': 'identity.Project.project_id',
'change_rules': [{
'resource_key': 'data.compute.instance_id',
'change_key': 'instance_id'
}, {
'resource_key': 'data.compute.region',
'change_key': 'region_name'
}]
}, {
'key': 'collection_info.service_accounts',
'name': 'Service Account ID',
'type': 'str',
'resource_type': 'SERVER',
'search_key': 'identity.ServiceAccount.service_account_id',
'change_rules': [{
'resource_key': 'data.compute.instance_id',
'change_key': 'instance_id'
}, {
'resource_key': 'data.compute.region',
'change_key': 'region_name'
}]
}, {
'key': 'server_id',
'name': 'Server ID',
'type': 'list',
'resource_type': 'SERVER',
'search_key': 'inventory.Server.server_id',
'change_rules': [{
'resource_key': 'data.compute.instance_id',
'change_key': 'instance_id'
}, {
'resource_key': 'data.compute.region',
'change_key': 'region_name'
}]
}, {
'key': 'instance_id',
'name': 'Instance ID',
'type': 'list',
'resource_type': 'CUSTOM'
},
{
'key': 'region_name',
'name': 'Region',
'type': 'list',
'resource_type': 'CUSTOM'
}
]
SUPPORTED_FEATURES = ['garbage_collection']
SUPPORTED_RESOURCE_TYPE = ['inventory.Server', 'inventory.Region']
NUMBER_OF_CONCURRENT = 20
@authentication_handler
class CollectorService(BaseService):
def __init__(self, metadata):
super().__init__(metadata)
self.collector_manager: CollectorManager = self.locator.get_manager('CollectorManager')
@transaction
@check_required(['options'])
def init(self, params):
""" init plugin by options
"""
capability = {
'filter_format': FILTER_FORMAT,
'supported_resource_type': SUPPORTED_RESOURCE_TYPE,
'supported_features': SUPPORTED_FEATURES
}
return {'metadata': capability}
@transaction
@check_required(['options', 'secret_data'])
def verify(self, params):
""" verify options capability
Args:
params
- options
- secret_data: may be empty dictionary
Returns:
Raises:
ERROR_VERIFY_FAILED:
"""
manager = self.locator.get_manager('CollectorManager')
secret_data = params['secret_data']
options = params.get('options', {})
active = manager.verify(options, secret_data)
return {}
@transaction
@check_required(['options', 'secret_data', 'filter'])
def list_resources(self, params):
""" Get quick list of resources
Args:
params:
- options
- secret_data
- filter
Returns: list of resources
"""
start_time = time.time()
resource_regions = []
collected_region_code = []
server_resource_format = {'resource_type': 'inventory.Server',
'match_rules': {'1': ['reference.resource_id']}}
region_resource_format = {'resource_type': 'inventory.Region',
'match_rules': {'1': ['region_code', 'provider']}}
cloud_service_type_resource_format = {'resource_type': 'inventory.CloudServiceType',
'match_rules': {'1': ['name', 'group', 'provider']}}
for cloud_service_type in self.collector_manager.list_cloud_service_types():
yield cloud_service_type, cloud_service_type_resource_format
resource_groups = self.collector_manager.list_all_resource_groups(params)
mt_params = []
for rg in resource_groups:
            vms = list(self.collector_manager.list_vms(params, rg.name))
            if vms:
mt_params.append({
'secret_data': params['secret_data'],
'resource_group': rg,
'vms': vms
})
if mt_params:
with concurrent.futures.ThreadPoolExecutor(max_workers=NUMBER_OF_CONCURRENT) as executor:
future_executors = []
for mt_param in mt_params:
future_executors.append(executor.submit(self.collector_manager.list_resources, mt_param))
for future in concurrent.futures.as_completed(future_executors):
for result in future.result():
collected_region = self.collector_manager.get_region_from_result(result)
if collected_region is not None and collected_region.region_code not in collected_region_code:
resource_regions.append(collected_region)
collected_region_code.append(collected_region.region_code)
yield result, server_resource_format
for resource_region in resource_regions:
yield resource_region, region_resource_format
print(f'############## TOTAL FINISHED {time.time() - start_time} Sec ##################')
``` |
{
"source": "jihyungSong/plugin-email-notification-protocol",
"score": 2
} |
#### File: notification/connector/smtp.py
```python
import smtplib
import markdown
import logging
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from spaceone.core.connector import BaseConnector
from spaceone.notification.conf.email_conf import *
__all__ = ['SMTPConnector']
_LOGGER = logging.getLogger(__name__)
class SMTPConnector(BaseConnector):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.smtp = None
def set_smtp(self, host, port, user, password):
self.smtp = smtplib.SMTP(host, port)
self.smtp.connect(host, port)
self.smtp.ehlo()
self.smtp.starttls()
self.smtp.ehlo()
self.smtp.login(user, password)
def send_email(self, mail_list, subject, messages, mark_down=None):
multipart_msg = MIMEMultipart("alternative")
multipart_msg["Subject"] = subject
multipart_msg["From"] = SENDER_EMAIL_ADDR
multipart_msg["To"] = mail_list
if mark_down:
contents = markdown.markdown(mark_down)
else:
contents = messages
multipart_msg.attach(MIMEText(contents, 'html'))
self.smtp.sendmail(SENDER_EMAIL_ADDR, mail_list.split(','), multipart_msg.as_string())
self.smtp.quit()
```
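A hedged usage sketch of the connector above; all connection values are placeholders, and in the plugin the connector is normally obtained through the locator rather than instantiated directly.
```python
# Illustrative only: host, port, credentials and recipients are placeholders.
# Assumes BaseConnector can be constructed without arguments in this core version;
# inside the plugin it would usually come from self.locator.get_connector('SMTPConnector').
smtp_connector = SMTPConnector()
smtp_connector.set_smtp('smtp.example.com', 587, 'no-reply@example.com', '<SMTP_PASSWORD>')
smtp_connector.send_email(
    'user1@example.com,user2@example.com',   # comma-separated; send_email splits on ',' for sendmail
    'Test subject',
    'Plain text body',
    mark_down='# Alert\n- rendered to HTML because mark_down is given'
)
```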
#### File: notification/service/notification_service.py
```python
import os
import logging
import time
from jinja2 import Environment, FileSystemLoader
from spaceone.core import utils
from spaceone.core.service import *
from spaceone.notification.manager.notification_manager import NotificationManager
from spaceone.notification.conf.email_conf import *
_LOGGER = logging.getLogger(__name__)
@authentication_handler
class NotificationService(BaseService):
def __init__(self, metadata):
super().__init__(metadata)
@transaction
@check_required(['options', 'message', 'notification_type'])
def dispatch(self, params):
"""
Args:
params:
- options
- message
- title
- markdown
- description
- tags (list)
- key
- value
- options
- callbacks (list)
- url
- label
- options
                    - occurred_at
- notification_type
- secret_data:
- smtp_host
- smtp_port
- user
- password
- channel_data
- email
"""
secret_data = params.get('secret_data', {})
channel_data = params.get('channel_data', {})
notification_type = params['notification_type']
params_message = params['message']
title = params_message['title']
contents = self.make_contents(params_message, notification_type)
smtp_host = secret_data.get('smtp_host', DEFAULT_SMTP_SERVER)
smtp_port = secret_data.get('smtp_port', DEFAULT_SMTP_PORT)
user = secret_data.get('user', DEFAULT_SMTP_USER)
password = secret_data.get('password', DEFAULT_SMTP_PASSWORD)
email_list = channel_data.get('email')
noti_mgr: NotificationManager = self.locator.get_manager('NotificationManager')
noti_mgr.dispatch(smtp_host, smtp_port, user, password, email_list, title, contents)
def make_contents(self, message, notification_type):
env = Environment(loader=FileSystemLoader(searchpath="/"))
template = env.get_template(self.get_html_template_path())
template_kargs = {
'notification_type_color': self.get_notification_type_color(notification_type),
'title': message.get('title', ''),
'description': message.get('description', ''),
'tags': message.get('tags', []),
'callbacks': message.get('callbacks', [])
}
if 'link' in message:
template_kargs.update({'link': message['link']})
if 'image_url' in message:
template_kargs.update({'image_url': message['image_url']})
if 'occurred_at' in message:
if occurred_at := self.convert_occured_at(message['occurred_at']):
template_kargs.update({'occurred_at': occurred_at})
return template.render(**template_kargs)
@staticmethod
def get_html_template_path():
full_path = os.path.split(__file__)[0]
split_dir = full_path.split('/')[:-1]
split_dir.append('templates')
split_dir[0] = '/' # root directory
return os.path.join(*split_dir, 'notification_template.html')
@staticmethod
def get_notification_type_color(notification_type):
return NOTIFICATION_TYPE_COLOR_MAP.get(notification_type, NOTIFICATION_TYPE_DEFAULT_COLOR)
@staticmethod
def convert_occured_at(occured_at):
if dt := utils.iso8601_to_datetime(occured_at):
return dt.strftime("%B %d, %Y %H:%M %p (UTC)")
return None
``` |
{
"source": "jihyungSong/plugin-google-cloud-compute",
"score": 2
} |
#### File: inventory/connector/google_cloud_compute_connector.py
```python
__all__ = ["GoogleCloudComputeConnector"]
import logging
import os
import google.oauth2.service_account
import googleapiclient
import googleapiclient.discovery
from spaceone.core.connector import BaseConnector
from pprint import pprint
_LOGGER = logging.getLogger(__name__)
INSTANCE_TYPE_FILE = '%s/conf/%s' % (os.path.dirname(os.path.abspath(__file__)), 'instances.json')
class GoogleCloudComputeConnector(BaseConnector):
def __init__(self, transaction=None, config=None):
self.client = None
self.project_id = None
def verify(self, options, secret_data):
self.get_connect(secret_data)
return "ACTIVE"
def get_connect(self, secret_data):
"""
cred(dict)
- type: ..
- project_id: ...
- token_uri: ...
- ...
"""
try:
self.project_id = secret_data.get('project_id')
credentials = google.oauth2.service_account.Credentials.from_service_account_info(secret_data)
self.client = googleapiclient.discovery.build('compute', 'v1', credentials=credentials)
except Exception as e:
            _LOGGER.error(f'[get_connect] Connection failed: {e}')
            raise ConnectionError('Connection failed. Please check your authentication information.')
def list_regions(self):
result = self.client.regions().list(project=self.project_id).execute()
return result.get('items', [])
def list_zones(self):
result = self.client.zones().list(project=self.project_id).execute()
return result.get('items', [])
def list_instances(self, **query):
status_filter = {'key': 'status', 'values': ['PROVISIONING', 'STAGING', 'RUNNING', 'STOPPING', 'REPAIRING', 'SUSPENDING', 'SUSPENDED', 'TERMINATED']}
if 'filter' in query:
query.get('filter').append(status_filter)
else:
query.update({'filter': [status_filter]})
query = self.generate_key_query('filter', self._get_filter_to_params(**query), '', is_default=True, **query)
result = self.client.instances().list(**query).execute()
compute_instances = result.get('items', [])
return compute_instances
def list_machine_types(self, **query):
query = self.generate_query(**query)
result = self.client.machineTypes().list(**query).execute()
instance_types = result.get('items', [])
return instance_types
def list_url_maps(self, **query):
query = self.generate_query(**query)
response = self.client.urlMaps().list(**query).execute()
url_map = response.get('items', [])
return url_map
    def list_backend_svcs(self, **query):
        query = self.generate_query(**query)
        response = self.client.backendServices().list(**query).execute()
        backend_services = response.get('items', [])
        return backend_services
def list_disk(self, **query):
query = self.generate_query(**query)
response = self.client.disks().list(**query).execute()
disks = response.get('items', [])
return disks
def list_disk_types(self, **query):
query = self.generate_query(**query)
response = self.client.diskTypes().list(**query).execute()
disks_types = response.get('items', [])
return disks_types
def list_auto_scalers(self, **query):
query = self.generate_query(**query)
response = self.client.autoscalers().list(**query).execute()
auto_scaler = response.get('items', [])
return auto_scaler
def list_firewalls(self, **query):
query = self.generate_query(**query)
response = self.client.firewalls().list(**query).execute()
firewall = response.get('items', [])
return firewall
    def list_public_images(self, **query):
        response = self.client.images().list(**query).execute()
        public_images = response.get('items', [])
        return public_images
    def list_images(self, **query):
        query = self.generate_query(**query)
        response = self.client.images().list(**query).execute()
        images = response.get('items', [])
        return images
    def list_instance_groups(self, **query):
        query = self.generate_query(**query)
        response = self.client.instanceGroups().list(**query).execute()
        instance_groups = response.get('items', [])
        return instance_groups
def list_instance_from_instance_groups(self, instance_group_name, zone, **query):
query = self.generate_query(**query)
query.update({
'instanceGroup': instance_group_name,
'zone': zone
})
response = self.client.instanceGroups().listInstances(**query).execute()
instance_list = response.get('items', [])
return instance_list
    def list_instance_group_managers(self, **query):
        query = self.generate_query(**query)
        response = self.client.instanceGroupManagers().list(**query).execute()
        instance_group_managers = response.get('items', [])
        return instance_group_managers
def list_vpcs(self, **query):
query = self.generate_query(**query)
response = self.client.networks().list(**query).execute()
return response.get('items', [])
def list_subnets(self, **query):
query = self.generate_query(**query)
response = self.client.subnetworks().list(**query).execute()
return response.get('items', [])
def list_region_url_maps(self, **query):
query = self.generate_query(**query)
response = self.client.regionUrlMaps().list(**query).execute()
return response.get('items', [])
def list_region_backend_svcs(self, **query):
query = self.generate_query(**query)
response = self.client.regionBackendServices().list(**query).execute()
return response.get('items', [])
def list_target_pools(self, **query):
query = self.generate_query(**query)
response = self.client.targetPools().list(**query).execute()
return response.get('items', [])
def list_forwarding_rules(self, **query):
query = self.generate_query(**query)
response = self.client.forwardingRules().list(**query).execute()
return response.get('items', [])
def set_instance_into_instance_group_managers(self, instance_group_managers, zone):
for instance_group in instance_group_managers:
instance_group_name = instance_group.get('baseInstanceName', '')
inst_list = self.list_instance_from_instance_groups(instance_group_name, zone)
instance_group.update({
'instance_list': inst_list
})
def _get_filter_to_params(self, **query):
filtering_list = []
filters = query.get('filter', None)
if filters and isinstance(filters, list):
for single_filter in filters:
filter_key = single_filter.get('key', '')
filter_values = single_filter.get('values', [])
filter_str = self._get_full_filter_string(filter_key, filter_values)
if filter_str != '':
filtering_list.append(filter_str)
return ' AND '.join(filtering_list)
def generate_query(self, **query):
query.update({
'project': self.project_id,
})
return query
def generate_key_query(self, key, value, delete, is_default=False, **query):
if is_default:
if delete != '':
query.pop(delete, None)
query.update({
key: value,
'project': self.project_id
})
return query
@staticmethod
    def get_region(zone):
        # e.g. 'us-central1-a' -> 'us-central1'
        return zone.rsplit('-', 1)[0] if '-' in zone else ''
@staticmethod
def _get_full_filter_string(filter_key, filter_values):
filter_string = ''
if filter_key != '' and filter_values != [] and isinstance(filter_values, list):
single_filter_list = [f'{filter_key}={x}' for x in filter_values]
join_string = ' OR '.join(single_filter_list)
filter_string = f'({join_string})'
elif filter_key != '' and filter_values != [] and not isinstance(filter_values, dict):
filter_string = f'({filter_key}={filter_values})'
return filter_string
``` |
{
"source": "jihyungSong/plugin-google-cloud-stackdriver",
"score": 2
} |
#### File: monitoring/info/metric_info.py
```python
import functools
from spaceone.api.monitoring.plugin import metric_pb2
from spaceone.api.core.v1 import plugin_pb2
from spaceone.core.pygrpc.message_type import *
__all__ = ['PluginMetricsResponse', 'PluginMetricDataResponse']
def PluginAction(action):
info = {
'method': action['method'],
}
if 'options' in action:
info['options'] = change_struct_type(action['options'])
return plugin_pb2.PluginAction(**info)
def MetricInfo(metric):
info = {
'key': metric['key'],
'name': metric['name'],
'unit': change_struct_type(metric['unit']),
'chart_type': metric['chart_type']
}
if 'chart_options' in metric:
info.update({
'chart_options': change_struct_type(metric['chart_options'])
})
return metric_pb2.MetricInfo(**info)
def MetricsInfo(result):
info = {
'metrics': [MetricInfo(metric) for metric in result['metrics']]
}
return metric_pb2.MetricsInfo(**info)
def PluginMetricsResponse(response):
info = {
'resource_type': response['resource_type'],
'result': MetricsInfo(response['result'])
}
if response.get('actions'):
        info['actions'] = [PluginAction(action) for action in response.get('actions', [])]
return metric_pb2.PluginMetricsResponse(**info)
def MetricDataInfo(result):
info = {
'labels': change_list_value_type(result['labels']),
'values': change_list_value_type(result['values'])
}
return metric_pb2.MetricDataInfo(**info)
def PluginMetricDataResponse(response):
info = {
'resource_type': response['resource_type'],
'result': MetricDataInfo(response['result'])
}
if response.get('actions'):
        info['actions'] = [PluginAction(action) for action in response.get('actions', [])]
return metric_pb2.PluginMetricDataResponse(**info)
```
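For reference, a sketch of the plain-dict responses the builder functions above expect; the resource type, metric keys and values are illustrative only.
```python
# Illustrative inputs for the builders above (all values are placeholders).
metrics_response = {
    'resource_type': 'monitoring.Metric',
    'result': {
        'metrics': [{
            'key': 'cpu_utilization',
            'name': 'CPU Utilization',
            'unit': {'x': 'Timestamp', 'y': '%'},
            'chart_type': 'line',
            'chart_options': {}
        }]
    }
}
metric_data_response = {
    'resource_type': 'monitoring.Metric',
    'result': {
        'labels': ['2021-08-27T12:00:00Z', '2021-08-27T12:05:00Z'],
        'values': [1.2, 3.4]
    }
}
metrics_info = PluginMetricsResponse(metrics_response)
metric_data_info = PluginMetricDataResponse(metric_data_response)
```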
#### File: monitoring/manager/google_cloud_manager.py
```python
import logging
import time
from spaceone.core.manager import BaseManager
from spaceone.monitoring.error import *
from spaceone.monitoring.connector.google_cloud_connector import GoogleCloudConnector
_LOGGER = logging.getLogger(__name__)
_STAT_MAP = {
'MEAN': 'ALIGN_MEAN',
'MAX': 'ALIGN_MAX',
'MIN': 'ALIGN_MIN',
'SUM': 'ALIGN_SUM'
}
class GoogleCloudManager(BaseManager):
google_cloud_connector = None
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.google_cloud_connector: GoogleCloudConnector = self.locator.get_connector('GoogleCloudConnector')
def verify(self, schema, options, secret_data):
""" Check connection
"""
self.google_cloud_connector.set_connect(schema, options, secret_data)
def set_connector(self, schema, secret_data):
self.google_cloud_connector.set_connect(schema, {}, secret_data)
def list_metrics(self, schema, options, secret_data, resource):
resource_type, filters = self._get_metric_filters(resource)
self.google_cloud_connector.set_connect(schema, options, secret_data)
return self.google_cloud_connector.list_metrics(filters)
def get_metric_data(self, schema, options, secret_data, resource, metric, start, end, period, stat):
if period is None:
period = self._make_period_from_time_range(start, end)
stat = self._convert_stat(stat)
self.google_cloud_connector.set_connect(schema, options, secret_data)
return self.google_cloud_connector.get_metric_data(resource, metric, start, end, period, stat)
@staticmethod
def _get_metric_filters(resource):
return resource.get('type', None), resource.get('filters', [])
@staticmethod
def _convert_stat(stat):
if stat is None:
stat = 'ALIGN_MEAN'
if stat not in _STAT_MAP.keys():
raise ERROR_NOT_SUPPORT_STAT(supported_stat=' | '.join(_STAT_MAP.keys()))
return _STAT_MAP[stat]
@staticmethod
def _make_period_from_time_range(start, end):
start_time = int(time.mktime(start.timetuple()))
end_time = int(time.mktime(end.timetuple()))
time_delta = end_time - start_time
interval = 0
# Max 60 point in start and end time range
if time_delta <= 60*60: # ~ 1h
interval = 60
elif time_delta <= 60*60*6: # 1h ~ 6h
interval = 60*10
elif time_delta <= 60*60*12: # 6h ~ 12h
interval = 60*20
elif time_delta <= 60*60*24: # 12h ~ 24h
interval = 60*30
        elif time_delta <= 60*60*24*3:  # 1d ~ 3d
interval = 60*60
elif time_delta <= 60*60*24*7: # 3d ~ 7d
interval = 60*60*3
elif time_delta <= 60*60*24*14: # 1w ~ 2w
interval = 60*60*6
        elif time_delta <= 60*60*24*28:  # 2w ~ 4w
interval = 60*60*12
else: # 4w ~
interval = 60*60*24
return str(interval)+'s'
@staticmethod
def _get_chart_info(namespace, dimensions, metric_name):
return 'line', {}
``` |
{
"source": "jihyungSong/plugin-megazone-sms-notification-protocol",
"score": 2
} |
#### File: notification/manager/notification_manager.py
```python
from spaceone.core.manager import BaseManager
from spaceone.notification.manager.megazone_sms_manager import MegazoneSMSManager
class NotificationManager(BaseManager):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def dispatch(self, access_key, secret_key, title, body, to, **kwargs):
mz_sms_mgr: MegazoneSMSManager = self.locator.get_manager('MegazoneSMSManager')
mz_sms_mgr.set_connector(access_key, secret_key)
mz_sms_mgr.request_send_sms(title, body, to, **kwargs)
``` |
{
"source": "jihyungSong/plugin-oracle-cloud-services",
"score": 3
} |
#### File: model/autonomous_database/data.py
```python
from schematics import Model
from schematics.types import ModelType, ListType, StringType, FloatType, DateTimeType, IntType, BooleanType
class Tags(Model):
key = StringType()
value = StringType()
class Database(Model):
id = StringType()
name = StringType()
tags = ListType(ModelType(Tags), default=[])
def reference(self):
return {
"resource_id": self.id,
"external_link": "",
}
``` |
{
"source": "jihyungSong/plugin-slack-notification-protocol",
"score": 2
} |
#### File: notification/manager/notification_manager.py
```python
from spaceone.core.manager import BaseManager
from spaceone.notification.manager.slack_manager import SlackManager
class NotificationManager(BaseManager):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def dispatch(self, token, slack_channel, message, **kwargs):
slack_mgr: SlackManager = self.locator.get_manager('SlackManager')
slack_mgr.set_connector(token)
slack_mgr.send_message(slack_channel, message, **kwargs)
```
#### File: notification/service/protocol_service.py
```python
import logging
from spaceone.core.service import *
_LOGGER = logging.getLogger(__name__)
@authentication_handler
class ProtocolService(BaseService):
def __init__(self, metadata):
super().__init__(metadata)
@check_required(['options'])
def init(self, params):
""" init plugin by options
"""
return {'metadata': {
'data_type': 'SECRET',
'data': {
'schema': {
'properties': {
'token': {
'description': 'App-Level token value to control your Slack app',
'minLength': 4,
'title': 'Slack Token',
'type': 'string',
'examples': ['<KEY>']
},
'channel': {
'description': 'Slack channel to be received messages in your workspace',
'minLength': 4,
'title': 'Slack Channel',
'type': 'string',
'examples': ['everyone']
}
},
'required': [
'token',
'channel'
],
'type': 'object'
}
}
}}
@transaction
@check_required(['options'])
def verify(self, params):
"""
Args:
params:
- options
- secret_data
"""
options = params['options']
secret_data = params.get('secret_data', {})
return {}
``` |
{
"source": "jihyungSong/plugin-voicecall-notification-protocol",
"score": 2
} |
#### File: test/api/test_notification_api.py
```python
import os
import logging
from spaceone.core import utils, config
from spaceone.tester import TestCase, print_json, to_json
from google.protobuf.json_format import MessageToDict
_LOGGER = logging.getLogger(__name__)
ACCESS_KEY = os.environ.get('ACCESS_KEY', None)
SECRET_KEY = os.environ.get('SECRET_KEY', None)
PHONE = os.environ.get('PHONE', None)
COUNTRY_CODE = os.environ.get('COUNTRY_CODE', None)
if ACCESS_KEY is None or SECRET_KEY is None:
print("""
##################################################
# ERROR
#
    # Configure your Megazone message keys first for test
##################################################
example)
export ACCESS_KEY=<MEGAZONE_MESSAGE_ACCESS_KEY>
export SECRET_KEY=<MEGAZONE_MESSAGE_SECRET_KEY>
""")
    exit(1)
class TestVoiceCallNotification(TestCase):
config = utils.load_yaml_from_file(
os.environ.get('SPACEONE_TEST_CONFIG_FILE', './config.yml'))
endpoints = config.get('ENDPOINTS', {})
secret_data = {
'access_key': ACCESS_KEY,
'secret_key': SECRET_KEY,
}
channel_data = {
'phone_number': PHONE,
'country_code': COUNTRY_CODE
}
def test_init(self):
v_info = self.notification.Protocol.init({'options': {}})
print_json(v_info)
def test_verify(self):
options = {}
self.notification.Protocol.verify({'options': options, 'secret_data': self.secret_data})
def test_dispatch(self):
options = {}
self.notification.Notification.dispatch({
'options': options,
'message': {
'title': 'ํฐ์ผ ๋ฌ์ด์. ํฐ์ผ ๋ฌ์ด์. ์๋ฒ ํ์ธ ์ข ์ ญ์ ์ข.',
                'short_message': '์๋ฉ์์ง ํ์คํธํฉ๋๋ค.',
'callbacks': [{
'url': 'https://monitoring-webhook.dev.spaceone.dev/monitoring/v1/alert/alert-3b606b4b2964/2fa0eca1ed3bd4ab12564d7c6d7fc3de/ACKNOWLEDGED',
}]
},
'notification_type': 'INFO',
'secret_data': self.secret_data,
'channel_data': self.channel_data
})
``` |
{
"source": "jihyungSong/plugin-zabbix-mon-webhook",
"score": 2
} |
#### File: test/api/test_event_api.py
```python
import logging
import unittest
import os
from spaceone.core.unittest.runner import RichTestRunner
from spaceone.tester import TestCase, print_json
_LOGGER = logging.getLogger(__name__)
TEST_JSON = os.environ.get('test_json', None)
class TestEvent(TestCase):
def test_parse(self):
params = {
"options": {
},
"data": {
"to": "<EMAIL>",
"event": {
"name": "Load average is too high (per CPU load over 1.5 for 5m)",
"recovery_id": "{EVENT.RECOVERY.ID}",
"id": "7467",
"severity": "Average",
"recovery_name": "{EVENT.RECOVERY.NAME}",
"status": "PROBLEM"
},
"message": "Problem started at 12:51:50 on 2021.08.27\r\nProblem name: Load average is too high (per CPU load over 1.5 for 5m)\r\nHost: zabbix-test\r\nSeverity: Average\r\nOperational data: Load averages(1m 5m 15m): (1.69 1.39 1.01), # of CPUs: 1\r\nOriginal problem ID: 7467\r\n",
"title": "Problem: Load average is too high (per CPU load over 1.5 for 5m)",
"item": {
"key": "system.cpu.load[all,avg1]",
"id": "42529",
"value": "1.69"
},
"host": {
"name": "zabbix-test",
"description": "",
"dns": "",
"connection_info": "172.31.4.239",
"id": "10490",
"visible_name": "zabbix-test"
},
"trigger": {
"severity": "Average",
"name": "Load average is too high (per CPU load over 1.5 for 5m)",
"status": "PROBLEM",
"id": "21028"
}
}
}
test_cases = [params]
for idx, test_case in enumerate(test_cases):
print(f'###### {idx} ########')
parsed_data = self.monitoring.Event.parse({'options': {}, 'data': test_case.get('data')})
print_json(parsed_data)
print()
if __name__ == "__main__":
unittest.main(testRunner=RichTestRunner)
``` |
{
"source": "jihyungSong/python-core",
"score": 2
} |
#### File: celery/src/add_schedule.py
```python
import os
from pprint import pprint
from random import randint
from time import sleep
from typing import List
from spaceone.core import config
from spaceone.core.command import _set_file_config, _set_python_path
from spaceone.core.locator import Locator
from spaceone.core.celery.types import SpaceoneTaskData
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
package = 'spaceone.work'
module_path = BASE_DIR
config_path = f"{BASE_DIR}/spaceone/work/conf/custom_beat.yaml"
def config_server():
# 1. Set a python path
_set_python_path(package, module_path)
# 2. Initialize config from command argument
config.init_conf(
package=package,
server_type='grpc',
port='50051'
)
# 3. Get service config from global_conf.py
config.set_service_config()
# 4. Merge file conf
with open(config_path, 'r') as f:
_set_file_config(f)
def print_schedules(schedules: List[SpaceoneTaskData]):
print(f"id | schedule | total_run_count | last_run")
for sch in schedules:
print(f"{sch.schedule_id} | {sch.schedule_info} | {sch.total_run_count} | {sch.last_run_at}")
print('\n\n')
if __name__ == '__main__':
config_server()
locator = Locator()
svc = locator.get_service('ScheduleService')
print('list all schedules')
print_schedules(svc.list())
print('add schedule')
sch_name = f"test_sche_{randint(1, 1000)}"
svc.add({
'domain_id': "sample",
'enabled': True,
'task': 'spaceone.core.celery.tasks.test_task',
'name': sch_name,
'interval': {
'period': 'seconds',
'every': randint(6, 12)
},
'args':['sample'],
'kwargs':{
"sch_name":sch_name
}
})
task2 = 'spaceone.work.task.sync_scheduler.domain_scheduler'
sch_name2 = f"test_sche_{randint(1, 1000)}"
svc.add({
'domain_id': "sample",
'enabled': True,
'task': task2,
'name': sch_name2,
'cron': {
'minute':'*',
'hour': '*',
'day_of_week': '*',
'day_of_month': '*',
'month_of_year': '*',
}
})
print(f"Total Schedule : {len(svc.list())}")
while True:
sleep(6)
print_schedules(svc.list())
```
#### File: api/v1/domain.py
```python
from spaceone.api.report.v1 import domain_pb2, domain_pb2_grpc
from spaceone.core.pygrpc import BaseAPI
class Domain(BaseAPI, domain_pb2_grpc.DomainServicer):
pb2 = domain_pb2
pb2_grpc = domain_pb2_grpc
def get(self, request, context):
params, metadata = self.parse_request(request, context)
with self.locator.get_service('DomainService', metadata) as svc:
return self.locator.get_info('DomainInfo', svc.get(params))
def list(self, request, context):
params, metadata = self.parse_request(request, context)
with self.locator.get_service('DomainService', metadata) as svc:
svc_vos, total_count = svc.list(params)
return self.locator.get_info('DomainsInfo', svc_vos, total_count)
def enable(self, request, context):
params, metadata = self.parse_request(request, context)
with self.locator.get_service('DomainService', metadata) as svc:
return self.locator.get_info('DomainInfo', svc.enable(params))
def disable(self, request, context):
params, metadata = self.parse_request(request, context)
with self.locator.get_service('DomainService', metadata) as svc:
svc.disable(params)
return self.locator.get_info('EmptyInfo')
```
#### File: work/info/domain_info.py
```python
from spaceone.api.report.v1 import domain_pb2
from spaceone.core.pygrpc.message_type import *
__all__ = ['DomainInfo', 'DomainsInfo']
def IdentityInfo(vo):
info = {
"key": vo.key,
"value": vo.value,
}
return domain_pb2.IdentityInfo(**info)
def RelationResourceInfo(vo):
info = {
"service": vo.service,
"resource": vo.resource,
"identity": list(map(IdentityInfo, vo.identity)),
"tags": change_struct_type(vo.tags),
}
return domain_pb2.RelationResourceInfo(**info)
def DomainInfo(vo):
info = {
"domain_id": vo.domain_id,
"register_templates": vo.register_templates,
"relation_resources": list(map(RelationResourceInfo, vo.relation_resources)),
}
return domain_pb2.DomainInfo(**info)
def DomainsInfo(results, total_count):
info = {
"results": list(map(DomainInfo, results)),
"total_count": total_count,
}
return domain_pb2.DomainsInfo(**info)
```
#### File: spaceone/core/base.py
```python
from spaceone.core.locator import Locator
from spaceone.core.transaction import Transaction
class CoreObject(object):
def __init__(self, transaction: Transaction = None):
if transaction:
self.transaction = transaction
else:
self.transaction = Transaction()
self.locator = Locator(self.transaction)
```
#### File: core/config/__init__.py
```python
import copy
import logging
import sys
import consul
from spaceone.core import utils
from spaceone.core.config import default_conf
_REMOTE_URL = []
_GLOBAL = {}
_LOGGER = logging.getLogger(__name__)
def init_conf(package, **kwargs):
set_default_conf()
_GLOBAL['PACKAGE'] = package
_GLOBAL['SERVICE'] = package.rsplit('.', 1)[-1:][0]
if 'server_type' in kwargs:
_GLOBAL['SERVER_TYPE'] = kwargs['server_type']
if 'host' in kwargs:
        _GLOBAL['HOST'] = kwargs['host']
if 'port' in kwargs:
_GLOBAL['PORT'] = kwargs['port']
def set_default_conf():
for key, value in vars(default_conf).items():
if not key.startswith('__'):
_GLOBAL[key] = value
def get_package():
return _GLOBAL['PACKAGE']
def get_service():
return _GLOBAL['SERVICE']
def get_extension_apis():
return _GLOBAL.get('EXTENSION_APIS', {})
def get_handler(name):
return _GLOBAL.get('HANDLERS', {}).get(name, {})
def get_connector(name):
return _GLOBAL.get('CONNECTORS', {}).get(name, {})
def set_service_config():
"""
Get config from service ({package}.conf.global_conf)
"""
package = _GLOBAL['PACKAGE']
if package is None:
raise ValueError(f'Package is undefined.')
global_module = __import__(f'{package}.conf.global_conf', fromlist=['global_conf'])
for key, value in vars(global_module).items():
if not key.startswith('__'):
_GLOBAL[key] = value
def get_global(key=None, default=None):
if key:
return _GLOBAL.get(key, default)
else:
return _GLOBAL
def set_global(**config):
global_conf = get_global()
for key, value in config.items():
if key in global_conf:
if not isinstance(value, type(global_conf[key])) and global_conf[key] is not None:
value_type_name = type(global_conf[key]).__name__
raise ValueError(f'Value type is invalid. (GLOBAL.{key} = {value_type_name})')
if isinstance(value, dict):
global_conf[key] = utils.deep_merge(value, global_conf[key])
else:
global_conf[key] = value
def set_global_force(**config):
for key, value in config.items():
_GLOBAL[key] = value
def set_file_conf(config_yml: str):
file_conf: dict = utils.load_yaml_from_file(config_yml)
global_conf: dict = file_conf.get('GLOBAL', {})
set_global(**global_conf)
import_conf: list = file_conf.get('IMPORT', [])
if isinstance(import_conf, list):
for uri in import_conf:
import_remote_conf(uri)
# DEPRECATED: REMOTE_URL setting changed to IMPORT
import_conf: list = file_conf.get('REMOTE_URL', [])
if isinstance(import_conf, list):
for uri in import_conf:
import_remote_conf(uri)
def import_remote_conf(uri):
endpoint = utils.parse_endpoint(uri)
scheme = endpoint.get('scheme')
if scheme == 'file':
remote_conf = utils.load_yaml_from_file(endpoint['path'])
elif scheme in ['http', 'https']:
remote_conf = utils.load_yaml_from_url(uri)
    elif scheme == 'consul':
        remote_conf = load_consul_config(endpoint)
    else:
        remote_conf = {}
if isinstance(remote_conf, dict):
set_global(**remote_conf)
def load_consul_config(endpoint):
hostname = endpoint.get('hostname')
port = endpoint.get('port')
key = endpoint.get('path', '')[1:]
try:
conf = {}
if hostname:
conf['host'] = hostname
if port:
conf['port'] = port
c = consul.Consul(**conf)
index, data = c.kv.get(key)
if data:
            _LOGGER.debug(f'[load_consul_config] {key} = {data}')
json_str = data['Value'].decode('utf-8')
return utils.load_json(json_str)
return {}
except Exception as e:
raise Exception(f'Consul Call Error: {e}')
```
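A hedged sketch of the file-based configuration flow handled above; the YAML layout and URIs are illustrative, showing only the GLOBAL/IMPORT keys and the file/http/consul schemes that import_remote_conf recognizes.
```python
# Illustrative config file consumed by set_file_conf (values and URIs are placeholders):
#
#   GLOBAL:
#     DATABASES:
#       default:
#         host: localhost
#   IMPORT:
#     - file:///etc/spaceone/shared_conf.yaml
#     - consul://consul.example.com:8500/spaceone/inventory/config
#
# Hypothetical call, assuming a file like the above exists:
set_file_conf('/etc/spaceone/inventory.yaml')
print(get_global('DATABASES', {}))
```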
#### File: core/extension/grpc_health.py
```python
import logging
from enum import Enum
from spaceone.core import config
from grpc_health.v1.health import HealthServicer, SERVICE_NAME
from grpc_health.v1 import health_pb2, health_pb2_grpc
_LOGGER = logging.getLogger(__name__)
class GRPCHealth(HealthServicer):
def __init__(self, experimental_non_blocking=True,
experimental_thread_pool=None):
super().__init__(experimental_non_blocking, experimental_thread_pool)
self.health_mgr = HealthManager()
self.health_mgr.add_health_update(self)
self.health_mgr.check()
@property
def name(self):
return 'Health'
@property
def pb2_grpc_module(self):
return health_pb2_grpc
@property
def service_name(self):
return SERVICE_NAME
def Check(self, request, context):
try:
status = self.health_mgr.check()
status = status.value
self.update_status(status)
except Exception as e:
_LOGGER.error(f'[Check] Health Check Error: {e}')
status = 'UNKNOWN'
return health_pb2.HealthCheckResponse(status=status)
def update_status(self, status):
service_name = config.get_service()
self.set(service_name, status)
class HealthManager(object):
_checkers = []
class Status(Enum):
UNKNOWN = 'UNKNOWN'
"""When your application's status is indeterminable."""
SERVING = 'SERVING'
"""When your application is ready."""
NOT_SERVING = 'NOT_SERVING'
"""When your application is not ready."""
def check(self):
status = self.Status.SERVING
return status
def add_health_update(self, obj):
self._checkers.append(obj)
def update_status(self, status):
for obj in self._checkers:
obj.update_status(status.value)
```
#### File: core/fastapi/server.py
```python
import logging
import uvicorn
from spaceone.core import config
from spaceone.core.logger import set_logger
_LOGGER = logging.getLogger(__name__)
def api_app():
conf = config.get_global()
package = conf['PACKAGE']
rest_route_module = __import__(f'{package}.interface.rest.router', fromlist=['router'])
return getattr(rest_route_module, 'app', {})
def serve():
conf = config.get_global()
# Enable logging configuration
if conf.get('SET_LOGGING', True):
set_logger()
_LOGGER.info(f'Start REST Server ({config.get_service()}): '
f'host={conf["HOST"]} port={conf["PORT"]}')
uvicorn.run('spaceone.core.fastapi.server:api_app', host=conf['HOST'], port=conf['PORT'], factory=True)
```
#### File: core/model/__init__.py
```python
class BaseModel(object):
@classmethod
def connect(cls):
"""
Args:
Returns:
None
"""
raise NotImplementedError('model.connect not implemented!')
@classmethod
def create(cls, data):
"""
Args:
data (dict)
Returns:
model_vo (object)
"""
raise NotImplementedError('model.create not implemented!')
def update(self, data):
"""
Args:
data (dict)
Returns:
model_vo (object)
"""
raise NotImplementedError('model.update not implemented!')
def delete(self):
"""
Args:
Returns:
None
"""
raise NotImplementedError('model.delete not implemented!')
def terminate(self):
"""
Args:
Returns:
None
"""
raise NotImplementedError('model.terminate not implemented!')
def increment(self, key, amount=1):
"""
Args:
key (str)
amount (int)
Returns:
model_vo (object)
"""
raise NotImplementedError('model.increment not implemented!')
def decrement(self, key, amount=1):
"""
Args:
key (str)
amount (int)
Returns:
model_vo (object)
"""
raise NotImplementedError('model.decrement not implemented!')
@classmethod
def get(cls, **conditions):
"""
Args:
**conditions (kwargs)
- key (str): value (any)
Returns:
model_vo (object)
"""
raise NotImplementedError('model.get not implemented!')
@classmethod
def filter(cls, **conditions):
"""
Args:
**conditions (kwargs)
- key (str): value (any)
Returns:
model_vos (list)
"""
raise NotImplementedError('model.filter not implemented!')
def to_dict(self):
"""
Args:
Returns:
model_data (dict)
"""
raise NotImplementedError('model.to_dict not implemented!')
@classmethod
def query(cls, **query):
"""
Args:
**query (kwargs)
- filter (list)
[
{
'key' : 'field (str)',
'value' : 'value (any)',
'operator' : 'lt | lte | gt | gte | eq | not | exists | contain |
not_contain | in | not_in | not_contain_in | match | regex | regex_in |
datetime_lt | datetime_lte | datetime_gt | datetime_gte |
timediff_lt | timediff_lte | timediff_gt | timediff_gte'
},
...
]
- filter_or (list)
[
{
'key' : 'field (str)',
'value' : 'value (any)',
'operator' : 'lt | lte | gt | gte | eq | not | exists | contain |
not_contain | in | not_in | not_contain_in | match | regex | regex_in |
datetime_lt | datetime_lte | datetime_gt | datetime_gte |
timediff_lt | timediff_lte | timediff_gt | timediff_gte'
},
...
]
- sort (dict)
{
'key' : 'field (str)',
'desc' : True | False
}
- page (dict)
{
'start': 'start_row (int)',
'limit' : 'row_limit (int)'
}
- distinct (str): 'field'
- only (list): ['field1', 'field2', '...']
- exclude(list): ['field1', 'field2', '...']
- minimal (bool)
- count_only (bool)
Returns:
model_vos (list)
total_count (int)
"""
raise NotImplementedError('model.query not implemented!')
@classmethod
def stat(cls, **query):
"""
Args:
**query (kwargs)
- filter (list)
[
{
'key' : 'field (str)',
'value' : 'value (any)',
'operator' : 'lt | lte | gt | gte | eq | not | exists | contain | not_contain |
in | not_in | contain_in | not_contain_in | match | regex | regex_in |
datetime_lt | datetime_lte | datetime_gt | datetime_gte |
timediff_lt | timediff_lte | timediff_gt | timediff_gte'
},
...
]
- filter_or(list)
[
{
'key' : 'field (str)',
'value' : 'value (any)',
'operator' : 'lt | lte | gt | gte | eq | not | exists | contain | not_contain |
in | not_in | contain_in | not_contain_in | match | regex | regex_in |
datetime_lt | datetime_lte | datetime_gt | datetime_gte |
timediff_lt | timediff_lte | timediff_gt | timediff_gte'
},
...
]
- aggregate (dict)
{
'unwind': [
{
'path': 'key path (str)'
}
],
'group': {
'keys': [
{
'key': 'field (str)',
'name': 'alias name (str)'
},
...
],
'fields': [
{
'key': 'field (str)',
'name': 'alias name (str)',
'operator': 'count | sum | avg | max | min | size | add_to_set | merge_objects'
},
...
]
}
'count': {
'name': 'alias name (str)'
}
}
- sort(dict)
{
'name' : 'field (str)',
'desc' : True | False
}
- page(dict)
{
'start': 'start_row (int)',
'limit' : 'row_limit (int)'
}
Returns:
values (list)
"""
raise NotImplementedError('model.stat not implemented!')
```
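An illustrative query dict following the structure documented in the docstrings above; field names and values are placeholders.
```python
# Example query matching the documented structure (field names/values are placeholders).
example_query = {
    'filter': [
        {'key': 'state', 'value': 'ACTIVE', 'operator': 'eq'},
        {'key': 'created_at', 'value': '2021-01-01T00:00:00Z', 'operator': 'datetime_gte'}
    ],
    'filter_or': [
        {'key': 'name', 'value': 'dev', 'operator': 'contain'},
        {'key': 'name', 'value': 'stage', 'operator': 'contain'}
    ],
    'sort': {'key': 'created_at', 'desc': True},
    'page': {'start': 1, 'limit': 10},
    'only': ['server_id', 'name', 'state']
}
# A concrete model implementation would be called as:
#   model_vos, total_count = SomeModel.query(**example_query)
```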
#### File: skeleton/api/helloworld.py
```python
from spaceone.api.sample.v1 import helloworld_pb2, helloworld_pb2_grpc
from spaceone.core.pygrpc import BaseAPI
class HelloWorld(BaseAPI, helloworld_pb2_grpc.HelloWorldServicer):
pb2 = helloworld_pb2
pb2_grpc = helloworld_pb2_grpc
def say_hello(self, request, context):
params, metadata = self.parse_request(request, context)
with self.locator.get_service('HelloWorldService', metadata) as helloworld_svc:
return self.locator.get_info('HelloWorldInfo', helloworld_svc.say_hello(params))
```
#### File: skeleton/service/helloworld_service.py
```python
from spaceone.core.service import *
__all__ = ['HelloWorldService']
@authentication_handler
@authorization_handler
@event_handler
class HelloWorldService(BaseService):
@transaction
@check_required(['name'])
def say_hello(self, params):
helloworld_mgr = self.locator.get_manager('HelloWorldManager')
return helloworld_mgr.say_hello(params['name'])
``` |
{
"source": "jihyungSong/repository",
"score": 2
} |
#### File: repository/service/policy_service.py
```python
import logging
from spaceone.core.service import *
from spaceone.core import utils
from spaceone.repository.error import *
from spaceone.repository.model.capability_model import Capability
from spaceone.repository.manager.identity_manager import IdentityManager
from spaceone.repository.manager.policy_manager.local_policy_manager import LocalPolicyManager
from spaceone.repository.manager.policy_manager.remote_policy_manager import RemotePolicyManager
from spaceone.repository.manager.repository_manager import RepositoryManager
_LOGGER = logging.getLogger(__name__)
@authentication_handler(exclude=['get'])
@authorization_handler(exclude=['get'])
@mutation_handler
@event_handler
class PolicyService(BaseService):
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['name', 'permissions', 'domain_id'])
def create(self, params):
"""Create Policy (local repo only)
Args:
params (dict): {
'name': 'str',
'permissions': 'list',
'labels': 'list',
'tags': 'dict',
'project_id': 'str',
'domain_id': 'str'
}
Returns:
policy_vo (object)
"""
if 'tags' in params:
params['tags'] = utils.dict_to_tags(params['tags'])
self._check_project(params.get('project_id'), params['domain_id'])
policy_mgr: LocalPolicyManager = self.locator.get_manager('LocalPolicyManager')
# Only LOCAL repository can be created
repo_mgr: RepositoryManager = self.locator.get_manager('RepositoryManager')
params['repository'] = repo_mgr.get_local_repository()
params['repository_id'] = params['repository'].repository_id
return policy_mgr.create_policy(params)
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['policy_id', 'domain_id'])
def update(self, params):
"""Update Policy. (local repo only)
Args:
params (dict): {
'policy_id': 'str',
'name': 'str',
'permissions': 'list',
'labels': 'list',
'tags': 'dict'
'domain_id': 'str'
}
Returns:
policy_vo (object)
"""
if 'tags' in params:
params['tags'] = utils.dict_to_tags(params['tags'])
policy_mgr: LocalPolicyManager = self.locator.get_manager('LocalPolicyManager')
return policy_mgr.update_policy(params)
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['policy_id', 'domain_id'])
def delete(self, params):
"""Delete Policy (local repo only)
Args:
params (dict): {
'policy_id': 'str',
'domain_id': 'str'
}
Returns:
policy_vo (object)
"""
policy_id = params['policy_id']
domain_id = params['domain_id']
policy_mgr: LocalPolicyManager = self.locator.get_manager('LocalPolicyManager')
return policy_mgr.delete_policy(policy_id, domain_id)
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['policy_id'])
@change_only_key({'repository_info': 'repository'})
def get(self, params):
""" Get Policy (local & remote)
Args:
params (dict): {
'policy_id': 'str',
'repository_id': 'str',
'domain_id': 'str',
'only': 'list'
}
Returns:
policy_vo (object)
"""
policy_id = params['policy_id']
domain_id = params.get('domain_id')
repo_id = params.get('repository_id')
only = params.get('only')
repo_mgr: RepositoryManager = self.locator.get_manager('RepositoryManager')
repo_vos = repo_mgr.get_all_repositories(repo_id)
for repo_vo in repo_vos:
_LOGGER.debug(f'[get] find at name: {repo_vo.name} '
f'(repo_type: {repo_vo.repository_type})')
policy_mgr = self._get_policy_manager_by_repo(repo_vo)
try:
policy_vo = policy_mgr.get_policy(policy_id, domain_id, only)
except Exception as e:
policy_vo = None
if policy_vo:
return policy_vo
raise ERROR_NO_POLICY(policy_id=policy_id)
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['repository_id'])
@change_only_key({'repository_info': 'repository'}, key_path='query.only')
@append_query_filter(['repository_id', 'policy_id', 'name', 'project_id', 'domain_id'])
@change_tag_filter('tags')
@append_keyword_filter(['policy_id', 'name', 'labels'])
def list(self, params):
""" List policies (local or repo)
Args:
params (dict): {
'repository_id': 'str',
'policy_id': 'str',
'name': 'str',
'project_id': 'str',
'domain_id': 'str',
'query': 'dict (spaceone.api.core.v1.Query)'
}
Returns:
policy_vos (object)
total_count
"""
repo_mgr: RepositoryManager = self.locator.get_manager('RepositoryManager')
repository_id = params['repository_id']
repo_vo = repo_mgr.get_repository(repository_id)
policy_mgr = self._get_policy_manager_by_repo(repo_vo)
query = params.get('query', {})
return policy_mgr.list_policies(query)
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['query', 'repository_id'])
@append_query_filter(['repository_id', 'domain_id'])
@change_tag_filter('tags')
@append_keyword_filter(['policy_id', 'name', 'labels'])
def stat(self, params):
"""
Args:
params (dict): {
'domain_id': 'str',
'query': 'dict (spaceone.api.core.v1.StatisticsQuery)'
}
Returns:
values (list) : 'list of statistics data'
"""
repo_mgr: RepositoryManager = self.locator.get_manager('RepositoryManager')
repository_id = params['repository_id']
repo_vo = repo_mgr.get_repository(repository_id)
policy_mgr = self._get_policy_manager_by_repo(repo_vo)
query = params.get('query', {})
return policy_mgr.stat_policies(query)
def _get_policy_manager_by_repo(self, repo_vo):
if repo_vo.repository_type == 'local':
local_policy_mgr: LocalPolicyManager = self.locator.get_manager('LocalPolicyManager', repository=repo_vo)
return local_policy_mgr
else:
remote_policy_mgr: RemotePolicyManager = self.locator.get_manager('RemotePolicyManager', repository=repo_vo)
return remote_policy_mgr
def _check_project(self, project_id, domain_id):
if project_id:
identity_mgr: IdentityManager = self.locator.get_manager('IdentityManager')
identity_mgr.get_project(project_id, domain_id)
``` |
{
"source": "jihyungSong/spacectl",
"score": 3
} |
#### File: spacectl/conf/my_conf.py
```python
import os
from spaceone.core import utils
from spacectl.conf.global_conf import *
__all__ = ['set_environment', 'get_environment', 'remove_environment', 'list_environments',
'import_config', 'set_config', 'get_config', 'set_endpoint', 'get_endpoint',
'remove_endpoint', 'list_endpoints', 'set_template', 'get_template', 'remove_template']
def set_environment(environment):
utils.create_dir(CONFIG_DIR)
utils.create_dir(ENVIRONMENT_DIR)
utils.save_yaml_to_file({'environment': environment}, ENVIRONMENT_CONF_PATH)
def get_environment():
try:
data = utils.load_yaml_from_file(ENVIRONMENT_CONF_PATH)
except Exception:
raise Exception('spaceconfig is undefined. (Use "spacectl config init")')
environment = data.get('environment')
if not environment:
raise Exception('The environment is not set. Switch the environment. (Use "spacectl config environment --help")')
return environment
def remove_environment(environment):
try:
environment_path = os.path.join(ENVIRONMENT_DIR, f'{environment}.yml')
if os.path.exists(environment_path):
os.remove(environment_path)
except Exception as e:
raise Exception(f'Environment deletion error: {e}')
environments = list_environments()
if len(environments) > 0:
utils.save_yaml_to_file({'environment': environments[0]}, ENVIRONMENT_CONF_PATH)
else:
os.remove(ENVIRONMENT_CONF_PATH)
def list_environments():
environments = []
try:
for f in os.listdir(ENVIRONMENT_DIR):
if os.path.isfile(os.path.join(ENVIRONMENT_DIR, f)) and f.find('.yml') > 1:
environments.append(f.rsplit('.', 1)[0])
except Exception:
raise Exception('spaceconfig is undefined. (Use "spacectl config init")')
return environments
def import_config(import_file_path, environment=None):
if environment is None:
environment = get_environment()
try:
environment_path = os.path.join(ENVIRONMENT_DIR, f'{environment}.yml')
data = utils.load_yaml_from_file(import_file_path)
utils.save_yaml_to_file(data, environment_path)
except Exception:
raise Exception(f'Import file format is invalid. (file = {import_file_path})')
def set_config(new_data, environment=None):
if environment is None:
environment = get_environment()
try:
environment_path = os.path.join(ENVIRONMENT_DIR, f'{environment}.yml')
utils.save_yaml_to_file(new_data, environment_path)
except Exception:
raise Exception('spaceconfig is undefined. (Use "spacectl config init")')
def get_config(key=None, default=None, environment=None):
if environment is None:
environment = get_environment()
try:
environment_path = os.path.join(ENVIRONMENT_DIR, f'{environment}.yml')
data = utils.load_yaml_from_file(environment_path)
except Exception:
raise Exception('spaceconfig is undefined. (Use "spacectl config init")')
if key:
return data.get(key, default)
else:
return data
def set_endpoint(endpoints, environment=None):
    data = get_config(environment=environment)
data['endpoints'] = endpoints
set_config(data, environment)
def get_endpoint(service=None, environment=None):
endpoints = get_config('endpoints', {}, environment)
if service:
return endpoints.get(service)
else:
return endpoints
def remove_endpoint(service, environment=None):
    data = get_config(environment=environment)
endpoints = data.get('endpoints', {})
if service in endpoints:
del endpoints[service]
data['endpoints'] = endpoints
set_config(data, environment)
def list_endpoints(environment=None):
endpoints = get_config('endpoints', {}, environment)
result = []
for service, endpoint in endpoints.items():
result.append((service, endpoint))
return result
def set_template(service, resource, data):
utils.create_dir(TEMPLATE_DIR)
my_template_path = os.path.join(TEMPLATE_DIR, f'{service}.{resource}.yml')
utils.save_yaml_to_file(data, my_template_path)
def remove_template(service, resource):
my_template_path = os.path.join(TEMPLATE_DIR, f'{service}.{resource}.yml')
if os.path.exists(my_template_path):
os.remove(my_template_path)
def get_template(service, resource):
return _get_my_template(service, resource) or _get_default_template(service, resource)
def _get_my_template(service, resource):
try:
my_template_path = os.path.join(TEMPLATE_DIR, f'{service}.{resource}.yml')
data = utils.load_yaml_from_file(my_template_path)
data['type'] = 'custom'
return data
except Exception:
return None
def _get_default_template(service, resource):
try:
default_template_path = os.path.join(DEFAULT_TEMPLATE_DIR, f'{service}.{resource}.yml')
data = utils.load_yaml_from_file(default_template_path)
data['type'] = 'default'
return data
except Exception:
return None
```
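A hypothetical end-to-end flow using the helpers above; the environment name, endpoints, and config values are placeholders.
```python
# Hypothetical usage of the config helpers above (all values are placeholders).
set_environment('dev')
set_config({'api_key': '<API_KEY>', 'endpoints': {}}, environment='dev')
set_endpoint({
    'identity': 'grpc://identity.example.com:50051',
    'inventory': 'grpc://inventory.example.com:50051'
}, environment='dev')
print(get_endpoint('identity', environment='dev'))
print(list_environments())
```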
#### File: lib/apply/dot_dict_list.py
```python
import click
import copy
from spacectl.lib.output import echo
class DotDictList(list):
"""
You can access attributes of each item by '.'
"""
key = "id"
def dot_access_error_handler(self, attr):
echo("An attr named {attr} doesn't exist in {self}".format(self=self, attr=attr), err=True, terminate=True)
def __getattribute__(self, attr):
try:
return super().__getattribute__(attr)
except AttributeError as e:
try:
return [item for item in self if item[self.key] == attr][0]
except IndexError:
self.dot_access_error_handler(attr)
def to_list(self):
return list(self)
# def to_list(self):
# return [task.to_dict() for task in self]
class TaskResultList(DotDictList):
def get_task_ids(self):
return [task["id"] for task in self]
def dot_access_error_handler(self, task_id):
echo('You cannot access to a Task(id={task_id}). Accessible Task IDs are {task_ids}'.format(
task_id=task_id, task_ids=self.get_task_ids()
), err=True, terminate=True)
```
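A minimal sketch of the dot-access behavior above: items are plain dicts looked up by their 'id' key.
```python
# Minimal sketch: TaskResultList items are dicts, addressed by their 'id' key via dot access.
tasks = TaskResultList([
    {'id': 'collect_servers', 'output': {'count': 3}},
    {'id': 'notify', 'output': {'sent': True}},
])
print(tasks.collect_servers['output']['count'])  # -> 3
print(tasks.get_task_ids())                      # -> ['collect_servers', 'notify']
```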
#### File: lib/apply/task_manager.py
```python
import yaml
from spacectl.lib.apply.task import Task
from spacectl.lib.apply import store
from spacectl.lib.parser import apply_manifest
from spacectl.modules import MODULES
from spacectl.lib.output import echo
import click
from pathlib import Path
import os.path
from spaceone.core import utils
from spacectl.lib.parser.default import parse_uses
import os
class TaskManager:
def __init__(self, silent):
self.task_queue = list() # Task Queue
self.silent = silent
def load(self, file_path):
data = utils.load_yaml_from_file(file_path)
# data = yaml.safe_load(file_path)
for import_file in data.get('import', []):
# import file path is relative to current file_path
absolute_location = Path(file_path).parent
self.load(os.path.join(absolute_location, import_file))
store.set_var(data.get('var', {}))
store.set_env(data.get('env', {}))
for task in data.get("tasks", []):
self.task_queue.append(task)
def run(self):
for task in self.task_queue:
context = {
"var": store.get_var(),
"env": store.get_env(),
"tasks": store.get_task_results(),
# "self": task,
}
apply_manifest.apply_template(task, task.get("id", "anonymous_task_id"))
module = parse_uses(task["uses"])
task_module = MODULES.get(module)
if task_module:
t = task_module(task, silent=self.silent)
t.execute()
else:
raise Exception(f'Not found Module: {module}')
```
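For reference, an illustrative manifest of the shape TaskManager.load reads (var/env/tasks, plus an optional import list); the 'uses' value and task fields are placeholders, since their exact syntax depends on parse_uses and the registered MODULES.
```python
import yaml

# Illustrative manifest of the shape TaskManager.load consumes; the 'uses' value and
# task fields are placeholders (their exact syntax depends on parse_uses/MODULES).
manifest_yaml = """
var:
  project_id: project-abc123
env:
  stage: dev
tasks:
  - id: list_servers
    uses: "@modules/resource"
    spec:
      resource_type: inventory.Server
"""
print(yaml.safe_load(manifest_yaml))
```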
#### File: spacectl/lib/output.py
```python
import click
import csv
import sys
from tabulate import tabulate
from spaceone.core import utils
def print_data(data, output, **kwargs):
if 'root_key' in kwargs:
data = utils.get_dict_value(data, kwargs['root_key'], [])
del kwargs['root_key']
if output == 'table':
if len(data) == 0:
echo('NO DATA')
else:
_print_table(data, **kwargs)
elif output == 'json':
_print_json(data, **kwargs)
elif output == 'yaml':
_print_yaml(data, **kwargs)
elif output == 'csv':
_print_csv(data, **kwargs)
elif output == 'quiet':
_print_quiet(data, **kwargs)
def _print_table(data, **kwargs):
data, headers, total_count = _parse_data_by_options(data, **kwargs)
if isinstance(data, dict):
_print_yaml(data)
else:
click.echo(tabulate(data, tablefmt='presto', headers=headers or 'keys'))
if total_count:
click.echo()
click.echo(f' Count: {len(data)} / {int(total_count)}')
def _print_csv(data, **kwargs):
data, headers, total_count = _parse_data_by_options(data, **kwargs)
if isinstance(data, dict):
_print_yaml(data)
else:
if headers:
writer = csv.writer(sys.stdout)
writer.writerow(headers)
writer.writerows(data)
else:
writer = csv.DictWriter(sys.stdout, fieldnames=data[0].keys())
writer.writeheader()
writer.writerows(data)
def _parse_data_by_options(data, **kwargs):
headers = kwargs.get('headers')
total_count = kwargs.get('total_count')
if len(data) > 0 and not isinstance(data[0], (dict, list, tuple)):
headers = ['Values']
data = [[v] for v in data]
return data, headers, total_count
def _print_json(data, **kwargs):
if data == {}:
click.echo()
else:
click.echo(utils.dump_json(data, indent=4))
def _print_yaml(data, **kwargs):
if data == {}:
click.echo()
else:
click.echo('---')
click.echo(utils.dump_yaml(data))
def _print_quiet(data, **kwargs):
for d in data:
items = list(d.values())
if len(items) != 1:
click.echo("Please Selector only one column for quiet output.", err=True)
exit(1)
click.echo(items[0])
def echo(message, flag=True, err=False, terminate=False):
if flag:
click.echo(message, err=err)
if terminate:
exit(1)
```
#### File: lib/parser/apply_manifest.py
```python
import click
from spacectl.lib.apply import store
from spacectl.lib.parser.apply_jinja import jinja_env
import jinja2.exceptions
from spacectl.lib.output import echo
def apply_template(obj, task_id):
fields = []
    if isinstance(obj, (str, bool, int, float)):
return 0
elif obj is None:
return 0
elif isinstance(obj, list):
# if it's a list, apply template to its items
for item in obj:
apply_template(item, task_id)
return 0
elif isinstance(obj, dict):
# if it's a dict, apply template to its items
fields = list(obj.keys())
elif hasattr(obj, "fields_to_apply_template"):
# if it has "fields_to_apply_template" apply only the correspondent fields
fields = obj.fields_to_apply_template
else:
fields = obj.__dict__
for field in fields:
value = _get_obj_value(obj, field)
if isinstance(value, str):
template = jinja_env.from_string(value)
try:
template_applied_value = template.render(
var=store.get_var(),
env=store.get_env(),
tasks=store.get_task_results()
)
if field == 'if':
obj[field] = _evaluate_if_statement(template_applied_value)
else:
obj[field] = template_applied_value
except jinja2.exceptions.UndefinedError as e:
echo(
"While applying templates for Task(id={task_id}), an undefined error has occurred\n{error_message}".format(
error_message=e.message,
task_id=task_id
), err=True, terminate=True)
else:
apply_template(value, task_id)
def _get_obj_value(obj, key):
    # Fetch the value for the given key from obj (dict lookup or attribute access).
    if isinstance(obj, dict):
        if key not in obj:
            click.echo("key '{key}' doesn't exist in {obj}".format(key=key, obj=obj), err=True)
exit(1)
return obj.get(key)
else:
return getattr(obj, key)
def _evaluate_if_statement(statement, **ctx):
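    # The already-rendered 'if' expression is evaluated as a Python expression and
    # coerced to a bool, e.g. "1 == 1" -> True (note that this relies on eval()).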
r = bool(eval(statement))
return r
```
#### File: modules/resource/validate.py
```python
import click
def check_valid_verb(task_name, mode, custom_verb):
if mode == 'DEFAULT':
error_if_invalid_verb(task_name, mode, ['exec'], custom_verb)
if mode == "READ_ONLY":
error_if_invalid_verb(task_name, mode, ['create', 'update', 'exec'], custom_verb)
if mode == "NO_UPDATE":
error_if_invalid_verb(task_name, mode, ['update', 'exec'], custom_verb)
if mode == 'EXEC':
error_if_invalid_verb(task_name, mode, ['read', 'create', 'update'], custom_verb)
def error_if_invalid_verb(task_name, mode, verb_types, custom_verb):
for verb_type in verb_types:
if verb_type in custom_verb:
click.echo("You cannot define {verb_name} as {verb_type} with {mode} in task-{task_name}".format(
verb_name=custom_verb[verb_type],
verb_type=verb_type,
mode=mode,
task_name=task_name
), err=True)
exit(1)
``` |
{
"source": "jihyunhong/jihyun_archive",
"score": 2
} |
#### File: DMA_project/[2021.03 - 2021.06] project1/DMA_project1_team01.py
```python
import mysql.connector
# TODO: REPLACE THE VALUE OF VARIABLE team (EX. TEAM 1 --> team = 1)
team = 1
# Requirement1: create schema ( name: DMA_team## )
def requirement1(host, user, password):
cnx = mysql.connector.connect(host=host, user=user, password=password)
cursor = cnx.cursor()
cursor.execute('SET GLOBAL innodb_buffer_pool_size=2*1024*1024*1024;')
# TODO: WRITE CODE HERE
cursor.execute('CREATE DATABASE IF NOT EXISTS DMA_team01;')
cursor.execute('USE DMA_team01;')
# TODO: WRITE CODE HERE
cursor.close()
# Requirement2: create table
def requirement2(host, user, password):
cnx = mysql.connector.connect(host=host, user=user, password=password)
cursor = cnx.cursor()
cursor.execute('SET GLOBAL innodb_buffer_pool_size=2*1024*1024*1024;')
# TODO: WRITE CODE HERE
cursor.execute('CREATE DATABASE IF NOT EXISTS DMA_team01;')
cursor.execute('USE DMA_team01;')
# 1. user table
cursor.execute('''
CREATE TABLE IF NOT EXISTS USER(
id BIGINT(20) NOT NULL,
user_name VARCHAR(255) NOT NULL,
profile TINYINT(1) NOT NULL,
items_count INT(11) NOT NULL,
PRIMARY KEY(id),
UNIQUE(user_name));
''')
# 2. item table
cursor.execute('''
CREATE TABLE IF NOT EXISTS ITEM(
id BIGINT(20) NOT NULL,
item_name VARCHAR(255) NOT NULL,
price FLOAT,
beta_version TINYINT(1) NOT NULL,
ratings INT(11) NOT NULL,
metascore INT(11) NOT NULL,
developers VARCHAR(255),
release_date DATE,
PRIMARY KEY(id));
''')
# 3. user_item table
cursor.execute('''
CREATE TABLE IF NOT EXISTS USER_ITEM(
user_id BIGINT(20) NOT NULL,
item_id BIGINT(20) NOT NULL,
usagetime_2weeks INT(11) NOT NULL,
usagetime_total INT(11) NOT NULL);
''')
# 4. review table
cursor.execute('''
CREATE TABLE IF NOT EXISTS REVIEW(
id VARCHAR(255) NOT NULL,
user_id BIGINT(20) NOT NULL,
item_id BIGINT(20) NOT NULL,
recommend TINYINT(1) NOT NULL,
body INT(11) NOT NULL,
helpful_score FLOAT NOT NULL,
helpful_count INT(11) NOT NULL,
posted_date DATE NOT NULL,
PRIMARY KEY(id));
''')
# 5. genre table
cursor.execute('''
CREATE TABLE IF NOT EXISTS GENRE(
id VARCHAR(255) NOT NULL,
genre_name VARCHAR(255) NOT NULL,
PRIMARY KEY(id),
UNIQUE(genre_name));
''')
# 6. item_genre table
cursor.execute('''
CREATE TABLE IF NOT EXISTS ITEM_GENRE(
item_id BIGINT(20) NOT NULL,
genre_id VARCHAR(255) NOT NULL);
''')
# 7. bundle table
cursor.execute('''
CREATE TABLE IF NOT EXISTS BUNDLE(
id BIGINT(20) NOT NULL,
bundle_name VARCHAR(255) NOT NULL,
price FLOAT NOT NULL,
final_price FLOAT NOT NULL,
discount FLOAT NOT NULL,
PRIMARY KEY(id));
''')
# 8. bundle_item table
cursor.execute('''
CREATE TABLE IF NOT EXISTS BUNDLE_ITEM(
bundle_id BIGINT(20) NOT NULL,
item_id BIGINT(20) NOT NULL);
''')
# 9. bundle_genre table
cursor.execute('''
CREATE TABLE IF NOT EXISTS BUNDLE_GENRE(
bundle_id BIGINT(20) NOT NULL,
genre_id VARCHAR(255) NOT NULL,
genre_count INT(11) NOT NULL);
''')
    # 10. tag table --> TBD: whether UNIQUE(tag_name) and NOT NULL on tag_order are needed
cursor.execute('''
CREATE TABLE IF NOT EXISTS TAG(
item_id BIGINT(20) NOT NULL,
tag_name VARCHAR(255) NOT NULL,
tag_order INT(11) NOT NULL,
PRIMARY KEY(item_id, tag_name));
''')
# 11. item_specs
cursor.execute('''
CREATE TABLE IF NOT EXISTS ITEM_SPECS(
item_id BIGINT(20) NOT NULL,
spec_name VARCHAR(255));
''')
# TODO: WRITE CODE HERE
cursor.close()
# Requirement3: insert data
def requirement3(host, user, password, directory):
cnx = mysql.connector.connect(host=host, user=user, password=password)
cursor = cnx.cursor()
cursor.execute('SET GLOBAL innodb_buffer_pool_size=2*1024*1024*1024;')
# TODO: WRITE CODE HERE
cursor.execute('CREATE DATABASE IF NOT EXISTS DMA_team01;')
cursor.execute('USE DMA_team01;')
csv_dict = {'bundle_genre.csv': ['INT', 'VARCHAR', 'INT'],
'bundle_item.csv': ['INT', 'INT'],
'bundle.csv': ['INT', 'VARCHAR', 'FLOAT', 'FLOAT', 'FLOAT'],
'genre.csv': ['VARCHAR', 'VARCHAR'],
'item_genre.csv': ['INT', 'VARCHAR'],
'item_specs.csv': ['INT', 'VARCHAR'],
'item.csv': ['BIGINT', 'VARCHAR', 'FLOAT', 'TINYINT', 'INT', 'INT', 'VARCHAR', 'DATE'],
'review.csv': ['VARCHAR', 'BIGINT', 'INT', 'TINYINT', 'INT', 'FLOAT', 'INT', 'DATE'],
'tag.csv': ['INT', 'VARCHAR', 'INT'],
'user_item.csv': ['BIGINT', 'INT', 'INT', 'INT'],
'user.csv': ['BIGINT', 'VARCHAR', 'TINYINT', 'INT']}
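    # Each CSV row is coerced column-by-column according to csv_dict and loaded with
    # REPLACE INTO; e.g. a user.csv row "1,alice,1,3" would become
    # REPLACE INTO user VALUES (1, 'alice', 1, 3); (illustrative values only).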
    verbose = False  # set verbose=True to print loading progress
for key in csv_dict.keys():
if verbose:
print(f'\n{key}')
filepath = directory + '/' + key
type_list = csv_dict[key] # list of datatype(ex: 'INT', 'FLOAT' etc. )
with open(filepath, 'r', encoding='utf-8') as csv_data:
for i, row in enumerate(csv_data.readlines()[1:]):
if verbose:
print(f'{i}', end='\r')
row = row.strip().split(',')
                # assert len(row) == len(type_list)  # for debugging
                for idx, r in enumerate(row):
                    if len(r) == 0:  # empty string -> insert 'null'
                        row[idx] = 'null'
                        continue
                    if key == '<KEY>' and r == 'nan':  # 'nan' in the item table -> insert 'null'
                        row[idx] = 'null'
                        continue
                    if type_list[idx] in ['TINYINT', 'INT', 'BIGINT']:  # integer columns -> int
                        row[idx] = int(r)
                    elif type_list[idx] in ['FLOAT']:  # real-number columns -> float
                        try:
                            row[idx] = float(r)
                        except:
                            if r[-1] == '%':  # percentage values: strip '%' then convert
                                row[idx] = float(r[:-1])
                            elif r[0] == '$':  # prices: strip '$' then convert
                                row[idx] = float(r[1:])
                    elif type_list[idx] in ['VARCHAR', 'DATE']:  # string columns stay as strings
                        row[idx] = r
                    else:
                        raise ValueError
                row = tuple(row)
                sql = f"REPLACE INTO {key[:-4]} VALUES {row};"
                sql = sql.replace("'null'", 'null')  # post-processing: turn the string 'null' into SQL NULL
                cursor.execute(sql)
            cnx.commit()  # commit once per file (11 commits in total)
# TODO: WRITE CODE HERE
cursor.close()
# Requirement4: add constraint (foreign key)
def requirement4(host, user, password):
cnx = mysql.connector.connect(host=host, user=user, password=password)
cursor = cnx.cursor()
cursor.execute('SET GLOBAL innodb_buffer_pool_size=2*1024*1024*1024;')
# TODO: WRITE CODE HERE
cursor.execute('CREATE DATABASE IF NOT EXISTS DMA_team01;')
cursor.execute('USE DMA_team01;')
    # FOREIGN KEYs for REVIEW
    cursor.execute('ALTER TABLE REVIEW ADD CONSTRAINT FOREIGN KEY(user_id) REFERENCES USER(id);')
    cursor.execute('ALTER TABLE REVIEW ADD CONSTRAINT FOREIGN KEY(item_id) REFERENCES ITEM(id);')
    # FOREIGN KEYs for USER_ITEM
    cursor.execute('ALTER TABLE USER_ITEM ADD CONSTRAINT FOREIGN KEY(user_id) REFERENCES USER(id);')
    cursor.execute('ALTER TABLE USER_ITEM ADD CONSTRAINT FOREIGN KEY(item_id) REFERENCES ITEM(id);')
    # FOREIGN KEYs for ITEM_GENRE
    cursor.execute('ALTER TABLE ITEM_GENRE ADD CONSTRAINT FOREIGN KEY(item_id) REFERENCES ITEM(id);')
    cursor.execute('ALTER TABLE ITEM_GENRE ADD CONSTRAINT FOREIGN KEY(genre_id) REFERENCES GENRE(id);')
    # FOREIGN KEY for TAG
    cursor.execute('ALTER TABLE TAG ADD CONSTRAINT FOREIGN KEY(item_id) REFERENCES ITEM(id);')
    # FOREIGN KEY for BUNDLE_GENRE
    cursor.execute('ALTER TABLE BUNDLE_GENRE ADD CONSTRAINT FOREIGN KEY(genre_id) REFERENCES GENRE(id);')
    # FOREIGN KEYs for BUNDLE_ITEM
    cursor.execute('ALTER TABLE BUNDLE_ITEM ADD CONSTRAINT FOREIGN KEY(bundle_id) REFERENCES BUNDLE(id);')
    cursor.execute('ALTER TABLE BUNDLE_ITEM ADD CONSTRAINT FOREIGN KEY(item_id) REFERENCES ITEM(id);')
    # FOREIGN KEY for ITEM_SPECS
    cursor.execute('ALTER TABLE ITEM_SPECS ADD CONSTRAINT FOREIGN KEY(item_id) REFERENCES ITEM(id);')
cnx.commit()
# TODO: WRITE CODE HERE
cursor.close()
# TODO: REPLACE THE VALUES OF FOLLOWING VARIABLES
host = 'localhost'
user = 'root'
password = ''
directory = './dataset'
requirement1(host=host, user=user, password=password)
requirement2(host=host, user=user, password=password)
requirement3(host=host, user=user, password=password, directory=directory)
requirement4(host=host, user=user, password=password)
```
#### File: game/[2020.12 - 2020.12] covid19_game/game.py
```python
import random
class covid():
def __init__(self):
"""๊ธฐ๋ณธ ์ค์ """
self.vaccine = [0.25, 0.5, 1.0]
self.country = ['ํ๊ตญ', '์ค๊ตญ', '์ผ๋ณธ', '๋ฏธ๊ตญ', '๋
์ผ']
self.pop = [1500, 3000, 2000, 2500, 2200]
self.pop_inf = [300, 800, 500, 750, 1000]
self.more_infect = 0 # ๋ผ์ด๋๋ง๋ค ์ถ๊ฐ๋ก ๊ฐ์ผ๋ ๊ฐ์ผ์ ์ ๋์
self.cured = 0 # ๋ผ์ด๋๋ง๋ค ๋ฐฑ์ ์ผ๋ก ์น๋ฃ๋ ๊ฐ์ผ์ ์ ๋์
def print_vaccine(self):
"""1๋ฒ ๋ฉ๋ด ์ ํ ์ ์ถ๋ ฅ๋๋ ํ๋ฉด"""
for i, rate in enumerate(self.vaccine):
print(f'๋ฐฑ์ ์ด๋ฆ : ๋ฐฑ์ {i}')
print(f'๋ฐฑ์ ์น๋ฃ์จ : {int(100 * rate)}%')
print('')
def print_country_info(self):
"""๊ฐ ๋๋ผ์ ๊ฐ์ผ ์ ๋ณด๊ฐ ์ถ๋ ฅ๋๋ ํ๋ฉด (์์น ๊ตญ๊ฐ๋ ์ ์ธ)"""
good_count = 0
for i, country in enumerate(self.country):
if self.pop_inf[i] == 0: # ํด๋น ๊ตญ๊ฐ ์์น
good_count += 1 # ์์น ์ฝ์ธ ์ ๋ฆฝ
continue
else:
print(f'๊ฐ์ผ ๊ตญ๊ฐ : {country}')
print(f'์ธ๊ตฌ์ : {self.pop[i]}๋ช
')
print(f'๊ฐ์ผ ์ธ๊ตฌ์ : {self.pop_inf[i]}๋ช
')
print('')
if good_count == len(self.country): # ๋ชจ๋ ๊ตญ๊ฐ๊ฐ ์์น์ธ ์ํ..!
print('์ถํํฉ๋๋ค!!! ์ฝ๋ก๋๋ ์ ๋ณต๋์์ต๋๋ค!!!')
# ์ฐธ๊ณ : ์ด ์ํฉ์ 5๊ฐ๊ตญ์ด ๋ชจ๋ ๋ฐฑ์ 3์ ๋ง์์ผ๋ง ๋ณผ ์ ์๋ ๋ฌธ๊ตฌ์ด๋ค.
# ๋ฑ์ฅ ํ๋ฅ ์ด.... ๋๋ฌด ํฌ๋ฐํ๋ค์ ใ
def cure(self, round_num, vac_num, country_num):
"""๋งค ๋ผ์ด๋ ํ ๋ฐฑ์ ์ผ๋ก ํ ๋๋ผ๊ฐ ์น๋ฃ๋๋ ๊ณผ์ ์ ๊ตฌํ"""
"""round_num์ 1~5์ ํด๋นํ๋ ์ ์ (๋ผ์ด๋ ๋ฒํธ)"""
"""vac_num, country_num์ ๊ฐ๊ฐ 1~3, 1~5์ ํด๋นํ๋ ์ ์"""
# ํธ์์ ๋ณ์๋ช
๋ณ๊ฒฝ
rn = round_num
vn = vac_num
cn = country_num
rate = self.vaccine[vn - 1]
country = self.country[cn - 1]
pop = self.pop[cn - 1]
pop_inf = self.pop_inf[cn - 1]
# ์ ํ๋ ๋ฐฑ์ /๋๋ผ ์ถ๋ ฅ
print(f'# {rn}๋ฒ์งธ ์๋ #')
print(f'์ ํ๋ ๋ฐฑ์ : ๋ฐฑ์ {vn}, ์น๋ฃ์จ: {rate * 100:4.1f}%')
print(f'์ ํ๋ ๋๋ผ: {country}, ์ธ๊ตฌ์: {pop}๋ช
, ๊ฐ์ผ์์: {pop_inf}๋ช
')
# ๋ฐฑ์ ํฌ์ฌ!
cured = int(rate * self.pop_inf[cn - 1])
self.cured += cured # ์น๋ฃ์ ์ ํต๊ณ ๋์
self.pop_inf[cn - 1] -= cured # ์ค์ ๊ตญ๊ฐ ๊ฐ์ผ์ ์ ๋ณ๊ฒฝ
# ํด๋น ๋ผ์ด๋ ๊ฒฐ๊ณผ ๋ณด์ฌ์ฃผ๊ธฐ
print('=' * 40)
if self.pop_inf[cn - 1] == 0: # ์์น~
print(f'์์น๋ ๊ตญ๊ฐ: {country}')
print(f'{rn}์ฐจ ๋ฐฑ์ ํฌ์ฌ ํ ๊ฐ์ผ๋ ๋๋ผ์ ๋ํ ์ ๋ณด')
print('=' * 40)
self.print_country_info()
def infect(self):
"""๊ฐ์ผ์ ์ฆ๊ฐ! (๋ชจ๋ ๋๋ผ๊ฐ ๋งค ๋ผ์ด๋ 1.15๋ฐฐ)"""
"""์์น ๊ตญ๊ฐ์ด๋ฉด ์ด์ฐจํผ 0 -> 0์ด๋ฏ๋ก ๊ด์ฐฎ."""
for i in range(len(self.pop_inf)):
more = int(0.15 * self.pop_inf[i])
self.more_infect += more # ์ถ๊ฐ ๊ฐ์ผ์ ์ ํต๊ณ ๋์
self.pop_inf[i] += more # ์ค์ ๊ตญ๊ฐ ๊ฐ์ผ์ ์ ๋ณ๊ฒฝ
def print_score(self):
"""์ต์ข
๊ฒฐ๊ณผ ์ถ๋ ฅํ๋ ํจ์"""
print('=' * 40)
print(' ' * 15 + '์ต์ข
๊ฒฐ๊ณผ' + ' ' * 15)
print('=' * 40)
print(f'๋ผ์ด๋๋ง๋ค ์ถ๊ฐ๋ก ๊ฐ์ผ๋ ๊ฐ์ผ์ ์: {self.more_infect}๋ช
')
print(f'๋ฐฑ์ ์ผ๋ก ์น๋ฃ๋ ๊ฐ์ผ์ ์: {self.cured}๋ช
')
print(f'๋ฐฑ์ ์ผ๋ก ์์น๋ ๊ตญ๊ฐ: ', end='')
count = 0 # ์์น ๊ตญ๊ฐ ๊ฐ์ ์นด์ดํธ
for i, p in enumerate(self.pop_inf):
if p != 0: # ๋ฏธ์์น
continue
else: # ์์น
print(self.country[i], end=' ')
count += 1
print(f'({count}๊ฐ)')
# ๊ฐ์ผ์ ์ ๋ง์ ์์๋๋ก ์ ๋ ฌ
D = {}
for i, c in enumerate(self.country):
D[c] = self.pop_inf[i]
D = sorted(D.items(), key=(lambda x: x[1]), reverse=True)
rank = 1 # ๊ฐ์ผ์ ์ ๋ญํน
for d in D:
country = d[0]
pop_inf = d[1]
i = self.country.index(country) # country๊ฐ self.country ๋ฆฌ์คํธ์์ ๋ช ๋ฒ์งธ ์์์ธ์ง
print(f'{rank}์')
print(f'๊ตญ๊ฐ : {country}')
print(f'์ธ๊ตฌ์ : {self.pop[i]}๋ช
')
print(f'๊ฐ์ผ ์ธ๊ตฌ์ : {pop_inf}๋ช
')
print('')
rank += 1
print('๊ฒ์ ์ข
๋ฃ!')
def main():
cv = covid()
while True:
print('-' * 30)
        print(' ' * 8 + 'COVID Eradication Game' + ' ' * 8)
        print('-' * 30)
        print('1. Vaccine info')
        print('2. Infected country info')
        print('3. Start game')
        print('4. Quit game')
        N = input()
        if N == '1':
            cv.print_vaccine()
        elif N == '2':
            cv.print_country_info()
        elif N == '3':
            a = input('Enter the vaccine to use (1~3) and the country to apply it to (1~5), in that order.\n(separate them with one space)')
            a = a.split(' ')
            a = [int(aa) for aa in a]  # convert every element of list a to an integer
            vac_num, country_num = a[0], a[1]
            for round_num in [1, 2, 3, 4, 5]:
                cv.cure(round_num, vac_num, country_num)
                cv.infect()
                # if infected count > population, stop the game immediately and show the final results
                for i in range(len(cv.country)):
                    if cv.pop_inf[i] > cv.pop[i]:
                        print(f'Alert! Alert! Infection explosion in {cv.country[i]}! Game stopped!')
                        cv.print_score()  # final results screen
                        return 0
                vac_num = random.randint(1, 3)  # random number between 1 and 3
                country_num = random.randint(1, 5)  # random number between 1 and 5
            cv.print_score()  # final results screen
            return 0
        elif N == '4':
            print('Game over')
            return 0
        else:
            print('Please enter a valid menu option (1~4)!')
main()
``` |
{
"source": "JiiHyunK/CLMR",
"score": 3
} |
#### File: CLMR/clmr/data.py
```python
from typing import Tuple
import torch
from torch import Tensor
from torch.utils.data import Dataset
class ContrastiveDataset(Dataset):
def __init__(self, dataset, input_shape, transform):
self.dataset = dataset
self.transform = transform
self.input_shape = input_shape
self.ignore_idx = []
def __getitem__(self, idx) -> Tuple[Tensor, Tensor]:
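        # Clips that are too short for the configured input shape are remembered in
        # ignore_idx and skipped by falling through to the next index instead.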
if idx in self.ignore_idx:
return self[idx + 1]
audio, label = self.dataset[idx]
if audio.shape[1] < self.input_shape[1]:
self.ignore_idx.append(idx)
return self[idx + 1]
if self.transform:
audio = self.transform(audio)
return audio, label
def __len__(self) -> int:
return len(self.dataset)
def concat_clip(self, n: int, audio_length: float) -> Tensor:
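        # Split the full track into fixed-length clips along the time axis, drop the
        # last (possibly shorter) chunk, and stack the clips into one batch so a whole
        # song can be evaluated in a single forward pass.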
audio, label = self.dataset[n]
batch = torch.split(audio, audio_length, dim=1)
batch = torch.cat(batch[:-1])
batch = batch.unsqueeze(dim=1)
if self.transform:
batch = self.transform(batch)
return batch
```
#### File: CLMR/clmr/evaluation.py
```python
import torch
import torch.nn.functional as F
from tqdm import tqdm
from sklearn import metrics
from clmr.data import ContrastiveDataset
def evaluate(
encoder, finetuned_head, test_dataset, dataset_name: str, audio_length: int, device
) -> dict:
est_array = []
gt_array = []
encoder = encoder.to(device)
encoder.eval()
if finetuned_head is not None:
finetuned_head = finetuned_head.to(device)
finetuned_head.eval()
with torch.no_grad():
for idx in tqdm(range(len(test_dataset))):
_, label = test_dataset[idx]
batch = test_dataset.concat_clip(idx, audio_length)
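            # Each track is scored as a batch of equal-length clips; the per-clip
            # outputs are averaged below into a single track-level prediction.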
batch = batch.to(device)
output = encoder(batch)
if finetuned_head:
output = finetuned_head(output)
# we always return logits, so we need a sigmoid here for multi-label classification
if dataset_name in ["magnatagatune", "msd"]:
output = torch.sigmoid(output)
else:
output = F.softmax(output, dim=1)
track_prediction = output.mean(dim=0)
est_array.append(track_prediction)
gt_array.append(label)
if dataset_name in ["magnatagatune"]:
est_array = torch.stack(est_array, dim=0).cpu().numpy()
gt_array = torch.stack(gt_array, dim=0).cpu().numpy()
roc_aucs = metrics.roc_auc_score(gt_array, est_array, average="macro")
pr_aucs = metrics.average_precision_score(gt_array, est_array, average="macro")
return {
"PR-AUC": pr_aucs,
"ROC-AUC": roc_aucs,
}
    est_array = torch.stack(est_array, dim=0).argmax(dim=1).cpu().numpy()
    accuracy = metrics.accuracy_score(gt_array, est_array)
return {"Accuracy": accuracy}
```
#### File: clmr/modules/linear_evaluation.py
```python
import torch
import torch.nn as nn
from pytorch_lightning import LightningModule
from pytorch_lightning import metrics
class LinearEvaluation(LightningModule):
def __init__(self, args, encoder, hidden_dim, output_dim):
super().__init__()
self.save_hyperparameters(args)
self.encoder = encoder
self.hidden_dim = hidden_dim
self.output_dim = output_dim
if self.hparams.finetuner_mlp:
self.model = nn.Sequential(
nn.Linear(self.hidden_dim, self.hidden_dim),
nn.ReLU(),
nn.Linear(self.hidden_dim, self.output_dim),
)
else:
self.model = nn.Sequential(nn.Linear(self.hidden_dim, self.output_dim))
self.criterion = self.configure_criterion()
self.accuracy = metrics.Accuracy()
self.average_precision = metrics.AveragePrecision(pos_label=1)
def forward(self, x, y):
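        # The pretrained encoder stays frozen (wrapped in no_grad); only the linear
        # (or small MLP) head on top of its representation h0 is trained.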
with torch.no_grad():
h0 = self.encoder(x)
preds = self.model(h0)
loss = self.criterion(preds, y)
return loss, preds
def training_step(self, batch, batch_idx):
x, y = batch
loss, preds = self.forward(x, y)
# self.log("Train/accuracy", self.accuracy(preds, y))
self.log("Train/pr_auc", self.average_precision(preds, y))
self.log("Train/loss", loss)
return loss
def validation_step(self, batch, batch_idx):
x, y = batch
loss, preds = self.forward(x, y)
# self.log("Test/accuracy", self.accuracy(preds, y))
self.log("Valid/pr_auc", self.average_precision(preds, y))
self.log("Valid/loss", loss)
return loss
def configure_criterion(self):
if self.hparams.dataset in ["magnatagatune", "msd"]:
criterion = nn.BCEWithLogitsLoss()
else:
criterion = nn.CrossEntropyLoss()
return criterion
def configure_optimizers(self):
optimizer = torch.optim.Adam(
self.model.parameters(),
lr=self.hparams.finetuner_learning_rate,
weight_decay=self.hparams.weight_decay,
)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer,
mode="min",
factor=0.1,
patience=5,
threshold=0.0001,
threshold_mode="rel",
cooldown=0,
min_lr=0,
eps=1e-08,
verbose=False,
)
if scheduler:
return {
"optimizer": optimizer,
"lr_scheduler": scheduler,
"monitor": "Valid/loss",
}
else:
return {"optimizer": optimizer}
``` |
{
"source": "jiin9802/sweng2021",
"score": 3
} |
#### File: sweng2021/pytet_v0.7/decorator.py
```python
from game import *
from matrix import *
from tetris import *
import logging
##################################################
### ColorDecorator for Tetris Class
##################################################
class ColorDecorator(Tetris,Game):
setOfCBlockObjects=0
def initCBlocks(self,setOfBlockObjects):
super(ColorDecorator,self).__init__(self.game.iScreenDy,self.game.iScreenDx)
# Tetris.init(setOfBlockObjects)
ColorDecorator.setOfCBlockObjects=[[0]* Tetris.nBlockDegrees for _ in range(Tetris.nBlockTypes)]
for i in range(Tetris.nBlockTypes):
for j in range(Tetris.nBlockDegrees):
obj=Matrix(setOfBlockObjects[i][j])
obj.mulc(i+1)
ColorDecorator.setOfCBlockObjects[i][j]=obj
### initialize self.setOfCBlockObjects
return
def __init__(self, game):
        self.game = game  # keep a reference to the wrapped Tetris object
self.initCBlocks(game.setOfBlockObjects)
arrayScreen = game.createArrayScreen()
self.iCScreen = Matrix(arrayScreen)
self.oCScreen = Matrix(self.iCScreen)
# self.justStarted=True
return
def accept(self, key):
if key>='0' and key<='6':
if self.justStarted==False:
self.deleteFullLines()
self.iCScreen=Matrix(self.oCScreen)
state=Tetris.accept(self,key)
self.currCBlk=ColorDecorator.setOfCBlockObjects[self.idxBlockType][self.idxBlockDegree]
tempBlk=self.iCScreen.clip(self.top,self.left,self.top+self.currCBlk.get_dy(),self.left+self.currCBlk.get_dx())
tempBlk=tempBlk+self.currCBlk
self.oCScreen=Matrix(self.iCScreen)
self.oCScreen.paste(tempBlk, self.top, self.left)
return state
def getScreen(self):
return self.oCScreen
def deleteFullLines(self):
nDeleted=0
nScanned=Tetris.deleteFullLines(self)
zero=Matrix([[0 for x in range(0, (self.iScreenDx-2*Tetris.iScreenDw))]])
for y in range(nScanned-1, -1, -1):
cy=self.top+y+nDeleted
line=self.oCScreen.clip(cy,0,cy+1,self.oCScreen.get_dx())
line=line.binary()
if line.sum()==self.oCScreen.get_dx():
temp=self.oCScreen.clip(0,0,cy,self.oCScreen.get_dx())
self.oCScreen.paste(temp,1,0)
self.oCScreen.paste(zero,0,Tetris.iScreenDw)
nDeleted+=1
return
#Tetris.deleteFullLines(self)
#self.oCScreen.paste(self.game.oScreen,0,0)
#self.oCScreen=Matrix(self.game.oScreen)
#self.oCScreen=self.oScreen
#return
```
#### File: sweng2021/pytet_v0.7_solution/decorator.py
```python
from game import *
from matrix import *
##################################################
### ColorDecorator for Tetris Class
##################################################
class ColorDecorator(Game):
def initCBlocks(self, setOfBlockObjects):
game = self.game
self.setOfCBlockObjects = [[0]* game.nBlockDegrees for _ in range(game.nBlockTypes)]
for i in range(game.nBlockTypes):
for j in range(game.nBlockDegrees):
obj = Matrix(game.setOfBlockObjects[i][j])
obj.mulc(i+1)
self.setOfCBlockObjects[i][j] = obj
return
def __init__(self, game):
self.game = game
self.initCBlocks(game.setOfBlockObjects)
arrayScreen = game.createArrayScreen()
self.iCScreen = Matrix(arrayScreen)
self.oCScreen = Matrix(self.iCScreen)
return
def accept(self, key):
if key >= '0' and key <= '6':
if self.game.justStarted == False:
self.deleteFullLines()
self.iCScreen = Matrix(self.oCScreen)
state = self.game.accept(key)
currCBlk = self.setOfCBlockObjects[self.game.idxBlockType][self.game.idxBlockDegree]
tempBlk = self.iCScreen.clip(self.game.top, self.game.left,
self.game.top + currCBlk.get_dy(),
self.game.left + currCBlk.get_dx())
tempBlk = tempBlk + currCBlk
self.oCScreen = Matrix(self.iCScreen)
self.oCScreen.paste(tempBlk, self.game.top, self.game.left)
return state
def getScreen(self):
return self.oCScreen
def deleteFullLines(self):
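        # Scan only the rows the just-landed block could occupy; every full line is
        # removed from both the plain Tetris screen and the colored screen by pasting
        # everything above it one row down and clearing the exposed top row.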
nDeleted = 0
nScanned = self.game.currBlk.get_dy()
if self.game.top + self.game.currBlk.get_dy() - 1 >= self.game.iScreenDy:
nScanned -= (self.game.top + self.game.currBlk.get_dy() - self.game.iScreenDy)
zero = Matrix([[ 0 for x in range(0, (self.game.iScreenDx - 2*self.game.iScreenDw))]])
for y in range(nScanned - 1, -1, -1):
cy = self.game.top + y + nDeleted
line = self.game.oScreen.clip(cy, 0, cy+1, self.game.oScreen.get_dx())
if line.sum() == self.game.oScreen.get_dx():
### Tetris screen
temp = self.game.oScreen.clip(0, 0, cy, self.game.oScreen.get_dx())
self.game.oScreen.paste(temp, 1, 0)
self.game.oScreen.paste(zero, 0, self.game.iScreenDw)
### CTetris screen
temp = self.oCScreen.clip(0, 0, cy, self.oCScreen.get_dx())
self.oCScreen.paste(temp, 1, 0)
self.oCScreen.paste(zero, 0, self.game.iScreenDw)
nDeleted += 1
return
``` |
{
"source": "jiinus/django-imagekit",
"score": 2
} |
#### File: management/commands/generateimages.py
```python
import re
from django.core.management.base import BaseCommand
from ...exceptions import MissingSource
from ...registry import cachefile_registry, generator_registry
class Command(BaseCommand):
help = ("""Generate files for the specified image generators (or all of them if
none was provided). Simple, glob-like wildcards are allowed, with *
matching all characters within a segment, and ** matching across
segments. (Segments are separated with colons.) So, for example,
"a:*:c" will match "a:b:c", but not "a:b:x:c", whereas "a:**:c" will
match both. Subsegments are always matched, so "a" will match "a" as
well as "a:b" and "a:b:c".""")
args = '[generator_ids]'
def add_arguments(self, parser):
parser.add_argument('generator_id', nargs='*', help='<app_name>:<model>:<field> for model specs')
def handle(self, *args, **options):
generators = generator_registry.get_ids()
generator_ids = options['generator_id'] if 'generator_id' in options else args
if generator_ids:
patterns = self.compile_patterns(generator_ids)
generators = (id for id in generators if any(p.match(id) for p in patterns))
for generator_id in generators:
self.stdout.write('Validating generator: %s\n' % generator_id)
for image_file in cachefile_registry.get(generator_id):
if image_file.name:
self.stdout.write(' %s\n' % image_file.name)
try:
image_file.generate()
except MissingSource as err:
self.stdout.write('\t No source associated with\n')
except Exception as err:
self.stdout.write('\tFailed %s\n' % (err))
def compile_patterns(self, generator_ids):
return [self.compile_pattern(id) for id in generator_ids]
def compile_pattern(self, generator_id):
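        # Illustrative expansion (assumed): '*' matches within a segment and '**'
        # across segments, so 'a:**:c' compiles to roughly r'^a:.*:c(:.*)?$', which
        # matches 'a:b:c' and 'a:b:x:c' but not 'x:a:c'.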
parts = re.split(r'(\*{1,2})', generator_id)
pattern = ''
for part in parts:
if part == '*':
pattern += '[^:]*'
elif part == '**':
pattern += '.*'
else:
pattern += re.escape(part)
return re.compile('^%s(:.*)?$' % pattern)
```
#### File: django-imagekit/tests/models.py
```python
from django.db import models
from imagekit import ImageSpec
from imagekit.models import ImageSpecField, ProcessedImageField
from imagekit.processors import Adjust, ResizeToFill, SmartCrop
class Thumbnail(ImageSpec):
processors = [ResizeToFill(100, 60)]
format = 'JPEG'
options = {'quality': 60}
class ImageModel(models.Model):
image = models.ImageField(upload_to='b')
class Photo(models.Model):
original_image = models.ImageField(upload_to='photos')
# Implicit source field
thumbnail = ImageSpecField([Adjust(contrast=1.2, sharpness=1.1),
ResizeToFill(50, 50)], format='JPEG',
options={'quality': 90})
smartcropped_thumbnail = ImageSpecField([Adjust(contrast=1.2,
sharpness=1.1), SmartCrop(50, 50)], source='original_image',
format='JPEG', options={'quality': 90})
class ProcessedImageFieldModel(models.Model):
processed = ProcessedImageField([SmartCrop(50, 50)], format='JPEG',
options={'quality': 90}, upload_to='p')
class ProcessedImageFieldWithSpecModel(models.Model):
processed = ProcessedImageField(spec=Thumbnail, upload_to='p')
class CountingCacheFileStrategy:
def __init__(self):
self.on_existence_required_count = 0
self.on_content_required_count = 0
self.on_source_saved_count = 0
def on_existence_required(self, file):
self.on_existence_required_count += 1
def on_content_required(self, file):
self.on_content_required_count += 1
def on_source_saved(self, file):
self.on_source_saved_count += 1
class AbstractImageModel(models.Model):
original_image = models.ImageField(upload_to='photos')
abstract_class_spec = ImageSpecField(source='original_image',
format='JPEG',
cachefile_strategy=CountingCacheFileStrategy())
class Meta:
abstract = True
class ConcreteImageModel(AbstractImageModel):
pass
class ConcreteImageModelSubclass(ConcreteImageModel):
pass
```
#### File: django-imagekit/tests/test_abstract_models.py
```python
from imagekit.utils import get_nonabstract_descendants
from .models import (AbstractImageModel, ConcreteImageModel,
ConcreteImageModelSubclass)
def test_nonabstract_descendants_generator():
descendants = list(get_nonabstract_descendants(AbstractImageModel))
assert descendants == [ConcreteImageModel, ConcreteImageModelSubclass]
``` |
{
"source": "JIIOryo/air_conditioner_api",
"score": 3
} |
#### File: air_conditioner_api/lib/run.py
```python
import subprocess
def run_air_conditioner(
irrppy_path: str,
command_file_path: str,
gpio: int,
command: str,
):
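    # Fire-and-forget: assemble the irrp.py playback command, e.g.
    # "python3 irrp.py -p -g18 -f codes.json cool" (values illustrative only),
    # and launch it in a child process without waiting for it to finish.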
cmd = f'python3 {irrppy_path} -p -g{gpio} -f {command_file_path} {command}'
subprocess.Popen(cmd.split())
return
```
#### File: JIIOryo/air_conditioner_api/server.py
```python
import json
import jsonschema
from flask import Flask, request, jsonify
from flask_jsonschema_validator import JSONSchemaValidator
import controller.cool as cool_controller
import controller.hot as hot_controller
import controller.dehumidify as dehumidify_controller
import controller.off as off_controller
app = Flask(__name__)
JSONSchemaValidator(
app = app,
root = "schemas"
)
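# The @app.validate('controller', '<schema>') decorators below check request bodies
# against JSON schemas stored under the "schemas" root configured above (exact file
# layout assumed); schema violations raise jsonschema.ValidationError, which
# onValidationError turns into a 400 JSON response.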
ok = {
'message': 'ok',
'code': 200
}
def error_response(e) -> dict:
code = 400
res = {
'code': code,
'error': e.error,
'message': e.message,
}
return jsonify(res), 400
@app.errorhandler(jsonschema.ValidationError)
def onValidationError(e):
res = {
'code': 400,
'error': e.__class__.__name__,
'message': 'Validation error: ' + str(e),
}
return jsonify(res), 400
@app.route('/')
def index():
with open('./public/index.html') as f:
html = f.read()
return html
@app.route('/ping')
def ping(): return jsonify(ok), 200
@app.route('/on/cool', methods=['PUT'])
@app.validate('controller', 'cool')
def cool():
try: cool_controller.cool(request.json)
except Exception as e: return error_response(e)
return jsonify(ok), 200
@app.route('/on/hot', methods=['PUT'])
@app.validate('controller', 'hot')
def hot():
try: hot_controller.hot(request.json)
except Exception as e: return error_response(e)
return jsonify(ok), 200
@app.route('/on/dehumidify', methods=['PUT'])
@app.validate('controller', 'dehumidify')
def dehumidify():
try: dehumidify_controller.dehumidify(request.json)
except Exception as e: return error_response(e)
return jsonify(ok), 200
@app.route('/off', methods=['DELETE'])
def off():
try: off_controller.off()
except Exception as e: return error_response(e)
return jsonify(ok), 200
def main():
PORT = 40001
app.run(debug=True, host='0.0.0.0', port=PORT)
if __name__ == '__main__':
main()
``` |
{
"source": "JIIOryo/ams-client",
"score": 2
} |
#### File: ams-client/lib/assets.py
```python
import sys
import pathlib
current_dir = pathlib.Path(__file__).resolve().parent
sys.path.append( str(current_dir) + '/../' )
from lib.config import get_config_item
AMS_ROOT_PATH = get_config_item('ROOT_PATH')
AMS_BOOT_ASCII_ART_PATH = '/'.join([AMS_ROOT_PATH, 'assets', 'ascii_art', 'ams_boot.txt'])
def get_boot_ascii_art():
with open(AMS_BOOT_ASCII_ART_PATH) as f:
return f.read()
```
#### File: ams-client/lib/config.py
```python
from typing import Any, List
import json
import os
WTMS_ROOT_PATH = os.path.join(os.path.dirname(__file__), '../')
CONFIG_PATH = WTMS_ROOT_PATH + 'config/config.json'
GPIO_CONFIG_PATH = WTMS_ROOT_PATH + 'config/gpio.json'
SENSOR_CONFIG_PATH = WTMS_ROOT_PATH + 'config/sensor.json'
CAMERA_CONFIG_PATH = WTMS_ROOT_PATH + 'config/camera.json'
CAMERA_DEVICE_CONFIG_PATH = WTMS_ROOT_PATH + 'config/camera_device.json'
class KeyNotExist(Exception):
pass
def read_config_file(config_file_path: str) -> dict:
with open(config_file_path) as f:
return json.load(f)
def get_config() -> dict:
return read_config_file(CONFIG_PATH)
def set_config(new_config: dict) -> None:
with open(CONFIG_PATH, 'w') as f:
json.dump(new_config, f, indent = 4)
def get_gpio_config() -> dict:
return read_config_file(GPIO_CONFIG_PATH)
def get_sensor_config() -> dict:
return read_config_file(SENSOR_CONFIG_PATH)
def get_camera_config() -> dict:
return read_config_file(CAMERA_CONFIG_PATH)
def get_camera_device_config() -> dict:
return read_config_file(CAMERA_DEVICE_CONFIG_PATH)
def get_config_items(keys: List[str]) -> dict:
config = get_config()
result = {}
for key in keys:
if key not in config:
raise KeyNotExist('key: {key} does not exist.'.format(key = key))
result[key] = config[key]
return result
def get_config_item(key: str) -> Any:
return get_config_items([key])[key]
def set_gpio_config(new_gpio_config: dict) -> None:
with open(GPIO_CONFIG_PATH, 'w') as f:
json.dump(new_gpio_config, f, indent = 4)
def set_sensor_config(new_sensor_config: dict) -> None:
with open(SENSOR_CONFIG_PATH, 'w') as f:
json.dump(new_sensor_config, f, indent = 4)
def set_camera_config(new_camera_config: list) -> None:
with open(CAMERA_CONFIG_PATH, 'w') as f:
json.dump(new_camera_config, f, indent = 4)
def get_root_path() -> str:
return get_config_item('ROOT_PATH').rstrip('/')
```
#### File: ams-client/lib/notification.py
```python
import json
import sys
import requests
import pathlib
current_dir = pathlib.Path(__file__).resolve().parent
sys.path.append( str(current_dir) + '/../' )
from commons.consts import (
SLACK_NOTIFICATION_TYPE,
)
from commons.errors import NotificationTypeUndefined
from lib.config import get_config_item
def post_slack(channel: str, username: str, text: str, icon_emoji: str) -> None:
post_data = {
"channel": channel,
"username": username,
"text": text,
"icon_emoji": icon_emoji
}
slack_webhook_url = get_config_item('SLACK')['WEBHOOK_URL']
response = requests.post(slack_webhook_url, data=json.dumps(post_data))
def post_slack_with_attachment(
channel: str,
username: str,
text: str,
icon_emoji: str,
color: str,
attachment_title: str,
attachment_text: str,
attachment_footer: str,
) -> None:
post_data = {
"channel": channel,
"username": username,
"text": text,
"icon_emoji": icon_emoji,
"attachments": [
{
"color": color,
"author_name": attachment_title,
"text": attachment_text,
"footer": attachment_footer,
}
]
}
slack_webhook_url = get_config_item('SLACK')['WEBHOOK_URL']
response = requests.post(slack_webhook_url, data=json.dumps(post_data))
def post_slack_by_type(text: str, type_: str) -> None:
slack_config = get_config_item('SLACK')
if type_ == SLACK_NOTIFICATION_TYPE['NOTIFICATION']:
post_slack(
channel = slack_config['NOTIFICATION']['CHANNEL'],
username = slack_config['NOTIFICATION']['USERNAME'],
text = slack_config['NOTIFICATION']['MESSAGE_FORMAT'].format(message = text),
icon_emoji = slack_config['NOTIFICATION']['ICON_EMOJI'],
)
elif type_ == SLACK_NOTIFICATION_TYPE['ERROR']:
post_slack(
channel = slack_config['ERROR']['CHANNEL'],
username = slack_config['ERROR']['USERNAME'],
text = slack_config['ERROR']['MESSAGE_FORMAT'].format(message = text),
icon_emoji = slack_config['ERROR']['ICON_EMOJI'],
)
else:
raise NotificationTypeUndefined('This type does not exist.')
def post_log_to_slack(pretext: str, color: str, title: str, text: str, footer: str) -> None:
# get slack config from config
slack_config = get_config_item('SLACK')
# post slack with attachment
post_slack_with_attachment(
channel = slack_config['LOG']['CHANNEL'],
username = slack_config['LOG']['USERNAME'],
text = pretext,
icon_emoji = slack_config['LOG']['ICON_EMOJI'],
color = color,
attachment_title = title,
attachment_text = text,
attachment_footer = footer,
)
```
#### File: ams-client/on_message/camera_get.py
```python
import json
import sys
import pathlib
current_dir = pathlib.Path(__file__).resolve().parent
sys.path.append( str(current_dir) + '/../' )
from lib.config import get_camera_config
def get_cameras() -> list:
cameras = get_camera_config()
return cameras
```
#### File: ams-client/on_message/camera_update.py
```python
import json
import sys
import pathlib
current_dir = pathlib.Path(__file__).resolve().parent
sys.path.append( str(current_dir) + '/../' )
from service.camera import update_camera
"""
# message
type: json str
-----
{
"camera_id": "camera_id"
"name": "new camera",
"camera_device_id": 1,
"resolution": {
"x": 1700,
"y": 1024
},
"timer": [
{
"hour": 10,
"minute": 0
},
{
"hour": 13,
"minute": 0
}
],
"trimming": {
"top": 100,
"bottom": 1024,
"left": 720,
"right": 1700
}
}
"""
def camera_update(message: str) -> None:
updated_camera = json.loads(message)
update_camera(
camera_id = updated_camera['camera_id'],
name = updated_camera['name'],
camera_device_id = updated_camera['camera_device_id'],
resolution = updated_camera['resolution'],
timer = updated_camera['timer'],
trimming = updated_camera['trimming']
)
return
```
#### File: ams-client/on_message/device_control.py
```python
import json
import sys
import pathlib
current_dir = pathlib.Path(__file__).resolve().parent
sys.path.append( str(current_dir) + '/../' )
from commons.consts import DEVICE_TYPE
from lib.config import get_config, get_gpio_config
from lib.gpio import gpio_write
from service.device import publish_device_state
"""
# message
type: json str
-----
{
"devices": [
{
"device_id": 1,
"state": false
},
{
"device_id": 2,
"state": true
}
]
}
"""
def device_control(message: str) -> None:
target_devices = json.loads(message)['devices']
devices = get_gpio_config()
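    # Match each requested device_id against the GPIO config and drive its BCM pin
    # to the requested state; feed pumps are deliberately skipped here (see the
    # DEVICE_TYPE check below) and the device state is re-published afterwards.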
for target_device in target_devices:
target_device_id = target_device['device_id']
next_state = target_device['state']
for device in devices:
if device['device_id'] == target_device_id:
# feed pump should not be controlled by this function
if device['device']['type'] == DEVICE_TYPE['FEED_PUMP']:
continue
BCM = device['BCM']
gpio_write(BCM, int(next_state))
break
publish_device_state()
```
#### File: ams-client/on_message/device_exchange.py
```python
import datetime
import json
import sys
import pathlib
current_dir = pathlib.Path(__file__).resolve().parent
sys.path.append( str(current_dir) + '/../' )
# from commons.errors import (
# # DeviceNotFound,
# )
from commons.consts import (
SLACK_NOTIFICATION_TYPE,
)
from lib.config import get_config, get_gpio_config, set_gpio_config
from lib.notification import post_slack_by_type
from service.device import publish_device_state
from service.timer import set_new_timer
"""
input
A: [6, 4, 5, 2, 3, 1]
B: [
{"device_id":1, "BCM": 26, "name":"a", other keys1 ...},
{"device_id":2, "BCM": 24, "name":"b", other keys2 ...},
{"device_id":3, "BCM": 23, "name":"c", other keys3 ...},
{"device_id":4, "BCM": 19, "name":"d", other keys4 ...},
{"device_id":5, "BCM": 18, "name":"e", other keys5 ...},
{"device_id":6, "BCM": 14, "name":"f", other keys6 ...}
]
output
X: [
{"device_id":1," "BCM": 26, name":"f", other keys6 ...},
{"device_id":2," "BCM": 24, name":"d", other keys4 ...},
{"device_id":3," "BCM": 23, name":"e", other keys5 ...},
{"device_id":4," "BCM": 19, name":"b", other keys2 ...},
{"device_id":5," "BCM": 18, name":"c", other keys3 ...},
{"device_id":6," "BCM": 14, name":"a", other keys1 ...}
]
"""
def exchanged(A: list, B: list) -> list:
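    # A[i] is the 1-based device_id whose configuration should move into slot i+1.
    # Each copied entry gets its device_id and BCM overwritten so the physical pin
    # assignment of every slot stays fixed while the logical device data is swapped.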
only_exchanged, result = [dict(B[a-1]) for a in A], []
for i, o in enumerate(only_exchanged):
o.update(device_id = i+1, BCM = B[i]['BCM'])
result.append(o)
return result
"""
# message
type: json str
-----
{
"devices": [1, 2, 3, 6, 4, 5]
}
"""
def device_exchange(message: dict) -> None:
exchange_devices = json.loads(message)['devices']
gpio_config = get_gpio_config()
result = exchanged(exchange_devices, gpio_config)
set_gpio_config(result)
set_new_timer()
publish_device_state()
post_slack_by_type(
text = 'Devices are exchanged.',
type_ = SLACK_NOTIFICATION_TYPE['NOTIFICATION']
)
```
#### File: ams-client/on_message/publish_ack.py
```python
import datetime
import json
import sys
import pathlib
current_dir = pathlib.Path(__file__).resolve().parent
sys.path.append( str(current_dir) + '/../' )
from lib.mqtt import publish
from lib.topic import get_publish_topics
publish_topics = get_publish_topics()
def publish_ack() -> None:
message = {
"timestamp": int( datetime.datetime.now().strftime('%s') ),
}
publish(
topic = publish_topics['ACK'],
message = json.dumps(message),
qos = 1,
retain = False,
)
```
#### File: ams-client/on_message/sensor_update.py
```python
import datetime
import json
import sys
from typing import List
import pathlib
current_dir = pathlib.Path(__file__).resolve().parent
sys.path.append( str(current_dir) + '/../' )
from commons.consts import (
SENSOR_TYPE,
SLACK_UPDATE_SENSOR_NOTIFICATION_FORMAT,
SLACK_NOTIFICATION_TYPE,
)
from commons.errors import (
SensorNotFound,
SensorTypeNotExist,
)
from lib.config import get_sensor_config, set_sensor_config
from lib.notification import post_slack_by_type
from lib.util import formated_str_now_date
from service.sensor import publish_sensor_config, calibration_format
"""
# message
type: json str
-----
{
"sensor_id": 1,
"name": "name",
"description": "my description",
"type": "water_temperature"
}
"""
def sensor_update(message: str) -> None:
update_sensor = json.loads(message)
sensor_config = get_sensor_config()
update_sensor_id = update_sensor['sensor_id']
for sensor in sensor_config:
if sensor['sensor_id'] == update_sensor_id:
# Sensor does not found.
if sensor['sensor'] == {}:
raise SensorNotFound('Sensor does not found.')
# This sensor type does not exist.
if update_sensor['type'] not in SENSOR_TYPE.values():
raise SensorTypeNotExist('This sensor type does not exist.')
before_sensor = dict(sensor['sensor'])
sensor['sensor']['name'] = update_sensor['name']
sensor['sensor']['description'] = update_sensor['description']
sensor['sensor']['type'] = update_sensor['type']
sensor['sensor']['updated_at'] = int( datetime.datetime.now().strftime('%s') )
break
set_sensor_config(sensor_config)
slack_post_text = SLACK_UPDATE_SENSOR_NOTIFICATION_FORMAT.format(
now = formated_str_now_date(),
sensor_id = update_sensor_id,
before_name = before_sensor['name'],
before_description = before_sensor['description'],
before_type = before_sensor['type'],
after_name = update_sensor['name'],
after_description = update_sensor['description'],
after_type = update_sensor['type'],
)
post_slack_by_type(
text = slack_post_text,
type_ = SLACK_NOTIFICATION_TYPE['NOTIFICATION']
)
publish_sensor_config()
"""
# message
type: json str
-----
{
"calibration": [[1900, 21], [1910, 21.3], [2010, 23.8]]
}
"""
def sensor_calibration_update(sensor_id: int, calibration: List[List[int]]) -> None:
sensor_config = get_sensor_config()
for sensor in sensor_config:
if sensor['sensor_id'] == sensor_id:
# Sensor does not found.
if sensor['sensor'] == {}:
raise SensorNotFound('Sensor does not found.')
before_sensor = dict(sensor['sensor'])
sensor['sensor']['calibration'] = calibration_format(calibration)
break
set_sensor_config(sensor_config)
publish_sensor_config()
```
#### File: ams-client/service/backup.py
```python
import datetime
import sys
import pathlib
current_dir = pathlib.Path(__file__).resolve().parent
sys.path.append( str(current_dir) + '/../' )
from commons.errors import (
FormatInvalid,
)
from lib.config import get_gpio_config, set_gpio_config, get_config_item, get_sensor_config, set_sensor_config
def backup_file_now_date_generator() -> str:
return datetime.datetime.now().strftime('%Y%m%d_%H_%M_%S')
def backup_file_name(type_: str, ext: str) -> str:
"""
Parameters
----------
type_ : str
backup type (device|sensor)
ext : str
        backup file extension (json| ...)
Returns
----------
str
backup_file_name
e.g.
device_20191010_10_00_00.json
"""
return '{type}_{date}.{ext}'.format(
type = type_,
date = backup_file_now_date_generator(),
ext = ext,
)
def get_device_backup_file() -> str:
return get_gpio_config()
def import_device_back_file(backup_file: list) -> None:
MAX_DEVICE_NUMBER = get_config_item('MAX_DEVICE_NUMBER')
if len(backup_file) != MAX_DEVICE_NUMBER:
raise FormatInvalid('Device number invalid')
# TODO Validation
set_gpio_config(backup_file)
return
def get_sensor_backup_file() -> str:
return get_sensor_config()
def import_sensor_back_file(backup_file: list) -> None:
MAX_SENSOR_NUMBER = get_config_item('MAX_SENSOR_NUMBER')
if len(backup_file) != MAX_SENSOR_NUMBER:
        raise FormatInvalid('Sensor number invalid')
# TODO Validation
set_sensor_config(backup_file)
return
```
#### File: ams-client/service/device.py
```python
import datetime
import json
import sys
import pathlib
current_dir = pathlib.Path(__file__).resolve().parent
sys.path.append( str(current_dir) + '/../' )
from lib.config import get_gpio_config
from lib.gpio import gpio_read
from lib.mqtt import publish
from lib.topic import get_publish_topics
publish_topics = get_publish_topics()
def get_all_device_by_device_id(device_id):
devices = get_gpio_config()
for device in devices:
if device['device_id'] == device_id:
return device
return {}
def get_all_device_state() -> list:
devices = get_gpio_config()
all_device_state = []
for device in devices:
if device['device']:
all_device_state.append({
'device_id': device['device_id'],
'state': gpio_read(device['BCM']),
'name': device['device']['name'],
'description': device['device']['description'],
'type': device['device']['type'],
'run_type': device['device']['run_type'],
'options': device['device']['options'],
'created_at': device['device']['created_at'],
'updated_at': device['device']['updated_at'],
})
return all_device_state
def publish_device_state() -> None:
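    # Publish the complete device list as a retained MQTT message so that newly
    # connecting clients immediately receive the latest known device state.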
message = {
"timestamp": int( datetime.datetime.now().strftime('%s') ),
"devices": get_all_device_state(),
}
publish(
topic = publish_topics['DEVICE_STATE'],
message = json.dumps(message),
qos = 1,
retain = True,
)
```
#### File: ams-client/service/reboot.py
```python
import sys
import pathlib
current_dir = pathlib.Path(__file__).resolve().parent
sys.path.append( str(current_dir) + '/../' )
from lib.config import get_gpio_config
from lib.gpio import gpio_write
from lib.util import get_now_device_should_be_on_by_timer
from commons.consts import DEVICE_RUN_TYPE
def set_init_device_state() -> None:
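    # After a reboot, put every device that runs on a daily timer back into the
    # on/off state its timer says it should currently be in.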
devices = get_gpio_config()
for device in devices:
# no device
if device['device'] == {}:
continue
# device run type is not daily
if device['device']['run_type'] != DEVICE_RUN_TYPE['DAILY']:
continue
timer = device['device']['options']['timer']
ideal_device_state = get_now_device_should_be_on_by_timer(
on_hour = timer['on_hour'],
on_minute = timer['on_minute'],
off_hour = timer['off_hour'],
off_minute = timer['off_minute'],
)
gpio_write(
pin = device['BCM'],
value = int(ideal_device_state)
)
```
#### File: ams-client/subscriber/subscriber.py
```python
import datetime
import json
import os
import subprocess
import sys
import time
import traceback
import paho.mqtt.client as mqtt
import pathlib
current_dir = pathlib.Path(__file__).resolve().parent
sys.path.append( str(current_dir) + '/../' )
from lib.color import Color, color_text, print_color_log
from commons.consts import (
SLACK_NOTIFICATION_TYPE,
LOG_TITLE,
)
from lib.config import get_config, get_config_item
from lib.notification import post_slack_by_type
from lib.topic import get_subscribe_topics
from topic_router import topic_router
config = get_config()
host = config['MQTT']['MQTT_BROKER']
port = config['MQTT']['MQTT_BROKER_PORT']
username = config['MQTT']['MQTT_BROKER_USERNAME']
password = config['MQTT']['MQTT_BROKER_PASSWORD']
protocol = mqtt.MQTTv311
keepalive = config['MQTT']['KEEPALIVE']
QOS = config['MQTT']['SUBSCRIBER_QOS']
SUBSCRIBE_TOPICS = [(topic, QOS) for topic in get_subscribe_topics().values() ]
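# Every configured topic is subscribed with the same QoS. Incoming messages are
# logged and dispatched through topic_router; any handler exception is reported
# to the Slack error channel instead of crashing the subscriber loop.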
def on_connect(client, userdata, flags, rc):
print('Result Code: {}\n'.format(rc))
client.subscribe(SUBSCRIBE_TOPICS)
def on_message(client, userdata, msg):
print_color_log(
title = LOG_TITLE['SUBSCRIBER'],
title_color = Color.CYAN,
text = '{unixtime}: {topic}: {message}'.format(
unixtime = datetime.datetime.now().strftime('%s'),
topic = color_text(msg.topic, Color.GREEN),
message = msg.payload.decode(),
)
)
try:
topic_router(
topic = msg.topic,
message = msg.payload.decode()
)
except Exception as e:
error_message = ''.join(traceback.TracebackException.from_exception(e).format())
post_slack_by_type(
text = error_message,
type_ = SLACK_NOTIFICATION_TYPE['ERROR'],
)
print(error_message)
pass
if __name__ == '__main__':
client = mqtt.Client(protocol = protocol)
client.username_pw_set(username, password)
client.on_connect = on_connect
client.on_message = on_message
client.connect(host, port = port, keepalive = keepalive)
client.loop_forever()
``` |
{
"source": "ji-it/CloudTides",
"score": 2
} |
#### File: pkg/controller/monitor.py
```python
import psycopg2
import os
from config import BASE_DIR, DATABASES, FULL_HOSTNAME
import requests
import json
def main():
db = DATABASES['default']['NAME']
user = DATABASES['default']['USER']
password = DATABASES['default']['PASSWORD']
host = DATABASES['default']['HOST']
port = DATABASES['default']['PORT']
conn = psycopg2.connect(database=db, user=user, password=password, host=host, port=port)
cur = conn.cursor()
cur.execute(
'SELECT host_address, name, username, password, id FROM resources')
results = cur.fetchall()
path = os.path.join(BASE_DIR, 'controller')
for result in results:
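        # For each registered vSphere resource, shell out to the usage-collection
        # scripts with its stored credentials, then mark the resource as monitored
        # through the CloudTides REST API.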
os.system('python3 ' + path + '/query_usage.py -s ' + result[0] + ' -u ' + result[
2] + ' -p ' + \
result[3] + ' -n ' + result[1] + ' --no-ssl\n')
os.system('python3 ' + path + '/get_vm_usage_class.py -s ' + result[0] + ' -u ' + \
result[2] + ' -p ' + result[3] + ' -n ' + result[1] + \
' --no-ssl\n')
# cur.execute('UPDATE resources SET monitored = True WHERE host_address = %s AND name = %s',
# (result[0], result[1]))
# conn.commit()
data = {}
data['ResourceID'] = result[4]
data['Monitored'] = True
headers = {'Content-type': 'application/json'}
requests.put(FULL_HOSTNAME + "/v1/resource/update_status/",
data=json.dumps(data), headers=headers)
conn.commit()
cur.close()
conn.close()
# start
if __name__ == "__main__":
main()
```
#### File: pkg/controller/query_usage.py
```python
import ssl
import argparse
import atexit
from pyVim.connect import SmartConnect, Disconnect, SmartConnectNoSSL
import getpass
import requests
import json
from pyVmomi import vim
from config import FULL_HOSTNAME
GBFACTOR = float(1 << 30)
requests.adapters.DEFAULT_RETRIES = 5
def get_args():
""" Get arguments from CLI """
parser = argparse.ArgumentParser(
description='Arguments for talking to vCenter')
parser.add_argument('-s', '--host',
required=True,
action='store',
help='vSpehre service to connect to')
parser.add_argument('-o', '--port',
type=int,
default=443,
action='store',
help='Port to connect on')
parser.add_argument('-u', '--user',
required=True,
action='store',
help='Username to use')
parser.add_argument('-p', '--password',
required=False,
action='store',
help='Password to use')
'''
parser.add_argument('-i', '--info',
required=True,
action='store',
help='cpu, mem or disk')
'''
parser.add_argument('-n', '--name',
required=False,
action='store',
help='resource name to query')
parser.add_argument('-r', '--resource-pool',
action='store_true',
help='whether is resource pool')
parser.add_argument('--no-ssl',
action='store_true',
help='Skip SSL verification')
args = parser.parse_args()
if not args.password:
args.password = <PASSWORD>(
prompt='Enter password')
return args
def get_all_objs(content, vimtype, folder=None, recurse=True):
if not folder:
folder = content.rootFolder
obj = {}
container = content.viewManager.CreateContainerView(folder, vimtype, recurse)
for managed_object_ref in container.view:
obj.update({managed_object_ref: managed_object_ref.name})
return obj
def main():
try:
args = get_args()
si = None
if args.no_ssl:
si = SmartConnectNoSSL(
host=args.host,
user=args.user,
pwd=<PASSWORD>,
port=args.port)
else:
si = SmartConnect(
host=args.host,
user=args.user,
pwd=args.password,
port=args.port)
except:
print("Failed to connect")
exit()
# disconnect this thing
atexit.register(Disconnect, si)
content = si.RetrieveContent()
objs = None
data = {}
data['HostAddress'] = args.host
data['Name'] = args.name
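    # Gather CPU/RAM usage either from a resource pool's runtime info or from a
    # cluster's GetResourceUsage() summary (normalized by the divisors below),
    # then PUT the result to the CloudTides usage-update endpoint.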
if args.resource_pool:
objs = get_all_objs(content, [vim.ResourcePool])
for pool in objs:
if pool.name == args.name:
info = pool.runtime
data['CurrentCPU'] = info.cpu.overallUsage
data['TotalCPU'] = info.cpu.maxUsage
data['CurrentRAM'] = info.memory.overallUsage / (1024.0*1024.0)
data['TotalRAM'] = info.memory.maxUsage / (1024.0*1024.0)
break
else:
objs = get_all_objs(content, [vim.ClusterComputeResource])
for cluster in objs:
if cluster.name == args.name:
summary = cluster.GetResourceUsage()
data['CurrentCPU'] = summary.cpuUsedMHz / 1000.0
data['TotalCPU'] = summary.cpuCapacityMHz / 1000.0
data['CurrentRAM'] = summary.memUsedMB / 1024.0
data['TotalRAM'] = summary.memCapacityMB / 1024.0
break
print(data)
headers = {'Content-type': 'application/json'}
res = requests.put(FULL_HOSTNAME + "/v1/usage/update_resource/", data=json.dumps(data), headers=headers)
print(res)
# start
if __name__ == "__main__":
main()
``` |
{
"source": "jiivan/genoomy",
"score": 3
} |
#### File: genoome/accounts/tests.py
```python
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test import Client
from accounts.forms import SignUpForm
client = Client()
class UserSignupFormTests(TestCase):
def test_presence_of_fields(self):
fields_included = ('email',)
fields_excluded = ('username', 'code',) # Regression test
fields = SignUpForm().fields
self.assertTrue(all(k in fields for k in fields_included))
self.assertTrue(all(k not in fields for k in fields_excluded))
def test_email_field_required(self):
data = dict(password1='<PASSWORD>', password2='<PASSWORD>')
form = SignUpForm(data)
self.assertFalse(form.is_valid())
self.assertTrue(form.has_error('email', 'required'))
self.assertFalse(form.has_error('email', 'anything'))
def test_user_creation(self):
data = dict(username='testuser', email='<EMAIL>', password1='<PASSWORD>',
password2='<PASSWORD>')
form = SignUpForm(data)
self.assertTrue(form.is_valid())
form.save()
user = get_user_model().objects.get(email='<EMAIL>')
self.assertTrue(user.is_active)
self.assertFalse(user.is_staff)
self.assertFalse(user.is_superuser)
class UserSignUpTests(TestCase):
def test_signed_up_user_is_active(self):
post_params = dict(username='testuser', email='<EMAIL>', password1='<PASSWORD>',
password2='<PASSWORD>')
client.post(reverse('accounts:signup'), post_params)
# Username of regular users are their emails
user = get_user_model().objects.get(username='<EMAIL>')
self.assertTrue(user.is_active)
self.assertFalse(user.is_staff)
self.assertFalse(user.is_superuser)
class UserProfileViewtest(TestCase):
def test_post_is_csrf_exempt(self):
get_user_model().objects.create_user(username='test_profile', password='<PASSWORD>')
self.client.login(username='test_profile', password='<PASSWORD>')
response = self.client.post(reverse('accounts:profile'), {})
self.assertEqual(response.status_code, 200)
```
#### File: genoome/configurable_elements/models.py
```python
from django.db import models
from color_aliases.models import ColorAlias
class LegendRow(models.Model):
content = models.CharField(max_length=256)
color = models.ForeignKey(ColorAlias)
priority = models.PositiveIntegerField(default=0)
class Meta:
ordering = ['-priority']
def __str__(self):
return self.content
def get_legend_rows():
return LegendRow.objects.all()
```
#### File: genoome/disease/forms.py
```python
import os
from django import forms
from django.conf import settings
from django.core.files.storage import FileSystemStorage
from django.contrib.auth import get_user_model
from disease.files_utils import get_genome_filepath
storage = FileSystemStorage()
class UploadGenomeForm(forms.Form):
file = forms.FileField()
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super().__init__(*args, **kwargs)
if not self.user.is_authenticated():
self.fields['email'] = forms.EmailField()
def clean_file(self):
if self.user.is_authenticated() and not self.user.can_upload_files:
raise forms.ValidationError('You already have uploaded genome file.', 'invalid')
raw_file = self.cleaned_data.get('file', None)
raw_filename = getattr(raw_file, 'name', None)
if self.user.is_authenticated() and storage.exists(get_genome_filepath(self.user, raw_filename)):
raise forms.ValidationError('You have already uploaded this file', 'invalid')
if len(raw_filename.rsplit('.', 1)) != 2:
raise forms.ValidationError('Provide file with correct extension', 'invalid')
return self.cleaned_data['file']
def clean_email(self):
if self.fields.get('email', None) is None: # Happens when user is not logged in
return None
email = self.cleaned_data.get('email', None)
user_model = get_user_model()
try:
user = user_model.objects.get(email=email)
can_upload = user.can_upload_files
except user_model.DoesNotExist:
can_upload = True # Assume that new user can upload files
if not can_upload:
raise forms.ValidationError('You can not upload more files', 'invalid')
return email
```
#### File: management/commands/clean_uploaded_files.py
```python
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand, CommandError
from django.core.files.storage import FileSystemStorage
from disease.files_utils import get_genome_dirpath, get_genome_filepath
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('username', metavar='username')
def handle(self, username, **options):
user = get_user_model().objects.get(username=username)
user.analyzedataorder_set.all().delete()
user.couponredeemed_set.all().delete()
genome_files_dir = get_genome_dirpath(user)
storage = FileSystemStorage()
dirs, files = storage.listdir(genome_files_dir)
for file in files:
storage.delete(get_genome_filepath(user, file))
```
#### File: management/commands/load_gwas.py
```python
import json
from django.core.management.base import BaseCommand, CommandError
import psycopg2
from disease.models import SNPMarker
def map_SNP_model_fields(gwas_dict):
def clean_NR(value):
if value in {'NR', 'NS'}:
value = None
return value
def handle_precision_overflow(value):
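        # round(x, 308) leaves normal magnitudes untouched but collapses extremely
        # small values (e.g. p-values below roughly 1e-308) to 0.0, so they are less
        # likely to trip Postgres' double precision range check noted below.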
if value is None:
return value
try:
fvalue = float(value)
except ValueError:
return None
return round(fvalue, 308)
snp_kwargs = {
'rsid': gwas_dict['strongest_snp'],
'link': gwas_dict['link'],
'risk_allele': gwas_dict['risk_allele'],
'disease_trait': gwas_dict['disease_trait'],
'p_value': handle_precision_overflow(clean_NR(gwas_dict['p_value'])), # Hacks for double precision overflow in Postgres
'or_or_beta': handle_precision_overflow(clean_NR(gwas_dict['or_or_beta']))
}
return snp_kwargs
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('args', metavar='gwas', nargs='+')
def handle(self, *args, **options):
self.stdout.write('Args: {}'.format(args))
for file in args:
with open(file, 'r') as f:
for entry in json.load(f):
snp_fields = map_SNP_model_fields(entry)
self.stdout.write('PROCESSING SNP: {}'.format(snp_fields))
# if entry['strongest_snp'].isdigit() in {'NR', 'HLA', 'APOE'}:
if not entry['strongest_snp'].isdigit():
self.stdout.write('Skipping entry with nonnumeric rsid')
continue
if SNPMarker.objects.filter(**snp_fields).exists():
self.stdout.write('Skipping duplicated entry')
continue
s = SNPMarker(**snp_fields)
s.full_clean()
try:
s.save()
self.stdout.write('SAVED SNP ENTRY')
except psycopg2.DataError as e: # Double precision value overflow
                        self.stdout.write(str(e))  # stdout.write expects a string, not an exception object
continue
```
#### File: genoome/disease/views.py
```python
from io import BytesIO
import json
import logging
import uuid
import os
import pickle  # used by UploadGenome.save_processed_data
from celery import uuid as celery_uuid
from celery.result import AsyncResult
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth import login
from django.contrib import messages
from django.core.cache import cache
from django.core.urlresolvers import reverse_lazy
from django.core.files.storage import FileSystemStorage
from django.http import JsonResponse, HttpResponseServerError
from django.http import HttpResponse
from django.http import Http404
from django.shortcuts import render
from django.shortcuts import redirect
from django.shortcuts import get_object_or_404
from django.views.generic import FormView
from django.views.generic.edit import ProcessFormView
from django.views.generic import TemplateView
from django.utils import timezone
import msgpack
from paypal.standard.forms import PayPalPaymentsForm
from configurable_elements.models import get_legend_rows
from disease.files_utils import process_filename
from disease.files_utils import get_genome_data as _get_genome_data
from disease.files_utils import get_genome_dirpath
from disease.files_utils import get_genome_filepath
from .models import CustomizedTag
from .forms import UploadGenomeForm
from .models import AnalyzeDataOrder
from .models import AlleleColor
from .models import SNPMarker
from .models import SNPMarkerArticle
from .tasks import recompute_genome_file
log = logging.getLogger(__name__)
storage = FileSystemStorage()
def upload_progress(request):
"""
Return JSON object with information about the progress of an upload.
"""
progress_id = ''
if 'X-Progress-ID' in request.GET:
progress_id = request.GET['X-Progress-ID']
elif 'X-Progress-ID' in request.META:
progress_id = request.META['X-Progress-ID']
if progress_id:
cache_key = "%s_%s" % (request.META['REMOTE_ADDR'], progress_id)
data = cache.get(cache_key)
log.debug('PID: %s, Upload progress cache %s',os.getpid(), data)
if data is None:
data = {'length': 1, 'uploaded': 1}
return JsonResponse(data)
else:
return HttpResponseServerError('Server Error: You must provide X-Progress-ID header or query param.')
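# Illustrative response (not from the original code): an upload-progress middleware
# would store e.g. {"length": 1048576, "uploaded": 524288} under the cache key
# "<REMOTE_ADDR>_<X-Progress-ID>", and that dict is returned here as JSON.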
class GenomeFilePathMixin(object):
def process_filename(self, filename, filename_suffix=None):
return process_filename(filename, filename_suffix)
def get_dirpath(self, user=None):
if user is None:
user = self.request.user
return get_genome_dirpath(user)
def get_filepath(self, filename, user=None):
if user is None:
user = self.request.user
return get_genome_filepath(user, filename)
class JSONResponseMixin(object):
"""
A mixin that can be used to render a JSON response.
"""
def render_to_json_response(self, context, **response_kwargs):
"""
Returns a JSON response, transforming 'context' to make the payload.
"""
return JsonResponse(
self.get_data(context),
**response_kwargs
)
def get_data(self, context):
return context
class UploadGenome(GenomeFilePathMixin, FormView):
template_name = 'upload_genome.html'
form_class = UploadGenomeForm
# success_url = reverse_lazy('disease:upload_success')
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['user'] = self.request.user
return kwargs
def get(self, request, *args, **kwargs):
"""
Handles GET requests and instantiates a blank version of the form.
"""
form = self.get_form()
upload_id = uuid.uuid4()
return self.render_to_response(self.get_context_data(form=form, upload_id=upload_id))
def save_processed_data(self, data):
buffer = BytesIO()
pickle.dump(data, buffer)
filename = self.process_filename(self.request.FILES['file'].name, filename_suffix='_processed')
storage.save(self.get_filepath(filename), buffer)
def get_success_url(self):
return reverse_lazy('disease:upload_success', kwargs={'pk': self.analyze_order_pk})
def form_valid(self, form):
# save file
# create AnalyzeFileOrder
# raw_filepath = self.get_filepath(raw_filename)
cd = form.cleaned_data
email = cd.get('email', None)
raw_file = cd.get('file', None)
raw_filename = getattr(raw_file, 'name', None)
user_model = get_user_model()
if not self.request.user.is_authenticated():
try:
user = user_model.objects.get(email=email)
except user_model.DoesNotExist: # user doesn't have an account, create one
user = user_model(email=email, username=email)
user.save()
# Dirty hack to allow user login by model
user.backend = 'django.contrib.auth.backends.ModelBackend'
login(self.request, user)
# Dirty hack to fix some parts requiring request.user...
self.request.user = user
else:
user = self.request.user
storage.save(self.get_filepath(raw_filename, user=user), raw_file)
task_id = celery_uuid()
analyze_order = AnalyzeDataOrder(uploaded_filename=raw_filename, user=user, task_uuid=task_id)
if user.is_staff and user.is_active:
log.info('User %s skipping payment due to staff membership', user)
analyze_order.paid = timezone.now()
analyze_order.save()
recompute_genome_file.apply_async(args=(self.get_filepath(raw_filename, user=user),),
task_id=task_id)
# table = process_genoome_data(data)
# file_exists = os.path.isfile(os.path.join(settings.MEDIA_ROOT, self.get_filepath(self.process_filename(raw_filename, filename_suffix='_processed'))))
# if self.request.user.is_authenticated() and not file_exists:
# self.save_processed_data(table)
# ctx = self.get_context_data(form=form, table=table, analyzed=True)
self.analyze_order_pk = analyze_order.pk
return super().form_valid(form)
def allele_description(request, pk):
"""
login_required
user should be able to view only his files
"""
allele = request.GET['allele']
marker = get_object_or_404(SNPMarker, pk=pk)
try:
article = get_object_or_404(SNPMarkerArticle, snp_marker=marker)
except Http404:
return redirect(marker.link)
colours = AlleleColor.objects.filter(snp_marker=marker)
your_allele = colours.get(allele=allele)
ctx = {
'marker': marker,
'article': article,
'colors': colours,
'your_allele': your_allele,
'base_template': 'base.html',
}
if 'ajax' in request.REQUEST:
ctx['base_template'] = 'allele_ajax.html'
return render(request, 'allele_description.html', ctx)
class UploadGenomeSuccessView(TemplateView):
template_name = 'upload_success.html'
def post(self, *args, **kwargs):
return self.get(*args, **kwargs)
def get(self, request, *args, **kwargs):
self.analyze_data_order = AnalyzeDataOrder.objects.get(pk=kwargs['pk'])
user = request.user
if self.analyze_data_order.is_paid or (user.is_staff and user.is_active):
return redirect('{}?file={}'.format(reverse_lazy('disease:browse_genome'),
self.analyze_data_order.uploaded_filename))
return super().get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
kwargs.update(dict(bitpay_checkout_url=settings.BITPAY_API,
analyze_order=self.analyze_data_order,
paypal_form=PayPalPaymentsForm(
initial=self.analyze_data_order.paypal_data(self.request))
))
return super().get_context_data(**kwargs)
class DisplayGenomeResult(JSONResponseMixin, GenomeFilePathMixin, TemplateView):
template_name = 'display_genome_result.html'
def get(self, request, *args, **kwargs):
self.user = self.request.user
if self.is_browsing_via_admin:
self.user = get_user_model().objects.get(pk=int(self.request.GET['pk']))
return super().get(request, *args, **kwargs)
def get_genome_data(self):
filename = self.process_filename(self.request.GET['file'], filename_suffix='_processed')
filepath = self.get_filepath(filename)
data = _get_genome_data(filepath)
data = list(reversed(sorted(data, key=lambda r: r.get('priority', -1))))
return data
@property
def is_admin(self): # TODO use permissions?
return bool(self.request.user.is_staff and self.request.user.is_active)
@property
def is_browsing_via_admin(self):
return bool(('pk' in self.request.GET) and self.is_admin)
def get_filepath(self, filename):
if self.is_browsing_via_admin:
return get_genome_filepath(self.user, filename)
return super().get_filepath(filename)
def get_analyze_data_order(self):
order_kwargs = dict(uploaded_filename=self.request.GET['file'], user=self.user)
try:
analyze_data_order = AnalyzeDataOrder.objects.get(**order_kwargs)
except AnalyzeDataOrder.DoesNotExist:
if not self.is_browsing_via_admin:
analyze_data_order = AnalyzeDataOrder(**order_kwargs)
analyze_data_order.save()
return analyze_data_order
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['legend_rows'] = get_legend_rows()
ctx['allele_tags'] = CustomizedTag.objects.filter(show_on_data=True)
ctx['is_admin'] = is_admin = self.is_admin
analyze_data_order = self.get_analyze_data_order()
paid = analyze_data_order.is_paid
job = AsyncResult(analyze_data_order.task_uuid)
ctx['genome_data_url'] = '{}?file={}'.format(reverse_lazy('disease:browse_genome'), self.request.GET['file'])
if self.is_browsing_via_admin:
ctx['genome_data_url'] += '&pk={}'.format(self.user.pk)
ctx['is_job_ready'] = is_job_ready = job.ready()
ctx['is_job_successful'] = is_job_successful = job.successful()
ctx['is_job_failure'] = is_job_failure = job.failed()
if is_job_ready and is_job_successful:
ctx['paid'] = paid
if paid or is_admin:
ctx['table'] = self.get_genome_data()
ctx['bitpay_checkout_url'] = settings.BITPAY_API
ctx['analyze_order'] = analyze_data_order
ctx['pos_data'] = analyze_data_order.posData()
ctx['paypal_form'] = PayPalPaymentsForm(
initial=analyze_data_order.paypal_data(self.request))
elif is_job_ready and is_job_failure:
if not self.request.is_ajax():
                messages.add_message(self.request, settings.DANGER, "An error occurred while processing your genome data. Please contact us for details.")
else:
if not self.request.is_ajax():
                messages.add_message(self.request, messages.INFO, 'Your genome data is being analyzed. Wait a few seconds and try this page again')
return ctx
def get_data(self, context):
analyze_data_order = self.get_analyze_data_order()
if analyze_data_order is not None:
paid = analyze_data_order.is_paid
result = []
job = AsyncResult(analyze_data_order.task_uuid)
if paid or self.is_admin:
result = self.get_genome_data()
return {
'data': result,
'is_ready': job.ready(),
}
def render_to_response(self, context, **response_kwargs):
if self.request.is_ajax():
return self.render_to_json_response(context)
else:
return super().render_to_response(context, **response_kwargs)
def landing_genome_data(request):
sample_data_filepath = 'disease/samplegenotype'
data = {'data': _get_genome_data(sample_data_filepath)}
data['data'] = list(reversed(sorted(data['data'], key=lambda r: r.get('priority', -1))))
return JsonResponse(data)
class PaymentStatusView(ProcessFormView, TemplateView):
http_method_names = ['post']
def post(self, request, *args, **kwargs):
post_data = self.request.POST
if post_data['status'] in {'paid', 'complete', 'confirmed'}:
posData = json.loads(post_data['posData'])
analyze_order_pk = posData['analyze_order_pk']
user_pk = posData['user_pk']
analyze_order = AnalyzeDataOrder.objects.get(pk=analyze_order_pk)
analyze_order.paid = timezone.now()
analyze_order.save()
return HttpResponse('OK')
```
#### File: genoome/payments/signals.py
```python
from paypal.standard.models import ST_PP_COMPLETED
from paypal.standard.ipn.signals import valid_ipn_received
from disease.models import AnalyzeDataOrder
from django.utils import timezone
import logging
log = logging.getLogger(__name__)
def paypal_callback(sender, **kwargs):
ipn_obj = sender
if ipn_obj.payment_status == ST_PP_COMPLETED:
analyze_order_pk = ipn_obj.invoice
try:
analyze_order = AnalyzeDataOrder.objects.get(pk=analyze_order_pk)
analyze_order.paid = timezone.now()
analyze_order.save()
log.info('Order %r paid by %r', analyze_order, ipn_obj)
except AnalyzeDataOrder.DoesNotExist:
log.warning('No order found for %r', ipn_obj)
else:
log.warning('Payment not completed yet: %r', ipn_obj)
valid_ipn_received.connect(paypal_callback)
``` |
{
"source": "jiivan/wykresik",
"score": 2
} |
#### File: wykresik/web/app.py
```python
from bottle import default_app
from bottle import redirect
from bottle import request
from bottle import response
from bottle import route
from bottle import run
from bottle import view
import csv
import datetime
import importlib
import itertools
import os
import psycopg2
import psycopg2.extras
import random
import requests_oauthlib.oauth1_session
import sys
from withings import WithingsAuth, WithingsApi
import time
settings_name = os.environ.get('WYKRESIK_SETTINGS', 'settings')
settings = importlib.import_module(settings_name)
class InvalidToken(Exception): pass
def db_connection():
return psycopg2.connect(settings.DATABASE, cursor_factory=psycopg2.extras.DictCursor)
def get_authorizer(token=None):
back_url = '%s://%s/withings/comeback' % (
request.get_header('url_scheme', 'http'),
request.get_header('HTTP_HOST', request.get_header('SERVER_NAME', 'wykresik.genoomy.com')),
)
sys.stderr.write('back_url: %s\n' % (back_url,))
auth = WithingsAuth(settings.WITHINGS['key'], settings.WITHINGS['secret'], back_url)
if token:
with db_connection() as db_conn:
with db_conn.cursor() as c:
c.execute('SELECT * FROM withings_credentials WHERE token=%s ORDER BY created_at DESC', (token,))
db_result = c.fetchone()
if db_result is None:
raise InvalidToken
secret = db_result['secret']
auth.oauth_token = token
auth.oauth_secret = secret
return auth
def store_measures(creds):
client = WithingsApi(creds)
# lastupdate = int(time.time())
measures = client.get_measures()
with db_connection() as db_conn:
for m in measures:
with db_conn.cursor() as c:
c.execute('SELECT 1 FROM withings_measures WHERE grpid = %s', (m.grpid,))
if c.fetchone():
continue
# grpid, wuserid, weight, height, fat_free_mass, fat_ratio, fat_mass_weight, diastolic_blood_pressure, systolic_blood_pressure, heart_pulse, created_at
c.execute('INSERT INTO withings_measures (grpid, wuserid, weight, height, fat_free_mass, fat_ratio, fat_mass_weight, diastolic_blood_pressure, systolic_blood_pressure, heart_pulse, wdate) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)', (m.grpid, creds.user_id, m.weight, m.height, m.fat_free_mass, m.fat_ratio, m.fat_mass_weight, m.diastolic_blood_pressure, m.systolic_blood_pressure, m.heart_pulse, m.date))
@route('/withings/authorize')
def withings_authorize():
auth = get_authorizer()
url = auth.get_authorize_url()
with db_connection() as db_conn:
with db_conn.cursor() as c:
c.execute('INSERT INTO withings_credentials (token, secret) VALUES (%s, %s)', (auth.oauth_token, auth.oauth_secret))
redirect(url)
@route('/withings/comeback')
def withings_comeback():
oauth_token = request.GET.oauth_token
oauth_verifier = request.GET.oauth_verifier
try:
creds = get_authorizer(oauth_token).get_credentials(oauth_verifier)
except InvalidToken:
sys.stderr.write('Invalid token %s\n' % (oauth_token,))
redirect('/withings/authorize')
except requests_oauthlib.oauth1_session.TokenMissing as e:
sys.stderr.write('Token missing %s (%s)\n' % (oauth_token, e))
redirect('/withings/authorize')
with db_connection() as db_conn:
with db_conn.cursor() as c:
c.execute('UPDATE withings_credentials SET wuserid=%s WHERE token=%s', (creds.user_id, oauth_token))
store_measures(creds)
redirect('/?withings=%d' % (int(creds.user_id),))
@route('/withings/csv/<userid>')
def withings_csv(userid):
import io
csvfile = io.StringIO()
writer = csv.writer(csvfile)
writer.writerow(["Date","Weight (kg)","Fat mass (%)","Lean mass (%)","Comments"])
def _r(v):
import decimal
if not isinstance(v, decimal.Decimal):
return v
return '%.2f' % v
with db_connection() as db_conn:
with db_conn.cursor() as c:
c.execute('SELECT * FROM withings_measures WHERE wuserid=%s AND weight is not null AND fat_ratio is not null ORDER by wdate', (userid,))
for db_row in c.fetchall():
writer.writerow([
db_row['wdate'].strftime('%Y-%m-%d %I:%M %p'),
_r(db_row['weight']),
_r(db_row['fat_ratio']),
_r(100.0 - float(db_row['fat_ratio'] or 0)),
''
])
csvfile.seek(0)
response.content_type = 'text/plain'
return csvfile.read()
@route('/withings/csv-mm5/<userid>')
def withings_csv_mm5(userid):
import io
csvfile = io.StringIO()
writer = csv.writer(csvfile)
writer.writerow(["Date","Fat best of 5d (%)","Fat best of 5*24h (%)"])
def _r(v):
import decimal
if not isinstance(v, decimal.Decimal):
return v
return '%.2f' % v
with db_connection() as db_conn:
with db_conn.cursor() as c:
c.execute('SELECT * FROM withings_maxminfive WHERE wuserid = %s ORDER BY justday;', (userid,))
maxminfive = c.fetchall()
with db_conn.cursor() as c:
c.execute('SELECT * FROM withings_maxminfive_tf WHERE wuserid = %s ORDER BY justday;', (userid,))
maxminfive_24h = c.fetchall()
def filldate_zip(a,b):
def _nd(d):
return d.replace(tzinfo=None)
while a or b:
if (not a) or (_nd(b[0]['justday']) < _nd(a[0]['justday'])):
row = b.pop(0)
yield row['justday'], None, row['maxminfive']
continue
if (not b) or (_nd(a[0]['justday']) < _nd(b[0]['justday'])):
row = a.pop(0)
yield row['justday'], row['maxminfive'], None
continue
if _nd(a[0]['justday']) == _nd(b[0]['justday']):
rowa = a.pop(0)
rowb = b.pop(0)
yield rowa['justday'], rowa['maxminfive'], rowb['maxminfive']
continue
raise RuntimeError
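    # Illustrative walk-through (not from the original code): with
    #   a = [{'wuserid': 1, 'justday': d1, 'maxminfive': 21.0}]
    #   b = [{'wuserid': 1, 'justday': d1, 'maxminfive': 20.5}, {'wuserid': 1, 'justday': d2, 'maxminfive': 20.1}]
    # the generator yields (d1, 21.0, 20.5) for the shared day and (d2, None, 20.1)
    # for the day present only in b, so both CSV columns stay aligned by date.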
for justday, mm5, mm524h in filldate_zip(maxminfive, maxminfive_24h):
writer.writerow([
justday.strftime('%Y-%m-%d %I:%M %p'),
_r(mm5),
_r(mm524h),
])
csvfile.seek(0)
response.content_type = 'text/plain'
return csvfile.read()
@route('/withings/table')
@route('/withings/table-<wuserid:re:\d+>')
@route('/withings/table/<first_date:re:\d{4}\d{2}\d{2}>-<last_date:re:\d{4}\d{2}\d{2}>')
@route('/withings/table-<wuserid:re:\d+>/<first_date:re:\d{4}\d{2}\d{2}>-<last_date:re:\d{4}\d{2}\d{2}>')
@view('withings_table')
def withings_table(wuserid=None, first_date=None, last_date=None):
if first_date:
first_date = datetime.datetime.strptime(first_date, '%Y%m%d')
if last_date:
last_date = datetime.datetime.strptime(last_date, '%Y%m%d')
# Determine wuserid
with db_connection() as db_conn:
with db_conn.cursor() as c:
c.execute('SELECT wuserid FROM withings_measures GROUP BY wuserid')
wuserids = [r[0] for r in c.fetchall()]
if not wuserids:
redirect('/withings/authorize')
if wuserid is None:
wuserid = random.choice(wuserids)
wuserid = int(wuserid)
db_operations_start = time.time()
with db_connection() as db_conn:
with db_conn.cursor() as c:
c.execute('SELECT * FROM withings_maxminfive WHERE wuserid = %s ORDER BY justday DESC;', (wuserid,))
maxminfive = c.fetchall()
with db_conn.cursor() as c:
c.execute('SELECT * FROM withings_maxminfive_tf WHERE wuserid = %s ORDER BY justday DESC;', (wuserid,))
maxminfive_24h = c.fetchall()
userdates = frozenset((r['wuserid'], r['justday'].replace(tzinfo=None)) for r in itertools.chain(maxminfive, maxminfive_24h))
try:
if not first_date:
first_date = min(maxminfive[-1]['justday'].replace(tzinfo=None), maxminfive_24h[-1]['justday'].replace(tzinfo=None))
if not last_date:
last_date = max(maxminfive[0]['justday'].replace(tzinfo=None), maxminfive_24h[0]['justday'].replace(tzinfo=None))
except IndexError: # empty sequence
last_date = first_date = datetime.datetime.now()
def _fillnulls(data):
sdates = list(sorted(userdates, key=lambda r: (r[1], r[0]*-1)))
while sdates and (sdates[-1][1] > last_date):
sdates.pop()
if not sdates:
return data
result = []
wuserid, justday = sdates.pop()
for row in data:
if not (first_date <= row['justday'].replace(tzinfo=None) <= last_date):
continue
while (justday, wuserid*-1) > (row['justday'].replace(tzinfo=None), row['wuserid']*-1):
result.append({'wuserid': wuserid, 'justday': justday, 'maxminfive': None})
if not sdates:
break
wuserid, justday = sdates.pop()
if sdates:
if (wuserid, justday) == (row['wuserid'], row['justday'].replace(tzinfo=None)):
wuserid, justday = sdates.pop()
result.append(row)
return result
maxminfive = _fillnulls(maxminfive)
maxminfive_24h = _fillnulls(maxminfive_24h)
db_operations_delta = time.time() - db_operations_start
return {
'maxminfive': maxminfive,
'maxminfive_24h': maxminfive_24h,
'db_delta': db_operations_delta,
'first_date': first_date,
'last_date': last_date,
'wuserids': wuserids,
'selected_wuserid': wuserid,
}
@route('/withings/plain')
@route('/withings/plain-<wuserid:re:\d+>')
@route('/withings/plain-<wuserid:re:\d+>/<first_date:re:\d{4}\d{2}\d{2}>-<last_date:re:\d{4}\d{2}\d{2}>')
@route('/withings/plain-<wuserid:re:\d+>/fat-<fat_query:re:\d+-\d+>')
@route('/withings/plain-<wuserid:re:\d+>/weight-<weight_query:re:\d+-\d+>')
@route('/withings/plain-<wuserid:re:\d+>/<first_date:re:\d{4}\d{2}\d{2}>-<last_date:re:\d{4}\d{2}\d{2}>/fat-<fat_query:re:\d+-\d+>/weight-<weight_query:re:\d+-\d+>')
@view('withings_plain')
def withings_plain(wuserid=None, first_date=None, last_date=None, fat_query=None, weight_query=None):
# Determine date range
if first_date:
first_date = datetime.datetime.strptime(first_date, '%Y%m%d')
if last_date:
last_date = datetime.datetime.strptime(last_date, '%Y%m%d')
# Determine wuserid
with db_connection() as db_conn:
with db_conn.cursor() as c:
c.execute('SELECT wuserid FROM withings_measures GROUP BY wuserid')
wuserids = [r[0] for r in c.fetchall()]
if not wuserids:
redirect('/withings/authorize')
if wuserid is None:
wuserid = random.choice(wuserids)
wuserid = int(wuserid)
def parse_limits(query):
if not query:
return None, None
return sorted(float(s)/10 for s in query.split('-'))
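    # Example (illustrative): a path segment like fat-150-250 gives
    # parse_limits('150-250') == [15.0, 25.0], i.e. the query values are tenths of a unit.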
# Determine fat limits
fat_min, fat_max = parse_limits(fat_query)
# Determine weight limits
weight_min, weight_max = parse_limits(weight_query)
return {
'wuserids': wuserids,
'selected_wuserid': wuserid,
'first_date': first_date,
'last_date': last_date,
'fat_min': fat_min,
'fat_max': fat_max,
'weight_min': weight_min,
'weight_max': weight_max,
}
if __name__ == '__main__':
@route('/s/<path:re:.*$>')
def static_serve(path):
fullpath = 'static/'+path
with open(fullpath, 'rb') as f:
return f.read()
run(host='localhost', port=8080, debug=True)
else:
default_app.default.config['catchall'] = False
application = default_app()
``` |
{
"source": "JiiWeee/Painonhallinta",
"score": 4
} |
#### File: JiiWeee/Painonhallinta/luokat.py
```python
class Henkilo:
"""yliluokka kaikille henkilรถtyypeille."""
def __init__(self, etunimi, sukunimi, pituus, paino, ika, sukupuoli):
self.etunimi = etunimi
self.sukunimi = sukunimi
self.pituus = pituus
self.paino = paino
self.ika = ika
self.sukupuoli = sukupuoli
def painoindeksi(self):
bmi = self.paino / (self.pituus / 100) ** 2
return bmi
class Aikuinen(Henkilo):
"""Aliluokka aikuiselle henkilรถlle, perii Henkilo-luokan ominaisuudet
ja metodit"""
def __init__(self, etunimi, sukunimi, pituus, paino, ika, sukupuoli, tavoitepaino):
super().__init__(etunimi, sukunimi, pituus, paino, ika, sukupuoli)
self.arg = tavoitepaino
def rasvaprosentti(self):
rasvaprosentti = 1.2 * self.painoindeksi() + 0.23 * self.ika - 10.8 * self.sukupuoli - 5.4
return rasvaprosentti
if __name__ == "__main__":
mikaV = Henkilo('mika', 'vainio', 171, 74, 59, 1)
    print('person weighs', mikaV.paino)
mikaV.painoindeksi()
mikaV2 = Aikuinen('mika', 'Vainio', 171, 74, 59, 1, 70)
    print(mikaV2.etunimi, 'BMI', mikaV2.painoindeksi())
    print(mikaV2.etunimi, 'fat percentage', mikaV2.rasvaprosentti())
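    # Worked check of the formulas above (added for illustration): for 171 cm / 74 kg,
    # BMI = 74 / 1.71**2 ≈ 25.3, and the Deurenberg fat estimate is
    # 1.2*25.3 + 0.23*59 - 10.8*1 - 5.4 ≈ 27.7 %.
    print('rounded:', round(mikaV2.painoindeksi(), 1), round(mikaV2.rasvaprosentti(), 1))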
``` |
{
"source": "jijarf/ahihi",
"score": 2
} |
#### File: ahihi/plugin.video.kminus/fptplay.py
```python
from xbmcswift2 import Plugin,xbmcaddon, xbmc
import urlfetch
from BeautifulSoup import BeautifulSoup
import json
import re
import urllib
plugin = Plugin()
crawurl = 'https://fptplay.net/livetv'
def getLinkById(id = None, quality = "2"):
#if id.startswith('https://') :
# #is event
# id = getChannelIdFromEventLink(id)
#if id == None :
# return None
#get cookie & csrf
result = urlfetch.fetch(
crawurl,
headers={
'User-Agent':'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.81 Safari/537.36'
})
#m = re.search(r"name=\"_token\" content=\"(.+)\"",result.content)
#if m == None :
# return None
#csrf = m.group(1)
cookie='laravel_session=' + result.cookies.get('laravel_session') + ";"
csrf = urllib.unquote(result.cookies.get('token'))
#plugin.log.info(csrf)
result = urlfetch.post(
'https://fptplay.net/show/getlinklivetv',
data={"id": id,
"quality": quality,
"mobile": "web",
"type" : "newchannel"
},
headers={'Content-Type': 'application/x-www-form-urlencoded',
'User-Agent':'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.81 Safari/537.36',
'X-Requested-With':'XMLHttpRequest',
'Referer':'https://fptplay.net/livetv',
'x-csrf-token': csrf,
'cookie':cookie
}
)
plugin.log.info(result.content)
if result.status_code != 200 :
return None
info = json.loads(result.content)
return info['stream']
def getLink(uri = None, quality = "2"):
#get cookie & csrf
result = urlfetch.fetch(
uri,
headers={
'User-Agent':'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.81 Safari/537.36'
})
#m = re.search(r"name=\"_token\" content=\"(.+)\"",result.content)
#if m == None :
# return None
#csrf = m.group(1)
m = re.search(r"var id = '([^']+)';",result.content)
if m == None :
return None
id = m.group(1)
cookie='laravel_session=' + result.cookies.get('laravel_session') + ";"
csrf = urllib.unquote(result.cookies.get('token'))
result = urlfetch.post(
'https://fptplay.net/show/getlinklivetv',
data={"id": id,
"quality": quality,
"mobile": "web",
"type" : "newchannel"
},
headers={'Content-Type': 'application/x-www-form-urlencoded',
'User-Agent':'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.81 Safari/537.36',
'X-Requested-With':'XMLHttpRequest',
'Referer':'https://fptplay.net/livetv',
'x-csrf-token': csrf,
'cookie':cookie
}
)
if result.status_code != 200 :
return None
info = json.loads(result.content)
return info['stream']
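# Illustrative usage sketch (assumption: this only runs inside a Kodi/XBMC
# environment where xbmcswift2 is available; the channel id below is made up):
if __name__ == '__main__':
    stream_url = getLinkById(id='vtv1', quality='2')
    plugin.log.info('resolved stream: %s' % stream_url)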
``` |
{
"source": "JiJibrto/lab_rab_12",
"score": 3
} |
#### File: JiJibrto/lab_rab_12/individual1.py
```python
import time
class Time:
def __init__(self):
self.__time1 = []
@staticmethod
def __interpreter(__s):
__sec = __s
__min = __sec // 60
__hou = __min // 60
__min = __min % 60
__sec = __sec % 60
__time = [__hou, __min, __sec]
return __time
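        # Worked example (illustrative): 3725 seconds -> [1, 2, 5],
        # i.e. 1 hour, 2 minutes and 5 seconds (3725 = 1*3600 + 2*60 + 5).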
def reverse_interpreter_sec(self):
__sec = self.__time1[0] * 3600 + self.__time1[1] * 60 + self.__time1[2]
return __sec
def reverse_interpreter_min(self):
__min = self.__time1[0] * 60 + self.__time1[1] + (0 if self.__time1[2] == 0 else 1)
return __min
def read_time_num(self):
self.__time1 = []
self.__time1.append(int(input("Set hours> ")))
self.__time1.append(int(input("Set minutes> ")))
self.__time1.append(int(input("Set seconds> ")))
self.__time1 = self.__interpreter(self.reverse_interpreter_sec())
def read_time_str(self):
self.__time1 = []
self.__time1 = list(map(int, input("Set time (*:*:*)> ").split(":", maxsplit=2)))
self.__time1 = self.__interpreter(self.reverse_interpreter_sec())
def read_time_sec(self):
self.__time1 = []
self.__time1 = self.__interpreter(int(input("Set seconds> ")))
def read_time_now(self):
self.__time1 = []
self.__time1 = self.__interpreter(int(time.time()))
def display(self):
print(f"{self.__time1}")
def sum_sec(self, __s):
return self.reverse_interpreter_sec() + __s
def dif_sec(self, __s):
return self.reverse_interpreter_sec() - __s
if __name__ == '__main__':
new_time1 = Time()
new_time2 = Time()
print("ะะฐัะธะฐะฝัั ะธะฝะธัะธะฐะปะธะทะฐัะธะธ: \n\nะกััะพะบะพะน: ")
new_time1.read_time_str()
new_time1.display()
print("\nะะพ ะฝะฐััะพััะตะผั ะฒัะตะผะตะฝะธ: ")
new_time1.read_time_now()
new_time1.display()
print("\nะกะตะบัะฝะดะฐะผะธ: ")
new_time1.read_time_sec()
new_time1.display()
print("\nะฆะธััะฐะผะธ: ")
new_time1.read_time_num()
new_time1.display()
print(f"\nะกะปะพะถะตะฝะธะต ะฒัะตะผะตะฝะธ ะธ ะทะฐะดะฐะฝะฝะพะณะพ ะบะพะปะธัะตััะฒะฐ ัะตะบัะฝะด: {new_time1.sum_sec(int(input('Enter seconds> ')))}")
print(f"\nะััะธัะฐะฝะธะต ะธะท ะฒัะตะผะตะฝะธ ะทะฐะดะฐะฝะฝะพะณะพ ะบะพะปะธัะตััะฒะฐ ัะตะบัะฝะด: {new_time1.dif_sec(int(input('Enter seconds> ')))}")
print(f"\nะะตัะตะฒะพะด ะฒ ัะตะบัะฝะดั: {new_time1.reverse_interpreter_sec()}")
print(f"ะะตัะตะฒะพะด ะฒ ะผะธะฝััั (ั ะพะบััะณะปะตะฝะธะตะผ ะดะพ ัะตะปะพะน ะผะธะฝััั): {new_time1.reverse_interpreter_min()}")
print("\nะะฝะธัะธะฐะปะธะทะฐัะธั ะฒัะพัะพะณะพ ะพะฑัะตะบัะฐ ะฒัะตะผะตะฝะธ")
new_time2.read_time_num()
new_time2.display()
print(f"\nะััะธัะปะตะฝะธะต ัะฐะทะฝะธัั ะผะตะถะดั ะดะฒัะผั ะผะพะผะตะฝัะฐะผะธ ะฒัะตะผะตะฝะธ ะฒ ัะตะบัะฝะดะฐั
: {new_time1.reverse_interpreter_sec() - new_time2.reverse_interpreter_sec()}")
if new_time1.reverse_interpreter_sec() > new_time2.reverse_interpreter_sec():
print(f"ะกัะฐะฒะฝะตะฝะธะต ะผะพะผะตะฝัะพะฒ ะฒัะตะผะตะฝะธ: new_time1 > new_time2")
elif new_time1.reverse_interpreter_sec() < new_time2.reverse_interpreter_sec():
print(f"ะกัะฐะฒะฝะตะฝะธะต ะผะพะผะตะฝัะพะฒ ะฒัะตะผะตะฝะธ: new_time1 < new_time2")
else:
print(f"ะกัะฐะฒะฝะตะฝะธะต ะผะพะผะตะฝัะพะฒ ะฒัะตะผะตะฝะธ: new_time1 == new_time2")
``` |
{
"source": "JiJibrto/lab_rab_17",
"score": 3
} |
#### File: lab_rab_17/task_2_packet/UnknownCommandError.py
```python
class UnknownCommandError(Exception):
def __init__(self, command, message="Unknown command"):
self.command = command
self.message = message
super(UnknownCommandError, self).__init__(message)
def __str__(self):
return f"{self.command} -> {self.message}"
``` |
{
"source": "JiJiGuoGuo/CV-Backbones-master",
"score": 3
} |
#### File: CV-Backbones-master/GhostEfficientNetV2/EfficientNetV2.py
```python
import tensorflow as tf
import math
def hard_swish(x, inplace: bool = False):
    '''
    A cheaper, piecewise approximation of swish; hard_swish is usually only used in the deeper layers of a network.
    '''
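    # Note (added comment): the inplace branch below uses PyTorch-style tensor
    # methods (add_/clamp_/div_), which do not exist on tf.Tensor objects.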
if inplace:
return x.add_(3.).clamp_(0., 6.).div_(6.)
else:
return (tf.nn.relu6(x + 3.) * x)/ 6.
def hard_sigmoid(x):
    '''
    hard_sigmoid is a piecewise-linear approximation of the logistic sigmoid; it is cheaper to compute and speeds up training.
    if x < -2.5, return 0
    if x > 2.5, return 1
    if -2.5 <= x <= 2.5, return 0.2*x + 0.5
    tf.keras already provides this as tf.keras.activations.hard_sigmoid.
    '''
return tf.keras.activations.hard_sigmoid(x)
def _make_divisible(v, divisor:int=8, min_value=None):
"""
็กฎไฟๆๆ็ๅฑ็้้้ฝ่ฝๅค่ขซ8ๆด้ค๏ผ็ๆ็ๆฐๅญ๏ผ่ฝ่ขซdivisorๆด้ค
ไฝฟ็จif๏ผๆฅไฟ่ฏnew_v็ธๅฏนไบๅๅ
็v็ๅๅไธ่ถ
่ฟ+-10%
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    # Make sure that rounding down does not go down by more than 10%.
if new_v < (0.9 * v):
new_v += divisor
return new_v
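# Worked example (illustrative): _make_divisible(57) -> 56 and _make_divisible(90) -> 88;
# both results are multiples of 8 and stay within 10% of the requested value.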
class SE(tf.keras.layers.Layer):
def __init__(self,inputs_channels:int,se_ratio:int = 0.25,name:str=""):
'''
่ฟไธชๅฝๆฐๆฏไฝฟ็จConv1x1ๅฎ็ฐ็SEๆจกๅ๏ผๅนถไฝฟ็จreduc_meanๅฎ็ฐGlobalAveragePooling
Args:
inputs_channels: ่พๅ
ฅๅผ ้็channels
se_ratio: ็ฌฌไธไธชFCไผๅฐ่พๅ
ฅSE็ๅผ ้channelsๅ็ผฉๆ็ๅ็
name:
return:ไธไธชๅผ ้๏ผshapeๅinput
'''
super(SE,self).__init__()
self.se_ratio = se_ratio
self.filters = inputs_channels
self.reduction = _make_divisible(inputs_channels * se_ratio,8)
#็ฌฌไธไธชFCๅฐ่พๅ
ฅSE็channelๅ็ผฉๆ1/4
self.global_pool = tf.keras.layers.GlobalAveragePooling2D(data_format='channels_last')
self.conv1 = tf.keras.layers.Conv2D(self.reduction,1,1,use_bias=True,name=name+'1_conv')
self.act1 = tf.keras.layers.Activation('swish')
self.conv2 = tf.keras.layers.Conv2D(self.filters,1,1,use_bias=True,name=name+'2_conv')
self.act2 = tf.keras.layers.Activation('sigmoid')
self.multiply = tf.keras.layers.Multiply()
def call(self,inputs):
#็ฑไบtf2.6ๆๅขๅ ไบkeep_dim๏ผๆไปฅๅจtf2.3้่ฆๆๅจexpand_dims
x = self.global_pool(inputs)
x = tf.expand_dims(tf.expand_dims(x,1),1)
x = self.conv1(x)
x = self.act1(x)
x = self.conv2(x)
x = self.act2(x)
out = self.multiply([x,inputs])
return out
class SqueezeExcite(tf.keras.layers.Layer):
def __init__(self, input, se_ratio=0.25, reduced_base_chs=None, divisor=4):
'''
SEๆจกๅ๏ผSqueeze๏ผไธไธชAvePooling๏ผGhostModue็SEๅ
จ้จๆฟๆดปๅฝๆฐๆขๆไบrelu
Excitation๏ผไธไธชFC+swish๏ผๅ ไธไธชFC+sigmoid
Args:
input:
se_ratio: ็ฌฌไธไธชFCไผๅฐ่พๅ
ฅSE็ๅผ ้channelsๅ็ผฉๆ็ๅ็
reduced_base_chs:
divisor:
return:ไธไธชๅผ ้๏ผshapeๅinput
'''
super(SqueezeExcite, self).__init__()
self.input_channels = input.shape[-1]
reduced_chs = _make_divisible((reduced_base_chs or self.input_channels) * se_ratio, divisor)
self.conv1 = tf.keras.layers.Conv2D(reduced_chs,1,1,use_bias=True)
self.act1 = tf.keras.layers.Activation('relu')
        self.conv2 = tf.keras.layers.Conv2D(self.input_channels,1,1,use_bias=True)  # make the output channels equal to the input channels
self.act2 = tf.keras.layers.Activation('relu')
def call(self, x):
xx = tf.reduce_mean(x,(1,2),keepdims=True)
xx = self.conv1(xx)
xx = self.act1(xx)
xx = self.conv2(xx)
xx = self.act2(xx)
out = tf.keras.layers.Multiply()([x,xx])
return out
class GhostModule(tf.keras.layers.Layer):
def __init__(self, input_channels,kernel_size=1, ratio=2, dw_size=3, stride=1,use_relu=True):
'''
ๅฎ็ฐ็GhostModule๏ผCNNๆจกๅ็ไธญ็feature map็ๅพๅคๆฏ็ธไผผ็๏ผๆไบorigin map่ฝๅค้่ฟๆ็งcheap operation
็ๆ่ฟไบ็ธไผผ็feature map๏ผ็งฐไธบghost map๏ผไธญๆ็ฟป่ฏไธบๅนปๅฝฑ
Args:
input_channels: ่พๅ
ฅ็ๅผ ้็้้ๆฐ
kernel_size: ้ค1x1ๅท็งฏ็ๅ
ถไปๅท็งฏๆ ธๅคงๅฐ
ratio:ๅๅงconvไผๅฐๅchannelๅ็ผฉๆๅๆฅ็ๅคๅฐ
dw_size: DepthwiseConv็ๅท็งฏๆ ธๅคงๅฐ
stride:
use_relu: ๆฏๅฆไฝฟ็จreluไฝไธบๆฟๆดปๅฝๆฐ
return:GhostModuleไธๆนๅinput็shape๏ผๆไปฅ่พๅ
ฅchannels=่พๅบchannels
'''
super(GhostModule, self).__init__()
self.ouput_channel = input_channels
init_channels = math.ceil(self.ouput_channel / ratio)
new_channels = init_channels * (ratio - 1)
# ่ฟ้ๅฏไปฅ็ๅฐๅฎ็ฐไบไธคๆฌกๅท็งฏ๏ผ็นๅท็งฏ
self.primary_conv = tf.keras.Sequential([
#็นๅท็งฏ็ๅท็งฏๆ ธ็็ปๆฐ=ไธไธๅฑ็channelๆฐ๏ผๅคงๅฐไธบ1x1xM๏ผๅ
ถไธญM=input.shape(-1)
tf.keras.layers.Conv2D(init_channels,kernel_size,stride,padding='same',use_bias=False),
tf.keras.layers.BatchNormalization(epsilon=1e-5),
tf.keras.layers.Activation(activation='relu') if use_relu else tf.keras.Sequential(),
])
self.cheap_operation = tf.keras.Sequential([
#group็จไบๅฏนchannel่ฟ่กๅ็ป๏ผ้ป่ฎคๆฏไธไธชchannelไธบไธ็ป,่ฟ้้็จ็ๆฏๅ็ปๅท็งฏ
tf.keras.layers.Conv2D(new_channels,3,1,'same',use_bias=False,groups=init_channels),
# tf.keras.layers.DepthwiseConv2D(3,1,'same',use_bias=False),
tf.keras.layers.BatchNormalization(epsilon=1e-5),
tf.keras.layers.Activation(activation='relu') if use_relu else tf.keras.Sequential(),
])
def call(self,x):
x1 = self.primary_conv(x)
x2 = self.cheap_operation(x1)
out = tf.concat([x1,x2],axis=-1)#origin mapๅghost map่ฟ่กๆผๆฅ
#็ฌฌ0,1,2็ปดๅ
จ้๏ผๆๅไธ็ปดๅบฆไป0ๅผๅง่ฏปๅๅฐself.oup๏ผๆญฅ้ฟไธบ1๏ผๅทฆ้ญๅณๅผ
return out[...,:self.ouput_channel]
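# Shape sketch (illustrative): for an input of shape (N, H, W, C) with ratio=2,
# primary_conv produces ceil(C/2) "origin" feature maps, cheap_operation adds the
# same number of "ghost" maps, and the final slice keeps exactly C channels, so
# the module preserves the channel count of its input.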
class Fused_MBConv(tf.keras.layers.Layer):
def __init__(self,input_channels,output_channels,kernel_size,activation='swish',
stride=1,expand_ratio=6,se_ratio=4,dropout=None,shortcut = 1,survival=None,epsilon=1e-5):
super(Fused_MBConv, self).__init__()
self.expand_ratio = expand_ratio
self.drop = dropout
self.se_ratio = se_ratio
self.use_shortcut = shortcut
self.survival = survival
expand_ratio_filters = _make_divisible(input_channels * expand_ratio)
self.stride = stride
self.input_channels = input_channels
self.output_channels = output_channels
if stride == 2:
self.poolAvage = tf.keras.layers.AveragePooling2D()
if input_channels != output_channels:
self.shortcut = tf.keras.layers.Conv2D(output_channels,kernel_size=1,strides=1,padding='same',use_bias = False)
#ๅ็ปด้ถๆฎต๏ผๅท็งฏ
if expand_ratio != 1:
self.conv3x3_fused = tf.keras.Sequential([
tf.keras.layers.Conv2D(expand_ratio_filters,kernel_size=kernel_size,strides=stride,padding='same',use_bias=False),
tf.keras.layers.BatchNormalization(epsilon=epsilon),
tf.keras.layers.Activation(activation),
])
if (dropout is not None) and (dropout != 0):
self.ghost1_dropout = tf.keras.layers.Dropout(dropout)
#seๆจกๅ
if se_ratio is not None:
self.se = SE(expand_ratio_filters, se_ratio)
#่พๅบ้ถๆฎต๏ผ้็ปด้ถๆฎต๏ผๅท็งฏ
self.conv1x1 = tf.keras.Sequential([
tf.keras.layers.Conv2D(output_channels,kernel_size=1 if expand_ratio != 1 else kernel_size,
strides=1 if expand_ratio != 1 else stride,padding='same',
use_bias=False),
tf.keras.layers.BatchNormalization(epsilon=epsilon),
])
def call(self,inputs):
shortcut = inputs
if self.stride == 2:
shortcut = self.poolAvage(shortcut)
if self.input_channels != self.output_channels:
shortcut = self.shortcut(shortcut)
#ๅ็ปด
if self.expand_ratio != 1:
inputs = self.conv3x3_fused(inputs)
if (self.drop is not None) and (self.drop != 0):
inputs = self.ghost1_dropout(inputs)
#SEๆจกๅ
if self.se_ratio is not None:
inputs = self.se(inputs)
#่พๅบ้ถๆฎต๏ผ้็ปด1x1
inputs = self.conv1x1(inputs)
        if self.use_shortcut:  # if the skip/residual connection is used
            if self.survival is not None and self.survival<1:  # survival probability (stochastic-depth term: the probability that the residual branch is kept active)
from tensorflow_addons.layers import StochasticDepth
stoDepth = StochasticDepth(survival_probability=self.survival)
return stoDepth([shortcut, inputs])
else:
return tf.keras.layers.Add()([inputs,shortcut])
else:
return inputs
class MBConv(tf.keras.layers.Layer):
def __init__(self,input_channels,output_channels,kernel_size,activation='swish',
stride=1,expand_ratio=6,se_ratio=4,dropout=None,shortcut = 1,survival=None,epsilon=1e-5):
super(MBConv, self).__init__()
expand_channels = expand_ratio * input_channels
self.expand_ratio = expand_ratio
self.dropout = dropout
self.survival = survival
self.use_shortcut = shortcut
self.stride = stride
self.input_channels = input_channels
self.output_channels = output_channels
        self.has_se = (se_ratio is not None) and (0 < se_ratio <=1)  # a valid se_ratio means the SE block is used
if stride == 2:
self.poolAvage = tf.keras.layers.AveragePooling2D()
if input_channels != output_channels:
self.shortcut = tf.keras.layers.Conv2D(output_channels,kernel_size=1,strides=1,padding='same',use_bias = False)
#conv1x1ๅ็ปด
        if expand_ratio != 1:  # the PyTorch version has no expand==1 check; presumably a 1x1 conv adds nothing when output width equals input width
self.conv1x1_up = tf.keras.Sequential([
tf.keras.layers.Conv2D(expand_channels,kernel_size=1,strides=1,padding='same',use_bias=False),
tf.keras.layers.BatchNormalization(epsilon=epsilon),
tf.keras.layers.Activation(activation),
])
#depthwise3x3
self.dethwise_conv = tf.keras.Sequential([
tf.keras.layers.DepthwiseConv2D(kernel_size=kernel_size,strides=stride,padding='same',use_bias=False),
tf.keras.layers.BatchNormalization(epsilon=epsilon),
tf.keras.layers.Activation(activation),
])
#ๆฏๅฆdropout
if (expand_ratio != 1) and (dropout is not None) and (dropout != 0):
self.dropout = tf.keras.layers.Dropout(dropout)
#SEๆจกๅ
if self.has_se:
self.se = SE(expand_channels, se_ratio)
#conv1x1้็ปด
self.conv1x1_down = tf.keras.Sequential([
tf.keras.layers.Conv2D(output_channels,kernel_size=1,strides=1,use_bias=False),
tf.keras.layers.BatchNormalization(epsilon=epsilon),
])
def call(self,inputs):
shortcut = inputs
if self.stride == 2:
shortcut = self.poolAvage(shortcut)
if self.input_channels != self.output_channels:
shortcut = self.shortcut(shortcut)
        if self.expand_ratio != 1:  # 1x1 expansion convolution
inputs = self.conv1x1_up(inputs)
#depthwise3x3
inputs = self.dethwise_conv(inputs)
#dropout
if (self.expand_ratio != 1) and (self.dropout is not None) and (self.dropout != 0):
x = self.dropout(inputs)
#seๆจกๅ
if self.has_se:
inputs = self.se(inputs)
#conv1x1้็ปด
inputs = self.conv1x1_down(inputs)
#shortcut and stochastic Depth
        if self.use_shortcut:  # if the skip/residual connection is used
            if self.survival is not None and self.survival<1:  # survival probability (stochastic-depth term: the probability that the residual branch is kept active)
from tensorflow_addons.layers import StochasticDepth
stoDepth = StochasticDepth(survival_probability=self.survival)
return stoDepth([shortcut,inputs])
else:
return tf.keras.layers.Add()([inputs,shortcut])
else:
return inputs
class EfficientNetV2(tf.keras.Model):
'''
ๆ นๆฎEfficientNetV2่ฎบๆ้ๆฐๅฎ็ฐ็EfficientNet-V2-sๅๅฎๆนไปฃ็
Args:
cfg: stages็้
็ฝฎ
num_classes: ็ฑปๅซๆฐ้๏ผไนๆฏๆ็ป็่พๅบchannels
input: ่พๅ
ฅ็ๅผ ้, ่ฅๆไพไบๅๅฟฝ็ฅin_shape
activation: ้่ฟ้่ๅฑ็ๆฟๆดปๅฝๆฐ
width_mult: ๆจกๅๅฎฝๅบฆๅ ๅญ, ้ป่ฎคไธบ1
depth_mult: ๆจกๅๆทฑๅบฆๅ ๅญ,้ป่ฎคไธบ1
conv_dropout_rate: ๅจMBConv/Stageๅ็drop็ๆฆ็๏ผ0ๆnoneไปฃ่กจไธไฝฟ็จdropout
dropout_rate: ๅจGlobalAveragePoolingๅ็dropๆฆ็๏ผ0ๆnoneไปฃ่กจไธไฝฟ็จdropout
drop_connect: ๅจ่ทณๅฑ่ฟๆฅdropๆฆ็๏ผ0ๆnoneไปฃ่กจไธไฝฟ็จdropout
include_top: ๆฏๅฆๅ
ๅซๅ็ฑป็จ็ๆ้กถๅฑ
name: ๅฑ็ๅๅญ
Returns:a tf.keras model
'''
def __init__(self,cfg,num_classes,activation,width_mult:float,depth_mult=float,
conv_dropout_rate=None,dropout_rate=None,drop_connect=None,include_top=True,name=None,epsilon=1e-5):
super(EfficientNetV2, self).__init__(name=name)
self.dropout_rate = dropout_rate
self.include_top = include_top
#stage 0
self.stage0_conv3 = tf.keras.Sequential([
tf.keras.layers.Conv2D(24,kernel_size=3,strides=2,padding='same',use_bias=False),
tf.keras.layers.BatchNormalization(epsilon=1e-5),
tf.keras.layers.Activation(activation),
])
#ๆฅไธๆฅๆฏstage 1ๅฐstage 6
self.stage1to6 = tf.keras.Sequential()
for stage in cfg:
            count = int(math.ceil((stage[0] * depth_mult)))  # stage[0] is the repeat count for this stage
for j in range(count):
self.stage1to6.add(handleInputStageChannels(index=j,input_channels=_make_divisible(stage[4],width_mult),
output_channels=_make_divisible(stage[5],width_mult),
kernel_size=stage[1],activation=activation,expand_ratio=stage[3],
use_Fused=stage[6],stride=stage[2],se_ratio=stage[7],dropout=conv_dropout_rate,
drop_connect=drop_connect,shortcut=stage[8],survival=stage[9]))
#ๆ็ปstage
self.stage7_conv = tf.keras.Sequential([
tf.keras.layers.Conv2D(_make_divisible(1280,width_mult),kernel_size=1,padding='same',use_bias=False),
tf.keras.layers.BatchNormalization(epsilon=epsilon),
tf.keras.layers.Activation(activation),
])
self.stage7_globalAverPool = tf.keras.layers.GlobalAveragePooling2D()
if (self.dropout_rate is not None) and (self.dropout_rate != 0):
self.stage7_drop = tf.keras.layers.Dropout(dropout_rate)
self.stage7_classfier = tf.keras.Sequential([
tf.keras.layers.Dense(num_classes),
# tf.keras.layers.Activation('softmax'),
])
def call(self,inputs):
x = self.stage0_conv3(inputs)
x = self.stage1to6(x)
x = self.stage7_conv(x)
x = self.stage7_globalAverPool(x)
if (self.dropout_rate is not None) and (self.dropout_rate != 0):
x = self.stage7_drop(x)
x = self.stage7_classfier(x)
return x
def handleInputStageChannels(index,input_channels,output_channels,kernel_size,activation,expand_ratio,use_Fused,
stride=1,se_ratio=None,dropout=None,drop_connect=0.2,shortcut=1,survival=None):
'''
่ฟไธชๅฝๆฐ็จๆฅๅค็ๅจๅพช็ฏcountๆถ๏ผๅจๆฏ็ปcount็็ฌฌไธไธชstageๅฐ็ฌฌไบไธชstage็channelsๅๆข๏ผๅฏผ่ด็stage่พๅ
ฅ้ฎ้ข็ๆ
ๅต
Args:
count: ๆป็้ๅคๆฌกๆฐ
input_channels:
output_channels:
kernel_size:
activation:
expand_ratio:
use_Fused:
stride:
se_ratio:
dropout:
drop_connect:
Returns:
'''
if use_Fused:
return Fused_MBConv(input_channels = output_channels if index != 0 else input_channels,
output_channels = output_channels,kernel_size=kernel_size,activation=activation,
stride = 1 if index != 0 else stride,
expand_ratio = expand_ratio,se_ratio=se_ratio,dropout=dropout,shortcut=shortcut,survival=survival)
elif not use_Fused:
return MBConv(input_channels = output_channels if index != 0 else input_channels,
output_channels = output_channels,kernel_size=kernel_size,activation=activation,
stride = 1 if index != 0 else stride,
expand_ratio = expand_ratio,se_ratio=se_ratio,dropout=dropout,shortcut=shortcut,survival=survival)
class EfficientNetV2_S(tf.keras.Model):
def __init__(self,num_classes,activation='swish',width_mult=1.0,depth_mult=1.0,conv_dropout_rate=None,dropout_rate=None,drop_connect=0.2):
super(EfficientNetV2_S, self).__init__()
# ่ฎกๆฐ๏ผ่ฏฅstage้ๅคๅคๅฐๆฌก๏ผๆฉๅฑๆฏไพ๏ผMBConv็ฌฌไธไธชๅท็งฏๅฐ่พๅ
ฅ้้ๆฉๅฑๆๅ ๅ(1,4,6)๏ผSE็๏ผSEๆจกๅไธญ็ฌฌไธไธชFC/Convๅฑๅฐๅ
ถ็ผฉๆพๅฐๅคๅฐ๏ผ้ๅธธๆฏ1/4
# ๆฌกๆฐ0๏ผๅท็งฏๆ ธๅคงๅฐ1๏ผๆญฅ้ฟ2๏ผๆฉๅฑๆฏไพ3๏ผ่พๅ
ฅ้้ๆฐ4๏ผ่พๅบ้้ๆฐ5๏ผๆฏๅฆFused6๏ผSE็7๏ผๆฏๅฆshortcut8,็ๅญๆฆ็9
# 0, 1 2, 3 4 5 6 7 8 9
cfg = [
[2, 3, 1, 1, 24, 24, True, None, 1, 0.5], # stage 1
[4, 3, 2, 4, 24, 48, True, None, 1, 0.5], # stage 2
[4, 3, 2, 4, 48, 64, True, None, 1, 0.5], # stage 3
[6, 3, 2, 4, 64, 128, False, 4, 1, 0.5], # stage 4
[9, 3, 1, 6, 128, 160, False, 4, 1, 0.5], # stage 5
[15, 3, 2, 6, 160, 256, False, 4, 1, 0.5], # stage 6
]
self.efficientV2 = EfficientNetV2(cfg,num_classes=num_classes,activation=activation,
width_mult=width_mult,depth_mult=depth_mult,
conv_dropout_rate=None,dropout_rate=None,drop_connect=None)
def call(self,inputs):
return self.efficientV2(inputs)
if __name__ == '__main__':
x = tf.random.uniform([3,224,224,3])
model = EfficientNetV2_S(7)
model(x)
model.summary()
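    # Quick sanity check (added for illustration): the classifier head returns raw
    # logits, one row per image, so the shape printed here should be (3, 7).
    print(model(x).shape)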
```
#### File: ghostnet_pytorch/models/official_tf_ghostEfficientNetv2.py
```python
from tensorflow.keras import Model, layers, activations
import tensorflow_addons as tfa
import math
"""
round_filters and round_repeats are borrowed from official repo
https://github.com/google/automl/tree/master/efficientnetv2
"""
def round_filters(filters, multiplier=1.):
divisor = 8
min_depth = 8
filters *= multiplier
min_depth = min_depth or divisor
new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor)
return int(new_filters)
def round_repeats(repeats, multiplier=1.):
return int(math.ceil(multiplier * repeats))
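# Worked example (illustrative): round_filters(90) -> 88 (nearest multiple of 8,
# kept within 10% of the request) and round_repeats(4, 1.5) -> 6.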
def squeeze_and_excite(x, in_channels, out_channels, activation, reduction_ratio=4):
x = layers.GlobalAvgPool2D()(x)
x = layers.Dense(in_channels // reduction_ratio)(x)
x = layers.Activation(activation)(x)
x = layers.Dense(out_channels)(x)
x = layers.Activation(activations.sigmoid)(x)
return x
def ghost_conv(x, out_channels, kernel_size, stride, kernel_regularizer=None):
x1 = layers.Conv2D(out_channels // 2, kernel_size=kernel_size, strides=stride, padding="same",
use_bias=False, kernel_regularizer=kernel_regularizer)(x)
x2 = layers.BatchNormalization(epsilon=1e-5)(x1)
x2 = layers.Activation(activations.elu)(x2)
x2 = layers.DepthwiseConv2D(kernel_size=(3, 3), strides=1, padding="same",
use_bias=False, kernel_regularizer=kernel_regularizer)(x2)
return layers.Concatenate()([x1, x2])
def fused_mbconv(x, in_channels, out_channels, kernel_size, activation, stride=1, reduction_ratio=4,
expansion=6, dropout=None, drop_connect=.2):
shortcut = x
expanded = round_filters(in_channels * expansion)
if stride == 2:
shortcut = layers.AveragePooling2D()(shortcut)
if in_channels != out_channels:
shortcut = ghost_conv(shortcut, out_channels, (1, 1), 1)
if expansion != 1:
x = ghost_conv(x, expanded, kernel_size, stride)
x = layers.BatchNormalization(epsilon=1e-5)(x)
x = layers.Activation(activation)(x)
if (dropout is not None) and (dropout != 0.):
x = layers.Dropout(dropout)(x)
if reduction_ratio is not None:
se = squeeze_and_excite(x, in_channels, expanded, activation, reduction_ratio)
x = layers.Multiply()([x, se])
x = ghost_conv(x, out_channels, (1, 1) if expansion != 1 else kernel_size, 1)
x = layers.BatchNormalization(epsilon=1e-5)(x)
x = tfa.layers.StochasticDepth()([shortcut, x])
return x
def mbconv(x, in_channels, out_channels, kernel_size, activation, stride=1,
reduction_ratio=4, expansion=6, dropout=None, drop_connect=.2):
shortcut = x
expanded = round_filters(in_channels * expansion)
if stride == 2:
shortcut = layers.AveragePooling2D()(shortcut)
if in_channels != out_channels:
shortcut = ghost_conv(shortcut, out_channels, (1, 1), 1)
if expansion != 1:
x = ghost_conv(x, expanded, (1, 1), 1)
x = layers.BatchNormalization(epsilon=1e-5)(x)
x = layers.Activation(activation)(x)
x = layers.DepthwiseConv2D(kernel_size=kernel_size, strides=stride, padding="same", use_bias=False)(x)
x = layers.BatchNormalization(epsilon=1e-5)(x)
x = layers.Activation(activation)(x)
if (expansion != 1) and (dropout is not None) and (dropout != 0.):
x = layers.Dropout(dropout)(x)
if reduction_ratio is not None:
se = squeeze_and_excite(x, in_channels, expanded, activation, reduction_ratio)
x = layers.Multiply()([x, se])
x = ghost_conv(x, out_channels, (1, 1), 1)
x = layers.BatchNormalization(epsilon=1e-5)(x)
x = tfa.layers.StochasticDepth()([shortcut, x])
return x
def repeat(x, count, in_channels, out_channels, kernel_size, activation,
stride=1, reduction_ratio=None, expansion=6, fused=False, dropout=None, drop_connect=.2):
for i in range(count):
if fused:
x = fused_mbconv(x, in_channels, out_channels, kernel_size,
activation, stride, reduction_ratio, expansion, dropout, drop_connect)
else:
x = mbconv(x, in_channels, out_channels, kernel_size, activation, stride,
reduction_ratio, expansion, dropout, drop_connect)
return x
def stage(x, count, in_channels, out_channels, kernel_size, activation,
stride=1, reduction_ratio=None, expansion=6, fused=False, dropout=None, drop_connect=.2):
x = repeat(x, count=1, in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size,
activation=activation, stride=stride, reduction_ratio=reduction_ratio,
expansion=expansion, fused=fused, dropout=dropout, drop_connect=drop_connect)
x = repeat(x, count=count - 1, in_channels=out_channels, out_channels=out_channels, kernel_size=kernel_size,
activation=activation, stride=1, reduction_ratio=reduction_ratio,
expansion=expansion, fused=fused, dropout=dropout, drop_connect=drop_connect)
return x
def base(cfg, num_classes=1000, input_tensor=None, activation=activations.swish,
width_mult=1., depth_mult=1., conv_dropout_rate=None, dropout_rate=None, drop_connect=.2):
"""
EfficientNet-V2-s, re-implementation according to
https://arxiv.org/abs/2104.00298
and official code
https://github.com/google/automl/tree/master/efficientnetv2
EfficientNetV2: Smaller Models and Faster Training
by <NAME>, <NAME>
:param cfg: configuration of stages
:param num_classes: number of classes to output
:param input_tensor: given a tensor as input, if provided, in_shape will be ignored
:param activation: activation to use across hidden layers
:param width_mult: width factor, default to 1.0
:param depth_mult: depth multiplier, default to 1.0
:param conv_dropout_rate: probability to drop after each MBConv/stage, 0 or None means no dropout will be applied
:param dropout_rate: probability to drop after GlobalAveragePooling, 0 or None means no dropout will be applied
:param drop_connect: probability to drop spatially in skip connections, 0 or None means no dropout will be applied
:return: a tf.keras model
"""
inp = input_tensor
# stage 0
x = layers.Conv2D(cfg[0][4], kernel_size=(3, 3), strides=2, padding="same", use_bias=False)(inp)
x = layers.BatchNormalization(epsilon=1e-5)(x)
x = layers.Activation(activation)(x)
for stage_cfg in cfg:
x = stage(x, count=round_repeats(stage_cfg[0], depth_mult),
in_channels=round_filters(stage_cfg[4], width_mult),
out_channels=round_filters(stage_cfg[5], width_mult),
kernel_size=stage_cfg[1], activation=activation, stride=stage_cfg[2],
reduction_ratio=stage_cfg[7], expansion=stage_cfg[3], fused=stage_cfg[6] == 1,
dropout=conv_dropout_rate, drop_connect=drop_connect)
# final stage
x = layers.Conv2D(round_filters(1280, width_mult), (1, 1), strides=1, padding="same", use_bias=False)(x)
x = layers.BatchNormalization(epsilon=1e-5)(x)
x = layers.Activation(activation)(x)
x = layers.GlobalAvgPool2D()(x)
if (dropout_rate is not None) and (dropout_rate != 0):
x = layers.Dropout(dropout_rate)(x)
x = layers.Dense(num_classes)(x)
x = layers.Activation(activations.softmax)(x)
return Model(inp, x)
def s(in_shape=(224, 224, 3), num_classes=1000, input_tensor=None, activation=activations.swish,
width_mult=1., depth_mult=1., conv_dropout_rate=None, dropout_rate=None, drop_connect=.2):
"""
EfficientNet-V2-s, re-implementation according to
https://arxiv.org/abs/2104.00298
and official code
https://github.com/google/automl/tree/master/efficientnetv2
EfficientNetV2: Smaller Models and Faster Training
by <NAME>, <NAME>
:param in_shape: input shape of the model, in form of (H, W, C)
:param num_classes: number of classes to output
:param input_tensor: given a tensor as input, if provided, in_shape will be ignored
:param activation: activation to use across hidden layers
:param width_mult: width factor, default to 1.0
:param depth_mult: depth multiplier, default to 1.0
:param conv_dropout_rate: probability to drop after each MBConv/stage, 0 or None means no dropout will be applied
:param dropout_rate: probability to drop after GlobalAveragePooling, 0 or None means no dropout will be applied
:param drop_connect: probability to drop spatially in skip connections, 0 or None means no dropout will be applied
:return: a tf.keras model
"""
# each row is a stage
# count, kernel size, stride, expansion ratio, in channel, out channel, is fused(1 if true), reduction ratio(None if no se)
cfg = [
[2, 3, 1, 1, 24, 24, 1, None],
[4, 3, 2, 4, 24, 48, 1, None],
[4, 3, 2, 4, 48, 64, 1, None],
[6, 3, 2, 4, 64, 128, 0, 4],
[9, 3, 1, 6, 128, 160, 0, 4],
[15, 3, 2, 6, 160, 256, 0, 4],
]
input_tensor = layers.Input(in_shape) if input_tensor is None else input_tensor
return base(cfg=cfg, num_classes=num_classes, input_tensor=input_tensor, activation=activation,
width_mult=width_mult, depth_mult=depth_mult, conv_dropout_rate=conv_dropout_rate,
dropout_rate=dropout_rate, drop_connect=drop_connect)
def m(in_shape=(224, 224, 3), num_classes=1000, input_tensor=None, activation=activations.swish,
width_mult=1.0, depth_mult=1., conv_dropout_rate=None, dropout_rate=None, drop_connect=.2):
"""
EfficientNet-V2-m, re-implementation according to
https://arxiv.org/abs/2104.00298
and official code
https://github.com/google/automl/tree/master/efficientnetv2
EfficientNetV2: Smaller Models and Faster Training
by <NAME>, <NAME>
:param in_shape: input shape of the model, in form of (H, W, C)
:param num_classes: number of classes to output
:param input_tensor: given a tensor as input, if provided, in_shape will be ignored
:param activation: activation to use across hidden layers
:param width_mult: width factor, default to 1.0
:param depth_mult: depth multiplier, default to 1.0
:param conv_dropout_rate: probability to drop after each MBConv/stage, 0 or None means no dropout will be applied
:param dropout_rate: probability to drop after GlobalAveragePooling, 0 or None means no dropout will be applied
:param drop_connect: probability to drop spatially in skip connections, 0 or None means no dropout will be applied
:return: a tf.keras model
"""
# each row is a stage
# count, kernel size, stride, expansion ratio, in channel, out channel, is fused(1 if true), reduction ratio(None if no se)
cfg = [
[3, 3, 1, 1, 24, 24, 1, None],
[5, 3, 2, 4, 24, 48, 1, None],
[5, 3, 2, 4, 48, 80, 1, None],
[7, 3, 2, 4, 80, 160, 0, 4],
[14, 3, 1, 6, 160, 176, 0, 4],
[18, 3, 2, 6, 176, 304, 0, 4],
[5, 3, 1, 6, 304, 512, 0, 4],
]
input_tensor = layers.Input(in_shape) if input_tensor is None else input_tensor
return base(cfg=cfg, num_classes=num_classes, input_tensor=input_tensor, activation=activation,
width_mult=width_mult, depth_mult=depth_mult, conv_dropout_rate=conv_dropout_rate,
dropout_rate=dropout_rate, drop_connect=drop_connect)
def l(in_shape=(224, 224, 3), num_classes=1000, input_tensor=None, activation=activations.swish,
width_mult=1.0, depth_mult=1., conv_dropout_rate=None, dropout_rate=None, drop_connect=.2):
"""
EfficientNet-V2-l, re-implementation according to
https://arxiv.org/abs/2104.00298
and official code
https://github.com/google/automl/tree/master/efficientnetv2
EfficientNetV2: Smaller Models and Faster Training
by <NAME>, <NAME>
:param in_shape: input shape of the model, in form of (H, W, C)
:param num_classes: number of classes to output
:param input_tensor: given a tensor as input, if provided, in_shape will be ignored
:param activation: activation to use across hidden layers
:param width_mult: width factor, default to 1.0
:param depth_mult: depth multiplier, default to 1.0
:param conv_dropout_rate: probability to drop after each MBConv/stage, 0 or None means no dropout will be applied
:param dropout_rate: probability to drop after GlobalAveragePooling, 0 or None means no dropout will be applied
:param drop_connect: probability to drop spatially in skip connections, 0 or None means no dropout will be applied
:return: a tf.keras model
"""
# each row is a stage
# count, kernel size, stride, expansion ratio, in channel, out channel, is fused(1 if true), reduction ratio(None if no se)
cfg = [
[4, 3, 1, 1, 32, 32, 1, None],
[7, 3, 2, 4, 32, 64, 1, None],
[7, 3, 2, 4, 64, 96, 1, None],
[10, 3, 2, 4, 96, 192, 0, 4],
[19, 3, 1, 6, 192, 224, 0, 4],
[25, 3, 2, 6, 224, 384, 0, 4],
[7, 3, 1, 6, 384, 640, 0, 4],
]
input_tensor = layers.Input(in_shape) if input_tensor is None else input_tensor
return base(cfg=cfg, num_classes=num_classes, input_tensor=input_tensor, activation=activation,
width_mult=width_mult, depth_mult=depth_mult, conv_dropout_rate=conv_dropout_rate,
dropout_rate=dropout_rate, drop_connect=drop_connect)
def xl(in_shape=(224, 224, 3), num_classes=1000, input_tensor=None, activation=activations.swish,
width_mult=1.0, depth_mult=1., conv_dropout_rate=None, dropout_rate=None, drop_connect=.2):
"""
EfficientNet-V2-xl, re-implementation according to
https://arxiv.org/abs/2104.00298
and official code
https://github.com/google/automl/tree/master/efficientnetv2
EfficientNetV2: Smaller Models and Faster Training
by <NAME>, <NAME>
:param in_shape: input shape of the model, in form of (H, W, C)
:param num_classes: number of classes to output
:param input_tensor: given a tensor as input, if provided, in_shape will be ignored
:param activation: activation to use across hidden layers
:param width_mult: width factor, default to 1.0
:param depth_mult: depth multiplier, default to 1.0
:param conv_dropout_rate: probability to drop after each MBConv/stage, 0 or None means no dropout will be applied
:param dropout_rate: probability to drop after GlobalAveragePooling, 0 or None means no dropout will be applied
:param drop_connect: probability to drop spatially in skip connections, 0 or None means no dropout will be applied
:return: a tf.keras model
"""
cfg = [
[4, 3, 1, 1, 32, 32, 1, None],
[8, 3, 2, 4, 32, 64, 1, None],
[8, 3, 2, 4, 64, 96, 1, None],
[16, 3, 2, 4, 96, 192, 0, 4],
[24, 3, 1, 6, 192, 256, 0, 4],
[32, 3, 2, 6, 256, 512, 0, 4],
[8, 3, 1, 6, 512, 640, 0, 4],
]
input_tensor = layers.Input(in_shape) if input_tensor is None else input_tensor
return base(cfg=cfg, num_classes=num_classes, input_tensor=input_tensor, activation=activation,
width_mult=width_mult, depth_mult=depth_mult, conv_dropout_rate=conv_dropout_rate,
dropout_rate=dropout_rate, drop_connect=drop_connect)
def main():
model = s((224, 224, 3), 1000)
model.summary()
if __name__ == '__main__':
main()
```
#### File: ghostnet_pytorch/models/tf2_efficientNetV2.py
```python
import os
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend as K
from tensorflow.keras.models import Model
from tensorflow.keras.layers import (
Activation,
Add,
BatchNormalization,
Conv2D,
Dense,
DepthwiseConv2D,
Dropout,
GlobalAveragePooling2D,
Input,
PReLU,
Reshape,
Multiply,
)
BATCH_NORM_DECAY = 0.9
BATCH_NORM_EPSILON = 0.001
CONV_KERNEL_INITIALIZER = keras.initializers.VarianceScaling(scale=2.0, mode="fan_out", distribution="truncated_normal")
# CONV_KERNEL_INITIALIZER = 'glorot_uniform'
BLOCK_CONFIGS = {
"b0": { # width 1.0, depth 1.0
"first_conv_filter": 32,
"expands": [1, 4, 4, 4, 6, 6],
"out_channels": [16, 32, 48, 96, 112, 192],
"depthes": [1, 2, 2, 3, 5, 8],
"strides": [1, 2, 2, 2, 1, 2],
"use_ses": [0, 0, 0, 1, 1, 1],
},
"b1": { # width 1.0, depth 1.1
"first_conv_filter": 32,
"expands": [1, 4, 4, 4, 6, 6],
"out_channels": [16, 32, 48, 96, 112, 192],
"depthes": [2, 3, 3, 4, 6, 9],
"strides": [1, 2, 2, 2, 1, 2],
"use_ses": [0, 0, 0, 1, 1, 1],
},
"b2": { # width 1.1, depth 1.2
"first_conv_filter": 32,
"output_conv_filter": 1408,
"expands": [1, 4, 4, 4, 6, 6],
"out_channels": [16, 32, 56, 104, 120, 208],
"depthes": [2, 3, 3, 4, 6, 10],
"strides": [1, 2, 2, 2, 1, 2],
"use_ses": [0, 0, 0, 1, 1, 1],
},
"b3": { # width 1.2, depth 1.4
"first_conv_filter": 40,
"output_conv_filter": 1536,
"expands": [1, 4, 4, 4, 6, 6],
"out_channels": [16, 40, 56, 112, 136, 232],
"depthes": [2, 3, 3, 5, 7, 12],
"strides": [1, 2, 2, 2, 1, 2],
"use_ses": [0, 0, 0, 1, 1, 1],
},
"s": { # width 1.4, depth 1.8
"first_conv_filter": 24,
"output_conv_filter": 1280,
"expands": [1, 4, 4, 4, 6, 6],
"out_channels": [24, 48, 64, 128, 160, 256],
"depthes": [2, 4, 4, 6, 9, 15],
"strides": [1, 2, 2, 2, 1, 2],
"use_ses": [0, 0, 0, 1, 1, 1],
},
"m": { # width 1.6, depth 2.2
"first_conv_filter": 24,
"output_conv_filter": 1280,
"expands": [1, 4, 4, 4, 6, 6, 6],
"out_channels": [24, 48, 80, 160, 176, 304, 512],
"depthes": [3, 5, 5, 7, 14, 18, 5],
"strides": [1, 2, 2, 2, 1, 2, 1],
"use_ses": [0, 0, 0, 1, 1, 1, 1],
},
"l": { # width 2.0, depth 3.1
"first_conv_filter": 32,
"output_conv_filter": 1280,
"expands": [1, 4, 4, 4, 6, 6, 6],
"out_channels": [32, 64, 96, 192, 224, 384, 640],
"depthes": [4, 7, 7, 10, 19, 25, 7],
"strides": [1, 2, 2, 2, 1, 2, 1],
"use_ses": [0, 0, 0, 1, 1, 1, 1],
},
"xl": {
"first_conv_filter": 32,
"output_conv_filter": 1280,
"expands": [1, 4, 4, 4, 6, 6, 6],
"out_channels": [32, 64, 96, 192, 256, 512, 640],
"depthes": [4, 8, 8, 16, 24, 32, 8],
"strides": [1, 2, 2, 2, 1, 2, 1],
"use_ses": [0, 0, 0, 1, 1, 1, 1],
},
}
# A leading underscore marks an internal helper function, intended to be called from inside the module/classes.
def _make_divisible(v, divisor=4, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
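# Worked example (added note, not in the original file): with divisor=8,
# _make_divisible(19, 8) first rounds 19 down to 16, and because 16 < 0.9 * 19
# it is bumped up by one divisor to 24; _make_divisible(22, 8) returns 24 directly.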
def conv2d_no_bias(inputs, filters, kernel_size, strides=1, padding="VALID", name=""):
    '''
    Applies a Conv2D to `inputs` without bias; the kernel is initialized with CONV_KERNEL_INITIALIZER.
    '''
return Conv2D(filters, kernel_size, strides=strides, padding=padding, use_bias=False, kernel_initializer=CONV_KERNEL_INITIALIZER, name=name + "conv")(
inputs
)
def batchnorm_with_activation(inputs, activation="swish", name=""):
"""Performs a batch normalization followed by an activation. """
bn_axis = 1 if K.image_data_format() == "channels_first" else -1
nn = BatchNormalization(
axis=bn_axis,
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
name=name + "bn",
)(inputs)
if activation:
nn = Activation(activation=activation, name=name + activation)(nn)
# nn = PReLU(shared_axes=[1, 2], alpha_initializer=tf.initializers.Constant(0.25), name=name + "PReLU")(nn)
return nn
def se_module_by_conv2d(inputs, se_ratio=4, name=""):
    filters = inputs.shape[-1]  # number of channels of the feature map
    reduction = filters // se_ratio
    # mean over the spatial axes, i.e. global average pooling
    se = tf.reduce_mean(inputs, (1, 2), keepdims=True)
    # the two 1x1 Conv2D layers below act as fully connected layers
    se = Conv2D(reduction, kernel_size=1, use_bias=True, kernel_initializer=CONV_KERNEL_INITIALIZER, name=name + "1_conv")(se)
    se = Activation("swish")(se)
    se = Conv2D(filters, kernel_size=1, use_bias=True, kernel_initializer=CONV_KERNEL_INITIALIZER, name=name + "2_conv")(se)
    se = Activation("sigmoid")(se)
    return Multiply()([inputs, se])
def se_module_by_dense(inputs, se_ratio=4, name=""):
    filters = inputs.shape[-1]  # number of channels of the feature map
    reduction = filters // se_ratio
    # mean over the spatial axes, i.e. global average pooling
    se = tf.math.reduce_mean(inputs, (1, 2), keepdims=True)
    se = tf.keras.layers.Dense(reduction, activation="swish", use_bias=True)(se)
    se = tf.keras.layers.Dense(filters, activation='sigmoid', use_bias=True)(se)
    return tf.keras.layers.Multiply()([inputs, se])
class SE(tf.keras.layers.Layer):
    def __init__(self, se_ratio: int = 4, channel_pos=-1, name: str = ""):
        '''
        SE module implemented with 1x1 convolutions; reduce_mean is used as the
        GlobalAveragePooling. channel_pos tells whether the channel dimension is
        the second or the last axis. Sub-layers are created in build() from the
        input shape so the layer can be called like any other Keras layer.
        '''
        super(SE, self).__init__(name=name or None)
        self.se_ratio = se_ratio
        self.ch_pos = channel_pos

    def build(self, input_shape):
        self.filters = int(input_shape[self.ch_pos])
        self.reduction = self.filters // self.se_ratio
        self.conv1 = Conv2D(self.reduction, 1, 1, use_bias=True, kernel_initializer=CONV_KERNEL_INITIALIZER, name='1_conv')
        self.act1 = Activation('swish')
        self.conv2 = Conv2D(self.filters, 1, 1, use_bias=True, kernel_initializer=CONV_KERNEL_INITIALIZER, name='2_conv')
        self.act2 = Activation('sigmoid')
        self.multiply = Multiply()

    def call(self, inputs):
        x = tf.reduce_mean(inputs, (1, 2), keepdims=True)
        x = self.conv1(x)
        x = self.act1(x)
        x = self.conv2(x)
        x = self.act2(x)
        out = self.multiply([x, inputs])
        return out
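# Hypothetical usage sketch (added note, not in the original file): once built from the
# input shape, the SE layer above behaves like any other Keras layer, e.g.
#   x = tf.ones((1, 8, 8, 16))
#   y = SE(se_ratio=4)(x)   # y.shape == (1, 8, 8, 16)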
def MBConv(inputs, output_channel, stride, expand_ratio, shortcut, survival=None, use_se=0, is_fused=True, name=""):
    '''
    MBConv / Fused-MBConv block, written following the paper.
    '''
    input_channel = inputs.shape[-1]  # the input channel count sits in the last dimension
    if is_fused and expand_ratio != 1:  # Fused-MBConv: expansion with a single 3x3 convolution
        nn = Conv2D(input_channel * expand_ratio, (3, 3), strides=stride, use_bias=False, kernel_initializer=CONV_KERNEL_INITIALIZER, padding='same', name=name + "fused_conv3x3_")(inputs)
        nn = BatchNormalization(momentum=BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON, name=name + "fused_conv3x3_bn")(nn)
        nn = Activation(activation='swish', name=name + "fused_conv3x3_swish")(nn)
    elif not is_fused:  # MBConv: 1x1 expansion followed by a 3x3 depthwise convolution
        nn = Conv2D(input_channel * expand_ratio, (1, 1), padding='same', use_bias=False, kernel_initializer=CONV_KERNEL_INITIALIZER, name=name + "expand_conv1x1_")(inputs)
        nn = BatchNormalization(momentum=BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON, name=name + "expand_conv1x1_bn")(nn)
        nn = Activation(activation='swish', name=name + "expand_conv1x1_swish")(nn)
        nn = DepthwiseConv2D((3, 3), padding='same', strides=stride, use_bias=False, depthwise_initializer=CONV_KERNEL_INITIALIZER, name=name + "depthwise_conv_")(nn)
        nn = BatchNormalization(momentum=BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON, name=name + "depthwise_conv_bn")(nn)
        nn = Activation(activation='swish', name=name + "depthwise_conv_swish")(nn)
    else:  # Fused-MBConv with expand_ratio == 1: no separate expansion, only the 3x3 projection below
        nn = inputs
    if use_se:  # optional squeeze-and-excitation module
        nn = se_module_by_conv2d(nn, se_ratio=4 * expand_ratio, name=name + "se_")
    # pw-linear projection
    if is_fused and expand_ratio == 1:
        nn = conv2d_no_bias(nn, output_channel, (3, 3), strides=stride, padding="same", name=name + "fu_")
        nn = batchnorm_with_activation(nn, name=name + "fu_")
    else:
        nn = conv2d_no_bias(nn, output_channel, (1, 1), strides=(1, 1), padding="same", name=name + "MB_pw_")
        nn = batchnorm_with_activation(nn, activation=None, name=name + "MB_pw_")
    if shortcut:  # residual connection, optionally with stochastic depth
if survival is not None and survival < 1:
from tensorflow_addons.layers import StochasticDepth
return StochasticDepth(float(survival))([inputs, nn])
else:
return Add()([inputs, nn])
else:
return nn
def EfficientNetV2(
model_type,#s,m,l
input_shape=(None, None, 3),
num_classes=1000,
dropout=0.2,
first_strides=2,
survivals=None,
classifier_activation="softmax",
pretrained="imagenet21k-ft1k",
model_name="EfficientNetV2",
    kwargs=None,  # Not used, just receiving extra parameters
):
blocks_config = BLOCK_CONFIGS.get(model_type.lower(), BLOCK_CONFIGS["s"])
expands = blocks_config["expands"]
out_channels = blocks_config["out_channels"]
depthes = blocks_config["depthes"]
strides = blocks_config["strides"]
use_ses = blocks_config["use_ses"]
first_conv_filter = blocks_config.get("first_conv_filter", out_channels[0])
output_conv_filter = blocks_config.get("output_conv_filter", 1280)
inputs = Input(shape=input_shape)
out_channel = _make_divisible(first_conv_filter, 8)
nn = conv2d_no_bias(inputs, out_channel, (3, 3), strides=first_strides, padding="same", name="stem_")
nn = batchnorm_with_activation(nn, name="stem_")
# StochasticDepth survival_probability values
total_layers = sum(depthes)
if isinstance(survivals, float):
survivals = [survivals] * total_layers
elif isinstance(survivals, (list, tuple)) and len(survivals) == 2:
start, end = survivals
survivals = [start - (1 - end) * float(ii) / total_layers for ii in range(total_layers)]
else:
survivals = [None] * total_layers
survivals = [survivals[int(sum(depthes[:id])) : sum(depthes[: id + 1])] for id in range(len(depthes))]
pre_out = out_channel
for id, (expand, out_channel, depth, survival, stride, se) in enumerate(zip(expands, out_channels, depthes, survivals, strides, use_ses)):
out = _make_divisible(out_channel, 8)
is_fused = True if se == 0 else False
for block_id in range(depth):
stride = stride if block_id == 0 else 1
shortcut = True if out == pre_out and stride == 1 else False
name = "stack_{}_block{}_".format(id, block_id)
nn = MBConv(nn, out, stride, expand, shortcut, survival[block_id], se, is_fused, name=name)
pre_out = out
output_conv_filter = _make_divisible(output_conv_filter, 8)
nn = conv2d_no_bias(nn, output_conv_filter, (1, 1), strides=(1, 1), padding="valid", name="post_")
nn = batchnorm_with_activation(nn, name="post_")
if num_classes > 0:
nn = GlobalAveragePooling2D(name="avg_pool")(nn)
if dropout > 0 and dropout < 1:
nn = Dropout(dropout)(nn)
nn = Dense(num_classes, activation=classifier_activation, name="predictions")(nn)
    model = Model(inputs=inputs, outputs=nn, name=model_name)
reload_model_weights(model, model_type, pretrained)
return model
def reload_model_weights(model, model_type, pretrained="imagenet"):
pretrained_dd = {"imagenet": "imagenet", "imagenet21k": "21k", "imagenet21k-ft1k": "21k-ft1k"}
    if pretrained not in pretrained_dd:
        print(">>>> No pretrained weights available, model will be randomly initialized")
return
pre_url = "https://github.com/leondgarse/keras_efficientnet_v2/releases/download/v1.0.0/efficientnetv2-{}-{}.h5"
url = pre_url.format(model_type, pretrained_dd[pretrained])
file_name = os.path.basename(url)
try:
pretrained_model = keras.utils.get_file(file_name, url, cache_subdir="models/efficientnetv2")
    except Exception:
print("[Error] will not load weights, url not found or download failed:", url)
return
else:
print(">>>> Load pretraind from:", pretrained_model)
model.load_weights(pretrained_model, by_name=True, skip_mismatch=True)
def EfficientNetV2B0(input_shape=(224, 224, 3), num_classes=1000, dropout=0.2, classifier_activation="softmax", pretrained="imagenet21k-ft1k", **kwargs):
return EfficientNetV2(model_type="b0", model_name="EfficientNetV2B0", **locals(), **kwargs)
def EfficientNetV2B1(input_shape=(240, 240, 3), num_classes=1000, dropout=0.2, classifier_activation="softmax", pretrained="imagenet21k-ft1k", **kwargs):
return EfficientNetV2(model_type="b1", model_name="EfficientNetV2B1", **locals(), **kwargs)
def EfficientNetV2B2(input_shape=(260, 260, 3), num_classes=1000, dropout=0.3, classifier_activation="softmax", pretrained="imagenet21k-ft1k", **kwargs):
return EfficientNetV2(model_type="b2", model_name="EfficientNetV2B2", **locals(), **kwargs)
def EfficientNetV2B3(input_shape=(300, 300, 3), num_classes=1000, dropout=0.3, classifier_activation="softmax", pretrained="imagenet21k-ft1k", **kwargs):
return EfficientNetV2(model_type="b3", model_name="EfficientNetV2B3", **locals(), **kwargs)
def EfficientNetV2S(input_shape=(384, 384, 3), num_classes=1000, dropout=0.2, classifier_activation="softmax", pretrained="imagenet21k-ft1k", **kwargs):
return EfficientNetV2(model_type="s", model_name="EfficientNetV2S", **locals(), **kwargs)
def EfficientNetV2M(input_shape=(480, 480, 3), num_classes=1000, dropout=0.3, classifier_activation="softmax", pretrained="imagenet21k-ft1k", **kwargs):
return EfficientNetV2(model_type="m", model_name="EfficientNetV2M", **locals(), **kwargs)
def EfficientNetV2L(input_shape=(480, 480, 3), num_classes=1000, dropout=0.4, classifier_activation="softmax", pretrained="imagenet21k-ft1k", **kwargs):
return EfficientNetV2(model_type="l", model_name="EfficientNetV2L", **locals(), **kwargs)
def EfficientNetV2XL(input_shape=(512, 512, 3), num_classes=1000, dropout=0.4, classifier_activation="softmax", pretrained="imagenet21k-ft1k", **kwargs):
return EfficientNetV2(model_type="xl", model_name="EfficientNetV2XL", **locals(), **kwargs)
def get_actual_survival_probabilities(model):
from tensorflow_addons.layers import StochasticDepth
return [ii.survival_probability for ii in model.layers if isinstance(ii, StochasticDepth)]
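# Hypothetical usage sketch (added note, not in the original file): builds the "s"
# variant without pretrained weights and prints a summary. Assumes TensorFlow 2.x is
# installed; the 224x224 input and 10-class head are arbitrary choices for illustration.
if __name__ == "__main__":
    demo_model = EfficientNetV2(model_type="s", input_shape=(224, 224, 3), num_classes=10, pretrained=None)
    demo_model.summary()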
``` |
{
"source": "JiJiJiang/LightCNN",
"score": 3
} |
#### File: JiJiJiang/LightCNN/light_cnn.py
```python
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class mfm(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, type=1):
super(mfm, self).__init__()
self.out_channels = out_channels
if type == 1:
self.filter = nn.Conv2d(in_channels, 2*out_channels, kernel_size=kernel_size, stride=stride, padding=padding)
else:
self.filter = nn.Linear(in_channels, 2*out_channels)
def forward(self, x):
x = self.filter(x)
out = torch.split(x, self.out_channels, 1)
return torch.max(out[0], out[1])
class group(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
super(group, self).__init__()
self.conv_a = mfm(in_channels, in_channels, 1, 1, 0)
self.conv = mfm(in_channels, out_channels, kernel_size, stride, padding)
def forward(self, x):
x = self.conv_a(x)
x = self.conv(x)
return x
class resblock(nn.Module):
def __init__(self, in_channels, out_channels):
super(resblock, self).__init__()
self.conv1 = mfm(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
self.conv2 = mfm(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
def forward(self, x):
res = x
out = self.conv1(x)
out = self.conv2(out)
out = out + res
return out
class network_9layers(nn.Module):
def __init__(self, num_classes=79077):
super(network_9layers, self).__init__()
self.features = nn.Sequential(
mfm(1, 48, 5, 1, 2),
nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True),
group(48, 96, 3, 1, 1),
nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True),
group(96, 192, 3, 1, 1),
nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True),
group(192, 128, 3, 1, 1),
group(128, 128, 3, 1, 1),
nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True),
)
self.fc1 = mfm(8*8*128, 256, type=0)
self.fc2 = nn.Linear(256, num_classes)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.fc1(x)
x = F.dropout(x, training=self.training)
out = self.fc2(x)
return out, x
class network_29layers(nn.Module):
def __init__(self, block, layers, num_classes=79077):
super(network_29layers, self).__init__()
self.conv1 = mfm(1, 48, 5, 1, 2)
self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
self.block1 = self._make_layer(block, layers[0], 48, 48)
self.group1 = group(48, 96, 3, 1, 1)
self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
self.block2 = self._make_layer(block, layers[1], 96, 96)
self.group2 = group(96, 192, 3, 1, 1)
self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
self.block3 = self._make_layer(block, layers[2], 192, 192)
self.group3 = group(192, 128, 3, 1, 1)
self.block4 = self._make_layer(block, layers[3], 128, 128)
self.group4 = group(128, 128, 3, 1, 1)
self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
self.fc = mfm(8*8*128, 256, type=0)
self.fc2 = nn.Linear(256, num_classes)
def _make_layer(self, block, num_blocks, in_channels, out_channels):
layers = []
for i in range(0, num_blocks):
layers.append(block(in_channels, out_channels))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.pool1(x)
x = self.block1(x)
x = self.group1(x)
x = self.pool2(x)
x = self.block2(x)
x = self.group2(x)
x = self.pool3(x)
x = self.block3(x)
x = self.group3(x)
x = self.block4(x)
x = self.group4(x)
x = self.pool4(x)
x = x.view(x.size(0), -1)
fc = self.fc(x)
fc = F.dropout(fc, training=self.training)
out = self.fc2(fc)
return out, fc
class network_29layers_v2(nn.Module):
def __init__(self, block, layers, num_classes=79077):
super(network_29layers_v2, self).__init__()
self.conv1 = mfm(1, 48, 5, 1, 2)
self.block1 = self._make_layer(block, layers[0], 48, 48)
self.group1 = group(48, 96, 3, 1, 1)
self.block2 = self._make_layer(block, layers[1], 96, 96)
self.group2 = group(96, 192, 3, 1, 1)
self.block3 = self._make_layer(block, layers[2], 192, 192)
self.group3 = group(192, 128, 3, 1, 1)
self.block4 = self._make_layer(block, layers[3], 128, 128)
self.group4 = group(128, 128, 3, 1, 1)
self.fc = nn.Linear(8*8*128, 256)
self.fc2 = nn.Linear(256, num_classes, bias=False)
def _make_layer(self, block, num_blocks, in_channels, out_channels):
layers = []
for i in range(0, num_blocks):
layers.append(block(in_channels, out_channels))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)
x = self.block1(x)
x = self.group1(x)
x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)
x = self.block2(x)
x = self.group2(x)
x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)
x = self.block3(x)
x = self.group3(x)
x = self.block4(x)
x = self.group4(x)
x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)
x = x.view(x.size(0), -1)
fc = self.fc(x)
x = F.dropout(fc, training=self.training)
out = self.fc2(x)
return out, fc
def LightCNN_9Layers(**kwargs):
model = network_9layers(**kwargs)
return model
def LightCNN_29Layers(**kwargs):
model = network_29layers(resblock, [1, 2, 3, 4], **kwargs)
return model
def LightCNN_29Layers_v2(**kwargs):
model = network_29layers_v2(resblock, [1, 2, 3, 4], **kwargs)
return model
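# Hypothetical usage sketch (added note, not in the original file): the networks expect
# single-channel 128x128 inputs and return (logits, 256-d feature). Assumes PyTorch is installed.
if __name__ == "__main__":
    net = LightCNN_9Layers(num_classes=10)
    dummy = torch.randn(2, 1, 128, 128)
    logits, features = net(dummy)
    print(logits.shape, features.shape)  # torch.Size([2, 10]) torch.Size([2, 256])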
``` |
{
"source": "jijinggang/fastapi-admin",
"score": 2
} |
#### File: fastapi-admin/fastapi_admin/__init__.py
```python
import re
from . import routes
def version():
with open("pyproject.toml") as f:
        ret = re.findall(r'version = "(\d+\.\d+\.\d+)"', f.read())
return ret[0]
```
#### File: fastapi-admin/fastapi_admin/models.py
```python
from tortoise import Model, fields
from fastapi_admin import enums
class User(Model):
username = fields.CharField(max_length=20, unique=True)
password = fields.CharField(max_length=200, description="Will auto hash with raw password")
class Meta:
abstract = True
class Permission(Model):
label = fields.CharField(max_length=50)
model = fields.CharField(max_length=50)
action: enums.PermissionAction = fields.IntEnumField(
enums.PermissionAction, default=enums.PermissionAction.read
)
def __str__(self):
return self.label
class Role(Model):
label = fields.CharField(max_length=50)
users = fields.ManyToManyField("models.User")
permissions: fields.ManyToManyRelation[Permission] = fields.ManyToManyField("models.Permission")
def __str__(self):
return self.label
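# Hypothetical usage sketch (added note, not in the original file): User is declared
# abstract, so an application would define its own concrete table, e.g.
#   class AdminUser(User):
#       last_login = fields.DatetimeField(null=True)
# AdminUser and last_login are assumed names used only for illustration.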
``` |
{
"source": "JiJingYu/Python-Algorithm",
"score": 3
} |
#### File: JiJingYu/Python-Algorithm/a1072.py
```python
from collections import defaultdict as dd
G = dd(lambda: dd(lambda: 0))
vis = dd(lambda: False)
def dijkstra(s):
d = dd(lambda: 10**9)
d[s] = 0
vis = dd(lambda: False)
for i in range(len(G)):
        u = min([v for v in G if not vis[v]], key=lambda v: d[v], default=-1)
        if u == -1:  # every reachable vertex has been visited
            break
        vis[u] = True
for v in G[u]:
if not vis[v]:
if d[u]+G[u][v]<d[v]:
d[v]=d[u]+G[u][v]
return d
tempPath, path = [], []
def dfs(d):
global gas_list
for u in d:
if d[u]>Ds and u not in gas_list:
return False
return True
N,M,K,Ds = [int(x) for x in input().split()]
for i in range(K):
p1, p2, dist = input().split()
dist = int(dist)
G[p1][p2]=G[p2][p1]=dist
gas_list = [s for s in G if s[0]=='G']
gas_valid = []
mean_dist = Ds / N
min_dist = 0
min_ind = M+1
for gas in gas_list:
d = dijkstra(gas)
d = {u:d[u] for u in d if u not in gas_list}
if dfs(d):
tmp = min(d.values()), sum(d.values())/len(d)
if tmp[1]<mean_dist or int(gas[-1])<min_ind:
min_ind=int(gas[-1])
min_dist = tmp[0]
mean_dist = tmp[1]
if min_ind == M+1:
print("No Solution")
exit(0)
print('G{}'.format(min_ind))
print('{:.1f}'.format(min_dist), '{:.1f}'.format(mean_dist))
"""
4 3 11 5
1 2 2
1 4 2
1 G1 4
1 G2 3
2 3 2
2 G2 1
3 4 2
3 G3 2
4 G1 3
G2 G1 1
G3 G2 2
"""
```
#### File: Python-Algorithm/DP/a1040.py
```python
def manacher(s):
s='#'+"#".join(s)+"#"
max_right = 0
pos = 0
max_len = 1
RL = [1]*len(s)
for i in range(len(s)):
if i<max_right:
RL[i]=min(RL[2*pos-i], max_right-i)
while i-RL[i]>=0 and i+RL[i]<len(s) and s[i-RL[i]]==s[i+RL[i]]:
RL[i]+=1
if i+RL[i]-1>max_right:
max_right,pos = i+RL[i]-1, i
max_len = max(max_len, RL[i])
return max_len-1
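# Worked example (added note, not in the original file): manacher("abab") expands the
# string to "#a#b#a#b#"; the largest radius RL is 4 (e.g. centred on the 'b' at index 3),
# so the function returns 4 - 1 = 3, the length of the longest symmetric substring "aba".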
# read one line and print the length of its longest symmetric (palindromic) substring
print(manacher(input().strip()))
```
#### File: Python-Algorithm/DP/a1098.py
```python
import bisect
def insert_(nums, i):
# i start from 1
s, s_no = nums[:i], nums[i+1:]
bisect.insort_left(s, nums[i])
return s+s_no
n=int(input())
nums = [int(x) for x in input().split()]
tag = [int(x) for x in input().split()]
tmp = nums[:]
for i in range(1, n-1):
tmp = insert_(tmp, i)
if tmp==tag:
print("Insertion Sort")
tmp = insert_(tmp, i+1)
print(" ".join(map(str, tmp)))
exit(0)
print("Heap Sort")
```
#### File: Python-Algorithm/graph/a1132.py
```python
def foo(num):
n = len(num)//2
n1,n2,p = int(num[:n]), int(num[n:]), int(num)
if not n1 or not n2:
print("No")
return
if p % (n1*n2)==0:
print("Yes")
else:
print("No")
N = int(input())
res = []
for i in range(N):
res.append(input().split()[0])
[foo(s) for s in res]
"""
4
167334
10
12345678
123456789123456789
"""
```
#### File: Python-Algorithm/graph/graph_a1003.py
```python
from collections import defaultdict as dd
G = dd(lambda :dd(lambda :0))
vis = dd(lambda :False)
def load_data():
N,M,C1,C2 = [int(x) for x in input().split()]
weight = [int(x) for x in input().split()]
for i in range(M):
c1, c2, L = [int(x) for x in input().split()]
G[c1][c2] = G[c2][c1] = L
return C1, C2, weight
def dijkstra(s, weight):
d = dd(lambda :10**9)
d[s] = 0
nums = dd(lambda :0)
nums[s] = 1
w = dd(lambda :0)
w[s] = weight[s]
for i in range(len(G)):
u = min([v for v in G if not vis[v]], key=lambda v: d[v])
vis[u] = True
for v in G[u]:
if not vis[v]:
if d[u] + G[u][v] < d[v]:
d[v] = d[u] + G[u][v]
w[v] = w[u] + weight[v]
nums[v] = nums[u]
elif d[u] + G[u][v] == d[v]:
nums[v] += nums[u]
if w[u] + weight[v]>w[v]:
w[v] = w[u] + weight[v]
return nums, w
C1, C2, weight = load_data()
nums, w = dijkstra(C1, weight)
print(nums[C2], w[C2])
```
#### File: Python-Algorithm/graph/graph_a1030.py
```python
from collections import defaultdict as dd
G = dd(lambda :dd(lambda : 0))
cost = dd(lambda :dd(lambda : 0))
vis = dd(lambda :False)
def dijkstra(s):
d = dd(lambda :10**9)
d[s]=0
pre = dd(lambda :[])
for i in range(len(G)):
u = min([v for v in G if not vis[v]], key=lambda v:d[v], default=-1)
if u==-1:return
vis[u]=True
for v in G[u]:
if not vis[v]:
if d[u] + G[u][v] < d[v]:
d[v] = d[u] + G[u][v]
pre[v].clear()
pre[v].append(u)
elif d[u] + G[u][v] == d[v]:
pre[v].append(u)
return pre
tempPath, path = [], []
optvalue = 10** 9
def dfs(s, v):
global pre, tempPath, path, optvalue
if v == s:
tempPath.append(v)
value = sum([cost[tempPath[i]][tempPath[i-1]] for i in range(len(tempPath)-1, 0, -1)])
if value < optvalue:
path = tempPath[:]
optvalue = value
tempPath.pop()
return
tempPath.append(v)
for u in pre[v]:
dfs(s, u)
tempPath.pop()
N,M,S,D = [int(x) for x in input().split()]
for i in range(M):
c1, c2, dst, cos = [int(x) for x in input().split()]
G[c1][c2]=G[c2][c1] = dst
cost[c1][c2] = cost[c2][c1] = cos
pre = dijkstra(S)
dfs(S, D)
total_dist = sum(G[path[i]][path[i-1]] for i in range(len(path)-1, 0, -1))
total_cost = optvalue
print(' '.join(map(str, reversed(path))), total_dist, total_cost)
# path, total distance, total cost
```
#### File: Python-Algorithm/graph/graph_dfs_002.py
```python
from collections import defaultdict as dd
G = dd(lambda: dd(lambda: 0))
W = dd(lambda: 0)
vis = dd(lambda: False)
vis_e = dd(lambda: dd(lambda: False))
def load_data():
N, k = [int(x) for x in input().split()]
for i in range(N):
p1, p2, w = [x for x in input().split()]
w = int(w)
G[p1][p2] += w
G[p2][p1] += w
W[p1] += w
W[p2] += w
return k
def dfs(u, graph_weight, paths):
vis[u] = True
paths[-1].append(u)
for v in G[u]:
if not vis_e[v][u]:
vis_e[u][v] = True
vis_e[v][u] = True
graph_weight[-1] += G[u][v]
for v in G[u]:
if not vis[v]:
dfs(v, graph_weight, paths)
def dfs_trave(graph_weight, paths):
for u in G:
if not vis[u]:
graph_weight.append(0)
paths.append([])
dfs(u, graph_weight, paths)
k = load_data()
graph_weight = []
paths = []
dfs_trave(graph_weight, paths)
res = []
for i in range(len(paths)):
if graph_weight[i] > k and len(paths[i]) > 2:
leader = max(paths[i], key=lambda d: W[d])
res.append((leader, len(paths[i])))
res = sorted(res, key=lambda d: d[0])
print(len(res))
for it in res:
print(it[0], it[1])
```
#### File: JiJingYu/Python-Algorithm/longestWord.py
```python
import collections
class Solution:
    # BFS (breadth-first search)
    def solve(self, board):
        # build the queue
        queue = collections.deque([])
        # collect every "O" on the outer border and add it to the queue
        for r in range(len(board)):
            for c in range(len(board[0])):
                if (r in [0, len(board)-1] or c in [0, len(board[0])-1]) and board[r][c] == "O":
                    queue.append((r, c))
        while queue:
            r, c = queue.popleft()
            # expand layer by layer; every "O" connected to the border is marked "D"
            if 0 <= r < len(board) and 0 <= c < len(board[0]) and board[r][c] == "O":
                board[r][c] = "D"
                queue.append((r-1, c)); queue.append((r+1, c))
                queue.append((r, c-1)); queue.append((r, c+1))
        # cells marked "D" are restored to "O", all remaining "O" become "X"
        for r in range(len(board)):
            for c in range(len(board[0])):
                if board[r][c] == "O":
                    board[r][c] = "X"
                elif board[r][c] == "D":
                    board[r][c] = "O"
        return board
class Solution2:
    # BFS
    def solve(self, board):
        queue = collections.deque([])
        # add every "O" on the outer border to the queue
        for i in range(len(board)):
            for j in range(len(board[0])):
                if (i in [0, len(board) - 1] or j in [0, len(board[0]) - 1]) and board[i][j] == 'O':
                    queue.append([i, j])
        # BFS
        while queue:
            i, j = queue.popleft()
            if 0 <= i < len(board) and 0 <= j < len(board[0]) and board[i][j] == 'O':
                board[i][j] = 'D'
                queue.append([i + 1, j])
                queue.append([i, j + 1])
                queue.append([i - 1, j])
                queue.append([i, j - 1])
        # relabel: remaining "O" cells are surrounded and become "X", "D" goes back to "O"
        for i in range(len(board)):
            for j in range(len(board[0])):
                if board[i][j] == 'O':
                    board[i][j] = 'X'
                elif board[i][j] == 'D':
                    board[i][j] = 'O'
        return board
def foo():
solution = Solution2()
board = [['X', 'X', 'X', 'X'],
['X', 'O', 'O', 'X'],
['X', 'X', 'O', 'X'],
['X', 'O', 'X', 'X']]
print(solution.solve(board))
if __name__ == '__main__':
foo()
```
#### File: JiJingYu/Python-Algorithm/pat.py
```python
from collections import defaultdict, Counter
class BCG():
def __init__(self):
self.father = {}
def findFather(self, x):
tmp = x
while self.father[x] != x:
x = self.father[x]
        while tmp != self.father[tmp]:
            # path compression: point the current node at the root before moving up
            self.father[tmp], tmp = x, self.father[tmp]
return x
def Union(self, a, b):
fA = self.findFather(a)
fB = self.findFather(b)
if fA != fB:
self.father[fA] = fB
def foo():
N = int(input())
fat = BCG()
people = [0 for i in range(N)]
for i in range(N):
tmp = [int(x) for x in input().split()[1:]]
people[i] = tmp[0]
for t in tmp:
if t not in fat.father:
fat.father[t] = t
for j in range(1, len(tmp)):
fat.Union(tmp[j], tmp[j - 1])
count = Counter([fat.findFather(p) for p in people])
ret = sorted(count.values(), reverse=True)
print(len(ret))
print(' '.join(map(str, ret)))
foo()
``` |
{
"source": "JiJingYu/tensorflow-exercise",
"score": 3
} |
#### File: tensorflow-exercise/HSI_evaluate/hsi_evaluate.py
```python
import numpy as np
from numpy.linalg import norm
from skimage.measure import compare_psnr, compare_ssim, compare_mse
def mpsnr(x_true, x_pred):
    """
    :param x_true: ground-truth hyperspectral image, shape (H, W, C)
    :param x_pred: reconstructed hyperspectral image, shape (H, W, C)
    :return: mean PSNR between the original and the reconstructed hyperspectral
             data, averaged over all bands
    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
    """
    n_bands = x_true.shape[2]
    p = [compare_psnr(x_true[:, :, k], x_pred[:, :, k], dynamic_range=np.max(x_true[:, :, k])) for k in range(n_bands)]
    return np.mean(p)
def sam(x_true, x_pred):
    """
    :param x_true: ground-truth hyperspectral image, shape (H, W, C)
    :param x_pred: reconstructed hyperspectral image, shape (H, W, C)
    :return: mean spectral angle (SAM, in degrees) between the original and the
             reconstructed hyperspectral data
    """
    assert x_true.ndim == 3 and x_true.shape == x_pred.shape
    sam_rad = np.zeros(x_true.shape[:2])
    for x in range(x_true.shape[0]):
        for y in range(x_true.shape[1]):
            tmp_pred = x_pred[x, y].ravel()
            tmp_true = x_true[x, y].ravel()
            # SAM is the arccos of the cosine similarity between the two spectra
            cos_angle = np.dot(tmp_pred, tmp_true) / (norm(tmp_pred) * norm(tmp_true))
            sam_rad[x, y] = np.arccos(np.clip(cos_angle, -1.0, 1.0))
    sam_deg = sam_rad.mean() * 180 / np.pi
    return sam_deg
def mssim(x_true, x_pred):
    """
    :param x_true: ground-truth hyperspectral image, shape (H, W, C)
    :param x_pred: reconstructed hyperspectral image, shape (H, W, C)
    :return: structural similarity (SSIM) between the original and the
             reconstructed hyperspectral data
    """
    SSIM = compare_ssim(X=x_true, Y=x_pred, multichannel=True)
    return SSIM
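# Hypothetical usage sketch (added note, not in the original file): evaluates the three
# metrics on a random 32x32 cube with 31 bands. Assumes an older scikit-image release
# that still exposes compare_psnr / compare_ssim with these keyword arguments.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    x_true = rng.rand(32, 32, 31)
    x_pred = np.clip(x_true + 0.01 * rng.randn(32, 32, 31), 0.0, 1.0)
    print("MPSNR :", mpsnr(x_true, x_pred))
    print("SAM   :", sam(x_true, x_pred))
    print("MSSIM :", mssim(x_true, x_pred))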
``` |
{
"source": "jiji-online/neptune-cli",
"score": 2
} |
#### File: swagger_client/models/cli_usage_event.py
```python
from pprint import pformat
from six import iteritems
import re
class CliUsageEvent(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, cli_version=None, command_name=None, correct_usage=None, full_command=None, local_config=None, locale=None, os=None, python_version=None):
"""
CliUsageEvent - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'cli_version': 'str',
'command_name': 'str',
'correct_usage': 'bool',
'full_command': 'str',
'local_config': 'bool',
'locale': 'str',
'os': 'str',
'python_version': 'str'
}
self.attribute_map = {
'cli_version': 'cliVersion',
'command_name': 'commandName',
'correct_usage': 'correctUsage',
'full_command': 'fullCommand',
'local_config': 'localConfig',
'locale': 'locale',
'os': 'os',
'python_version': 'pythonVersion'
}
self._cli_version = cli_version
self._command_name = command_name
self._correct_usage = correct_usage
self._full_command = full_command
self._local_config = local_config
self._locale = locale
self._os = os
self._python_version = python_version
@property
def cli_version(self):
"""
Gets the cli_version of this CliUsageEvent.
:return: The cli_version of this CliUsageEvent.
:rtype: str
"""
return self._cli_version
@cli_version.setter
def cli_version(self, cli_version):
"""
Sets the cli_version of this CliUsageEvent.
:param cli_version: The cli_version of this CliUsageEvent.
:type: str
"""
self._cli_version = cli_version
@property
def command_name(self):
"""
Gets the command_name of this CliUsageEvent.
:return: The command_name of this CliUsageEvent.
:rtype: str
"""
return self._command_name
@command_name.setter
def command_name(self, command_name):
"""
Sets the command_name of this CliUsageEvent.
:param command_name: The command_name of this CliUsageEvent.
:type: str
"""
self._command_name = command_name
@property
def correct_usage(self):
"""
Gets the correct_usage of this CliUsageEvent.
:return: The correct_usage of this CliUsageEvent.
:rtype: bool
"""
return self._correct_usage
@correct_usage.setter
def correct_usage(self, correct_usage):
"""
Sets the correct_usage of this CliUsageEvent.
:param correct_usage: The correct_usage of this CliUsageEvent.
:type: bool
"""
self._correct_usage = correct_usage
@property
def full_command(self):
"""
Gets the full_command of this CliUsageEvent.
:return: The full_command of this CliUsageEvent.
:rtype: str
"""
return self._full_command
@full_command.setter
def full_command(self, full_command):
"""
Sets the full_command of this CliUsageEvent.
:param full_command: The full_command of this CliUsageEvent.
:type: str
"""
self._full_command = full_command
@property
def local_config(self):
"""
Gets the local_config of this CliUsageEvent.
:return: The local_config of this CliUsageEvent.
:rtype: bool
"""
return self._local_config
@local_config.setter
def local_config(self, local_config):
"""
Sets the local_config of this CliUsageEvent.
:param local_config: The local_config of this CliUsageEvent.
:type: bool
"""
self._local_config = local_config
@property
def locale(self):
"""
Gets the locale of this CliUsageEvent.
:return: The locale of this CliUsageEvent.
:rtype: str
"""
return self._locale
@locale.setter
def locale(self, locale):
"""
Sets the locale of this CliUsageEvent.
:param locale: The locale of this CliUsageEvent.
:type: str
"""
self._locale = locale
@property
def os(self):
"""
Gets the os of this CliUsageEvent.
:return: The os of this CliUsageEvent.
:rtype: str
"""
return self._os
@os.setter
def os(self, os):
"""
Sets the os of this CliUsageEvent.
:param os: The os of this CliUsageEvent.
:type: str
"""
self._os = os
@property
def python_version(self):
"""
Gets the python_version of this CliUsageEvent.
:return: The python_version of this CliUsageEvent.
:rtype: str
"""
return self._python_version
@python_version.setter
def python_version(self, python_version):
"""
Sets the python_version of this CliUsageEvent.
:param python_version: The python_version of this CliUsageEvent.
:type: str
"""
self._python_version = python_version
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
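# Hypothetical usage sketch (added note, not in the original file): the generated model
# is a plain value object; the field values below are made up for illustration.
#   event = CliUsageEvent(cli_version="1.0.0", command_name="run", correct_usage=True)
#   event.to_dict()  # -> {'cli_version': '1.0.0', 'command_name': 'run', 'correct_usage': True, ...}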
```
#### File: swagger_client/models/modify_leaderboard_event.py
```python
from pprint import pformat
from six import iteritems
import re
class ModifyLeaderboardEvent(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, aliases=None, columns=None, grid_search_view=None, numeric_channels=None, operation=None, parameters=None, properties=None, sorted_by=None, system_columns=None, text_channels=None):
"""
ModifyLeaderboardEvent - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'aliases': 'int',
'columns': 'int',
'grid_search_view': 'bool',
'numeric_channels': 'int',
'operation': 'str',
'parameters': 'int',
'properties': 'int',
'sorted_by': 'str',
'system_columns': 'int',
'text_channels': 'int'
}
self.attribute_map = {
'aliases': 'aliases',
'columns': 'columns',
'grid_search_view': 'gridSearchView',
'numeric_channels': 'numericChannels',
'operation': 'operation',
'parameters': 'parameters',
'properties': 'properties',
'sorted_by': 'sortedBy',
'system_columns': 'systemColumns',
'text_channels': 'textChannels'
}
self._aliases = aliases
self._columns = columns
self._grid_search_view = grid_search_view
self._numeric_channels = numeric_channels
self._operation = operation
self._parameters = parameters
self._properties = properties
self._sorted_by = sorted_by
self._system_columns = system_columns
self._text_channels = text_channels
@property
def aliases(self):
"""
Gets the aliases of this ModifyLeaderboardEvent.
:return: The aliases of this ModifyLeaderboardEvent.
:rtype: int
"""
return self._aliases
@aliases.setter
def aliases(self, aliases):
"""
Sets the aliases of this ModifyLeaderboardEvent.
:param aliases: The aliases of this ModifyLeaderboardEvent.
:type: int
"""
self._aliases = aliases
@property
def columns(self):
"""
Gets the columns of this ModifyLeaderboardEvent.
:return: The columns of this ModifyLeaderboardEvent.
:rtype: int
"""
return self._columns
@columns.setter
def columns(self, columns):
"""
Sets the columns of this ModifyLeaderboardEvent.
:param columns: The columns of this ModifyLeaderboardEvent.
:type: int
"""
self._columns = columns
@property
def grid_search_view(self):
"""
Gets the grid_search_view of this ModifyLeaderboardEvent.
:return: The grid_search_view of this ModifyLeaderboardEvent.
:rtype: bool
"""
return self._grid_search_view
@grid_search_view.setter
def grid_search_view(self, grid_search_view):
"""
Sets the grid_search_view of this ModifyLeaderboardEvent.
:param grid_search_view: The grid_search_view of this ModifyLeaderboardEvent.
:type: bool
"""
self._grid_search_view = grid_search_view
@property
def numeric_channels(self):
"""
Gets the numeric_channels of this ModifyLeaderboardEvent.
:return: The numeric_channels of this ModifyLeaderboardEvent.
:rtype: int
"""
return self._numeric_channels
@numeric_channels.setter
def numeric_channels(self, numeric_channels):
"""
Sets the numeric_channels of this ModifyLeaderboardEvent.
:param numeric_channels: The numeric_channels of this ModifyLeaderboardEvent.
:type: int
"""
self._numeric_channels = numeric_channels
@property
def operation(self):
"""
Gets the operation of this ModifyLeaderboardEvent.
:return: The operation of this ModifyLeaderboardEvent.
:rtype: str
"""
return self._operation
@operation.setter
def operation(self, operation):
"""
Sets the operation of this ModifyLeaderboardEvent.
:param operation: The operation of this ModifyLeaderboardEvent.
:type: str
"""
allowed_values = ["MOVE_COLUMN", "REMOVE_COLUMN", "ADD_COLUMN", "SORT_COLUMN"]
if operation not in allowed_values:
raise ValueError(
"Invalid value for `operation`, must be one of {0}"
.format(allowed_values)
)
self._operation = operation
@property
def parameters(self):
"""
Gets the parameters of this ModifyLeaderboardEvent.
:return: The parameters of this ModifyLeaderboardEvent.
:rtype: int
"""
return self._parameters
@parameters.setter
def parameters(self, parameters):
"""
Sets the parameters of this ModifyLeaderboardEvent.
:param parameters: The parameters of this ModifyLeaderboardEvent.
:type: int
"""
self._parameters = parameters
@property
def properties(self):
"""
Gets the properties of this ModifyLeaderboardEvent.
:return: The properties of this ModifyLeaderboardEvent.
:rtype: int
"""
return self._properties
@properties.setter
def properties(self, properties):
"""
Sets the properties of this ModifyLeaderboardEvent.
:param properties: The properties of this ModifyLeaderboardEvent.
:type: int
"""
self._properties = properties
@property
def sorted_by(self):
"""
Gets the sorted_by of this ModifyLeaderboardEvent.
:return: The sorted_by of this ModifyLeaderboardEvent.
:rtype: str
"""
return self._sorted_by
@sorted_by.setter
def sorted_by(self, sorted_by):
"""
Sets the sorted_by of this ModifyLeaderboardEvent.
:param sorted_by: The sorted_by of this ModifyLeaderboardEvent.
:type: str
"""
self._sorted_by = sorted_by
@property
def system_columns(self):
"""
Gets the system_columns of this ModifyLeaderboardEvent.
:return: The system_columns of this ModifyLeaderboardEvent.
:rtype: int
"""
return self._system_columns
@system_columns.setter
def system_columns(self, system_columns):
"""
Sets the system_columns of this ModifyLeaderboardEvent.
:param system_columns: The system_columns of this ModifyLeaderboardEvent.
:type: int
"""
self._system_columns = system_columns
@property
def text_channels(self):
"""
Gets the text_channels of this ModifyLeaderboardEvent.
:return: The text_channels of this ModifyLeaderboardEvent.
:rtype: int
"""
return self._text_channels
@text_channels.setter
def text_channels(self, text_channels):
"""
Sets the text_channels of this ModifyLeaderboardEvent.
:param text_channels: The text_channels of this ModifyLeaderboardEvent.
:type: int
"""
self._text_channels = text_channels
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
```
#### File: swagger_client/models/action_invocation.py
```python
from pprint import pformat
from six import iteritems
import re
class ActionInvocation(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, action_invocation_state=None, action_id=None, finished=None, result=None, action_invocation_id=None, action_name=None, started=None, argument=None):
"""
ActionInvocation - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'action_invocation_state': 'ActionEventType',
'action_id': 'str',
'finished': 'datetime',
'result': 'str',
'action_invocation_id': 'str',
'action_name': 'str',
'started': 'datetime',
'argument': 'str'
}
self.attribute_map = {
'action_invocation_state': 'actionInvocationState',
'action_id': 'actionId',
'finished': 'finished',
'result': 'result',
'action_invocation_id': 'actionInvocationId',
'action_name': 'actionName',
'started': 'started',
'argument': 'argument'
}
self._action_invocation_state = action_invocation_state
self._action_id = action_id
self._finished = finished
self._result = result
self._action_invocation_id = action_invocation_id
self._action_name = action_name
self._started = started
self._argument = argument
@property
def action_invocation_state(self):
"""
Gets the action_invocation_state of this ActionInvocation.
:return: The action_invocation_state of this ActionInvocation.
:rtype: ActionEventType
"""
return self._action_invocation_state
@action_invocation_state.setter
def action_invocation_state(self, action_invocation_state):
"""
Sets the action_invocation_state of this ActionInvocation.
:param action_invocation_state: The action_invocation_state of this ActionInvocation.
:type: ActionEventType
"""
self._action_invocation_state = action_invocation_state
@property
def action_id(self):
"""
Gets the action_id of this ActionInvocation.
:return: The action_id of this ActionInvocation.
:rtype: str
"""
return self._action_id
@action_id.setter
def action_id(self, action_id):
"""
Sets the action_id of this ActionInvocation.
:param action_id: The action_id of this ActionInvocation.
:type: str
"""
self._action_id = action_id
@property
def finished(self):
"""
Gets the finished of this ActionInvocation.
:return: The finished of this ActionInvocation.
:rtype: datetime
"""
return self._finished
@finished.setter
def finished(self, finished):
"""
Sets the finished of this ActionInvocation.
:param finished: The finished of this ActionInvocation.
:type: datetime
"""
self._finished = finished
@property
def result(self):
"""
Gets the result of this ActionInvocation.
:return: The result of this ActionInvocation.
:rtype: str
"""
return self._result
@result.setter
def result(self, result):
"""
Sets the result of this ActionInvocation.
:param result: The result of this ActionInvocation.
:type: str
"""
self._result = result
@property
def action_invocation_id(self):
"""
Gets the action_invocation_id of this ActionInvocation.
:return: The action_invocation_id of this ActionInvocation.
:rtype: str
"""
return self._action_invocation_id
@action_invocation_id.setter
def action_invocation_id(self, action_invocation_id):
"""
Sets the action_invocation_id of this ActionInvocation.
:param action_invocation_id: The action_invocation_id of this ActionInvocation.
:type: str
"""
self._action_invocation_id = action_invocation_id
@property
def action_name(self):
"""
Gets the action_name of this ActionInvocation.
:return: The action_name of this ActionInvocation.
:rtype: str
"""
return self._action_name
@action_name.setter
def action_name(self, action_name):
"""
Sets the action_name of this ActionInvocation.
:param action_name: The action_name of this ActionInvocation.
:type: str
"""
self._action_name = action_name
@property
def started(self):
"""
Gets the started of this ActionInvocation.
:return: The started of this ActionInvocation.
:rtype: datetime
"""
return self._started
@started.setter
def started(self, started):
"""
Sets the started of this ActionInvocation.
:param started: The started of this ActionInvocation.
:type: datetime
"""
self._started = started
@property
def argument(self):
"""
Gets the argument of this ActionInvocation.
:return: The argument of this ActionInvocation.
:rtype: str
"""
return self._argument
@argument.setter
def argument(self, argument):
"""
Sets the argument of this ActionInvocation.
:param argument: The argument of this ActionInvocation.
:type: str
"""
self._argument = argument
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
```
#### File: swagger_client/models/experiment.py
```python
from pprint import pformat
from six import iteritems
import re
class Experiment(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, channels_last_values=None, storage_size=None, channels=None, code_access=None, name=None, project_name=None, hostname=None, trashed=None, state=None, description=None, tags=None, time_of_completion=None, channels_size=None, time_of_creation=None, third_party_data=None, project_id=None, organization_name=None, is_code_accessible=None, group_id=None, running_time=None, id=None, properties=None, short_id=None, time_of_entered_running_state=None, worker_type=None, environment=None, responding=None, actions=None, organization_id=None, owner=None, state_transitions=None, notebook_data=None, parameters=None):
"""
Experiment - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'channels_last_values': 'list[ChannelWithValue]',
'storage_size': 'int',
'channels': 'list[Channel]',
'code_access': 'ExperimentCodeAccess',
'name': 'str',
'project_name': 'str',
'hostname': 'str',
'trashed': 'bool',
'state': 'ExperimentState',
'description': 'str',
'tags': 'list[str]',
'time_of_completion': 'datetime',
'channels_size': 'int',
'time_of_creation': 'datetime',
'third_party_data': 'ThirdPartyData',
'project_id': 'str',
'organization_name': 'str',
'is_code_accessible': 'bool',
'group_id': 'str',
'running_time': 'int',
'id': 'str',
'properties': 'list[KeyValueProperty]',
'short_id': 'str',
'time_of_entered_running_state': 'datetime',
'worker_type': 'str',
'environment': 'str',
'responding': 'bool',
'actions': 'list[Action]',
'organization_id': 'str',
'owner': 'str',
'state_transitions': 'StateTransitions',
'notebook_data': 'NotebookData',
'parameters': 'list[Parameter]'
}
self.attribute_map = {
'channels_last_values': 'channelsLastValues',
'storage_size': 'storageSize',
'channels': 'channels',
'code_access': 'codeAccess',
'name': 'name',
'project_name': 'projectName',
'hostname': 'hostname',
'trashed': 'trashed',
'state': 'state',
'description': 'description',
'tags': 'tags',
'time_of_completion': 'timeOfCompletion',
'channels_size': 'channelsSize',
'time_of_creation': 'timeOfCreation',
'third_party_data': 'thirdPartyData',
'project_id': 'projectId',
'organization_name': 'organizationName',
'is_code_accessible': 'isCodeAccessible',
'group_id': 'groupId',
'running_time': 'runningTime',
'id': 'id',
'properties': 'properties',
'short_id': 'shortId',
'time_of_entered_running_state': 'timeOfEnteredRunningState',
'worker_type': 'workerType',
'environment': 'environment',
'responding': 'responding',
'actions': 'actions',
'organization_id': 'organizationId',
'owner': 'owner',
'state_transitions': 'stateTransitions',
'notebook_data': 'notebookData',
'parameters': 'parameters'
}
self._channels_last_values = channels_last_values
self._storage_size = storage_size
self._channels = channels
self._code_access = code_access
self._name = name
self._project_name = project_name
self._hostname = hostname
self._trashed = trashed
self._state = state
self._description = description
self._tags = tags
self._time_of_completion = time_of_completion
self._channels_size = channels_size
self._time_of_creation = time_of_creation
self._third_party_data = third_party_data
self._project_id = project_id
self._organization_name = organization_name
self._is_code_accessible = is_code_accessible
self._group_id = group_id
self._running_time = running_time
self._id = id
self._properties = properties
self._short_id = short_id
self._time_of_entered_running_state = time_of_entered_running_state
self._worker_type = worker_type
self._environment = environment
self._responding = responding
self._actions = actions
self._organization_id = organization_id
self._owner = owner
self._state_transitions = state_transitions
self._notebook_data = notebook_data
self._parameters = parameters
@property
def channels_last_values(self):
"""
Gets the channels_last_values of this Experiment.
:return: The channels_last_values of this Experiment.
:rtype: list[ChannelWithValue]
"""
return self._channels_last_values
@channels_last_values.setter
def channels_last_values(self, channels_last_values):
"""
Sets the channels_last_values of this Experiment.
:param channels_last_values: The channels_last_values of this Experiment.
:type: list[ChannelWithValue]
"""
self._channels_last_values = channels_last_values
@property
def storage_size(self):
"""
Gets the storage_size of this Experiment.
:return: The storage_size of this Experiment.
:rtype: int
"""
return self._storage_size
@storage_size.setter
def storage_size(self, storage_size):
"""
Sets the storage_size of this Experiment.
:param storage_size: The storage_size of this Experiment.
:type: int
"""
self._storage_size = storage_size
@property
def channels(self):
"""
Gets the channels of this Experiment.
:return: The channels of this Experiment.
:rtype: list[Channel]
"""
return self._channels
@channels.setter
def channels(self, channels):
"""
Sets the channels of this Experiment.
:param channels: The channels of this Experiment.
:type: list[Channel]
"""
self._channels = channels
@property
def code_access(self):
"""
Gets the code_access of this Experiment.
:return: The code_access of this Experiment.
:rtype: ExperimentCodeAccess
"""
return self._code_access
@code_access.setter
def code_access(self, code_access):
"""
Sets the code_access of this Experiment.
:param code_access: The code_access of this Experiment.
:type: ExperimentCodeAccess
"""
self._code_access = code_access
@property
def name(self):
"""
Gets the name of this Experiment.
:return: The name of this Experiment.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this Experiment.
:param name: The name of this Experiment.
:type: str
"""
self._name = name
@property
def project_name(self):
"""
Gets the project_name of this Experiment.
:return: The project_name of this Experiment.
:rtype: str
"""
return self._project_name
@project_name.setter
def project_name(self, project_name):
"""
Sets the project_name of this Experiment.
:param project_name: The project_name of this Experiment.
:type: str
"""
self._project_name = project_name
@property
def hostname(self):
"""
Gets the hostname of this Experiment.
:return: The hostname of this Experiment.
:rtype: str
"""
return self._hostname
@hostname.setter
def hostname(self, hostname):
"""
Sets the hostname of this Experiment.
:param hostname: The hostname of this Experiment.
:type: str
"""
self._hostname = hostname
@property
def trashed(self):
"""
Gets the trashed of this Experiment.
:return: The trashed of this Experiment.
:rtype: bool
"""
return self._trashed
@trashed.setter
def trashed(self, trashed):
"""
Sets the trashed of this Experiment.
:param trashed: The trashed of this Experiment.
:type: bool
"""
self._trashed = trashed
@property
def state(self):
"""
Gets the state of this Experiment.
:return: The state of this Experiment.
:rtype: ExperimentState
"""
return self._state
@state.setter
def state(self, state):
"""
Sets the state of this Experiment.
:param state: The state of this Experiment.
:type: ExperimentState
"""
self._state = state
@property
def description(self):
"""
Gets the description of this Experiment.
:return: The description of this Experiment.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this Experiment.
:param description: The description of this Experiment.
:type: str
"""
self._description = description
@property
def tags(self):
"""
Gets the tags of this Experiment.
:return: The tags of this Experiment.
:rtype: list[str]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""
Sets the tags of this Experiment.
:param tags: The tags of this Experiment.
:type: list[str]
"""
self._tags = tags
@property
def time_of_completion(self):
"""
Gets the time_of_completion of this Experiment.
:return: The time_of_completion of this Experiment.
:rtype: datetime
"""
return self._time_of_completion
@time_of_completion.setter
def time_of_completion(self, time_of_completion):
"""
Sets the time_of_completion of this Experiment.
:param time_of_completion: The time_of_completion of this Experiment.
:type: datetime
"""
self._time_of_completion = time_of_completion
@property
def channels_size(self):
"""
Gets the channels_size of this Experiment.
:return: The channels_size of this Experiment.
:rtype: int
"""
return self._channels_size
@channels_size.setter
def channels_size(self, channels_size):
"""
Sets the channels_size of this Experiment.
:param channels_size: The channels_size of this Experiment.
:type: int
"""
self._channels_size = channels_size
@property
def time_of_creation(self):
"""
Gets the time_of_creation of this Experiment.
:return: The time_of_creation of this Experiment.
:rtype: datetime
"""
return self._time_of_creation
@time_of_creation.setter
def time_of_creation(self, time_of_creation):
"""
Sets the time_of_creation of this Experiment.
:param time_of_creation: The time_of_creation of this Experiment.
:type: datetime
"""
self._time_of_creation = time_of_creation
@property
def third_party_data(self):
"""
Gets the third_party_data of this Experiment.
:return: The third_party_data of this Experiment.
:rtype: ThirdPartyData
"""
return self._third_party_data
@third_party_data.setter
def third_party_data(self, third_party_data):
"""
Sets the third_party_data of this Experiment.
:param third_party_data: The third_party_data of this Experiment.
:type: ThirdPartyData
"""
self._third_party_data = third_party_data
@property
def project_id(self):
"""
Gets the project_id of this Experiment.
:return: The project_id of this Experiment.
:rtype: str
"""
return self._project_id
@project_id.setter
def project_id(self, project_id):
"""
Sets the project_id of this Experiment.
:param project_id: The project_id of this Experiment.
:type: str
"""
self._project_id = project_id
@property
def organization_name(self):
"""
Gets the organization_name of this Experiment.
:return: The organization_name of this Experiment.
:rtype: str
"""
return self._organization_name
@organization_name.setter
def organization_name(self, organization_name):
"""
Sets the organization_name of this Experiment.
:param organization_name: The organization_name of this Experiment.
:type: str
"""
self._organization_name = organization_name
@property
def is_code_accessible(self):
"""
Gets the is_code_accessible of this Experiment.
:return: The is_code_accessible of this Experiment.
:rtype: bool
"""
return self._is_code_accessible
@is_code_accessible.setter
def is_code_accessible(self, is_code_accessible):
"""
Sets the is_code_accessible of this Experiment.
:param is_code_accessible: The is_code_accessible of this Experiment.
:type: bool
"""
self._is_code_accessible = is_code_accessible
@property
def group_id(self):
"""
Gets the group_id of this Experiment.
:return: The group_id of this Experiment.
:rtype: str
"""
return self._group_id
@group_id.setter
def group_id(self, group_id):
"""
Sets the group_id of this Experiment.
:param group_id: The group_id of this Experiment.
:type: str
"""
self._group_id = group_id
@property
def running_time(self):
"""
Gets the running_time of this Experiment.
:return: The running_time of this Experiment.
:rtype: int
"""
return self._running_time
@running_time.setter
def running_time(self, running_time):
"""
Sets the running_time of this Experiment.
:param running_time: The running_time of this Experiment.
:type: int
"""
self._running_time = running_time
@property
def id(self):
"""
Gets the id of this Experiment.
:return: The id of this Experiment.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this Experiment.
:param id: The id of this Experiment.
:type: str
"""
self._id = id
@property
def properties(self):
"""
Gets the properties of this Experiment.
:return: The properties of this Experiment.
:rtype: list[KeyValueProperty]
"""
return self._properties
@properties.setter
def properties(self, properties):
"""
Sets the properties of this Experiment.
:param properties: The properties of this Experiment.
:type: list[KeyValueProperty]
"""
self._properties = properties
@property
def short_id(self):
"""
Gets the short_id of this Experiment.
:return: The short_id of this Experiment.
:rtype: str
"""
return self._short_id
@short_id.setter
def short_id(self, short_id):
"""
Sets the short_id of this Experiment.
:param short_id: The short_id of this Experiment.
:type: str
"""
self._short_id = short_id
@property
def time_of_entered_running_state(self):
"""
Gets the time_of_entered_running_state of this Experiment.
:return: The time_of_entered_running_state of this Experiment.
:rtype: datetime
"""
return self._time_of_entered_running_state
@time_of_entered_running_state.setter
def time_of_entered_running_state(self, time_of_entered_running_state):
"""
Sets the time_of_entered_running_state of this Experiment.
:param time_of_entered_running_state: The time_of_entered_running_state of this Experiment.
:type: datetime
"""
self._time_of_entered_running_state = time_of_entered_running_state
@property
def worker_type(self):
"""
Gets the worker_type of this Experiment.
:return: The worker_type of this Experiment.
:rtype: str
"""
return self._worker_type
@worker_type.setter
def worker_type(self, worker_type):
"""
Sets the worker_type of this Experiment.
:param worker_type: The worker_type of this Experiment.
:type: str
"""
self._worker_type = worker_type
@property
def environment(self):
"""
Gets the environment of this Experiment.
:return: The environment of this Experiment.
:rtype: str
"""
return self._environment
@environment.setter
def environment(self, environment):
"""
Sets the environment of this Experiment.
:param environment: The environment of this Experiment.
:type: str
"""
self._environment = environment
@property
def responding(self):
"""
Gets the responding of this Experiment.
:return: The responding of this Experiment.
:rtype: bool
"""
return self._responding
@responding.setter
def responding(self, responding):
"""
Sets the responding of this Experiment.
:param responding: The responding of this Experiment.
:type: bool
"""
self._responding = responding
@property
def actions(self):
"""
Gets the actions of this Experiment.
:return: The actions of this Experiment.
:rtype: list[Action]
"""
return self._actions
@actions.setter
def actions(self, actions):
"""
Sets the actions of this Experiment.
:param actions: The actions of this Experiment.
:type: list[Action]
"""
self._actions = actions
@property
def organization_id(self):
"""
Gets the organization_id of this Experiment.
:return: The organization_id of this Experiment.
:rtype: str
"""
return self._organization_id
@organization_id.setter
def organization_id(self, organization_id):
"""
Sets the organization_id of this Experiment.
:param organization_id: The organization_id of this Experiment.
:type: str
"""
self._organization_id = organization_id
@property
def owner(self):
"""
Gets the owner of this Experiment.
:return: The owner of this Experiment.
:rtype: str
"""
return self._owner
@owner.setter
def owner(self, owner):
"""
Sets the owner of this Experiment.
:param owner: The owner of this Experiment.
:type: str
"""
self._owner = owner
@property
def state_transitions(self):
"""
Gets the state_transitions of this Experiment.
:return: The state_transitions of this Experiment.
:rtype: StateTransitions
"""
return self._state_transitions
@state_transitions.setter
def state_transitions(self, state_transitions):
"""
Sets the state_transitions of this Experiment.
:param state_transitions: The state_transitions of this Experiment.
:type: StateTransitions
"""
self._state_transitions = state_transitions
@property
def notebook_data(self):
"""
Gets the notebook_data of this Experiment.
:return: The notebook_data of this Experiment.
:rtype: NotebookData
"""
return self._notebook_data
@notebook_data.setter
def notebook_data(self, notebook_data):
"""
Sets the notebook_data of this Experiment.
:param notebook_data: The notebook_data of this Experiment.
:type: NotebookData
"""
self._notebook_data = notebook_data
@property
def parameters(self):
"""
Gets the parameters of this Experiment.
:return: The parameters of this Experiment.
:rtype: list[Parameter]
"""
return self._parameters
@parameters.setter
def parameters(self, parameters):
"""
Sets the parameters of this Experiment.
:param parameters: The parameters of this Experiment.
:type: list[Parameter]
"""
self._parameters = parameters
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
```
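The generated `Experiment` model above is a plain property bag: each field has a matching getter/setter pair, and `to_dict()` walks `swagger_types` to recursively serialize nested models and lists, which `__repr__` then pretty-prints. A minimal sketch of the round trip, assuming the class is importable as `swagger_client.models.experiment.Experiment` and that its constructor defaults every field to `None`, as `LeaderboardEntry` below does (both are assumptions):
```python
# Illustrative only: the import path and zero-argument constructor are assumptions.
from swagger_client.models.experiment import Experiment

exp = Experiment()
exp.name = 'mnist-baseline'        # goes through the generated setter
exp.tags = ['cnn', 'baseline']
exp.storage_size = 1024

as_dict = exp.to_dict()            # serialization driven by swagger_types
print(as_dict['name'], as_dict['tags'], as_dict['storage_size'])
print(repr(exp))                   # __repr__ -> to_str() -> pformat(to_dict())
```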
#### File: swagger_client/models/leaderboard_entry.py
```python
from pprint import pformat
from six import iteritems
import re
class LeaderboardEntry(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, channels_last_values=None, name=None, source_size=None, size=None, source_md5=None, trashed=None, state=None, description=None, tags=None, time_of_completion=None, commit_id=None, time_of_creation=None, project_id=None, running_time=None, id=None, properties=None, short_id=None, experiment_states=None, entry_type=None, worker_type=None, environment=None, responding=None, owner=None, parameters=None, deleted=None):
"""
LeaderboardEntry - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'channels_last_values': 'list[ChannelWithValue]',
'name': 'str',
'source_size': 'int',
'size': 'int',
'source_md5': 'str',
'trashed': 'bool',
'state': 'ExperimentState',
'description': 'str',
'tags': 'list[str]',
'time_of_completion': 'datetime',
'commit_id': 'str',
'time_of_creation': 'datetime',
'project_id': 'str',
'running_time': 'int',
'id': 'str',
'properties': 'list[KeyValueProperty]',
'short_id': 'str',
'experiment_states': 'ExperimentStates',
'entry_type': 'EntryTypeEnum',
'worker_type': 'str',
'environment': 'str',
'responding': 'bool',
'owner': 'str',
'parameters': 'list[Parameter]',
'deleted': 'bool'
}
self.attribute_map = {
'channels_last_values': 'channelsLastValues',
'name': 'name',
'source_size': 'sourceSize',
'size': 'size',
'source_md5': 'sourceMd5',
'trashed': 'trashed',
'state': 'state',
'description': 'description',
'tags': 'tags',
'time_of_completion': 'timeOfCompletion',
'commit_id': 'commitId',
'time_of_creation': 'timeOfCreation',
'project_id': 'projectId',
'running_time': 'runningTime',
'id': 'id',
'properties': 'properties',
'short_id': 'shortId',
'experiment_states': 'experimentStates',
'entry_type': 'entryType',
'worker_type': 'workerType',
'environment': 'environment',
'responding': 'responding',
'owner': 'owner',
'parameters': 'parameters',
'deleted': 'deleted'
}
self._channels_last_values = channels_last_values
self._name = name
self._source_size = source_size
self._size = size
self._source_md5 = source_md5
self._trashed = trashed
self._state = state
self._description = description
self._tags = tags
self._time_of_completion = time_of_completion
self._commit_id = commit_id
self._time_of_creation = time_of_creation
self._project_id = project_id
self._running_time = running_time
self._id = id
self._properties = properties
self._short_id = short_id
self._experiment_states = experiment_states
self._entry_type = entry_type
self._worker_type = worker_type
self._environment = environment
self._responding = responding
self._owner = owner
self._parameters = parameters
self._deleted = deleted
@property
def channels_last_values(self):
"""
Gets the channels_last_values of this LeaderboardEntry.
:return: The channels_last_values of this LeaderboardEntry.
:rtype: list[ChannelWithValue]
"""
return self._channels_last_values
@channels_last_values.setter
def channels_last_values(self, channels_last_values):
"""
Sets the channels_last_values of this LeaderboardEntry.
:param channels_last_values: The channels_last_values of this LeaderboardEntry.
:type: list[ChannelWithValue]
"""
self._channels_last_values = channels_last_values
@property
def name(self):
"""
Gets the name of this LeaderboardEntry.
:return: The name of this LeaderboardEntry.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this LeaderboardEntry.
:param name: The name of this LeaderboardEntry.
:type: str
"""
self._name = name
@property
def source_size(self):
"""
Gets the source_size of this LeaderboardEntry.
:return: The source_size of this LeaderboardEntry.
:rtype: int
"""
return self._source_size
@source_size.setter
def source_size(self, source_size):
"""
Sets the source_size of this LeaderboardEntry.
:param source_size: The source_size of this LeaderboardEntry.
:type: int
"""
self._source_size = source_size
@property
def size(self):
"""
Gets the size of this LeaderboardEntry.
:return: The size of this LeaderboardEntry.
:rtype: int
"""
return self._size
@size.setter
def size(self, size):
"""
Sets the size of this LeaderboardEntry.
:param size: The size of this LeaderboardEntry.
:type: int
"""
self._size = size
@property
def source_md5(self):
"""
Gets the source_md5 of this LeaderboardEntry.
:return: The source_md5 of this LeaderboardEntry.
:rtype: str
"""
return self._source_md5
@source_md5.setter
def source_md5(self, source_md5):
"""
Sets the source_md5 of this LeaderboardEntry.
:param source_md5: The source_md5 of this LeaderboardEntry.
:type: str
"""
self._source_md5 = source_md5
@property
def trashed(self):
"""
Gets the trashed of this LeaderboardEntry.
:return: The trashed of this LeaderboardEntry.
:rtype: bool
"""
return self._trashed
@trashed.setter
def trashed(self, trashed):
"""
Sets the trashed of this LeaderboardEntry.
:param trashed: The trashed of this LeaderboardEntry.
:type: bool
"""
self._trashed = trashed
@property
def state(self):
"""
Gets the state of this LeaderboardEntry.
:return: The state of this LeaderboardEntry.
:rtype: ExperimentState
"""
return self._state
@state.setter
def state(self, state):
"""
Sets the state of this LeaderboardEntry.
:param state: The state of this LeaderboardEntry.
:type: ExperimentState
"""
self._state = state
@property
def description(self):
"""
Gets the description of this LeaderboardEntry.
:return: The description of this LeaderboardEntry.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this LeaderboardEntry.
:param description: The description of this LeaderboardEntry.
:type: str
"""
self._description = description
@property
def tags(self):
"""
Gets the tags of this LeaderboardEntry.
:return: The tags of this LeaderboardEntry.
:rtype: list[str]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""
Sets the tags of this LeaderboardEntry.
:param tags: The tags of this LeaderboardEntry.
:type: list[str]
"""
self._tags = tags
@property
def time_of_completion(self):
"""
Gets the time_of_completion of this LeaderboardEntry.
:return: The time_of_completion of this LeaderboardEntry.
:rtype: datetime
"""
return self._time_of_completion
@time_of_completion.setter
def time_of_completion(self, time_of_completion):
"""
Sets the time_of_completion of this LeaderboardEntry.
:param time_of_completion: The time_of_completion of this LeaderboardEntry.
:type: datetime
"""
self._time_of_completion = time_of_completion
@property
def commit_id(self):
"""
Gets the commit_id of this LeaderboardEntry.
:return: The commit_id of this LeaderboardEntry.
:rtype: str
"""
return self._commit_id
@commit_id.setter
def commit_id(self, commit_id):
"""
Sets the commit_id of this LeaderboardEntry.
:param commit_id: The commit_id of this LeaderboardEntry.
:type: str
"""
self._commit_id = commit_id
@property
def time_of_creation(self):
"""
Gets the time_of_creation of this LeaderboardEntry.
:return: The time_of_creation of this LeaderboardEntry.
:rtype: datetime
"""
return self._time_of_creation
@time_of_creation.setter
def time_of_creation(self, time_of_creation):
"""
Sets the time_of_creation of this LeaderboardEntry.
:param time_of_creation: The time_of_creation of this LeaderboardEntry.
:type: datetime
"""
self._time_of_creation = time_of_creation
@property
def project_id(self):
"""
Gets the project_id of this LeaderboardEntry.
:return: The project_id of this LeaderboardEntry.
:rtype: str
"""
return self._project_id
@project_id.setter
def project_id(self, project_id):
"""
Sets the project_id of this LeaderboardEntry.
:param project_id: The project_id of this LeaderboardEntry.
:type: str
"""
self._project_id = project_id
@property
def running_time(self):
"""
Gets the running_time of this LeaderboardEntry.
:return: The running_time of this LeaderboardEntry.
:rtype: int
"""
return self._running_time
@running_time.setter
def running_time(self, running_time):
"""
Sets the running_time of this LeaderboardEntry.
:param running_time: The running_time of this LeaderboardEntry.
:type: int
"""
self._running_time = running_time
@property
def id(self):
"""
Gets the id of this LeaderboardEntry.
:return: The id of this LeaderboardEntry.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this LeaderboardEntry.
:param id: The id of this LeaderboardEntry.
:type: str
"""
self._id = id
@property
def properties(self):
"""
Gets the properties of this LeaderboardEntry.
:return: The properties of this LeaderboardEntry.
:rtype: list[KeyValueProperty]
"""
return self._properties
@properties.setter
def properties(self, properties):
"""
Sets the properties of this LeaderboardEntry.
:param properties: The properties of this LeaderboardEntry.
:type: list[KeyValueProperty]
"""
self._properties = properties
@property
def short_id(self):
"""
Gets the short_id of this LeaderboardEntry.
:return: The short_id of this LeaderboardEntry.
:rtype: str
"""
return self._short_id
@short_id.setter
def short_id(self, short_id):
"""
Sets the short_id of this LeaderboardEntry.
:param short_id: The short_id of this LeaderboardEntry.
:type: str
"""
self._short_id = short_id
@property
def experiment_states(self):
"""
Gets the experiment_states of this LeaderboardEntry.
:return: The experiment_states of this LeaderboardEntry.
:rtype: ExperimentStates
"""
return self._experiment_states
@experiment_states.setter
def experiment_states(self, experiment_states):
"""
Sets the experiment_states of this LeaderboardEntry.
:param experiment_states: The experiment_states of this LeaderboardEntry.
:type: ExperimentStates
"""
self._experiment_states = experiment_states
@property
def entry_type(self):
"""
Gets the entry_type of this LeaderboardEntry.
:return: The entry_type of this LeaderboardEntry.
:rtype: EntryTypeEnum
"""
return self._entry_type
@entry_type.setter
def entry_type(self, entry_type):
"""
Sets the entry_type of this LeaderboardEntry.
:param entry_type: The entry_type of this LeaderboardEntry.
:type: EntryTypeEnum
"""
self._entry_type = entry_type
@property
def worker_type(self):
"""
Gets the worker_type of this LeaderboardEntry.
:return: The worker_type of this LeaderboardEntry.
:rtype: str
"""
return self._worker_type
@worker_type.setter
def worker_type(self, worker_type):
"""
Sets the worker_type of this LeaderboardEntry.
:param worker_type: The worker_type of this LeaderboardEntry.
:type: str
"""
self._worker_type = worker_type
@property
def environment(self):
"""
Gets the environment of this LeaderboardEntry.
:return: The environment of this LeaderboardEntry.
:rtype: str
"""
return self._environment
@environment.setter
def environment(self, environment):
"""
Sets the environment of this LeaderboardEntry.
:param environment: The environment of this LeaderboardEntry.
:type: str
"""
self._environment = environment
@property
def responding(self):
"""
Gets the responding of this LeaderboardEntry.
:return: The responding of this LeaderboardEntry.
:rtype: bool
"""
return self._responding
@responding.setter
def responding(self, responding):
"""
Sets the responding of this LeaderboardEntry.
:param responding: The responding of this LeaderboardEntry.
:type: bool
"""
self._responding = responding
@property
def owner(self):
"""
Gets the owner of this LeaderboardEntry.
:return: The owner of this LeaderboardEntry.
:rtype: str
"""
return self._owner
@owner.setter
def owner(self, owner):
"""
Sets the owner of this LeaderboardEntry.
:param owner: The owner of this LeaderboardEntry.
:type: str
"""
self._owner = owner
@property
def parameters(self):
"""
Gets the parameters of this LeaderboardEntry.
:return: The parameters of this LeaderboardEntry.
:rtype: list[Parameter]
"""
return self._parameters
@parameters.setter
def parameters(self, parameters):
"""
Sets the parameters of this LeaderboardEntry.
:param parameters: The parameters of this LeaderboardEntry.
:type: list[Parameter]
"""
self._parameters = parameters
@property
def deleted(self):
"""
Gets the deleted of this LeaderboardEntry.
:return: The deleted of this LeaderboardEntry.
:rtype: bool
"""
return self._deleted
@deleted.setter
def deleted(self, deleted):
"""
Sets the deleted of this LeaderboardEntry.
:param deleted: The deleted of this LeaderboardEntry.
:type: bool
"""
self._deleted = deleted
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
```
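Note that `__eq__` on these generated models compares `__dict__` directly, with no type or identity check: two `LeaderboardEntry` objects are equal exactly when all of their underscored fields match. A short sketch of that behaviour (same import-path assumption as above):
```python
# Illustrative only; the import path is an assumption.
from swagger_client.models.leaderboard_entry import LeaderboardEntry

a = LeaderboardEntry(id='exp-1', name='baseline', tags=['cnn'])
b = LeaderboardEntry(id='exp-1', name='baseline', tags=['cnn'])
print(a == b)               # True: every private attribute matches

b.tags = ['cnn', 'dropout']
print(a != b)               # True: __ne__ is just "not =="
```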
#### File: commands/data/upload.py
```python
from __future__ import print_function
import os
from neptune.internal.cli.commands.command_names import CommandNames
from neptune.internal.cli.commands.exceptions.data_exceptions import (
NeptuneCreateFileFromDirectoryException,
NeptuneNonRecursiveDirectoryUpload,
NeptuneCannotOverrideFileWithDirectory,
NeptuneCannotOverrideDirectory,
NeptuneCannotOverrideFile
)
from neptune.internal.cli.commands.framework import CommandUnsuccessfulError
from neptune.internal.cli.commands.neptune_command import NeptuneCommand
from neptune.internal.cli.storage.populate_storage_utils import (
collect_files,
CopyProgressBar)
from neptune.internal.cli.storage.upload_storage_utils import upload_to_storage
from neptune.internal.common.models.rich_project import ProjectNotFoundError, ProjectResolver
from neptune.internal.common.utils.paths import normalize_path, join_paths, getcwd
class DataUpload(NeptuneCommand):
DIRECTORY = u'dir'
FILE = u'file'
def __init__(self,
config,
api_service,
organization_name,
project_name,
path,
destination,
recursive=False):
super(DataUpload, self).__init__(CommandNames.UPLOAD, config, api_service)
self.organization_name = organization_name
self.project_name = project_name
self.path = normalize_path(path.rstrip(os.sep))
self.recursive = recursive
self.destination = destination
self.config = config
@staticmethod
def _replace_last_occurence(path, to_replace, replacement):
path_splitted = path.split(to_replace)
return to_replace.join(path_splitted[:-1]) + replacement + path_splitted[-1]
def run(self, args):
try:
project = ProjectResolver.resolve(
api_service=self.api_service,
organization_name=self.organization_name,
project_name=self.project_name)
except ProjectNotFoundError as exc:
raise CommandUnsuccessfulError(str(exc))
if not self.recursive:
normpath = join_paths(getcwd(), self.path)
if os.path.isdir(normpath) and len(os.listdir(normpath)) > 0:
raise NeptuneNonRecursiveDirectoryUpload(self.path)
files_list, size, empty_dir_list = collect_files(p=self.path, description=u"data")
src = join_paths(getcwd(), self.path)
copy_progress_bar = CopyProgressBar(size, u"Uploading data to server")
path_last_component = src.split(os.sep)[-1]
if self.destination is not None:
(dst_ls_len, dst_type) = self._get_dst_ls_with_file_type(project.id, self.destination)
if self.destination.endswith(os.sep) and \
os.path.isfile(join_paths(getcwd(), self.path)):
raise NeptuneCreateFileFromDirectoryException(self.destination)
dst = self.destination.rstrip(os.sep)
if dst_ls_len >= 0:
if dst_type == self.FILE and os.path.isdir(src):
raise NeptuneCannotOverrideFileWithDirectory(self.path, self.destination)
empty_dest = dst + os.sep + path_last_component
if dst_type == self.DIRECTORY:
file_dest = dst + os.sep + path_last_component
else:
file_dest = dst
files_list = [(x, self._replace_last_occurence(x, src, file_dest))
for x, _ in files_list]
empty_dir_list = [(x, self._replace_last_occurence(x, src, empty_dest))
for x, _ in empty_dir_list]
else:
files_list = [(x, self._replace_last_occurence(x, src, dst)) for x, _ in files_list]
empty_dir_list = [(x, self._replace_last_occurence(x, src, dst)) for x, _ in empty_dir_list]
else:
(dst_ls_len, dst_type) = self._get_dst_ls_with_file_type(project.id, self.path.split(os.sep)[-1])
if dst_ls_len >= 0:
dst_path = self.path.split(os.sep)[-1]
if dst_type == self.FILE:
if os.path.isdir(src):
raise NeptuneCannotOverrideFileWithDirectory(self.path, dst_path)
raise NeptuneCannotOverrideFile(self.path, dst_path)
raise NeptuneCannotOverrideDirectory(self.path, dst_path)
if not os.path.isfile(src):
files_list = [(x,
x[(x.find(src) + len(os.sep.join(src.split(os.sep)[:-1]))):]
if os.sep in src else x[(x.find(src) + len(src)):])
for x, _ in files_list]
files_list = [(x, y[1:] if y.startswith(os.sep) else y) for x, y in files_list]
empty_dir_list = [(x,
x[(x.find(src) + len(os.sep.join(src.split(os.sep)[:-1]))):]
if os.sep in src else x[(x.find(src) + len(src)):])
for x, _ in empty_dir_list]
empty_dir_list = [(x, y[1:] if y.startswith(os.sep) else y) for x, y in empty_dir_list]
else:
files_list = [(x, y) for x, y in files_list]
empty_dir_list = [(x, y) for x, y in empty_dir_list]
upload_to_storage(files_list=files_list,
dir_list=empty_dir_list,
upload_api_fun=self.api_service.upload_data,
upload_tarstream_api_fun=self.api_service.upload_data_as_tarstream,
callback=copy_progress_bar.update,
project_id=project.id)
copy_progress_bar.finalize()
def _get_dst_ls_with_file_type(self, project_id, dst_path):
try:
ls_data = self.api_service.ls_data(project_id=project_id, path_param=dst_path, recursive=False)
file_type = self.DIRECTORY
if len(ls_data) == 1 and ls_data[0].file_type == self.FILE:
dst_path_parent = '.' if os.sep not in dst_path else join_paths(dst_path.split(os.sep)[:-1])
ls_data_parent = self.api_service.ls_data(project_id=project_id,
path_param=dst_path_parent,
recursive=False)
filtered_ls_data_parent = [x for x in ls_data_parent if x.name == dst_path.split(os.sep)[-1]]
if len(filtered_ls_data_parent) == 1 and filtered_ls_data_parent[0].file_type == self.FILE:
file_type = self.FILE
return len(ls_data), file_type
except Exception as _:
return -1, None
```
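The destination mapping in `DataUpload.run` rests on `_replace_last_occurence`, which swaps only the last occurrence of the local source prefix for the remote destination prefix before the pairs are handed to `upload_to_storage`. A standalone sketch of that idea (an illustrative re-implementation, not an import from the CLI package; the paths are made up):
```python
import os

def replace_last_occurrence(path, to_replace, replacement):
    # Same splitting trick as DataUpload._replace_last_occurence above.
    parts = path.split(to_replace)
    return to_replace.join(parts[:-1]) + replacement + parts[-1]

src = os.path.join('data', 'images')                    # local prefix being uploaded
local_file = os.path.join('data', 'images', 'cat.png')  # one collected file
remote_prefix = 'uploads/images'                        # hypothetical destination

print(replace_last_occurrence(local_file, src, remote_prefix))
# e.g. 'uploads/images/cat.png' on POSIX systems
```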
#### File: commands/exceptions/enqueue_exceptions.py
```python
from neptune.internal.common import NeptuneException
from neptune.internal.common.parsers.common_parameters_configurator import CommonParametersConfigurator
class NeptuneInvalidEnvironmentName(NeptuneException):
def __init__(self, error_msg):
super(NeptuneInvalidEnvironmentName, self).__init__(
(u"{error_msg}\n" +
u"Visit {doc_url} for a " +
u"list of Neptune's environment types.").format(error_msg=error_msg,
doc_url=CommonParametersConfigurator
.DOCS_AVAILABLE_ENVIRONMENTS)
)
class NeptuneInvalidWorkerType(NeptuneException):
def __init__(self, error_msg):
super(NeptuneInvalidWorkerType, self).__init__(
(u"{error_msg}\n" +
u"Visit {doc_url} for a " +
u"list of Neptune's worker types.").format(error_msg=error_msg,
doc_url=CommonParametersConfigurator.DOCS_AVAILABLE_WORKERS)
)
class NeptuneInputFileNotFound(NeptuneException):
def __init__(self):
super(NeptuneInputFileNotFound, self).__init__(
u"You cannot use input that is not in Neptune storage.\n" +
u"Use 'neptune data upload' to upload data to storage."
)
class NeptuneFailedToExecute(NeptuneException):
def __init__(self, cmd, source_exception):
super(NeptuneFailedToExecute, self).__init__(
u"Failed to start experiment process.\n" +
u"Command: {}\n".format(" ".join(cmd)) +
u"Exception: {}".format(str(source_exception))
)
class NeptunePipInstallFailure(NeptuneException):
def __init__(self, exit_code):
super(NeptunePipInstallFailure, self).__init__(
u"pip failed to install the requirements with exit code=" + str(exit_code) + ".\n" +
u"For more details, see the output/neptune-stdout.log and output/neptune-stderr.log files."
)
class NeptuneNoCreditsToRunExperiment(NeptuneException):
def __init__(self):
super(NeptuneNoCreditsToRunExperiment, self).__init__(
u"Sorry, you don't have enough credits in your account to run this experiment.\n" +
u"Register your credit card in Neptune to run experiments in the cloud."
)
class NeptuneExperimentCreationUnauthorized(NeptuneException):
def __init__(self, organization_name, project_name):
super(NeptuneExperimentCreationUnauthorized, self).__init__(
u"Failed to create experiment.\n" +
u"You are not allowed to create experiments in project {}/{}.".format(
organization_name, project_name
)
)
```
#### File: experiment/ls/formatting.py
```python
from collections import OrderedDict
from neptune.internal.common.utils.data_utils import decamelize_keys
class RowFactory(object):
def __init__(self, formatter, fields, table_factory):
self.index = 1
self.formatter = formatter
self.fields = fields
self.table_factory = table_factory
def format(self, entities, **ctx):
for i, entity in enumerate(entities, start=self.index):
add_header = self.index == 1
entity['No.'] = i
entity = OrderedDict([(f, entity[f]) for f in self.fields if f in entity])
entity = self.formatter.create_row(entity, self.fields, **ctx)
entity = self.table_factory.create_horizontal_table(
self.fields, [entity], heading=add_header)
self.index += 1
yield entity
def decamelized(dcts):
for dct in dcts:
yield decamelize_keys(dct)
```
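`RowFactory.format` is a generator: it renders one table fragment per entity, numbering rows via the shared `index` and printing the header only for the first fragment, while `decamelized` adapts camelCase API payloads to these snake_case field names. A rough sketch with stand-in formatter and table-factory objects; both stubs and the import path are assumptions, since the real collaborators live elsewhere in the CLI:
```python
# The import path is inferred from the file header and may differ.
from neptune.internal.cli.commands.experiment.ls.formatting import RowFactory

class StubFormatter(object):
    # Stand-in for the real row formatter; passes the row through untouched.
    def create_row(self, entity, fields, **ctx):
        return entity

class StubTableFactory(object):
    # Stand-in table factory; renders rows as tab-separated text.
    def create_horizontal_table(self, fields, rows, heading=False):
        lines = ['\t'.join(fields)] if heading else []
        lines += ['\t'.join(str(row[f]) for f in fields) for row in rows]
        return '\n'.join(lines)

factory = RowFactory(StubFormatter(), ['No.', 'id', 'state'], StubTableFactory())
entities = [{'id': 'exp-1', 'state': 'running'}, {'id': 'exp-2', 'state': 'succeeded'}]
for fragment in factory.format(entities):
    print(fragment)        # the header appears only on the first fragment
```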
#### File: cli/commands/framework.py
```python
from __future__ import print_function
import abc
import collections
from future.builtins import str
from future.utils import with_metaclass
from neptune.internal.cli.commands.neptune_command import OK_EXIT_CODE
from neptune.internal.common import NeptuneException
from neptune.internal.common.api.exceptions import NeptuneServerResponseErrorException
EXIT_CODE_UNSUCCESSFUL = 1
class Command(object):
exit_code = OK_EXIT_CODE
def abort(self):
pass
class GenericCommand(with_metaclass(abc.ABCMeta, object)):
exit_code = OK_EXIT_CODE
@abc.abstractmethod
def execute(self, ctx, *args):
raise NotImplementedError
def abort(self):
pass
class NeptuneCommandAdapter(object):
''' Adapter that makes GenericCommand classes compatible with the INeptuneCommand interface. '''
def __init__(self, command, ctx):
self.command = command
self.ctx = ctx
def run(self, *_):
self.command.execute(self.ctx)
@property
def name(self):
return self.command.name
@property
def exit_code(self):
return self.command.exit_code
def abort(self):
self.command.abort()
class HandleCommandErrors(GenericCommand):
''' Catch CommandUnsuccessfulError exceptions, print them, and take care of the exit code. '''
def __init__(self, command):
self.command = command
self._exit_code = OK_EXIT_CODE
def execute(self, ctx, *args):
try:
self.command.execute(ctx, *args)
except CommandUnsuccessfulError as error:
self._exit_code = error.exit_code
print(str(error))
except NeptuneServerResponseErrorException as e:
self._exit_code = EXIT_CODE_UNSUCCESSFUL
if e.status == 400:
print(e.response_message)
elif e.status == 401 or e.status == 403:
print(u'neptune: Not authorized')
else:
raise e
except:
self._exit_code = EXIT_CODE_UNSUCCESSFUL
raise
@property
def name(self):
return self.command.name
@property
def exit_code(self):
return self._exit_code
class CommandUnsuccessfulError(NeptuneException):
''' Raise this exception in commands. '''
def __init__(self, message, exit_code=EXIT_CODE_UNSUCCESSFUL):
super(CommandUnsuccessfulError, self).__init__(message)
self.exit_code = exit_code
CommandExecutionContext = collections.namedtuple(
'CommandExecutionContext',
['api_service', 'config', 'session'])
```
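These pieces are meant to compose: a `GenericCommand` implements `execute(ctx)`, `HandleCommandErrors` turns a `CommandUnsuccessfulError` into a printed message and an exit code, and `NeptuneCommandAdapter` exposes the older `run()` interface on top. A minimal wiring sketch using a made-up command class:
```python
from neptune.internal.cli.commands.framework import (
    CommandExecutionContext, CommandUnsuccessfulError, GenericCommand,
    HandleCommandErrors, NeptuneCommandAdapter)

class FailingCommand(GenericCommand):
    # Hypothetical command used purely for illustration.
    name = 'demo'

    def execute(self, ctx, *args):
        raise CommandUnsuccessfulError('nothing to do', exit_code=3)

ctx = CommandExecutionContext(api_service=None, config=None, session=None)
command = NeptuneCommandAdapter(HandleCommandErrors(FailingCommand()), ctx)
command.run()               # prints 'nothing to do' instead of raising
print(command.exit_code)    # 3
```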
#### File: cli/commands/neptune_exec.py
```python
from __future__ import print_function
from future.builtins import object, str
from neptune.internal.cli.commands.command_names import CommandNames
from neptune.internal.cli.commands.executing.null_executor import NullExecutor
from neptune.internal.cli.commands.neptune_command import NeptuneCommand
from neptune.internal.cli.commands.utils.docker_utils import resolve_docker_image
from neptune.internal.cli.validation.validations import (
JobIsInState,
JobValidationRules,
ValidationError
)
from neptune.internal.common import NeptuneException
from neptune.internal.common.api.exceptions import NeptuneEntityNotFoundException, NeptuneUnprocessableEntityException
from neptune.internal.common.api.job_api_service import ExperimentState
class GetExperimentError(NeptuneException):
def __init__(self, message, critical=True):
super(GetExperimentError, self).__init__(message)
self.critical = critical
class NeptuneExec(NeptuneCommand):
def __init__(self, experiment_id, config, api_service, experiment_executor_factory,
environment=None, custom_execution_paths=None):
super(NeptuneExec, self).__init__(CommandNames.EXEC, config, api_service)
self._experiment_executor_factory = experiment_executor_factory
self._custom_execution_paths = custom_execution_paths
self._validations = JobValidationRules(JobIsInState(
expectedState=ExperimentState.waiting,
critical=True
))
self.exit_code = self.OK_EXIT_CODE
self._executor = NullExecutor()
self.environment = environment
self.experiment_id = experiment_id
def abort(self):
self._executor.abort()
def _get_experiment(self):
try:
return self.api_service.get_experiment(self.experiment_id)
except NeptuneEntityNotFoundException as exc:
raise GetExperimentError(exc.response_message)
def run(self, args):
try:
experiment = self._get_experiment()
self._validations.validate(experiment, args)
self._execute(experiment, args)
except NeptuneUnprocessableEntityException as error:
self.exit_code = self.ENTITY_NOT_FOUND_EXIT_CODE
self.logger.debug(error)
except ValidationError as error:
self.exit_code = self.INVALID_EXPERIMENT_STATE_EXIT_CODE
print(str(error))
except GetExperimentError as error:
self.exit_code = self.NO_EXPERIMENT_TO_EXECUTE
print(str(error))
except SystemExit:
self.exit_code = self.UNKNOWN_EXCEPTION_EXIT_CODE
raise
except NeptuneException:
self.exit_code = self.UNKNOWN_EXCEPTION_EXIT_CODE
raise
except BaseException as error:
self.exit_code = self.UNKNOWN_EXCEPTION_EXIT_CODE
self.logger.exception(error)
return self.exit_code
def _execute(self, experiment, args):
docker_image = resolve_docker_image(self.environment, self.api_service)
self._executor = self._experiment_executor_factory.create(
docker_image=docker_image, experiment=experiment, custom_execution_paths=self._custom_execution_paths)
self.exit_code = self._executor.execute(experiment, args)
class NeptuneExecFactory(object):
def __init__(self, config, api_service, experiment_executor_factory):
self._config = config
self._api_service = api_service
self._experiment_executor_factory = experiment_executor_factory
def create(self, experiment_id, environment=None, custom_execution_paths=None):
if not experiment_id:
raise ValueError("experiment_id argument must be provided.")
return NeptuneExec(experiment_id=experiment_id, config=self._config,
api_service=self._api_service, environment=environment,
experiment_executor_factory=self._experiment_executor_factory,
custom_execution_paths=custom_execution_paths)
```
#### File: commands/parsers/abstract_neptune_command_parser.py
```python
import argparse
from abc import abstractmethod, ABCMeta
from neptune.internal.cli.commands.parsers.utils.neptune_help_formatters import NeptuneRawDescriptionHelpFormatter
from neptune.internal.cli.commands.parsers.utils.validators import VoidValidator
from neptune.internal.common.parsers.neptune_argparse_wrapper import NeptuneArgparseWrapper
class AbstractNeptuneCommandParser(NeptuneArgparseWrapper):
__metaclass__ = ABCMeta
def __init__(self, parent):
super(AbstractNeptuneCommandParser, self).__init__(
parent.subparsers.add_parser(**self._get_params()))
def _get_params(self):
params = {
'name': self.command_name(),
'description': self.description(),
'add_help': False,
'formatter_class': NeptuneRawDescriptionHelpFormatter
}
if self.help() != argparse.SUPPRESS:
params['help'] = self.help()
return params
def get_validator(self):
return VoidValidator()
@staticmethod
@abstractmethod
def command_name():
raise NotImplementedError()
def description(self):
return self.help()
@abstractmethod
def help(self):
raise NotImplementedError()
```
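Concrete parsers only need to provide `command_name()` and `help()`; the base class registers them under the parent's `subparsers` with the shared help formatter, and `description()` falls back to `help()` unless overridden. A bare-bones subclass sketch (the subcommand itself is made up; instantiating it requires a parent parser exposing `subparsers`, such as the `ExperimentParser` below):
```python
from neptune.internal.cli.commands.parsers.abstract_neptune_command_parser import \
    AbstractNeptuneCommandParser

class HelloParser(AbstractNeptuneCommandParser):
    # Hypothetical subcommand used purely for illustration.
    @staticmethod
    def command_name():
        return u'hello'

    def help(self):
        return u'Print a friendly greeting.'
```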
#### File: commands/parsers/experiment_parser.py
```python
from neptune.internal.cli.commands.command_names import CommandNames
from neptune.internal.cli.commands.parsers.abstract_neptune_command_parser import AbstractNeptuneCommandParser
from neptune.internal.cli.commands.parsers.experiment.abort_parser import AbortParser
from neptune.internal.cli.commands.parsers.experiment.ls_parser import LsParser
from neptune.internal.cli.commands.parsers.experiment.notebook_parser import NotebookParser
from neptune.internal.cli.commands.parsers.experiment.run_parser import RunParser
from neptune.internal.cli.commands.parsers.experiment.send_parser import SendParser
from neptune.internal.cli.commands.parsers.utils.validators import (
CombinedArgumentsValidator,
SubcommandValidator)
class ExperimentParser(AbstractNeptuneCommandParser):
def __init__(self, parent):
super(ExperimentParser, self).__init__(parent)
self.subparsers = self.argparse_parser.add_subparsers(title='Subcommands', dest="subcommand")
# Required for Python 3.
# http://bugs.python.org/issue9253#msg186387
self.subparsers.required = True
self.abort_parser = AbortParser(parent=self)
self.notebook_parser = NotebookParser(parent=self)
self.send_parser = SendParser(parent=self)
self.run_parser = RunParser(parent=self)
self.ls_parser = LsParser(parent=self)
@staticmethod
def command_name():
return CommandNames.EXPERIMENT
def help(self):
return u'Using this command family you can start, stop and list your experiments.'
def get_validator(self):
return CombinedArgumentsValidator([
SubcommandValidator([
self.send_parser,
self.run_parser,
self.notebook_parser,
self.abort_parser,
self.ls_parser
])
])
```
#### File: commands/parsers/project_parser.py
```python
from future.builtins import str
from neptune.internal.cli.commands.command_names import CommandNames
from neptune.internal.cli.commands.parsers.abstract_neptune_command_parser import AbstractNeptuneCommandParser
from neptune.internal.cli.commands.parsers.utils.validators import (
CheckPositionalParamsFirst,
CombinedArgumentsValidator
)
class ProjectParser(AbstractNeptuneCommandParser):
def __init__(self, parent):
super(ProjectParser, self).__init__(parent)
self.subparsers = self.argparse_parser.add_subparsers(title='Subcommands', dest="subcommand")
# Required for Python 3.
# http://bugs.python.org/issue9253#msg186387
self.subparsers.required = True
self.activate_parser = ProjectActivateParser(parent=self)
@staticmethod
def command_name():
return CommandNames.PROJECT
def help(self):
return u'You can manipulate projects using this command family.'
def get_validator(self):
return CombinedArgumentsValidator([CheckPositionalParamsFirst()])
class ProjectActivateParser(AbstractNeptuneCommandParser):
@staticmethod
def command_name():
return CommandNames.ACTIVATE
def help(self):
return (u'This command changes the global configuration of your `neptune` command\n'
u'to use another project by default. This can still be overridden by using\n'
u'the `--project` option in most commands.\n')
def _config_positional_args(self):
self.add_argument(
name='project',
type=str,
help='Name of the project to activate.\n'
'It can be either `your-organization/your-project` or simply `your-project`. '
'In the latter form, `your-project` is expected to reside within the organization '
'associated with your account. If your username is `jacob`, then `jacob/sandbox` points '
'to the `sandbox` project within your organization.'
)
```
#### File: commands/parsers/root_parser.py
```python
from __future__ import print_function
from neptune.internal.cli.commands.command_names import CommandNames
from neptune.internal.cli.commands.parsers.account_parser import AccountParser
from neptune.internal.cli.commands.parsers.aliasing import alias
from neptune.internal.cli.commands.parsers.data_parser import DataParser
from neptune.internal.cli.commands.parsers.exec_parser import ExecParser
from neptune.internal.cli.commands.parsers.experiment.run_parser import RunParser
from neptune.internal.cli.commands.parsers.experiment.send_parser import SendParser
from neptune.internal.cli.commands.parsers.experiment_parser import ExperimentParser
from neptune.internal.cli.commands.parsers.project_parser import ProjectParser
from neptune.internal.cli.commands.parsers.utils.neptune_help_formatters import RawDescriptionOnlyHeaderHelpFormatter
from neptune.internal.cli.commands.parsers.utils.validators import ArgumentsValidationException
from neptune.internal.common import NeptuneException
from neptune.internal.common.parsers.extended_argparse_parser import ExtendedArgparseParser
from neptune.internal.common.parsers.neptune_argparse_wrapper import NeptuneArgparseWrapper
class NeptuneRootCommandParser(NeptuneArgparseWrapper):
NEWLINE_AND_SPACE = u'\n '
SPACE_AND_NEWLINE = u' \n'
DESCRIPTION = NEWLINE_AND_SPACE + u'''
Neptune CLI allows you to manage and run your experiments.
Find more information at https://docs.neptune.ml
Use neptune <command | group of commands> -h for more information,
e.g. neptune experiment run -h
Running Experiments
neptune run Run a new experiment locally.
Alias for `neptune experiment run`.
neptune send Run a new experiment in the cloud.
Alias for `neptune experiment send`.
neptune experiment run Run a new experiment locally.
neptune experiment send Run a new experiment in the cloud.
neptune experiment send-notebook Start a new Jupyter notebook in the cloud.
neptune experiment abort Stop an experiment.
neptune experiment list List current experiments.
Managing Data in Neptune
neptune data upload Upload data to Neptune storage.
neptune data download Download data from Neptune storage.
neptune data ls Browse your datasets and results.
neptune data rm Remove data from Neptune storage.
Authentication
neptune account login Log into your Neptune account.
neptune account logout Log out of your Neptune account.
Project
neptune project activate Change active Neptune project.
''' + SPACE_AND_NEWLINE
def __init__(self):
public_first_level_parsers = [
alias(CommandNames.RUN, RunParser),
alias(CommandNames.SEND, SendParser),
alias(CommandNames.EX, ExperimentParser),
ExperimentParser, AccountParser, DataParser, ProjectParser,
]
public_subcommands = [p.command_name() for p in public_first_level_parsers]
registered_first_level_parsers = public_first_level_parsers + [ExecParser]
super(NeptuneRootCommandParser, self).__init__(
argparse_parser=self._base_parser(public_subcommands=public_subcommands))
self.subparsers = self.argparse_parser.add_subparsers(
title=u'commands', dest=u'command_to_run', metavar=u'<command or group of commands>', prog='neptune')
# pylint: disable=unused-argument
def completer(*args, **kwargs):
return public_subcommands
self.subparsers.completer = completer
# Required for Python 3.
# http://bugs.python.org/issue9253#msg186387
self.subparsers.required = True
self.first_level_command_parsers = [
parser_constructor(parent=self)
for parser_constructor in registered_first_level_parsers
]
def validate(self, arguments, raw_args):
command = arguments.known_args.command_to_run
found_parsers = [parser for parser in self.first_level_command_parsers if parser.command_name() == command]
if found_parsers:
command_parser = found_parsers[0]
try:
command_parser.get_validator().validate(arguments, raw_args)
except ArgumentsValidationException:
command_parser.argparse_parser.print_usage()
raise
else:
raise NeptuneException(u'Unknown command neptune {}'.format(command))
@staticmethod
def _base_parser(public_subcommands):
return ExtendedArgparseParser(
public_subcommands=public_subcommands,
prog=u'neptune',
formatter_class=RawDescriptionOnlyHeaderHelpFormatter,
usage=u'neptune',
description=NeptuneRootCommandParser.DESCRIPTION,
add_help=False
)
```
#### File: parsers/utils/autocompletion.py
```python
from argcomplete import CompletionFinder
class OverridableSubActionsCompletionCompleter(CompletionFinder):
"""
The default CompletionFinder does not make it possible to override
completion behaviour for subparsers.
This class allows a custom completer to be defined for a subparser:
def complete(parser, cword_prefix):
return ...
self.argparse_parser.add_subparsers(...).completer = complete
"""
def __init__(self, *args, **kwargs):
super(OverridableSubActionsCompletionCompleter, self).__init__(*args, **kwargs)
def _get_subparser_completions(self, parser, cword_prefix):
if hasattr(parser, 'completer') and callable(parser.completer):
all_completions = parser.completer(parser, cword_prefix)
return [c for c in all_completions if c.startswith(cword_prefix)]
return super(OverridableSubActionsCompletionCompleter, self)._get_subparser_completions(parser, cword_prefix)
autocomplete = OverridableSubActionsCompletionCompleter()
```
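Because `argcomplete` completion finders are callable, swapping in this subclass is a one-line change at the entry point; any `completer` attribute set on a subparsers object (as in `NeptuneRootCommandParser` above) then takes precedence over the default suggestions. A sketch of the hookup with a stand-in parser; the import path is inferred from the neighbouring `parsers.utils` modules:
```python
import argparse

# Import path inferred from neighbouring modules; adjust if it differs.
from neptune.internal.cli.commands.parsers.utils.autocompletion import autocomplete

root_parser = argparse.ArgumentParser(prog='neptune')   # stand-in for the real root parser
autocomplete(root_parser)   # same calling convention as argcomplete.autocomplete(parser)
```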
#### File: cli/commands/session.py
```python
from __future__ import print_function
from future.builtins import input
from future.utils import raise_from
import base64
import json
import socketserver
import sys
from threading import Thread
from flask import Flask, request
from neptune.internal.cli.commands.command_names import CommandNames
from neptune.internal.cli.commands.framework import Command
from neptune.internal.cli.commands.neptune_command import NeptuneCommand
from neptune.internal.common import NeptuneException, NeptuneInternalException
from neptune.internal.common.api.api_service_factory import create_services
from neptune.internal.common.exceptions.keycloak_exceptions import KeycloakException
from neptune.internal.common.threads.neptune_future import NeptuneFuture
class NeptuneLogout(Command):
name = u'logout'
LOGGED_OUT_MESSAGE = u'You have been successfully logged out.'
def __init__(self, token_storage):
self.token_storage = token_storage
def run(self, *_):
if self.token_storage.contains_token():
self.token_storage.clear()
print(self.LOGGED_OUT_MESSAGE)
class NeptuneManualLogin(Command):
name = u'manual login'
def __init__(self, config, auth_code_url, keycloak_service, token_storage,
api_service, webbrowser):
self.config = config
self.auth_code_url = auth_code_url
self.keycloak_service = keycloak_service
self.token_storage = token_storage
self.api_service = api_service
self.webbrowser = webbrowser
def run(self, *_):
print(u'Please follow {} to obtain authentication token.\n'.format(self.auth_code_url))
self.webbrowser.open(self.auth_code_url)
user_input = input(u'Authentication token: ')
authorization_code, redirect_uri = extract_fields(decode_token(user_input))
offline_token = self.keycloak_service.request_offline_token(
authorization_code=authorization_code,
redirect_uri=redirect_uri)
self.token_storage.save(offline_token)
services = create_services(self.token_storage)
services.api_service.user_logged_to_cli()
print(u'Login successful.')
class NeptuneApiToken(NeptuneCommand):
name = u'api key'
def __init__(self, config, api_service):
super(NeptuneApiToken, self).__init__(CommandNames.API_TOKEN, config, api_service=api_service)
def run(self, *_):
print(self.api_service.get_api_token())
class NeptuneLocalLogin(NeptuneCommand):
name = u'local login'
def __init__(self, config, keycloak_api_service, offline_token_storage_service,
api_service, webbrowser):
super(NeptuneLocalLogin, self).__init__(CommandNames.LOGIN, config, api_service=None)
self._keycloak_api_service = keycloak_api_service
self._offline_token_storage_service = offline_token_storage_service
self._aborted = False
self._stock_server_bind = socketserver.TCPServer.server_bind
self.api_service = api_service
self.webbrowser = webbrowser
def run(self, args):
webserver_port_future, authorization_code_future = self._start_webserver(
self._keycloak_api_service.get_local_login_redirect_url()
)
webserver_port = webserver_port_future.wait()
url = self._keycloak_api_service.get_request_authorization_code_url(
redirect_uri=self._webserver_url(webserver_port))
# Open the web browser in a separate thread to avoid a freeze in Firefox.
t = Thread(target=self.webbrowser.open, args=(url,))
t.daemon = True
t.start()
print("Waiting for authentication, press Ctrl+C to abort...")
authorization_code = self._wait_for_authorization_code(authorization_code_future)
try:
offline_token = self._request_offline_token(
authorization_code=authorization_code,
redirect_uri=self._webserver_url(webserver_port)
)
except KeycloakException as e:
print(e.message)
sys.exit(1)
self._offline_token_storage_service.save(offline_token)
services = create_services(self._offline_token_storage_service)
# Perform the operations that need to run for a new user on their first login.
# TODO Consider moving this API call to Keycloak.
services.api_service.login()
services.api_service.user_logged_to_cli()
print('Login successful.')
def abort(self):
self._aborted = True
def _start_webserver(self, login_redirect_address):
app = Flask(__name__)
webserver_port_future = self._intercept_server_port()
authorization_code_future = NeptuneFuture()
app.add_url_rule(
rule='/',
endpoint='_authorization_code_request_handler',
view_func=self._authorization_code_request_handler(authorization_code_future, login_redirect_address)
)
webserver_thread = Thread(target=app.run, kwargs={"port": 0})
webserver_thread.daemon = True
webserver_thread.start()
return webserver_port_future, authorization_code_future
def _wait_for_authorization_code(self, authorization_code_future):
while not self._aborted:
authorization_code = authorization_code_future.wait(timeout=1)
if authorization_code:
return authorization_code
def _request_offline_token(self, authorization_code, redirect_uri):
offline_token = self._keycloak_api_service.request_offline_token(
authorization_code=authorization_code,
redirect_uri=redirect_uri
)
return offline_token
def _authorization_code_request_handler(self, authorization_code_future, login_redirect_address):
def handler():
authorization_code_future.set(request.args['code'])
request.environ.get('werkzeug.server.shutdown')()
return '<script type="text/javascript">' \
'window.location.href = "{frontend_address}";' \
'</script>'.format(frontend_address=login_redirect_address)
return handler
def _intercept_server_port(self):
websocket_port_future = NeptuneFuture()
def _server_bind_wrapper(tcp_server):
return_value = self._stock_server_bind(tcp_server)
websocket_port_future.set(tcp_server.socket.getsockname()[1])
socketserver.TCPServer.server_bind = self._stock_server_bind
return return_value
socketserver.TCPServer.server_bind = _server_bind_wrapper
return websocket_port_future
def _webserver_url(self, webserver_port):
return 'http://localhost:{}'.format(webserver_port)
def decode_token(string):
try:
raw_message = base64.b64decode(string)
return json.loads(raw_message.decode('UTF-8'))
except:
raise NeptuneException('Invalid authentication token.')
def extract_fields(message):
try:
redirect_uri = message['redirect_uri']
authorization_code = message['code']
except KeyError as error:
raise_from(NeptuneInternalException('Invalid JSON received from frontend.'), error)
return authorization_code, redirect_uri
```
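The manual login flow expects the pasted token to be base64-encoded JSON carrying `code` and `redirect_uri`; `decode_token` and `extract_fields` simply undo that envelope. A self-contained sketch of the round trip with made-up values:
```python
import base64
import json

# Build a token in the shape NeptuneManualLogin expects (values are made up).
payload = {'code': 'abc123', 'redirect_uri': 'http://localhost:8000'}
token = base64.b64encode(json.dumps(payload).encode('UTF-8'))

# Mirror of decode_token / extract_fields above.
message = json.loads(base64.b64decode(token).decode('UTF-8'))
authorization_code, redirect_uri = message['code'], message['redirect_uri']
print(authorization_code, redirect_uri)   # abc123 http://localhost:8000
```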
#### File: commands/utils/configuration_overriding_utils.py
```python
from future.builtins import object
from neptune.internal.common.models.key_value_properties_utils import merge_properties_lists
class ConfigurationOverridingUtils(object):
def __init__(self):
pass
@staticmethod
def merge_tags(job_tags, override_args_tags):
"""
:param job_tags: list[str]
:param override_args_tags: list[str]
:return: list[str]
Returns the deduplicated union of job_tags and override_args_tags (order is not preserved).
"""
new_job_tags = []
if job_tags:
new_job_tags += job_tags
if override_args_tags:
new_job_tags += override_args_tags
return list(set(new_job_tags))
@staticmethod
def merge_properties(job_swagger_properties, override_args_properties):
"""
:param job_swagger_properties: list[KeyValuePropertyParam]
:param override_args_properties: list[KeyValuePropertyParam]
:return: list[KeyValuePropertyParam]
Overrides job_swagger_properties with override_args_properties.
"""
return merge_properties_lists(job_swagger_properties or [], override_args_properties or [])
```
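`merge_tags` deduplicates through a `set`, so the merged list loses its original ordering, while `merge_properties` defers to `merge_properties_lists`, where the override values win. A quick sketch of the tag merge; the import path is inferred from the neighbouring `commands.utils` modules:
```python
# Import path inferred from neighbouring modules; adjust if it differs.
from neptune.internal.cli.commands.utils.configuration_overriding_utils import \
    ConfigurationOverridingUtils

merged = ConfigurationOverridingUtils.merge_tags(['cnn', 'baseline'], ['baseline', 'tuned'])
print(sorted(merged))   # ['baseline', 'cnn', 'tuned'] -- duplicates dropped, order not preserved
```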
#### File: commands/utils/file_copying_utils.py
```python
import logging
import os
from distutils.errors import DistutilsFileError # pylint: disable=import-error,no-name-in-module
from distutils.file_util import copy_file as distutils_copy_file # pylint: disable=import-error,no-name-in-module,line-too-long
from neptune.internal.common.utils.paths import (
absolute_path,
make_path,
join_paths)
_logger = logging.getLogger(__name__)
def copy_tree(src, dst, exclude=None, preserve_mode=0, preserve_times=0,
preserve_symlinks=0, update=0, verbose=1, dry_run=0,
post_file_copy_callback=lambda src_name, dst_name: None):
"""This is a copy of distutils.dir_util.copy_tree extended with parameters:
'exclude' and 'post_file_copy_callback' and support of names with Unicode characters.
The exclude parameter allows to exclude directories from being copied.
The post_file_copy_callback parameter allows to provide callback called after file copy.
The changes between this function and the original one are marked in the source code.
"""
# Change: there was no 'exclude' parameter in the original; it defaults to an empty list here.
exclude = [absolute_path(e) for e in exclude] if exclude else []
if not dry_run and not os.path.isdir(src):
raise DistutilsFileError("cannot copy tree '{}': not a directory".format(src))
try:
names = os.listdir(src)
# Change: remove excluded files from 'names' (the list of files to be copied).
names = [n for n in names if join_paths(src, n) not in exclude]
except os.error as error:
(_, errstr) = error.args
if dry_run:
names = []
else:
raise DistutilsFileError("error listing files in '{}': {}".format(src, errstr))
if not dry_run:
make_path(dst, verbose=verbose)
outputs = []
for n in names:
src_name = os.path.join(src, n)
dst_name = os.path.join(dst, n)
if n.startswith('.nfs'):
# skip NFS rename files
continue
if preserve_symlinks and os.path.islink(src_name):
# os.readlink and os.symlink don't exist on Windows,
# but os.path.islink always returns False there.
# pylint:disable=no-member
link_dest = os.readlink(src_name)
if verbose >= 1:
_logger.info("linking %s -> %s", dst_name, link_dest)
if not dry_run:
os.symlink(link_dest, dst_name)
outputs.append(dst_name)
elif os.path.isdir(src_name):
outputs.extend(
# Change: supply exclude and post_file_copy_callback to recursive call
copy_tree(src_name, dst_name, exclude, preserve_mode,
preserve_times, preserve_symlinks, update,
verbose=verbose, dry_run=dry_run,
post_file_copy_callback=post_file_copy_callback))
else:
distutils_copy_file(src_name, dst_name, preserve_mode,
preserve_times, update, verbose=verbose,
dry_run=dry_run)
# Change: call provided post_file_copy_callback
post_file_copy_callback(src_name, dst_name)
outputs.append(dst_name)
return outputs
```
#### File: commands/utils/git_utils.py
```python
import os
import subprocess
from neptune.internal.common.utils.system import IS_WINDOWS
from neptune.generated.swagger_client import GitCommit, GitHistoryParams
def get_git_version():
try:
with open(os.devnull, 'w') as devnull:
return subprocess.check_output(['git', '--version'], stderr=devnull).decode("utf-8").strip()
except OSError:
return None
except BaseException:
return None
def get_git_info(experiment_ids):
if not get_git_version():
return None
if IS_WINDOWS and r'GIT_PYTHON_GIT_EXECUTABLE' not in os.environ:
os.environ[r'GIT_PYTHON_GIT_EXECUTABLE'] = os.popen("where git").read().strip()
import git # pylint:disable=wrong-import-position
repository_path = os.getcwd()
try:
repo = git.Repo(repository_path, search_parent_directories=True)
# TODO(mara): Get rid of NoSuchPathError. It is here only because some unittests
# feed this code with non-existing paths, which should not be possible in the normal case.
except (git.InvalidGitRepositoryError, git.NoSuchPathError):
return None
git_dir = git.Git(repository_path)
ids = git_dir.log('--pretty=%H').split('\n')
commits = [repo.rev_parse(c) for c in ids]
root_sha = commits[-1].hexsha
commit_ids = set()
commit_array = list()
for commit in commits:
if commit.hexsha in commit_ids:
continue
commit_ids.add(commit.hexsha)
commit_array.append(
GitCommit(
commit_id=commit.hexsha,
message=commit.message,
author_name=commit.author.name,
author_email=commit.author.email,
commit_date=commit.committed_datetime,
parents=[c.hexsha for c in commit.parents]))
return GitHistoryParams(
repo_id=root_sha,
current_commit_id=repo.head.commit.hexsha,
dirty=repo.is_dirty(),
commits=commit_array,
experiment_ids=experiment_ids
)
```
#### File: commands/utils/payments_utils.py
```python
from __future__ import print_function
from future.builtins import object
class PaymentsUtils(object):
def __init__(self,
config):
"""
:type config:
"""
self.config = config
def print_insufficient_funds(self):
frontend_url = u"{frontend_url}/?noFunds".format(
frontend_url=self.config.frontend_http_url
)
print(
u">\n" +
u"> Insufficient funds, follow:\n" +
u"> {frontend_url}\n>\n".format(frontend_url=frontend_url)
)
```
#### File: client_library/background_services/action_services.py
```python
from neptune.internal.client_library.background_services.service import Service
from neptune.internal.client_library.threads.action_executor import ActionExecutor
from neptune.internal.client_library.threads.action_invocation_receiver_thread import \
ActionInvocationReceiverThread
class ActionInvocationsService(Service):
def __init__(self, executor_service, websocket_factory):
super(ActionInvocationsService, self).__init__(u"ActionsService")
self._action_invocation_receiver_thread = ActionInvocationReceiverThread(
action_executor=executor_service,
websocket_factory=websocket_factory,
shutdown_event=self._shutdown_event)
self._done_event.set() # can be shut down in any moment
self._action_invocation_receiver_thread.start()
def await_termination(self):
self._action_invocation_receiver_thread.join()
self._done_event.wait()
class ActionsExecutorService(Service):
def __init__(self, experiment_id, job_actions, job_api_service):
super(ActionsExecutorService, self).__init__(u"ActionsService")
self._action_executor = ActionExecutor(
thread_pool_size=1,
experiment_id=experiment_id,
api_service=job_api_service,
actions_map=job_actions,
done_event=self._done_event,
shutdown_event=self._shutdown_event)
def execute_action(self, action_invocation):
self._action_executor.execute_action(action_invocation)
def await_termination(self):
self._done_event.wait()
```
#### File: client_library/background_services/service.py
```python
from future.builtins import object
import logging
import threading
class Service(object):
def __init__(self, name):
self._done_event = threading.Event()
self._shutdown_event = threading.Event()
self._name = name
self._logger = logging.getLogger(__name__)
@property
def name(self):
return self._name
@property
def done(self):
return self._done_event
def shutdown(self):
self._shutdown_event.set()
def await_termination(self):
raise NotImplementedError
```
#### File: client_library/job_development_api/action.py
```python
from future.builtins import object, str
import uuid
class Action(object):
"""
An Action is a registered function that can be invoked externally with a passed argument.
"""
def __init__(self, name, handler):
"""Registers a new action that calls handler with provided argument on invocation.
.. warning:: For internal use only.
Use :py:meth:`~neptune.Job.register_action` instead to register a new action.
:param name: Unique action name.
:param handler: A one-argument function that will be called with an action invocation.
Handler must take one unicode or str argument and return unicode or str
as the result.
:type name: unicode
"""
self.id = str(uuid.uuid4())
self.name = name
self.handler = handler
```
#### File: client_library/job_development_api/context_params.py
```python
from future.builtins import object
from neptune.internal.cli.exceptions.params_exceptions import ReadOnlyException
from neptune.internal.common.models.parameter_value_converter import ParameterValueConverter
class ContextParams(object):
"""
Parameters are a set of user-defined variables that will be passed to the job's program.
Job's parameters are defined in the configuration file.
Parameters' values can be passed to a job using command line parameters
when enqueuing or executing the job.
Each parameter has:
- name,
- description,
- type (string, integer, float, bool),
- optional default value,
- 'required' flag - defines whether a parameter is necessary for job execution.
Parameter values can be retrieved using two different notations:
the object notation and the dictionary notation.
Access with object notation::
print ctx.params.x
Access with dict-like notation::
print ctx.params['y']
"""
def __init__(self):
"""
Create a new instance of neptune.params.ContextParams
.. warning:: For internal use only.
Use :py:attr:`neptune.Context.params` instead to access and modify params.
"""
pass
def __getitem__(self, item):
return getattr(self, item)
def __setitem__(self, item, value, immutable=True):
if immutable:
raise ReadOnlyException()
return setattr(self, item, value, False)
def __setattr__(self, key, value, immutable=True):
if immutable:
raise ReadOnlyException()
super(ContextParams, self).__setattr__(key, value)
def __delitem__(self, key):
raise ReadOnlyException()
def __iter__(self):
return iter(self.__dict__)
def __len__(self):
return len(self.__dict__)
@classmethod
def create_from(cls, job_api_model):
job_parameters = cls()
for param in job_api_model.parameters:
job_parameters.__setattr__(param.name,
ParameterValueConverter(param).convert_value(param.value, param.parameter_type),
immutable=False)
return job_parameters
```
#### File: client_library/job_development_api/image.py
```python
from future import standard_library
standard_library.install_aliases()
# pylint: disable=wrong-import-position
from future.builtins import object
import base64
import io
import PIL.Image
from neptune.generated.swagger_client import InputImage
from neptune.internal.common.models.parameters_validation import (
of_type_validator,
text_conv,
validate
)
class Image(object):
"""
Represents information about images sent to image channels.
"""
@validate(name=text_conv, description=text_conv, data=of_type_validator(PIL.Image.Image))
def __init__(self, name, description, data):
"""
Creates a new Image.
:param name: Name of the image, displayed in the Channels tab on job's dashboard.
:param description: Description of the image displayed in the Channels tab
on job's dashboard.
:param data: Image data.
:type name: unicode
:type description: unicode
:type data: PIL.Image
"""
self._name = name
self._description = description
self._data = data
def to_input_image(self):
"""
Creates InputImage that can be sent to Neptune.
:return: input image in format appropriate to be sent to Neptune.
:rtype: InputImage
"""
image_buffer = io.BytesIO()
self.data.save(image_buffer, format='PNG')
contents = image_buffer.getvalue()
image_buffer.close()
input_image = InputImage()
input_image.name = self.name
input_image.description = self.description
input_image.data = base64.b64encode(contents).decode('utf-8')
return input_image
@property
def name(self):
"""
Gets name of this Image.
:return: The name of this Image.
:rtype: str
"""
return self._name
@property
def description(self):
"""
Gets description of this Image.
:return: The description of this Image.
:rtype: str
"""
return self._description
@property
def data(self):
"""
Gets data of this Image.
:return: The data of this Image.
:rtype: PIL.Image
"""
return self._data
```
#### File: internal/client_library/third_party_integration.py
```python
from __future__ import print_function
from future.builtins import object, str
import io
import logging
import os
import time
import uuid
from distutils.version import LooseVersion # pylint: disable=no-name-in-module, import-error
import PIL
from neptune.generated.swagger_client.models.tensorflow_graph import TensorflowGraph
from neptune.internal.client_library.job_development_api.channel_type import ChannelType
from neptune.internal.client_library.job_development_api.image import Image
from neptune.internal.common import NeptuneException
_LOGGER = logging.getLogger(__name__)
_integrated_with_keras = False
_tensorflow_integrator = None
class ThirdPartyIntegration(object):
# pylint:disable=global-statement
''' Object wrapper for the functions within this module. '''
def __init__(self, verbose=False):
self.verbose = verbose
def integrate_with_tensorflow(self, job):
return integrate_with_tensorflow(job)
def integrate_with_keras(self, job):
integrate_with_keras(job, self.verbose)
class TensorflowIntegrator(object):
def __init__(self, job, api_service):
self._job = job
self._api_service = api_service
self._channels = {}
self._summary_writer_to_graph_id = {}
def add_summary(self, summary_writer, summary, global_step=None):
from tensorflow.core.framework import summary_pb2 # pylint:disable=import-error,no-name-in-module
if isinstance(summary, bytes):
summ = summary_pb2.Summary()
summ.ParseFromString(summary)
summary = summ
x = self._calculate_x_value(global_step)
for value in summary.value:
field = value.WhichOneof('value')
if field == 'simple_value':
self._send_numeric_value(summary_writer, value.tag, x, value.simple_value)
elif field == 'image':
self._send_image(summary_writer, value.tag, x, value.image.encoded_image_string)
def add_graph_def(self, graph_def, logdir):
writer = self.get_writer_name(logdir)
if writer in list(self._summary_writer_to_graph_id.keys()):
graph_id = self._summary_writer_to_graph_id[writer]
graph = TensorflowGraph(graph_id, str(graph_def))
else:
graph_id = str(uuid.uuid4())
self._summary_writer_to_graph_id[writer] = graph_id
graph = TensorflowGraph(graph_id, str(graph_def))
self._api_service.put_tensorflow_graph(self._job.id, graph)
def _send_numeric_value(self, summary_writer, value_tag, x, simple_value):
channel = self._get_channel(summary_writer, value_tag, ChannelType.NUMERIC)
channel.send(x, simple_value)
def _send_image(self, summary_writer, image_tag, x, encoded_image):
channel = self._get_channel(summary_writer, image_tag, ChannelType.IMAGE)
pil_image = PIL.Image.open(io.BytesIO(encoded_image))
image_desc = "({}. Step {})".format(image_tag, x)
channel.send(x=x, y=Image(name=image_desc, description=image_desc, data=pil_image))
def _get_channel(self, summary_writer, value_tag, channel_type):
# pylint: disable=protected-access
writer_name = self.get_writer_name(summary_writer.get_logdir())
channel_name = '{}_{}'.format(writer_name, value_tag)
return self._job.create_channel(channel_name, channel_type)
@staticmethod
def get_writer_name(log_dir):
return os.path.basename(os.path.normpath(log_dir))
@staticmethod
def _calculate_x_value(global_step):
if global_step is not None:
return int(global_step)
else:
return time.time()
def _create_neptune_add_summary_wrapper(tensorflow_integrator, _add_summary_method):
def _neptune_add_summary(summary_writer, summary, global_step=None, *args, **kwargs):
tensorflow_integrator.add_summary(summary_writer, summary, global_step)
_add_summary_method(summary_writer, summary, global_step, *args, **kwargs)
return _neptune_add_summary
def integrate_with_tensorflow(job):
_LOGGER.info("==== Integrating Tensorflow with Neptune ====")
global _tensorflow_integrator # pylint:disable=global-statement
if _tensorflow_integrator:
return _tensorflow_integrator
print('neptune: Integrating with tensorflow...')
_tensorflow_integrator = _integrate_with_tensorflow(job)
return _tensorflow_integrator
def _integrate_with_tensorflow(job):
try:
import tensorflow
except ImportError:
raise NeptuneException('Requested integration with tensorflow while '
'tensorflow is not installed.')
# pylint: disable=no-member, protected-access, no-name-in-module, import-error
tensorflow_integrator = TensorflowIntegrator(job, job._api_service)
version = LooseVersion(tensorflow.__version__)
if LooseVersion('0.11.0') <= version < LooseVersion('0.12.0'):
_add_summary_method = tensorflow.train.SummaryWriter.add_summary
_add_graph_def_method = tensorflow.train.SummaryWriter._add_graph_def
tensorflow.train.SummaryWriter.add_summary = \
_create_neptune_add_summary_wrapper(tensorflow_integrator, _add_summary_method)
tensorflow.train.SummaryWriter._add_graph_def = \
_create_neptune_add_graph_def_wrapper(tensorflow_integrator, _add_graph_def_method)
elif (LooseVersion('0.12.0') <= version < LooseVersion('0.13.0')) or (
LooseVersion('1.0.0') <= version):
_add_summary_method = tensorflow.summary.FileWriter.add_summary
_add_graph_def_method = tensorflow.summary.FileWriter._add_graph_def
tensorflow.summary.FileWriter.add_summary = \
_create_neptune_add_summary_wrapper(tensorflow_integrator, _add_summary_method)
tensorflow.summary.FileWriter._add_graph_def = \
_create_neptune_add_graph_def_wrapper(tensorflow_integrator, _add_graph_def_method)
else:
raise NeptuneException("Tensorflow version {} is not supported.".format(version))
return tensorflow_integrator
def _create_neptune_add_graph_def_wrapper(tensorflow_integrator, _add_graph_def_method):
def _neptune_add_graph_def(summary_writer, graph_def, global_step=None, *args, **kwargs):
try:
from tensorflow.tensorboard.backend import process_graph # pylint:disable=import-error
except ImportError:
from tensorboard.backend import process_graph # pylint:disable=import-error
# Restricting graph only to UI relevant information.
process_graph.prepare_graph_for_ui(graph_def)
# pylint: disable=protected-access
tensorflow_integrator.add_graph_def(graph_def, summary_writer.get_logdir())
_add_graph_def_method(summary_writer, graph_def, global_step, *args, **kwargs)
return _neptune_add_graph_def
def integrate_with_keras(job, verbose=True):
global _integrated_with_keras # pylint:disable=global-statement
if _integrated_with_keras:
return
if verbose:
print('neptune: Integrating with keras...')
_integrate_with_keras(job, verbose=verbose)
_integrated_with_keras = True
if verbose:
print('neptune: Done.\n')
def _integrate_with_keras(job, verbose=True):
try:
import keras
except ImportError:
raise NeptuneException('Requested integration with keras while keras is not installed.')
from keras.callbacks import BaseLogger, Callback # pylint:disable=import-error
class NeptuneLogger(Callback):
def __init__(self, job, verbose):
super(NeptuneLogger, self).__init__()
self.job = job
self.verbose = verbose
def on_batch_end(self, batch, logs=None): # pylint:disable=unused-argument
if logs is None:
return
for metric, value in logs.items():
if metric in ('batch', 'size'):
continue
name = 'keras_on_batch_end_' + metric
self.job.create_channel(name, ChannelType.NUMERIC).send(value)
def on_epoch_end(self, epoch, logs=None): # pylint:disable=unused-argument
if logs is None:
return
for metric, value in logs.items():
if metric in ('epoch', 'size'):
continue
name = 'keras_on_epoch_end_' + metric
self.job.create_channel(name, ChannelType.NUMERIC).send(value)
class KerasAggregateCallback(Callback):
def __init__(self, *callbacks):
super(KerasAggregateCallback, self).__init__()
self.callbacks = callbacks
def set_params(self, params):
for callback in self.callbacks:
callback.params = params
def set_model(self, model):
for callback in self.callbacks:
callback.model = model
def on_epoch_begin(self, epoch, logs=None):
for callback in self.callbacks:
callback.on_epoch_begin(epoch, logs=logs)
def on_batch_end(self, batch, logs=None):
for callback in self.callbacks:
callback.on_batch_end(batch, logs=logs)
def on_epoch_end(self, epoch, logs=None):
for callback in self.callbacks:
callback.on_epoch_end(epoch, logs=logs)
def monkey_patched_BaseLogger(*args, **kwargs):
return KerasAggregateCallback(BaseLogger(*args, **kwargs), NeptuneLogger(job, verbose))
keras.callbacks.BaseLogger = monkey_patched_BaseLogger
```
#### File: client_library/threads/action_executor.py
```python
from future.builtins import object, str
import logging
import threading
import traceback
from multiprocessing.pool import ThreadPool
from neptune.internal.common.utils.str import to_unicode
class ActionExecutor(object):
def __init__(self, thread_pool_size, experiment_id, api_service, actions_map,
done_event, shutdown_event):
self._thread_pool = ThreadPool(thread_pool_size)
self._experiment_id = experiment_id
self._api_service = api_service
self._actions_map = actions_map
self._logger = logging.getLogger(__name__)
self._done_event = done_event
self._shutdown_event = shutdown_event
self._running_action_count = 0
self._running_actions_lock = threading.Lock()
done_event.set()
def execute_action(self, action_invocation):
def execute_action(action_invocation_info):
self._logger.debug("Invoking action: " + str(action_invocation_info))
try:
action_handler = self._actions_map[action_invocation_info.action_id].handler
result = action_handler(action_invocation_info.argument)
result = "" if result is None else to_unicode(result)
self._api_service.mark_action_invocation_succeeded(
self._experiment_id,
action_invocation_info.action_id,
action_invocation_info.action_invocation_id,
result)
except BaseException:
exception_traceback = "\n".join(to_unicode(traceback.format_exc()).splitlines())
self._api_service.mark_action_invocation_failed(
self._experiment_id,
action_invocation_info.action_id,
action_invocation_info.action_invocation_id,
exception_traceback)
finally:
self._execution_finished()
if self._shutdown_event.is_set():
self._logger.debug("Got action to invoke, but experiment is shutting down!")
else:
self._execution_started()
return self._thread_pool.apply_async(func=execute_action, args=(action_invocation,))
def _execution_finished(self):
self._running_actions_lock.acquire()
self._running_action_count -= 1
if self._running_action_count == 0:
self._done_event.set()
self._running_actions_lock.release()
def _execution_started(self):
self._running_actions_lock.acquire()
self._done_event.clear()
self._running_action_count += 1
self._running_actions_lock.release()
def allow_no_more_actions(self):
self._thread_pool.close()
def wait_for_running_actions(self):
self._thread_pool.join()
class ActionInvocationInfo(object):
def __init__(self, action_id, action_invocation_id, argument):
self.action_id = action_id
self.action_invocation_id = action_invocation_id
self.argument = argument
def __eq__(self, other):
if type(other) is type(self):
return self.__dict__ == other.__dict__
else:
return False
def __unicode__(self):
return u"action_id: {}, action_invocation_id: {}, argument: {}".format(
self.action_id, self.action_invocation_id, self.argument)
```
#### File: cli/processes/aborting.py
```python
from future.builtins import object
import logging
import psutil
class Aborting(object):
def __init__(self, pid):
self._pid = pid
self._logger = logging.getLogger(__name__)
def abort(self, kill_timeout=5):
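# Escalation sketch: first send SIGTERM to every process in the tree, wait up to
# kill_timeout seconds for them to exit, then SIGKILL whatever is still alive.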
process = None
try:
process = psutil.Process(self._pid)
except psutil.NoSuchProcess:
self._logger.debug('Received an abort message, but the job is already finished.')
if process is not None:
processes = self._get_processes(process)
for p in processes:
self._abort(p)
_, alive = psutil.wait_procs(processes, timeout=kill_timeout)
for p in alive:
self._kill(p)
@staticmethod
def _get_processes(process):
try:
return [process] + process.children(recursive=True)
except psutil.NoSuchProcess:
return []
def _abort(self, process):
try:
if process.is_running():
self._logger.debug("Sending SIGTERM to %s", process.pid)
process.terminate()
except psutil.NoSuchProcess:
self._logger.info("Process %s already finished...", process.pid)
def _kill(self, process):
for process in self._get_processes(process):
try:
if process.is_running():
self._logger.debug("Sending SIGKILL to %s", process.pid)
process.kill()
except psutil.NoSuchProcess:
self._logger.info("Process %s already finished...", process.pid)
```
#### File: cli/processes/job_spawner.py
```python
from __future__ import print_function
import collections
import logging
import os
import subprocess
import sys
from future.builtins import object, str
from future.utils import PY3
from neptune.internal.cli.commands.exceptions.enqueue_exceptions import NeptuneFailedToExecute
from neptune.internal.cli.processes import build_process_command, recognize_execution_command
from neptune.internal.cli.processes.aborting import Aborting
from neptune.internal.cli.threads.stream_redirecting_thread import (
ChannelHandler,
CroppingHandler,
FileHandler,
LogChannelHandler,
QueueHandler,
StreamRedirectingThread
)
from neptune.internal.common.utils.str import to_bytestring
from neptune.internal.common.utils.system import IS_WINDOWS
logger = logging.getLogger(__name__)
class JobSpawner(object):
def spawn(self,
command,
config=None,
cwd=None,
env=None,
memorized_stderr_line_count=100,
stdout_filepath=None,
stderr_filepath=None,
channel_factory=None,
redirect_output_to_console=True,
online=True):
cwd = cwd or os.getcwd()
env = env or os.environ
env = env.copy()
env['PYTHONIOENCODING'] = 'utf-8'
if 'PATH' not in env:
env['PATH'] = ''
if online:
env['NEPTUNE_ONLINE_CONTEXT'] = "yes"
if not PY3 and IS_WINDOWS:
# In Python 2.7, fetching non-string (non-bytes) env into subprocess.Popen results in
# `TypeError: environment can only contain strings.` error.
env = {to_bytestring(k): to_bytestring(v) for k, v in env.items()}
# FIXME: We should create these commands in unicode way before.
cmd = []
for arg in command:
if not isinstance(arg, str):
cmd.append(arg.decode('UTF-8'))
else:
cmd.append(arg)
else:
cmd = command
logger.debug("SPAWNING: %s", command)
try:
process = self._start_process(cmd=cmd, cwd=cwd, env=env)
except OSError as e:
raise NeptuneFailedToExecute(cmd, e)
stderr_buffer = collections.deque(maxlen=memorized_stderr_line_count)
stdout_handlers = []
stderr_handlers = [CroppingHandler(QueueHandler(stderr_buffer))]
if stdout_filepath:
try:
stdout_handlers.append(FileHandler(stdout_filepath))
except IOError:
print(u'Could not create {}'.format(stdout_filepath))
if stderr_filepath:
try:
stderr_handlers.append(FileHandler(stderr_filepath))
except IOError:
print(u'Could not create {}'.format(stderr_filepath))
if channel_factory and config:
log_channels = dict()
for log_channel in config.log_channels:
log_channels[log_channel.prefix] = channel_factory.create(log_channel.name)
if log_channels:
stdout_handlers.append(LogChannelHandler(log_channels))
stderr_handlers.append(LogChannelHandler(log_channels))
if config.stdout_channel:
stdout_handlers.append(ChannelHandler(channel_factory.get_or_create_stdout_channel()))
if config.stderr_channel:
stderr_handlers.append(ChannelHandler(channel_factory.get_or_create_stderr_channel()))
stdout_thread = StreamRedirectingThread(
input_stream=process.stdout,
handlers=stdout_handlers,
target_stream=sys.stdout if redirect_output_to_console else None)
stderr_thread = StreamRedirectingThread(
input_stream=process.stderr,
handlers=stderr_handlers,
target_stream=sys.stderr if redirect_output_to_console else None)
return RunningJob(process, stdout_thread, stderr_thread, stderr_buffer)
def _start_process(self, cmd, env, cwd):
logger.info('Starting process')
logger.info('Command: ' + str(cmd))
logger.info('OS environment variables starting with \'NEPTUNE\': ' +
'; '.join([env_name + ": " + str(env_value)
for env_name, env_value in env.items()
if env_name.startswith('NEPTUNE')]))
return subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
cwd=cwd,
bufsize=1)
@classmethod
def execute(cls, script, params=None, additional_env=None, **kwargs):
language = recognize_execution_command(script)
command = build_process_command(language, script, [] if params is None else params)
env = os.environ.copy()
if additional_env is not None:
env.update(additional_env)
experiment = cls().spawn(command=command, env=env, online=False, **kwargs)
return experiment.wait_for_finish()
class RunningJob(object):
def __init__(self,
process,
stdout_thread,
stderr_thread,
stderr_buffer):
self.process = process
self._stderr_buffer = stderr_buffer
self._stdout_thread = stdout_thread
self._stderr_thread = stderr_thread
self._stdout_thread.start()
self._stderr_thread.start()
def wait_for_finish(self):
"""
This method waits for experiment execution and for threads that are responsible for a realtime
output redirection from the experiment to sys.stdout and stdout/stderr files.
:return: Return code from the experiment process.
"""
while True:
try:
return_code = self.process.wait()
self._cleanup()
return return_code
except KeyboardInterrupt:
logger.debug("Received SIGINT. Killing subprocess.")
self.process.kill()
def abort(self):
Aborting(self.process.pid).abort()
self._cleanup()
def _cleanup(self):
# Don't interrupt stdout/stderr handlers here.
# They should terminate right after they finish
# processing buffered subprocess' output.
# Better just join and wait for them to terminate.
logger.debug("Waiting for stream redirecting threads to terminate.")
self._stdout_thread.join()
self._stderr_thread.join()
def memorized_stderr(self):
joined_stderr = '\n'.join(self._stderr_buffer)
return joined_stderr + '\n' if joined_stderr else joined_stderr
```
#### File: internal/cli/signal_handlers.py
```python
import signal
import sys
from neptune.internal.common.utils.system import IS_WINDOWS
_is_being_handled = False
ABORT_EXIT_CODE = 10
def setup_signal_handlers(command):
def sigint_handler(*_):
''' SIGINT handler that calls command.abort exactly one time. '''
global _is_being_handled # pylint:disable=global-statement
if not _is_being_handled:
_is_being_handled = True
command.abort()
sys.exit(ABORT_EXIT_CODE)
signal.signal(signal.SIGINT, sigint_handler)
if IS_WINDOWS:
signal.signal(signal.SIGBREAK, sigint_handler) # pylint:disable=no-member
def setup_subprocess_signal_handlers():
"""
Ignore SIGINT (SIGBREAK on Windows) to avoid subprocesses getting the SIGINT signal "at the same time" as the parent process.
Parent command signal handlers are responsible for finishing subprocesses.
"""
signal.signal(signal.SIGINT, signal.SIG_IGN)
if IS_WINDOWS:
signal.signal(signal.SIGBREAK, signal.SIG_IGN) # pylint:disable=no-member
```
#### File: api/neptune_api/handler.py
```python
import os
import requests
from neptune import version
from neptune.generated.analytics.swagger_client.api_client import ApiClient as AnalyticsApiClient
from neptune.generated.analytics.swagger_client.apis.analyticscontroller_api import AnalyticscontrollerApi
from neptune.generated.swagger_client.api_client import ApiClient
from neptune.generated.swagger_client.apis.default_api import DefaultApi
from neptune.internal.common.api.neptune_api.neptune_oauth2_session import NeptuneOAuth2Session
from neptune.internal.common.api.tokens import CompositeToken
from neptune.internal.common.api.utils import (
WithLoggedExceptions,
WithWrappedExceptions,
REQUESTS_TIMEOUT, WithWarningHandler)
def create_neptune_api_handler(base_api_handler):
handler = WithLoggedExceptions(
WithWrappedExceptions(
WithWarningHandler(base_api_handler)
),
skipped_error_codes={
'create_experiment': [400],
'update_experiment': [400],
'create_project': [400]
})
return handler
def create_requests_client(rest_api_url, offline_token_storage_service):
if 'http://' in rest_api_url:
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
token = offline_token_storage_service.load()
if token:
refresh_kwargs = {'client_id': token.refresh_token.client_name}
def save_raw_token(raw_json_token):
offline_token_storage_service.save(CompositeToken.from_json(raw_json_token))
return NeptuneOAuth2Session(
client_id=token.refresh_token.client_name,
token=token.raw_with_expires_at,
auto_refresh_url=token.refresh_token.refresh_url,
auto_refresh_kwargs=refresh_kwargs,
token_updater=save_raw_token
)
else:
return requests.Session()
def create_base_neptune_api_handler(rest_api_url, offline_token_storage_service):
requests_client = create_requests_client(
rest_api_url, offline_token_storage_service)
api_client = ApiClient(requests_client=requests_client,
host=rest_api_url,
headers={'X-Neptune-CliVersion': version.__version__},
timeout=REQUESTS_TIMEOUT)
return DefaultApi(api_client), requests_client
def create_base_neptune_api_handler_without_auth(rest_api_url):
return DefaultApi(ApiClient(requests_client=requests.Session(), host=rest_api_url))
def create_analytics_api_handler(rest_api_url, offline_token_storage_service):
requests_client = create_requests_client(
rest_api_url, offline_token_storage_service)
return AnalyticscontrollerApi(AnalyticsApiClient(requests_client=requests_client, host=rest_api_url))
```
#### File: common/config/validation_rules.py
```python
from neptune.generated.swagger_client.models import ParameterTypeEnum as PTE
from neptune.internal.cli import MLFramework
from neptune.internal.common.values import Tag
UNKNOWN_ML_FRAMEWORK = u"Invalid ml framework '{}'. Must be one of {}."
_PARAM_TYPE_MISMATCH = ("Invalid value '{value}' for parameter '{name}'. "
"Type declared in config is '{type}'.")
_INVALID_TYPE = "Invalid type '{type}' declared for parameter '{name}'."
def validate_parameters(value, rule_obj, path): # pylint:disable=unused-argument
parameters = value
if not isinstance(parameters, dict):
raise AssertionError(u"Invalid structure of the parameters' section in the config file.")
return True
def validate_tags(value, rule_obj, path): # pylint:disable=unused-argument
tags = value
for tag in tags:
try:
Tag.create_from(tag)
except ValueError as error:
raise AssertionError(str(error))
return True
def validate_ml_framework(value, rule_obj, path): # pylint:disable=unused-argument
legal_frameworks = MLFramework.__members__.values() # pylint:disable=no-member
framework = value
if framework not in legal_frameworks:
raise AssertionError(
UNKNOWN_ML_FRAMEWORK.format(framework, ', '.join(legal_frameworks)))
return True
def guess_swagger_type(value):
if isinstance(value, int):
return PTE.double
elif isinstance(value, float):
return PTE.double
elif isinstance(value, dict):
return PTE.double
elif isinstance(value, list):
return _guess_list_swagger_type(value)
else:
return PTE.string
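# Illustrative mapping (based on the branches above):
#   guess_swagger_type(3)        -> PTE.double
#   guess_swagger_type('epochs') -> PTE.string
#   guess_swagger_type([1, 2.5]) -> PTE.double (homogeneous numeric list)
#   guess_swagger_type([1, 'a']) -> PTE.string (mixed element types)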
def _guess_list_swagger_type(value):
value_types = set()
for v in value:
value_types.add(guess_swagger_type(v))
return _calculate_type(value_types)
def _calculate_type(value_types):
if value_types == {PTE.double}:
return PTE.double
return PTE.string
def _guess_dict_swagger_type(value):
if 'ranges' in value:
return PTE.double
def _is_valid(param_type, rule_obj):
# pylint:disable=protected-access
return param_type in rule_obj._schema_str['map']['type']['enum']
```
#### File: common/config/yaml_utils.py
```python
import yaml
from neptune.internal.common import NeptuneException
class SimpleYamlEditor(object):
def __init__(self):
self._content = ''
def set(self, key, value):
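# Intended behaviour (sketch): rewrite an existing 'key: ...' entry in place, or append
# 'key: value' when the key is not present yet. For example:
#   editor.set_content(u'name: old-project\n')
#   editor.set('name', 'new-project')  # content now reads 'name: new-project'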
start, end = self.find(key)
key_value_str = u'{key}: {value}'.format(key=key, value=value)
if start and end:
self._content = self.replace(start, end, key_value_str)
else:
self._content = self._content + u'\n' + key_value_str + u'\n'
try:
yaml.load(self._content)
except:
raise NeptuneException(u'Wrong yaml content')
def set_content(self, content):
self._content = content
def get_content(self):
return self._content
def read(self, path):
with open(path) as infile:
self._content = infile.read()
def write(self, path):
with open(path, 'w+') as outfile:
outfile.write(self._content)
def find(self, key):
start = None
for token in yaml.scan(self._content):
if isinstance(token, yaml.ScalarToken) and start:
end = (token.end_mark.line, token.end_mark.column)
return start, end
if isinstance(token, yaml.ScalarToken) and token.value == key:
start = (token.start_mark.line, token.start_mark.column)
return None, None
def replace(self, start_pos, end_pos, text):
ret = u''
start_line, start_column = start_pos
end_line, end_column = end_pos
for i, line in enumerate(self._content.splitlines()):
if i < start_line or i > end_line:
ret += line
ret += u'\n'
if i == start_line:
ret += line[0:start_column]
ret += text
if i == end_line:
ret += line[end_column:]
ret += u'\n'
return ret
```
#### File: common/models/key_value_property_param.py
```python
from future.builtins import object
from neptune.generated.swagger_client import KeyValueProperty
from neptune.internal.common.utils.str import to_bytestring, to_unicode
class KeyValuePropertyParam(object):
def __init__(self, key, value):
self.key = to_unicode(key)
self.value = to_unicode(value)
def to_swagger_key_value_property(self):
key_value_property = KeyValueProperty(self.key, self.value)
return key_value_property
def to_dict(self):
return {self.key: self.value}
def __repr__(self):
return u"KeyValuePropertyParam({}, {})".format(self.key, self.value)
def __unicode__(self):
return u"{}:{}".format(self.key, self.value)
def __str__(self):
return to_bytestring(self.__unicode__())
def __eq__(self, other):
return isinstance(other, type(self)) and other.key == self.key and other.value == self.value
```
#### File: common/models/rich_input_channel_values.py
```python
from future.builtins import str
from neptune.generated.swagger_client import InputChannelValues
class RichInputChannelValues(InputChannelValues):
# pylint:disable=super-init-not-called
def __init__(self, input_channel_values):
self.__dict__.update(vars(input_channel_values))
def __eq__(self, other):
if isinstance(other, RichInputChannelValues):
return (self.channel_id == other.channel_id) and (self.values == other.values)
else:
return NotImplemented
def __hash__(self):
return hash(str(self.channel_id) + str(self.values))
```
#### File: common/parsers/extended_argparse_parser.py
```python
from __future__ import print_function
import argparse
import sys
from neptune.internal.common.parsers.common_parameters_configurator import CommonParametersConfigurator
from neptune.internal.common.parsers.type_mapper import TypeMapper
class ExtendedArgparseParser(argparse.ArgumentParser):
def __init__(self, *args, **kwargs):
self._public_subcommands = kwargs.pop('public_subcommands', None)
super(ExtendedArgparseParser, self).__init__(*args, **kwargs)
self.register('type', bool, TypeMapper.to_bool)
def error(self, message):
print(u'Error: {}\n'.format(message))
self.print_help()
sys.exit(2)
def _check_value(self, action, value):
if action.choices is not None and value not in action.choices:
possibilities = self._public_subcommands or action.choices
raise argparse.ArgumentError(
action, 'invalid choice: {} (choose from [{}])'.format(value, ', '.join(possibilities)))
def format_help(self):
"""
This method is required, so that Global Options end up at the end of help.
Without it, Subcommands are placed after Global Options.
"""
global_options_list = [
ag for ag in self._action_groups if ag.title == CommonParametersConfigurator.GLOBAL_OPTIONS_GROUP]
rest = [ag for ag in self._action_groups if ag.title != CommonParametersConfigurator.GLOBAL_OPTIONS_GROUP]
self._action_groups = rest + global_options_list
return super(ExtendedArgparseParser, self).format_help()
```
#### File: common/parsers/tracked_parameter_validations.py
```python
import functools
import re
from collections import Counter
from past.builtins import basestring
from neptune.internal.common.parsers.tracked_parameter_parser import GridSearchRange, GridSearchArray
_LEADING_WHITESPACES_REGEX = re.compile(r'^\s+.*$')
_TRAILING_WHITESPACES_REGEX = re.compile(r'^.*\s+$')
class TrackedParameterValidationError(ValueError):
def __init__(self, *args, **kwargs):
super(TrackedParameterValidationError, self).__init__(*args, **kwargs)
def validate_tracked_parameters(tracked_parameters):
for tracked_parameter in tracked_parameters:
validate_tracked_parameter(tracked_parameter)
_check_display_names_are_not_duplicated(tracked_parameters)
return tracked_parameters
def validate_tracked_parameter(tracked_parameter):
_check_name_is_defined(tracked_parameter)
_check_name_has_no_leading_or_trailing_whitespaces(tracked_parameter)
_check_value_is_string_or_grid_search_value(tracked_parameter)
if isinstance(tracked_parameter.value, GridSearchRange):
_check_grid_search_range(tracked_parameter)
elif isinstance(tracked_parameter.value, GridSearchArray):
_check_grid_search_array(tracked_parameter)
return tracked_parameter
def _check_name_is_defined(tracked_parameter):
if not tracked_parameter.display_name():
raise TrackedParameterValidationError(
u'Parameter with value: "{}" does not have a name!'.format(str(tracked_parameter.value)))
def _check_name_has_no_leading_or_trailing_whitespaces(tracked_parameter):
if tracked_parameter.name is not None and _contains_leading_or_trailing_whitespaces(tracked_parameter.name):
raise TrackedParameterValidationError(
u'Parameter "{}" contains leading or trailing whitespaces!'.format(tracked_parameter.name))
if tracked_parameter.api_name is not None and _contains_leading_or_trailing_whitespaces(tracked_parameter.api_name):
raise TrackedParameterValidationError(
u'Parameter "{}" contains leading or trailing whitespaces!'.format(tracked_parameter.api_name))
def _contains_leading_or_trailing_whitespaces(s):
return _LEADING_WHITESPACES_REGEX.match(s) or _TRAILING_WHITESPACES_REGEX.match(s)
def _check_value_is_string_or_grid_search_value(tracked_parameter):
if tracked_parameter.value is None:
raise TrackedParameterValidationError(u'Parameter "{}" has no value!'.format(tracked_parameter.display_name()))
allowed_types = [basestring, GridSearchArray, GridSearchRange]
value_of_allowed_type = functools.reduce(
lambda x, y: x or y,
[isinstance(tracked_parameter.value, allowed_type) for allowed_type in allowed_types]
)
if not value_of_allowed_type:
raise TrackedParameterValidationError(
u'Parameter "{}" has invalid value type ({})!'. \
format(tracked_parameter.display_name(), type(tracked_parameter.value)))
def _check_grid_search_range(tracked_parameter):
if float(tracked_parameter.value.start) > float(tracked_parameter.value.end):
raise TrackedParameterValidationError(
u'Grid search parameter "{}" contains an invalid range (`from` > `to`)!'. \
format(tracked_parameter.display_name()))
if float(tracked_parameter.value.step) <= 0:
raise TrackedParameterValidationError(
u'Grid search parameter "{}" contains an invalid range (`step <= 0`)!'. \
format(tracked_parameter.display_name()))
def _check_grid_search_array(tracked_parameter):
if not tracked_parameter.value.values:
raise TrackedParameterValidationError(
u'Grid search parameter "{}" is empty!'.format(tracked_parameter.display_name())
)
def _check_display_names_are_not_duplicated(tracked_parameters):
display_names = [tracked_parameter.display_name() for tracked_parameter in tracked_parameters]
duplicated_display_names = [
u'"{}"'.format(display_name)
for display_name, occurence_count
in dict(Counter(display_names)).items()
if occurence_count > 1
]
if duplicated_display_names:
raise TrackedParameterValidationError(
u'Duplicated parameter names: {}!'.format(u', '.join(duplicated_display_names))
)
```
#### File: common/utils/data_utils.py
```python
from future.utils import iteritems
import re
def decamelize_keys(dct):
return update_dict_keys(dct, decamelize)
def update_dict_keys(obj, function):
if isinstance(obj, dict):
return {function(k): update_dict_keys(v, function) for k, v in iteritems(obj)}
else:
return obj
def decamelize(string, joiner='_'):
"""Convert a *CamelCase* `string` to a *lower_case* string.
- The underscores can be changed to a custom `joiner` string.
"""
def replace(match):
prefix = ( # Don't prepend the joiner to the beginning of string
'' if match.group(2) else joiner
)
caps = match.group(1).lower()
follower = match.group(3)
if not follower:
return prefix + caps
if len(caps) == 1:
return prefix + caps + follower
return prefix + caps[:-1] + joiner + caps[-1] + follower
return re.sub('((^[A-Z]+)|[A-Z]+)([a-z])?', replace, string)
```
#### File: common/utils/memory_units.py
```python
from __future__ import division
from collections import Mapping, Set, deque
from numbers import Number
import sys
from past.builtins import basestring
from past.utils import old_div
BYTES_IN_ONE_MB = 2 ** 20
BYTES_IN_ONE_GB = 2 ** 30
def bytes_to_megabytes(number):
return old_div(float(number), BYTES_IN_ONE_MB)
def get_object_size(obj):
# from http://stackoverflow.com/a/30316760
try: # Python 2
zero_depth_bases = (basestring, Number, xrange, bytearray)
iteritems = 'iteritems'
except NameError: # Python 3
zero_depth_bases = (str, bytes, Number, range, bytearray)
iteritems = 'items'
def getsize(obj_0):
"""Recursively iterate to sum size of object & members."""
def inner(obj, _seen_ids=None):
if _seen_ids is None:
_seen_ids = set()
obj_id = id(obj)
if obj_id in _seen_ids:
return 0
_seen_ids.add(obj_id)
size = sys.getsizeof(obj)
if isinstance(obj, zero_depth_bases):
pass # bypass remaining control flow and return
elif isinstance(obj, (tuple, list, Set, deque)):
size += sum(inner(i) for i in obj)
elif isinstance(obj, Mapping) or hasattr(obj, iteritems):
size += sum(inner(k) + inner(v) for k, v in getattr(obj, iteritems)())
# Check for custom object instances - may subclass above too
if hasattr(obj, '__dict__'):
size += inner(vars(obj))
if hasattr(obj, '__slots__'): # can have __slots__ with __dict__
size += sum(inner(getattr(obj, s)) for s in obj.__slots__ if hasattr(obj, s))
return size
return inner(obj_0)
return getsize(obj)
def human_readable(num_bytes, suffix='B'):
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num_bytes) < 1024.0:
return "%3.1f%s%s" % (num_bytes, unit, suffix)
num_bytes /= 1024.0
return "%.1f%s%s" % (num_bytes, 'Yi', suffix)
```
#### File: common/utils/version_utils.py
```python
import re
from neptune import server
def cli_major_version():
matched_major_version = re.search(r'(\d+\.\d+)\..*', server.__version__)
if matched_major_version:
return matched_major_version.group(1)
else:
raise Exception('Wrong CLI version {}'.format(server.__version__))
```
#### File: jiji-online/neptune-cli/setup.py
```python
import os
import sys
from setuptools import find_packages, setup
from setup_utils import migrate_tokens, migrate_to_profile
def main():
sys.path.append('neptune')
from version import __version__ # nopep8
root_dir = os.path.dirname(__file__)
with open(os.path.join(root_dir, "requirements.txt")) as f:
requirements = [r.strip() for r in f]
setup(
name='neptune-cli',
version=__version__,
description='Neptune client library',
author='deepsense.ai',
author_email='<EMAIL>',
url='https://neptune.ml/',
long_description="""\
Neptune client library
""",
license='Apache License 2.0',
install_requires=requirements,
packages=find_packages(
include=['neptune*', 'deepsense*'],
exclude=['neptune.generated.test']),
py_modules=['setup_utils'],
package_data={
'neptune.internal.cli.job_config': ['resources/*.yaml'],
'neptune.internal.common.api': ['resources/*.json'],
'neptune.internal.common.config': ['resources/*.ini', 'resources/*.yaml']
},
scripts=['scripts/neptune', 'scripts/neptune.bat']
)
migrate_tokens()
migrate_to_profile()
if __name__ == "__main__":
main()
``` |
{
"source": "JiJiU33C43I/UCI-Schedule-Assistant",
"score": 2
} |
#### File: App/src/course_data_decoder.py
```python
from course import Course
from derived_class import DerivedClass
#=======================================
#== GLOBAL CONSTANTS ==
#=======================================
PRIMARYCLASS_TYPE = ('ACT', 'COL', 'FLD', 'LEC', 'QIZ', 'RES', 'SEM', 'STU', 'TAP', 'TUT');
SECONDARYCLASS_TYPE = ('DIS', 'LAB');
DEBUGGING = False;
#=======================================
#== Source Code ==
#=======================================
class DecodeCourseError(Exception):
pass;
class CourseDecoder:
def __init__(self, quarter, course_data):
self.course_obj_list = [];
self.quarter = quarter;
self.generate_course_lists(course_data);
def __len__(self):
return len(self.course_obj_list);
def __iter__(self):
for course in self.course_obj_list:
yield course;
@staticmethod
def secondary_class_exists(derived_class_lst: list):
''' The algorithm of this function might seem weird and unreasonable at first glance.
1. If any "primary class" is detected, then a secondary class may exist -> the function returns True.
2. If all derived classes are "secondary classes", then those "secondary classes" are actually interpreted as the primary classes of this course,
so we can deduce that there is NOT ANY secondary class -> the function returns False.
'''
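# Illustrative inputs (only the 'Type' key is inspected here):
#   [{'Type': 'Lec'}, {'Type': 'Dis'}] -> True  (a LEC primary class exists)
#   [{'Type': 'Lab'}, {'Type': 'Lab'}] -> False (no primary class at all)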
for derived_class in derived_class_lst:
if derived_class["Type"].upper() in PRIMARYCLASS_TYPE:
return True;
return False;
@staticmethod
def decode_one_course(quarter, course:dict) -> Course:
'''
course = { 'coursename': 'xxx', 'formalname':'xxx', '_derived_classes': [dict] }
'''
current_course = Course(quarter, course['coursename'], course['formalname']);
current_primary_class = None;
if CourseDecoder.secondary_class_exists(course['_derived_classes']):
#print("secondary classes exist")
for derived_class in course['_derived_classes']:
if derived_class['Type'].upper() in PRIMARYCLASS_TYPE:
try:
current_primary_class = DerivedClass(current_course, **derived_class);
current_course.add(current_primary_class);
except Exception as E:
if DEBUGGING: raise E;
continue;
elif derived_class['Type'].upper() in SECONDARYCLASS_TYPE:
try:
current_secondary_class = DerivedClass(current_course, **derived_class);
current_primary_class.add(current_secondary_class);
except Exception as E:
if DEBUGGING: raise E;
continue;
else:
raise DecodeCourseError('Unrecognized Class Type : class with type {} in course {}??'.format(derived_class['Type'], current_course.name()));
else:
for derived_class in course['_derived_classes']:
current_course.add(DerivedClass(current_course, **derived_class));
return current_course;
def generate_course_lists(self, course_data):
if course_data != None:
for course in course_data:
current_course = CourseDecoder.decode_one_course(self.quarter, course);
self.course_obj_list.append(current_course);
def get_course_lists(self):
return self.course_obj_list;
#=======================================
#== DEBUGGING AND TESTING ==
#=======================================
import web_scrape_engine
if __name__ == '__main__':
user_input_dict = {"YearTerm":"2019-14", "Dept":"CHEM"}
# You Might change/alter/add to the ^user_input_dict^ for the purpose of further testing
engine = web_scrape_engine.web_scrape_engine(user_input_dict);
course_data = engine.extract_data();
CD = CourseDecoder(user_input_dict["YearTerm"], course_data);
print("\n\n",course_data, "\n\n")
print(f'---------------------- START DECODING ----------------------')
for course in CD:
print(course);
for primary_class in course:
print('\tprimary:', primary_class);
for secondary_class in primary_class:
print('\t\tsecondary:', secondary_class);
print(f'-------------------- DECODED {len(CD)} COURSES --------------------');
```
#### File: App/src/course.py
```python
class InvalidCourseException(Exception):
pass;
class InvalidOperandforCourse(Exception):
pass;
class Course:
def __init__(self, quarter, coursename, formalname = None):
self.set_coursename(coursename);
self.set_formalname(formalname);
self.set_quarter(quarter);
self._derived_classes = list();
def __str__(self):
name = self.name()
return f"{self.quarter()} {name[0]}: {name[1]}";
def __iter__(self):
for classes in self._derived_classes:
yield classes;
def __getitem__(self, item:int):
if type(item) != int:
raise KeyError("'Course' object can only accept integer indexing")
else:
index = 0;
for classes in self:
if index == item:
return classes;
else:
index+= 1;
return None;
def __len__(self):
return len(self._derived_classes);
def __eq__(self, right):
if type(right) != Course:
raise InvalidOperandforCourse("== operators only works when both sides are of type 'Course'")
else:
if self.name() == right.name() and len(self) == len(right):
index = 0;
for i in range(len(self)):
if self[i] != right[i]:
return False
i += 1;
return True;
else:
return False
def __ne__(self, right):
return not self.__eq__(right)
def add(self, value):
if not isinstance(value, Course):
raise InvalidCourseException(f"{type(self)}.add(self, {value}): \
ONLY Course instance objects can be added to the derived class list; \
argument = {value}");
self._derived_classes.append(value);
def set_coursename(self, coursename:str):
self._coursename = (''.join(coursename.split()));
def set_formalname(self, formalname:str):
self._formalname = formalname;
def name(self):
return (self._coursename, self._formalname);
def set_quarter(self, quarter:str):
self._quarter = quarter;
def quarter(self):
return self._quarter;
```
#### File: src/gui_src/Pages.py
```python
import tkinter as tk
import GuiWidgets as W;
import pathlib
from os import sys, path
sys.path.append(path.dirname(path.dirname(__file__)))
import smtp_engine
import web_scrape_engine
import update_course_data
import course_data_decoder
#=======================================
#== GLOBAL CONSTANTS ==
#=======================================
DEBUGGING = True;
CURR_WORKING_DIR = pathlib.Path.cwd();
def get_current_os():
operating_systems = {'linux1':'Linux', 'linux2':'Linux', 'darwin':'OS X', 'win32':'Windows'}
if sys.platform not in operating_systems:
return sys.platform;
else:
return operating_systems[sys.platform];
CURR_OPERATING_SYSTEM = get_current_os();
#=======================================
#== Source Code ==
#=======================================
class Pages:
class PageHasInvalidSize(Exception):
pass;
class SendMailMustReceiveTextAsArguments(Exception):
pass;
class SearchFailure(Exception):
pass;
ALL_STICK = tk.N + tk.S + tk.W + tk.E;
def __init__(self, MainFrame, account_name, account_password):
self.MainFrame = MainFrame;
w,h = self.page_size();
self.PageFrame = W.Frame(self.MainFrame, w, h);
self.PageFrame.pack();
self.account_name = account_name;
self.account_password = <PASSWORD>;
self._switch_page = False;
def send_email(self, acc_name, acc_pw, *args):
email_engine = smtp_engine.Email_Engine();
email_engine.setup_tls_connection();
email_engine.login((acc_name,acc_pw));
for msg in args:
if type(msg) != str:
raise self.SendMailMustReceiveTextAsArguments("SendMail Receive Non-Str Arguments: {} whose type is {}".format(msg, type(msg)));
else:
email_engine.sendmail(acc_name, acc_name, msg);
email_engine.__del__();
def scrape_course_data(self, term_curr = "", dept_curr = "", code_curr = ""):
user_input_dict = {"YearTerm":self.search_fields["YearTerm"][term_curr],
"Dept":self.search_fields["Dept"][dept_curr],
"CourseCodes":code_curr};
try:
engine = web_scrape_engine.web_scrape_engine(user_input_dict);
course_data = engine.extract_data();
#print(course_data)
if course_data == None:
raise self.SearchFailure("course_data = None");
return course_data_decoder.CourseDecoder(term_curr, course_data);
except:
raise self.SearchFailure("fatal error during search");
def switch(self):
return self._switch_page;
def page_size(self) -> tuple:
return ( self.MainFrame.cget("width") , self.MainFrame.cget("height") );
def get_account_info(self):
return (self.account_name, self.account_password);
def update(self):
self.PageFrame.update();
def destroy(self):
self.PageFrame.destroy();
```
#### File: App/src/scrape_search_fields.py
```python
import urllib.request
from bs4 import BeautifulSoup
from collections import defaultdict
import ssl
#=======================================
#== GLOBAL CONSTANTS ==
#=======================================
SCHEDULE_OF_CLASSES_URL = "https://www.reg.uci.edu/perl/WebSoc/";
SEARCH_FIELDS = ("YearTerm","Dept");
#=======================================
#== Source Code ==
#=======================================
class InvalidSelectTag(Exception):
pass;
class scrape_fields:
default_url = SCHEDULE_OF_CLASSES_URL;
default_search_fields = SEARCH_FIELDS;
def __init__(self, url = default_url, search_fields = default_search_fields):
self._url = url;
self._search_fields = search_fields;
self._field_option_dict = defaultdict(dict);
self._ssl_context = ssl.SSLContext();
def start_to_scrape(self):
HTTP_response = urllib.request.urlopen(self._url, context = self._ssl_context);
schedule_search_soup = BeautifulSoup(HTTP_response.read(), "html.parser");
for search_field in self._search_fields:
select_tag_lst = schedule_search_soup.find_all("select", {"name":search_field});
if ( len(select_tag_lst) == 1 ):
for option in select_tag_lst[0].find_all("option"):
self._field_option_dict[search_field][option.text] = option['value'];
def get_fields_dict(self):
return self._field_option_dict;
#=======================================
#== DEBUGGING AND TESTING ==
#=======================================
if __name__ == "__main__":
scrape_search_engine = scrape_fields();
scrape_search_engine.start_to_scrape();
for k,d in scrape_search_engine.get_fields_dict().items():
print(k);
for a,b in d.items():
print("{} : {}".format(a,b))
print();
``` |
{
"source": "jijkoun/sudoku",
"score": 3
} |
#### File: sudoku/tests/test_image.py
```python
import numpy as np
import image
def test_read_image():
im, a = image.read_image('data/sudoku1.jpg')
assert (im.width, im.height) == (298, 298)
assert a.shape == (298, 298)
def test_values_around():
a = np.array([
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
assert image.values_around(a, 1, 1) == [1, 2, 3, 4, 6, 7, 8, 9]
assert image.values_around(a, 0, 2) == [2, 5, 6]
def test_shrink():
c = np.array([
[True, True, True, True, True],
[True, True, True, True, True],
[True, True, False, True, True],
[True, True, False, True, True],
[True, True, True, True, True],
])
expected = np.array([
[False],
[False],
])
shrunk, position = image.shrink(c, (0, 0))
print(shrunk)
assert np.array_equal(shrunk, expected)
assert position == (2, 2)
``` |
{
"source": "jijo-paulose/django-profile",
"score": 2
} |
#### File: userprofile/templatetags/avatars.original.py
```python
from django.template import Library, Node, Template, TemplateSyntaxError, \
Variable
from django.template.defaultfilters import slugify
from django.utils.translation import ugettext as u_
from django.contrib.auth.models import User
from django.conf import settings
from userprofile import profile_settings as _settings
from userprofile.models import Profile
# from PythonMagick import Image
from utils.TuxieMagick import Image
from os import path, makedirs
from shutil import copy
register = Library()
class ResizedThumbnailNode(Node):
def __init__(self, size, username=None):
try:
self.size = int(size)
except:
self.size = Variable(size)
self.user = username
def get_user(self, context):
# If there's a username, go get it! Otherwise get the current.
if self.user:
try:
user = User.objects.get(username=self.user)
except:
user = Variable(self.user).resolve(context)
else:
user = Variable('user').resolve(context)
return user
def size_equals(self, file=None):
if not file:
return self.size == _settings.DEFAULT_AVATAR_WIDTH
else:
return self.size == Image(file).size().width()
def get_profile(self):
# Maybe django-profile it's not set as AUTH_PROFILE_MODULE
try:
profile = self.user.get_profile()
except Exception, e:
print e
if self.user.is_authenticated():
profile = Profile.objects.get(user=self.user)
else:
print "There is no user to get it's avatars for."
return ''
return profile
def get_file(self, profile=None):
# For compatibility with the official django-profile model I check
# whether it's a path or just a filename.
# In my opinion, only the file name should be saved in the database,
# and all files should be stored in a standard directory:
# settings.AVATAR_DIRS[int]/str(User)/settings_DEFAULT_AVATAR_WIDTH/
default = False
try:
file_root = path.join(settings.MEDIA_ROOT,
profile.avatar[:profile.avatar.rindex('/')+1])
file_name = profile.avatar[profile.avatar.rindex('/')+1:]
except:
file_root = _settings.AVATARS_DIR
if profile is not None and profile.avatar:
file_root = path.join(file_root, self.size)
file_name = profile.avatar
else:
file_name = _settings.DEFAULT_AVATAR
default = True
return (file_root, file_name, default)
def as_url(self, path):
try:
return path.replace(settings.MEDIA_ROOT, settings.MEDIA_URL)
except:
return ''
def render(self, context):
try:
# If size is not an int, then it's a Variable, so try to resolve it.
if not isinstance(self.size, int):
self.size = int(self.size.resolve(context))
self.user = self.get_user(context)
except Exception, e:
print e
return '' # just die...
if self.size > _settings.DEFAULT_AVATAR_WIDTH:
return '' # unacceptable
profile = self.get_profile()
if not profile:
return ''
# Avatar's heaven, where all the avatars go.
avatars_root = path.join(_settings.AVATARS_DIR,
slugify(self.user.username))
file_root, file_name, defaulting = self.get_file(profile)
if defaulting:
file_root = _settings.AVATARS_DIR
if self.size_equals():
return self.as_url(path.join(file_root, file_name))
file_path = path.join(file_root, file_name)
# I don't return the default because I have to resize it.
if not defaulting:
if path.exists(file_path) and self.size_equals(file_path):
return self.as_url(file_path)
else:
if not profile.avatar:
file_root = _settings.AVATARS_DIR
file_path = path.join(file_root, _settings.DEFAULT_AVATAR)
# Oops, I didn't find it, let's try to generate it.
if path.exists(file_path):
orig_file = Image(file_path)
dest_root = path.join(avatars_root, str(self.size))
try:
makedirs(dest_root)
except Exception, e:
print e
# Save the new path for later...
dest_path = path.join(dest_root, file_name)
else:
# Did my best...
return '' # fail silently
orig_file.scale(self.size)
if orig_file.write(dest_path):
return self.as_url(dest_path)
else:
print '=== ERROR ==='
return '' # damn! Close but no cigar...
@register.tag('avatar')
def Thumbnail(parser, token):
bits = token.contents.split()
username = None
if len(bits) > 3:
raise TemplateSyntaxError, u_(u"You have to provide only the size as \
an integer (both sides will be equal) and optionally, the \
username.")
elif len(bits) == 3:
username = bits[2]
elif len(bits) < 2:
bits.append(_settings.DEFAULT_AVATAR_WIDTH)
return ResizedThumbnailNode(bits[1], username)
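# Template usage sketch (an assumption: the tag library is loaded under the name
# "avatars"; the actual load name depends on how this file is installed):
#
#   {% load avatars %}
#   {% avatar 96 %}               -> current user's avatar resized to 96px
#   {% avatar 96 some_username %} -> that user's avatar resized to 96px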
``` |
{
"source": "jijoy/cuteparty-registrar",
"score": 2
} |
#### File: jijoy/cuteparty-registrar/cuteparty-registrar.py
```python
import os
import json
from threading import Thread
import time
from time import sleep
from flask import Flask, json, render_template, request
import redis
from collections import OrderedDict
from Queue import Queue
app = Flask(__name__)
port = int(os.getenv("PORT"))
vcap = json.loads(os.environ['VCAP_SERVICES'])
svc = vcap['rediscloud'][0]['credentials']
db = redis.StrictRedis(host=svc["hostname"], port=svc["port"], password=svc["password"],db=0)
@app.route('/update',methods=['POST'])
def update():
"""
This is the entry point for updating the aggregator info.
Each of the individual apps will call this endpoint with their latest info.
"""
appname = request.form['applicationname']
appdetails = request.form['appinfo']
obj = json.loads(appdetails)
if appname and obj:
db.hset('applications', appname, appdetails)
return json.dumps({'message':'success'})
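# Usage sketch (hypothetical host and payload): an individual app could report its
# latest state to this endpoint with the `requests` library, for example:
#
#   import requests
#   requests.post('http://<registrar-host>/update', data={
#       'applicationname': 'my-app',
#       'appinfo': json.dumps({'instance-0': {'state': 'RUNNING'}}),
#   })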
@app.route('/applicationsdetails')
def applicationsdetails():
"""
This is the endpoint for providing all info about the applications.
It is an internal method of the registrar through which index.html loads all info.
"""
appdicts = db.hgetall('applications')
finaldict = OrderedDict()
for appname in sorted(appdicts):
instances = json.loads(appdicts.get(appname))
finaldict[appname] = instances
return render_template('robots.html', appdicts=finaldict)
@app.route('/')
def index():
"""
Main entry point
"""
return render_template('index.html')
if __name__ == "__main__":
app.run(host='0.0.0.0', port=port, debug=True)
``` |
{
"source": "jijyisme/ner-cp-cu",
"score": 3
} |
#### File: jijyisme/ner-cp-cu/metric.py
```python
from collections import OrderedDict
import numpy as np
from sklearn import metrics
import sklearn.metrics
from NER import constant
import pandas as pd
def custom_metric(y_true, y_pred):
"""Calculate score with custom metric"""
# Find score on each metric
scores = OrderedDict(sorted({
"f1_micro": 0.0,
"f1_macro": 0.0,
"precision": 0.0,
"recall": 0.0
}.items()))
scores['f1_micro'] = sklearn.metrics.f1_score(
y_pred=y_pred, y_true=y_true, average='micro')
scores['f1_macro'] = sklearn.metrics.f1_score(
y_pred=y_pred, y_true=y_true, average='macro')
scores["precision"] = sklearn.metrics.precision_score(
y_pred=y_pred, y_true=y_true, average='macro')
scores["recall"] = sklearn.metrics.recall_score(
y_pred=y_pred, y_true=y_true, average='macro')
result = (y_true == y_pred)
p = y_pred
t = y_true
r = result
result_table = pd.DataFrame(data = {
'predict' : p,
'true' : t,
'result' : r
}
)
most_incorrect_prediction_lable = result_table[result_table['result']==False]['predict'].value_counts()
count_label = result_table['predict'].value_counts()
print('++++++++++++++++++++++detail+++++++++++++++++++++')
for index in most_incorrect_prediction_lable.index:
print(index,'\t',
most_incorrect_prediction_lable[index]/count_label[index],'\t',
most_incorrect_prediction_lable[index],'\t',
count_label[index],'\t',
constant.TAG_LIST[index-2],'\t')
return scores
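# Minimal usage sketch (hypothetical integer-encoded tag ids; assumes y_true and
# y_pred are 1-D numpy arrays of equal length):
#
#   y_true = np.array([2, 3, 3, 5])
#   y_pred = np.array([2, 3, 4, 5])
#   scores = custom_metric(y_true, y_pred)
#   print(scores['f1_micro'], scores['precision'])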
``` |
{
"source": "jikamens/PenguinDome",
"score": 2
} |
#### File: PenguinDome/server/sign.py
```python
import argparse
import os
import shutil
import subprocess
import sys
from penguindome import (
top_dir,
set_gpg,
release_files_iter,
signatures_dir,
verify_signature,
)
from penguindome.server import get_logger, sign_file
os.chdir(top_dir)
log = get_logger('sign')
def parse_args():
parser = argparse.ArgumentParser(description='Generate digital signatures '
'for client files')
parser.add_argument('--full', action='store_true', help='Regenerate all '
'signatures rather than only invalid ones')
args = parser.parse_args()
return args
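# Invocation sketch (assumes running from the PenguinDome top directory, which is
# what the bin/sign wrapper referenced below normally does):
#
#   python server/sign.py          # re-sign only files with missing/invalid signatures
#   python server/sign.py --full   # regenerate every signature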
def main():
args = parse_args()
old_signatures = set()
if args.full:
log.info('Regenerating all signatures')
shutil.rmtree(signatures_dir, ignore_errors=True)
elif os.path.exists(signatures_dir):
for dirpath, dirnames, filenames in os.walk(signatures_dir):
for f in filenames:
old_signatures.add(os.path.join(dirpath, f))
for file in release_files_iter():
set_gpg('client')
signature = verify_signature(file)
if signature:
log.debug('Preserving valid signature for {}', file)
else:
set_gpg('server')
log.info('Signing {}', file)
signature = sign_file(file)
old_signatures.discard(signature)
for file in old_signatures:
log.info('Removing obsolete signature {}', file)
os.unlink(file)
try:
subprocess.check_output(
('python', os.path.join('client', 'verify.py')),
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
sys.exit('Verify failed, try running bin/sign again. Output:\n{}'.
format(e.output.decode('utf8')))
if __name__ == '__main__':
main()
``` |
{
"source": "JikanDev/jikanvision",
"score": 3
} |
#### File: jikanvision/jikanvision/FaceMeshModule.py
```python
import cv2
import mediapipe as mp
class FaceMeshDetector():
"""
Find 468 Landmarks using the mediapipe library. Exports the landmarks in pixel format.
"""
def __init__(self, mode=False, maxFaces=1, refine_lm=False, minDetectCon=0.5, minTrackCon=0.5):
"""
:param mode: In static mode, detection is done on each image: slower.
:param maxFaces: Maximum number of faces to detect.
:param refine_lm: Whether to further refine the landmark coordinates
around the eyes and lips, and output additional landmarks around the
irises.
:param minDetectCon: Minimum Detection Confidence Threshold.
:param minTrackCon: Minimum Tracking Confidence Threshold.
"""
self.mode = mode
self.maxFaces = maxFaces
self.refine_lm = refine_lm
self.minDetectCon = minDetectCon
self.minTrackCon = minTrackCon
self.mpDraw = mp.solutions.drawing_utils
self.mpDrawingStyles = mp.solutions.drawing_styles
self.faceMesh = mp.solutions.face_mesh
self.meshDetection = self.faceMesh.FaceMesh(mode, maxFaces, refine_lm, minDetectCon, minTrackCon)
def findFaces(self, img, draw=True, drawTesselation=True):
"""
Find faces in an image and return the bbox info
:param img: Image to find the faces in.
:param draw: Flag to draw the output contours of the mesh on the image.
:param drawTesselation: Flag to draw the output tesselation of the mesh on the image.
:return: Image with or without drawings.
"""
imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
self.results = self.meshDetection.process(imgRGB)
allFaces = []
h, w, c = img.shape
if self.results.multi_face_landmarks:
for faceLms in self.results.multi_face_landmarks:
myMesh = {}
mylmList = []
for id, lm in enumerate(faceLms.landmark):
px, py = int(lm.x * w), int(lm.y * h)
mylmList.append([px, py])
myMesh["lmList"] = mylmList
if draw:
self.mpDraw.draw_landmarks(img, faceLms, self.faceMesh.FACEMESH_CONTOURS, None)
if drawTesselation:
self.mpDraw.draw_landmarks(img, faceLms, self.faceMesh.FACEMESH_TESSELATION, None,
self.mpDrawingStyles.get_default_face_mesh_tesselation_style())
allFaces.append(myMesh)
return allFaces, img
def main():
"""
Example code to use the module.
"""
cap = cv2.VideoCapture(0) # Get your camera
detector = FaceMeshDetector() # Call the FaceMeshDetector class
while True:
success, img = cap.read() # If success, img = read your camera image
meshes, img = detector.findFaces(img) # meshes & img call the findFaces() function of FaceMeshDetector
if meshes:
# Mesh 1
mesh1 = meshes[0]
lmList1 = mesh1["lmList"] # List of 21 Landmark points
if len(meshes) == 2:
# Mesh 2
mesh2 = meshes[1]
lmList2 = mesh2["lmList"] # List of 21 Landmark points
cv2.imshow("Face Mesh Module", img)
cv2.waitKey(1)
if __name__ == "__main__":
main()
```
#### File: jikanvision/jikanvision/HandsTrackingModule.py
```python
import cv2
import mediapipe as mp
class HandDetector:
"""
Finds Hands using the mediapipe library. Exports the landmarks in pixel format.
Also provides bounding box info of the hand found.
"""
def __init__(self, mode=False, maxHands=2, minDetectCon=0.5, minTrackCon=0.5):
"""
:param mode: In static mode, detection is done on each image: slower.
:param maxHands: Maximum number of hands to detect.
:param minDetectCon: Minimum Detection Confidence Threshold.
:param minTrackCon: Minimum Tracking Confidence Threshold.
"""
self.mode = mode
self.maxHands = maxHands
self.minDetectCon = minDetectCon
self.minTrackCon = minTrackCon
self.mpHands = mp.solutions.hands
self.hands = self.mpHands.Hands(self.mode, self.maxHands, self.minDetectCon, self.minTrackCon)
self.mpDraw = mp.solutions.drawing_utils
self.lmList = []
def findHands(self, img, draw=True, drawBboxs=True, camFlip=True):
"""
Finds hands in a BGR image.
:param img: Image to find the hands in.
:param draw: Flag to draw the output on the image.
:param drawBboxs: Flag to draw bboxs on the draw output.
:param camFlip: Flag to know if your camera flip your image.
:return: Image with or without drawings.
"""
imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
self.results = self.hands.process(imgRGB)
allHands = []
h, w, c = img.shape
if self.results.multi_hand_landmarks:
for handType, handLms in zip(self.results.multi_handedness, self.results.multi_hand_landmarks):
myHand = {}
mylmList = []
xList = []
yList = []
for id, lm in enumerate(handLms.landmark):
px, py = int(lm.x * w), int(lm.y * h)
mylmList.append([px, py])
xList.append(px)
yList.append(py)
xmin, xmax = min(xList), max(xList)
ymin, ymax = min(yList), max(yList)
boxW, boxH = xmax - xmin, ymax - ymin
bbox = xmin, ymin, boxW, boxH
cx, cy = bbox[0] + (bbox[2] // 2), bbox[1] + (bbox[3] // 2)
myHand["lmList"] = mylmList
myHand["bbox"] = bbox
myHand["center"] = (cx, cy)
if camFlip:
if handType.classification[0].label == "Right":
myHand["type"] = "Left"
else:
myHand["type"] = "Right"
else:
myHand["type"] = handType.classification[0].label
allHands.append(myHand)
if draw:
self.mpDraw.draw_landmarks(img, handLms, self.mpHands.HAND_CONNECTIONS)
if drawBboxs:
cv2.rectangle(img, (bbox[0] - 20, bbox[1] - 20), (bbox[0] + bbox[2] + 20, bbox[1] + bbox[3] + 20), (255, 255, 255), 2)
cv2.putText(img, myHand["type"], (bbox[0] - 30, bbox[1] - 30), cv2.FONT_HERSHEY_PLAIN, 2, (255, 255, 255), 2)
return allHands, img
def main():
"""
Example code to use the module.
"""
cap = cv2.VideoCapture(0) # Get your camera
detector = HandDetector() # Call the HandDetector class
while True:
success, img = cap.read() # If success, img = read your camera image
hands, img = detector.findHands(img) # hands & img call the findHands() function of HandDetector
if hands:
# Hand 1
hand1 = hands[0]
lmList1 = hand1["lmList"] # List of 21 Landmark points
bbox1 = hand1["bbox"] # Bounding box info x,y,w,h
centerPoint1 = hand1['center'] # Center of the hand cx,cy
handType1 = hand1["type"] # Hand Type "Left" or "Right"
if len(hands) == 2:
# Hand 2
hand2 = hands[1]
lmList2 = hand2["lmList"] # List of 21 Landmark points
bbox2 = hand2["bbox"] # Bounding box info x,y,w,h
centerPoint2 = hand2['center'] # Center of the hand cx,cy
handType2 = hand2["type"] # Hand Type "Left" or "Right"
cv2.imshow("Hands Tracking Module", img)
cv2.waitKey(1)
if __name__ == "__main__":
main()
``` |
{
"source": "jikdo/ethplorer",
"score": 2
} |
#### File: ethplorer/indexer/tasks.py
```python
from datetime import datetime
import time
import traceback
from django.utils import timezone
from celery import shared_task
from celery.utils.log import get_task_logger
from web3 import Web3
from web3.exceptions import BlockNotFound
from django.conf import settings
from .utils import (
get_account_type,
create_account,
)
from .models import (
Block, Transaction,
Account
)
logger = get_task_logger(__name__)
@shared_task
def index_blockchain(start_block_id):
logger.info('lets work')
# initialize web3
provider = Web3.HTTPProvider(settings.WEB3_PROVIDER)
w3 = Web3(provider)
# fetch blockchain data
start_block = start_block_id
current_block_number = w3.eth.block_number
if (start_block > current_block_number):
logger.info(
f'Error: start_block must be <= current block height - {current_block_number}')
return 'Invalid block number'
while(True):
try:
logger.info(f'something is happening: block {start_block}')
block_response = w3.eth.get_block(
start_block, full_transactions=True)
miner = Account.objects.create(
hash=block_response.miner,
account_type=get_account_type(block_response.miner, w3)
)
def get_parent_block(number):
try:
Block.objects.get(number=number)
except Block.DoesNotExist:
return None
block = Block.objects.create(
hash=Web3.toHex(block_response.hash),
parent_block=None if block_response.number == 0 else get_parent_block(
block_response.number - 1),
difficulty=block_response.difficulty,
extra_data=block_response.extraData,
gas_limit=block_response.gasLimit,
gas_used=block_response.gasUsed,
logs_bloom=block_response.logsBloom,
miner=miner,
nonce=Web3.toHex(block_response.nonce),
number=block_response.number,
receipt_root=Web3.toHex(block_response.receiptsRoot),
sha3_uncles=Web3.toHex(block_response.sha3Uncles),
size=block_response.size,
state_root=Web3.toHex(block_response.stateRoot),
timestamp=timezone.make_aware(
datetime.fromtimestamp(block_response.timestamp)),
total_difficulty=block_response.totalDifficulty,
transactions_root=Web3.toHex(block_response.transactionsRoot),
transaction_count=w3.eth.get_block_transaction_count(
block_response.number)
)
# save transactions
for tx in block_response.transactions:
Transaction.objects.create(
block=block,
from_address=create_account(tx['from'], w3),
gas=tx.gas,
gas_price=tx.gasPrice,
hash=Web3.toHex(tx.hash),
input=tx.input,
# max_fee_per_gas=tx.maxFeePerGas,
# max_priority_fee_per_gas=tx.maxPriorityFeePerGas,
nonce=tx.nonce,
to_address=create_account(tx.to, w3),
transaction_index=tx.transactionIndex,
value=tx.value,
)
# go to next block
start_block += 1
except BlockNotFound:
print('Block not found')
# wait for new block (blocktime for ethereum is around 13s )
print('Waiting for next block ..')
time.sleep(20)
except Exception as e:
traceback.print_exc()
break
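# Usage sketch (assumes a Celery worker is running and WEB3_PROVIDER is configured
# in Django settings): start indexing asynchronously from the genesis block with
#
#   index_blockchain.delay(0)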
``` |
{
"source": "jike-algorithm-zhangxiao/DCL",
"score": 2
} |
#### File: jike-algorithm-zhangxiao/DCL/train.py
```python
import os
import datetime
import argparse
import logging
import pandas as pd
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
import torch.utils.data as torchdata
from torchvision import datasets, models
import torch.optim as optim
from torch.optim import lr_scheduler
import torch.backends.cudnn as cudnn
from transforms import transforms
from utils.train_model import train
from models.LoadModel import MainModel
from config import LoadConfig, load_data_transformers
from utils.dataset_DCL import collate_fn4train, collate_fn4val, collate_fn4test, collate_fn4backbone, dataset
import pdb
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3'
# parameters setting
def parse_args():
parser = argparse.ArgumentParser(description='dcl parameters')
parser.add_argument('--data', dest='dataset',
default='CUB', type=str)
parser.add_argument('--save', dest='resume',
default=None,
type=str)
parser.add_argument('--backbone', dest='backbone',
default='resnet50', type=str)
parser.add_argument('--auto_resume', dest='auto_resume',
action='store_true')
parser.add_argument('--epoch', dest='epoch',
default=360, type=int)
parser.add_argument('--tb', dest='train_batch',
default=16, type=int)
parser.add_argument('--vb', dest='val_batch',
default=512, type=int)
parser.add_argument('--sp', dest='save_point',
default=5000, type=int)
parser.add_argument('--cp', dest='check_point',
default=5000, type=int)
parser.add_argument('--lr', dest='base_lr',
default=0.0008, type=float)
parser.add_argument('--lr_step', dest='decay_step',
default=60, type=int)
parser.add_argument('--cls_lr_ratio', dest='cls_lr_ratio',
default=10.0, type=float)
parser.add_argument('--start_epoch', dest='start_epoch',
default=0, type=int)
parser.add_argument('--tnw', dest='train_num_workers',
default=16, type=int)
parser.add_argument('--vnw', dest='val_num_workers',
default=32, type=int)
parser.add_argument('--detail', dest='discribe',
default='', type=str)
parser.add_argument('--size', dest='resize_resolution',
default=512, type=int)
parser.add_argument('--crop', dest='crop_resolution',
default=448, type=int)
parser.add_argument('--cls_2', dest='cls_2',
action='store_true')
parser.add_argument('--cls_mul', dest='cls_mul',
action='store_true')
parser.add_argument('--swap_num', default=[7, 7],
nargs=2, metavar=('swap1', 'swap2'),
type=int, help='specify a range')
args = parser.parse_args()
return args
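# Example invocation (a sketch; dataset/backbone/paths depend on the local setup):
#
#   python train.py --data CUB --backbone resnet50 --epoch 360 --tb 16 --vb 512 \
#       --lr 0.0008 --lr_step 60 --cls_2 --detail my_run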
def auto_load_resume(load_dir):
folders = os.listdir(load_dir)
date_list = [int(x.split('_')[1].replace(' ', '0')) for x in folders]
choosed = folders[date_list.index(max(date_list))]
weight_list = os.listdir(os.path.join(load_dir, choosed))
acc_list = [x[:-4].split('_')[-1] if x[:7]=='weights' else 0 for x in weight_list]
acc_list = [float(x) for x in acc_list]
choosed_w = weight_list[acc_list.index(max(acc_list))]
return os.path.join(load_dir, choosed, choosed_w)
if __name__ == '__main__':
args = parse_args()
print(args, flush=True)
Config = LoadConfig(args, 'train')
Config.cls_2 = args.cls_2
Config.cls_2xmul = args.cls_mul
assert Config.cls_2 ^ Config.cls_2xmul
transformers = load_data_transformers(args.resize_resolution, args.crop_resolution, args.swap_num)
# inital dataloader
train_set = dataset(Config = Config,\
anno = Config.train_anno,\
common_aug = transformers["common_aug"],\
swap = transformers["swap"],\
totensor = transformers["train_totensor"],\
train = True)
trainval_set = dataset(Config = Config,\
anno = Config.train_anno,\
common_aug = transformers["None"],\
swap = transformers["None"],\
totensor = transformers["val_totensor"],\
train = False,
train_val = True)
val_set = dataset(Config = Config,\
anno = Config.val_anno,\
common_aug = transformers["None"],\
swap = transformers["None"],\
totensor = transformers["test_totensor"],\
test=True)
dataloader = {}
dataloader['train'] = torch.utils.data.DataLoader(train_set,\
batch_size=args.train_batch,\
shuffle=True,\
num_workers=args.train_num_workers,\
collate_fn=collate_fn4train if not Config.use_backbone else collate_fn4backbone,
drop_last=True if Config.use_backbone else False,
pin_memory=True)
setattr(dataloader['train'], 'total_item_len', len(train_set))
dataloader['trainval'] = torch.utils.data.DataLoader(trainval_set,\
batch_size=args.val_batch,\
shuffle=False,\
num_workers=args.val_num_workers,\
collate_fn=collate_fn4val if not Config.use_backbone else collate_fn4backbone,
drop_last=True if Config.use_backbone else False,
pin_memory=True)
setattr(dataloader['trainval'], 'total_item_len', len(trainval_set))
setattr(dataloader['trainval'], 'num_cls', Config.numcls)
dataloader['val'] = torch.utils.data.DataLoader(val_set,\
batch_size=args.val_batch,\
shuffle=False,\
num_workers=args.val_num_workers,\
collate_fn=collate_fn4test if not Config.use_backbone else collate_fn4backbone,
drop_last=True if Config.use_backbone else False,
pin_memory=True)
setattr(dataloader['val'], 'total_item_len', len(val_set))
setattr(dataloader['val'], 'num_cls', Config.numcls)
cudnn.benchmark = True
print('Choose model and train set', flush=True)
model = MainModel(Config)
# load model
if (args.resume is None) and (not args.auto_resume):
print('train from imagenet pretrained models ...', flush=True)
else:
if not args.resume is None:
resume = args.resume
print('load from pretrained checkpoint %s ...'% resume, flush=True)
elif args.auto_resume:
resume = auto_load_resume(Config.save_dir)
print('load from %s ...'%resume, flush=True)
else:
raise Exception("no checkpoints to load")
model_dict = model.state_dict()
pretrained_dict = torch.load(resume)
pretrained_dict = {k[7:]: v for k, v in pretrained_dict.items() if k[7:] in model_dict}
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
print('Set cache dir', flush=True)
time = datetime.datetime.now()
filename = '%s_%d%d%d_%s'%(args.discribe, time.month, time.day, time.hour, Config.dataset)
save_dir = os.path.join(Config.save_dir, filename)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
if Config.use_backbone:
ignored_params = list(map(id, model.classifier.parameters()))
else:
ignored_params1 = list(map(id, model.classifier.parameters()))
ignored_params2 = list(map(id, model.classifier_swap.parameters()))
ignored_params3 = list(map(id, model.Convmask.parameters()))
ignored_params = ignored_params1 + ignored_params2 + ignored_params3
print('the num of new layers:', len(ignored_params), flush=True)
base_params = filter(lambda p: id(p) not in ignored_params, model.parameters())
lr_ratio = args.cls_lr_ratio
base_lr = args.base_lr
if Config.use_backbone:
optimizer = optim.SGD([{'params': base_params},
{'params': model.classifier.parameters(), 'lr': base_lr}], lr = base_lr, momentum=0.9)
else:
optimizer = optim.SGD([{'params': base_params},
{'params': model.classifier.parameters(), 'lr': lr_ratio*base_lr},
{'params': model.classifier_swap.parameters(), 'lr': lr_ratio*base_lr},
{'params': model.Convmask.parameters(), 'lr': lr_ratio*base_lr},
], lr = base_lr, momentum=0.9)
exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=args.decay_step, gamma=0.1)
# train entry
train(Config,
model,
epoch_num=args.epoch,
start_epoch=args.start_epoch,
optimizer=optimizer,
exp_lr_scheduler=exp_lr_scheduler,
data_loader=dataloader,
save_dir=save_dir,
data_size=args.crop_resolution,
savepoint=args.save_point,
checkpoint=args.check_point)
``` |
{
"source": "jike-algorithm-zhangxiao/open_model_zoo",
"score": 2
} |
#### File: demos/tests/cases.py
```python
import collections
import itertools
import sys
from args import (
DataDirectoryArg, DataDirectoryOrigFileNamesArg, DataPatternArg,
ModelArg, ModelFileArg, OMZ_DIR, TestDataArg, image_net_arg, image_retrieval_arg
)
from data_sequences import DATA_SEQUENCES
MONITORS = {'-u': 'cdm'}
TestCase = collections.namedtuple('TestCase', ['options', 'extra_models'])
# TODO with Python3.7 use namedtuple defaults instead
TestCase.__new__.__defaults__ = [],
class Demo:
IMPLEMENTATION_TYPES = set()
def __init__(self, name, implementation, model_keys=None, device_keys=None, test_cases=None):
self.implementation = implementation
self.subdirectory = name + '/' + implementation
self.device_keys = device_keys
self.model_keys = model_keys if model_keys else ['-m']
self.test_cases = test_cases
self._exec_name = self.subdirectory.replace('/', '_')
self.parser = None
Demo.IMPLEMENTATION_TYPES.add(implementation)
def models_lst_path(self, source_dir):
return source_dir / self.subdirectory / 'models.lst'
def device_args(self, device_list):
if len(self.device_keys) == 0:
return {'CPU': []}
return {device: [arg for key in self.device_keys for arg in [key, device]] for device in device_list}
def get_models(self, case):
return ((case.options[key], key) for key in self.model_keys if key in case.options)
def update_case(self, case, updated_options, with_replacement=False):
if not updated_options: return
new_options = case.options.copy()
for key, value in updated_options.items():
new_options[key] = value
new_case = case._replace(options=new_options)
if with_replacement:
self.test_cases.remove(case)
self.test_cases.append(new_case)
def add_parser(self, parser):
self.parser = parser(self)
return self
def parse_output(self, output, test_case, device):
if self.parser:
self.parser(output, test_case, device)
def update_option(self, updated_options):
for case in self.test_cases[:]:
self.update_case(case, updated_options, with_replacement=True)
return self
def add_test_cases(self, *new_cases):
for test_case in new_cases:
self.test_cases = combine_cases(self.test_cases, test_case)
return self
def exclude_models(self, models):
for case in self.test_cases[:]:
for model, _ in self.get_models(case):
if not isinstance(model, ModelArg) or model.name in set(models):
self.test_cases.remove(case)
break
return self
def only_models(self, models):
for case in self.test_cases[:]:
for model, _ in self.get_models(case):
if not isinstance(model, ModelArg) or model.name not in set(models):
self.test_cases.remove(case)
break
return self
def set_precisions(self, precisions, model_info):
for case in self.test_cases[:]:
updated_options = {p: {} for p in precisions}
for model, key in self.get_models(case):
if not isinstance(model, ModelArg):
continue
supported_p = list(set(precisions) & set(model_info[model.name]["precisions"]))
if len(supported_p):
model.precision = supported_p[0]
for p in supported_p[1:]:
updated_options[p][key] = ModelArg(model.name, p)
else:
print("Warning: {} model does not support {} precisions and will not be tested\n".format(
model.name, ','.join(precisions)))
self.test_cases.remove(case)
break
for p in precisions:
self.update_case(case, updated_options[p])
class CppDemo(Demo):
def __init__(self, name, implementation='cpp', model_keys=None, device_keys=None, test_cases=None):
super().__init__(name, implementation, model_keys, device_keys, test_cases)
self._exec_name = self._exec_name.replace('_cpp', '')
def fixed_args(self, source_dir, build_dir):
return [str(build_dir / self._exec_name)]
class PythonDemo(Demo):
def __init__(self, name, implementation='python', model_keys=None, device_keys=None, test_cases=None):
super().__init__(name, implementation, model_keys, device_keys, test_cases)
self._exec_name = self._exec_name.replace('_python', '')
def fixed_args(self, source_dir, build_dir):
cpu_extension_path = build_dir / 'lib/libcpu_extension.so'
return [sys.executable, str(source_dir / self.subdirectory / (self._exec_name + '.py')),
*(['-l', str(cpu_extension_path)] if cpu_extension_path.exists() else [])]
def join_cases(*args):
options = {}
for case in args: options.update(case.options)
extra_models = set()
for case in args: extra_models.update(case.extra_models)
return TestCase(options=options, extra_models=list(extra_models))
def combine_cases(*args):
return [join_cases(*combination)
for combination in itertools.product(*[[arg] if isinstance(arg, TestCase) else arg for arg in args])]
def single_option_cases(key, *args):
return [TestCase(options={} if arg is None else {key: arg}) for arg in args]
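# Sketch of how the helpers above compose (hypothetical option keys and values):
#
#   combine_cases(
#       TestCase(options={'-i': 'input.mp4'}),
#       single_option_cases('-m', 'model_a', 'model_b'))
#   # -> [TestCase(options={'-i': 'input.mp4', '-m': 'model_a'}, extra_models=[]),
#   #     TestCase(options={'-i': 'input.mp4', '-m': 'model_b'}, extra_models=[])]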
DEMOS = [
CppDemo(name='background_subtraction_demo', device_keys=['-d'], implementation='cpp_gapi', test_cases=combine_cases(
TestCase(options={'--no_show': None, '-at': 'maskrcnn',
**MONITORS,
'-i': DataPatternArg('instance-segmentation'),
}),
single_option_cases('-m',
ModelArg('instance-segmentation-person-0007'),
ModelArg('instance-segmentation-security-0091')),
)),
CppDemo(name='gaze_estimation_demo', implementation='cpp_gapi',
model_keys=['-m', '-m_fd', '-m_hp', '-m_lm', '-m_es'],
device_keys=['-d', '-d_fd', '-d_hp', '-d_lm'],
test_cases=combine_cases(
TestCase(options={'-no_show': None,
**MONITORS,
'-i': DataPatternArg('gaze-estimation-adas')}),
TestCase(options={
'-m': ModelArg('gaze-estimation-adas-0002'),
'-m_hp': ModelArg('head-pose-estimation-adas-0001'),
'-m_lm': ModelArg('facial-landmarks-35-adas-0002'),
'-m_es': ModelArg('open-closed-eye-0001'),
}),
single_option_cases(
'-m_fd',
ModelArg('face-detection-adas-0001'),
ModelArg('face-detection-retail-0004')),
)),
CppDemo(name='gesture_recognition_demo', implementation='cpp_gapi',
model_keys=['-m_a', '-m_d'],
device_keys=['-d_a', '-d_d'],
test_cases=combine_cases(
TestCase(options={'--no_show': None,
'-i': TestDataArg('msasl/global_crops/_nz_sivss20/clip_0017/img_%05d.jpg'),
'-m_d': ModelArg('person-detection-asl-0001')}),
[
TestCase(options={'-m_a': ModelArg('asl-recognition-0004'), '-c': str(OMZ_DIR / 'data/dataset_classes/msasl100.json')}),
TestCase(options={'-m_a': ModelArg('common-sign-language-0001'),
'-c': str(OMZ_DIR / 'data/dataset_classes/jester27.json')}),
TestCase(options={'-m_a': ModelArg('common-sign-language-0002'),
'-c': str(OMZ_DIR / 'data/dataset_classes/common_sign_language12.json')}),
],
)),
CppDemo(name='face_detection_mtcnn_demo', implementation='cpp_gapi',
model_keys=['-m_p', '-m_r', '-m_o'],
device_keys=['-d_p', '-d_r', '-d_o'],
test_cases=combine_cases(
TestCase(options={'--no_show': None,
'-i': image_net_arg('00000002'),
'-m_p': ModelArg('mtcnn-p'),
'-m_r': ModelArg('mtcnn-r'),
'-m_o': ModelArg('mtcnn-o')}),
)),
CppDemo(name='interactive_face_detection_demo', implementation='cpp_gapi',
model_keys=['-m', '-m_ag', '-m_em', '-m_lm', '-m_hp', '-m_am'],
device_keys=['-d', '-d_ag', '-d_em', '-d_lm', '-d_hp', '-d_am'],
test_cases=combine_cases(
TestCase(options={'-no_show': None,
**MONITORS,
'-i': DataPatternArg('375x500')}),
[
TestCase(options={
'-m': ModelArg('face-detection-retail-0004'),
'-m_ag': ModelArg('age-gender-recognition-retail-0013'),
'-m_am': ModelArg('anti-spoof-mn3'),
'-m_em': ModelArg('emotions-recognition-retail-0003'),
'-m_hp': ModelArg('head-pose-estimation-adas-0001'),
'-m_lm': ModelArg('facial-landmarks-35-adas-0002'),
}),
TestCase(options={'-m': ModelArg('face-detection-adas-0001')})
]
)),
CppDemo(name='smart_classroom_demo', implementation='cpp_gapi',
model_keys=['-m_act', '-m_fd', '-m_lm', '-m_reid'],
device_keys=['-d_act', '-d_fd', '-d_lm', '-d_reid'],
test_cases=combine_cases(
TestCase(options={'-no_show': None,
**MONITORS,
'-i': DataPatternArg('smart-classroom-demo'),
'-m_fd': ModelArg('face-detection-adas-0001')}),
[
*combine_cases(
[
TestCase(options={'-m_act': ModelArg('person-detection-action-recognition-0005')}),
TestCase(options={'-m_act': ModelArg('person-detection-action-recognition-0006'),
'-student_ac': 'sitting,writing,raising_hand,standing,turned_around,lie_on_the_desk'}),
# person-detection-action-recognition-teacher-0002 is supposed to be provided with -teacher_id, but
# this would require providing a gallery file with -fg key. Unless -teacher_id is provided
# -teacher_ac is ignored thus run the test just with default actions pretending it's about students
TestCase(options={'-m_act': ModelArg('person-detection-action-recognition-teacher-0002')}),
],
[
TestCase(options={}),
TestCase(options={
'-m_lm': ModelArg('landmarks-regression-retail-0009'),
'-m_reid': ModelArg('Sphereface'),
}),
TestCase(options={
'-m_lm': ModelArg('landmarks-regression-retail-0009'),
'-m_reid': ModelArg('face-recognition-resnet100-arcface-onnx'),
}),
],
),
TestCase(options={'-m_act': ModelArg('person-detection-raisinghand-recognition-0001'), '-a_top': '5'}),
],
)),
CppDemo(name='classification_benchmark_demo',
device_keys=['-d'],
test_cases=combine_cases(
TestCase(options={
'-no_show': None,
'-time': '5',
'-i': DataDirectoryOrigFileNamesArg('classification'),
'-labels': str(OMZ_DIR / 'data/dataset_classes/imagenet_2012.txt'),
'-gt': TestDataArg("ILSVRC2012_img_val/ILSVRC2012_val.txt")}),
single_option_cases('-m',
ModelArg('alexnet'),
ModelArg('densenet-121-tf'),
ModelArg('googlenet-v1'),
ModelArg('googlenet-v1-tf'),
ModelArg('googlenet-v3'),
ModelArg('googlenet-v3-pytorch'),
ModelArg('mixnet-l'),
ModelArg('mobilenet-v2'),
ModelArg('mobilenet-v2-pytorch'),
ModelArg('repvgg-a0'),
ModelArg('repvgg-b1'),
ModelArg('repvgg-b3'),
))),
CppDemo(name='crossroad_camera_demo',
model_keys=['-m', '-m_pa', '-m_reid'],
device_keys=['-d', '-d_pa', '-d_reid'],
test_cases=combine_cases(
TestCase(options={'-no_show': None,
**MONITORS,
'-i': DataPatternArg('person-vehicle-bike-detection-crossroad')}),
TestCase(options={'-m': ModelArg('person-vehicle-bike-detection-crossroad-0078')}),
single_option_cases('-m_pa', None, ModelArg('person-attributes-recognition-crossroad-0230')),
single_option_cases('-m_reid',
None,
ModelArg('person-reidentification-retail-0277'),
ModelArg('person-reidentification-retail-0286'),
ModelArg('person-reidentification-retail-0287'),
ModelArg('person-reidentification-retail-0288')
),
)),
CppDemo(name='gaze_estimation_demo',
model_keys=['-<KEY>', '-m_es'],
device_keys=['-<KEY>'],
test_cases=combine_cases(
TestCase(options={'-no_show': None,
**MONITORS,
'-i': DataPatternArg('gaze-estimation-adas')}),
TestCase(options={
'-m': ModelArg('gaze-estimation-adas-0002'),
'-m_hp': ModelArg('head-pose-estimation-adas-0001'),
'-m_es': ModelArg('open-closed-eye-0001'),
}),
[
*combine_cases(
single_option_cases('-m_lm',
ModelArg('facial-landmarks-35-adas-0002'),
ModelArg('facial-landmarks-98-detection-0001'),
)),
],
[
*combine_cases(
single_option_cases('-m_fd',
ModelArg('face-detection-adas-0001'),
ModelArg('face-detection-retail-0004')
)),
],
)),
CppDemo(name='human_pose_estimation_demo', device_keys=['-d'], test_cases=combine_cases(
TestCase(options={'-no_show': None,
**MONITORS,
'-i': DataPatternArg('human-pose-estimation')}),
[
TestCase(options={'-at': 'openpose',
'-m': ModelArg('human-pose-estimation-0001')}
),
TestCase(options={'-at': 'higherhrnet',
'-m': ModelArg('higher-hrnet-w32-human-pose-estimation')}
),
*combine_cases(
TestCase(options={'-at': 'ae'}),
single_option_cases('-m',
ModelArg('human-pose-estimation-0005'),
ModelArg('human-pose-estimation-0006'),
ModelArg('human-pose-estimation-0007')
)),
],
)),
CppDemo(name='image_processing_demo', device_keys=['-d'], test_cases=combine_cases(
TestCase(options={'--no_show': None,
**MONITORS,
'-i': DataDirectoryArg('single-image-super-resolution')}),
[
*combine_cases(
TestCase(options={'-at': 'sr'}),
single_option_cases('-m',
ModelArg('single-image-super-resolution-1032'),
ModelArg('single-image-super-resolution-1033'),
ModelArg('text-image-super-resolution-0001'))
),
TestCase(options={'-at': 'deblur',
'-m': ModelArg('deblurgan-v2')}
),
TestCase(options={'-at': 'jr',
'-m': ModelArg('fbcnn')}
)
]
)),
CppDemo(name='interactive_face_detection_demo',
model_keys=['-m', '-m_ag', '-m_em', '-m_lm', '-m_hp', '-m_am'],
device_keys=['-d'], test_cases=combine_cases(
TestCase(options={'-no_show': None,
**MONITORS,
'-i': DataPatternArg('375x500')}),
[
TestCase(options={
'-m': ModelArg('face-detection-retail-0004'),
'-m_ag': ModelArg('age-gender-recognition-retail-0013'),
'-m_am': ModelArg('anti-spoof-mn3'),
'-m_em': ModelArg('emotions-recognition-retail-0003'),
'-m_hp': ModelArg('head-pose-estimation-adas-0001'),
'-m_lm': ModelArg('facial-landmarks-35-adas-0002'),
}),
TestCase(options={'-m': ModelArg('face-detection-adas-0001')})
]
)),
CppDemo(name='mask_rcnn_demo', device_keys=['-d'], test_cases=combine_cases(
TestCase(options={'-i': DataDirectoryArg('instance-segmentaion-mask-rcnn')}),
single_option_cases('-m',
ModelArg('mask_rcnn_inception_resnet_v2_atrous_coco'),
ModelArg('mask_rcnn_resnet50_atrous_coco'))
)),
CppDemo(name='multi_channel_face_detection_demo', device_keys=['-d'], test_cases=combine_cases(
TestCase(options={'-no_show': None,
**MONITORS,
'-i': DATA_SEQUENCES['face-detection-adas']}),
[
TestCase(options={'-m': ModelArg('face-detection-adas-0001')}),
TestCase(options={'-m': ModelArg('face-detection-retail-0004'), '-bs': '2',
'-show_stats': '', '-n_iqs': '1', '-duplicate_num': '2'}),
TestCase(options={'-m': ModelArg('face-detection-retail-0005'), '-bs': '3',
'-n_iqs': '999'}),
TestCase(options={'-m': ModelArg('face-detection-retail-0044'), '-bs': '4',
'-show_stats': '', '-duplicate_num': '3', '-real_input_fps': ''})
]
)),
CppDemo(name='multi_channel_human_pose_estimation_demo', device_keys=['-d'],
test_cases=[TestCase(options={'-no_show': None,
**MONITORS,
'-i': DATA_SEQUENCES['human-pose-estimation'],
'-m': ModelArg('human-pose-estimation-0001')}),
]),
CppDemo(name='multi_channel_object_detection_demo_yolov3', device_keys=['-d'], test_cases=combine_cases(
TestCase(options={'-no_show': None,
**MONITORS,
'-i': DataPatternArg('object-detection-demo')}),
[
TestCase(options={'-m': ModelArg('person-vehicle-bike-detection-crossroad-yolov3-1020')}),
TestCase(options={'-m': ModelArg('yolo-v3-tf'), '-duplicate_num': '2',
'-n_iqs': '20', '-fps_sp': '1', '-n_sp': '1', '-show_stats': '', '-real_input_fps': ''}),
TestCase(options={'-m': ModelArg('yolo-v3-tiny-tf'), '-duplicate_num': '3',
'-n_iqs': '9999', '-fps_sp': '50', '-n_sp': '30'})
]
)),
CppDemo(name='noise_suppression_demo', device_keys=['-d'], test_cases=combine_cases(
TestCase(options={'-i': TestDataArg('how_are_you_doing.wav')}),
single_option_cases('-m',
ModelArg('noise-suppression-denseunet-ll-0001'),
ModelArg('noise-suppression-poconetlike-0001')),
)),
CppDemo(name='object_detection_demo', device_keys=['-d'], test_cases=combine_cases(
TestCase(options={'--no_show': None,
**MONITORS,
'-i': DataPatternArg('object-detection-demo')}),
[
*combine_cases(
TestCase(options={'-at': 'centernet'}),
[
*single_option_cases('-m',
ModelArg('ctdet_coco_dlav0_512'),
),
*combine_cases(
TestCase(options={
'-mean_values': "104.04 113.985 119.85",
'-scale_values': "73.695 69.87 70.89",
}),
single_option_cases('-m',
ModelFileArg('ctdet_coco_dlav0_512', 'ctdet_coco_dlav0_512.onnx'),
),
),
]
),
*combine_cases(
TestCase(options={'-at': 'faceboxes'}),
[
TestCase(options={'-m': ModelArg('faceboxes-pytorch')}),
TestCase(options={'-m': ModelFileArg('faceboxes-pytorch', 'faceboxes-pytorch.onnx'),
'-mean_values': "104.0 117.0 123.0"}),
]
),
*combine_cases(
TestCase(options={'-at': 'retinaface-pytorch'}),
[
TestCase(options={'-m': ModelArg('retinaface-resnet50-pytorch')}),
TestCase(options={'-m': ModelFileArg('retinaface-resnet50-pytorch', 'retinaface-resnet50-pytorch.onnx'),
'-mean_values': "104.0 117.0 123.0"}),
]
),
*combine_cases(
TestCase(options={'-at': 'ssd'}),
[
*single_option_cases('-m',
ModelArg('efficientdet-d0-tf'),
ModelArg('efficientdet-d1-tf'),
ModelArg('face-detection-0200'),
ModelArg('face-detection-0202'),
ModelArg('face-detection-0204'),
ModelArg('face-detection-0205'),
ModelArg('face-detection-0206'),
ModelArg('face-detection-adas-0001'),
ModelArg('face-detection-retail-0004'),
ModelArg('face-detection-retail-0005'),
ModelArg('face-detection-retail-0044'),
ModelArg('faster-rcnn-resnet101-coco-sparse-60-0001'),
ModelArg('pedestrian-and-vehicle-detector-adas-0001'),
ModelArg('pedestrian-detection-adas-0002'),
ModelArg('pelee-coco'),
ModelArg('person-detection-0200'),
ModelArg('person-detection-0201'),
ModelArg('person-detection-0202'),
ModelArg('person-detection-retail-0013'),
ModelArg('person-vehicle-bike-detection-2000'),
ModelArg('person-vehicle-bike-detection-2001'),
ModelArg('person-vehicle-bike-detection-2002'),
ModelArg('person-vehicle-bike-detection-2003'),
ModelArg('person-vehicle-bike-detection-2004'),
ModelArg('product-detection-0001'),
ModelArg('rfcn-resnet101-coco-tf'),
ModelArg('retinanet-tf'),
ModelArg('ssd300'),
ModelArg('ssd512'),
ModelArg('ssd_mobilenet_v1_coco'),
ModelArg('ssd_mobilenet_v1_fpn_coco'),
ModelArg('ssdlite_mobilenet_v2'),
ModelArg('vehicle-detection-0200'),
ModelArg('vehicle-detection-0201'),
ModelArg('vehicle-detection-0201'),
ModelArg('vehicle-detection-adas-0002'),
ModelArg('vehicle-license-plate-detection-barrier-0106'),
ModelArg('vehicle-license-plate-detection-barrier-0123')),
TestCase(options={'-m': ModelFileArg('ssd-resnet34-1200-onnx', 'resnet34-ssd1200.onnx'),
'-reverse_input_channels': None,
'-mean_values': "123.675 116.28 103.53",
'-scale_values': "58.395 57.12 57.375"}),
]
),
*combine_cases(
TestCase(options={'-at': 'yolo'}),
single_option_cases('-m',
ModelArg('mobilenet-yolo-v4-syg'),
ModelArg('person-vehicle-bike-detection-crossroad-yolov3-1020'),
ModelArg('yolo-v1-tiny-tf'),
ModelArg('yolo-v2-ava-0001'),
ModelArg('yolo-v2-ava-sparse-35-0001'),
ModelArg('yolo-v2-ava-sparse-70-0001'),
ModelArg('yolo-v2-tiny-ava-0001'),
ModelArg('yolo-v2-tiny-ava-sparse-30-0001'),
ModelArg('yolo-v2-tiny-ava-sparse-60-0001'),
ModelArg('yolo-v2-tiny-vehicle-detection-0001'),
ModelArg('yolo-v2-tf'),
ModelArg('yolo-v2-tiny-tf'),
ModelArg('yolo-v3-tf'),
ModelArg('yolo-v3-tiny-tf'),
ModelArg('yolo-v4-tf'),
ModelArg('yolo-v4-tiny-tf'))),
],
)),
CppDemo(name='pedestrian_tracker_demo',
model_keys=['-m_det', '-m_reid'],
device_keys=['-d_det', '-d_reid'],
test_cases=combine_cases(
TestCase(options={'-no_show': None,
**MONITORS,
'-i': DataPatternArg('person-detection-retail')}),
[
*combine_cases(
TestCase(options={'-at': 'ssd'}),
single_option_cases('-m_det',
ModelArg('person-detection-retail-0002'),
ModelArg('person-detection-retail-0013')),
),
TestCase(options={'-person_label': '0', '-at': 'yolo', '-m_det': ModelArg('yolo-v3-tf')}),
TestCase(options={'-person_label': '0', '-at': 'centernet', '-m_det': ModelArg('ctdet_coco_dlav0_512')}),
TestCase(options={'-person_label': '1', '-at': 'ssd', '-m_det': ModelArg('retinanet-tf')}),
],
single_option_cases('-m_reid',
ModelArg('person-reidentification-retail-0277'),
ModelArg('person-reidentification-retail-0286'),
ModelArg('person-reidentification-retail-0287'),
ModelArg('person-reidentification-retail-0288')),
)),
CppDemo(name='security_barrier_camera_demo',
model_keys=['-m', '-m_lpr', '-m_va'],
device_keys=['-d', '-d_lpr', '-d_va'],
test_cases=combine_cases(
TestCase(options={'-no_show': None,
**MONITORS,
'-i': DataDirectoryArg('vehicle-license-plate-detection-barrier')}),
TestCase(options={'-m': ModelArg('vehicle-license-plate-detection-barrier-0106')}),
single_option_cases('-m_lpr',
None,
ModelArg('license-plate-recognition-barrier-0001'),
ModelArg('license-plate-recognition-barrier-0007')),
single_option_cases('-m_va', None, ModelArg('vehicle-attributes-recognition-barrier-0039')),
)),
CppDemo(name='segmentation_demo', device_keys=['-d'], test_cases=combine_cases(
TestCase(options={'-no_show': None, **MONITORS}),
[
TestCase(options={
'-m': ModelArg('road-segmentation-adas-0001'),
'-i': DataPatternArg('road-segmentation-adas'),
}),
*combine_cases(
TestCase(options={'-i': DataPatternArg('semantic-segmentation-adas')}),
single_option_cases('-m',
ModelArg('semantic-segmentation-adas-0001'),
ModelArg('fastseg-large'),
ModelArg('fastseg-small'),
ModelArg('hrnet-v2-c1-segmentation'),
ModelArg('deeplabv3'),
ModelArg('ocrnet-hrnet-w48-paddle'),
ModelArg('pspnet-pytorch'),
ModelArg('drn-d-38'))),
],
)),
CppDemo(name='smart_classroom_demo',
model_keys=['-m_act', '-m_fd', '-m_lm', '-m_reid'],
device_keys=['-d_act', '-d_fd', '-d_lm', '-d_reid'],
test_cases=combine_cases(
TestCase(options={'-no_show': None,
**MONITORS,
'-i': DataPatternArg('smart-classroom-demo'),
'-m_fd': ModelArg('face-detection-adas-0001')}),
[
*combine_cases(
[
TestCase(options={'-m_act': ModelArg('person-detection-action-recognition-0005')}),
TestCase(options={'-m_act': ModelArg('person-detection-action-recognition-0006'),
'-student_ac': 'sitting,writing,raising_hand,standing,turned_around,lie_on_the_desk'}),
# person-detection-action-recognition-teacher-0002 is supposed to be provided with -teacher_id, but
# this would require providing a gallery file with -fg key. Unless -teacher_id is provided
# -teacher_ac is ignored thus run the test just with default actions pretending it's about students
TestCase(options={'-m_act': ModelArg('person-detection-action-recognition-teacher-0002')}),
],
[
TestCase(options={}),
TestCase(options={
'-m_lm': ModelArg('landmarks-regression-retail-0009'),
'-m_reid': ModelArg('Sphereface'),
}),
TestCase(options={
'-m_lm': ModelArg('landmarks-regression-retail-0009'),
'-m_reid': ModelArg('face-recognition-resnet100-arcface-onnx'),
}),
],
),
TestCase(options={'-m_act': ModelArg('person-detection-raisinghand-recognition-0001'), '-a_top': '5'}),
],
)),
CppDemo(name='social_distance_demo', device_keys=['-d_det', '-d_reid'],
model_keys=['-m_det', '-m_reid'], test_cases=combine_cases(
TestCase(options={'-no_show': None,
**MONITORS,
'-i': DataDirectoryArg('person-detection-retail')}),
single_option_cases('-m_det',
ModelArg('person-detection-0200'),
ModelArg('person-detection-0201'),
ModelArg('person-detection-0202'),
ModelArg('person-detection-retail-0013')),
single_option_cases('-m_reid',
ModelArg('person-reidentification-retail-0277'),
# ModelArg('person-reidentification-retail-0286'),
ModelArg('person-reidentification-retail-0287'),
# ModelArg('person-reidentification-retail-0288')
),
)),
CppDemo(name='text_detection_demo', model_keys=['-m_td', '-m_tr'], device_keys=['-d_td', '-d_tr'],
test_cases=combine_cases(
TestCase(options={'-no_show': None,
**MONITORS,
'-i': DataPatternArg('text-detection')}),
single_option_cases('-m_td',
ModelArg('text-detection-0003'),
ModelArg('text-detection-0004'),
ModelArg('horizontal-text-detection-0001')),
[
*combine_cases(
TestCase(options={'-dt': 'ctc'}),
[
*single_option_cases('-m_tr', None, ModelArg('text-recognition-0012')),
TestCase(options={'-m_tr': ModelArg('text-recognition-0014'),
'-tr_pt_first': None,
'-tr_o_blb_nm': 'logits'}),
]),
*combine_cases(
TestCase(options={'-dt': 'simple'}),
[
TestCase(options={'-m_tr': ModelArg('text-recognition-0015-encoder'),
'-tr_pt_first': None,
'-tr_o_blb_nm': 'logits',
'-m_tr_ss': '?0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'},
extra_models=[ModelArg('text-recognition-0015-decoder')]),
TestCase(options={'-m_tr': ModelArg('text-recognition-0016-encoder'),
'-tr_pt_first': None,
'-tr_o_blb_nm': 'logits',
'-m_tr_ss': '?0123456789abcdefghijklmnopqrstuvwxyz'},
extra_models=[ModelArg('text-recognition-0016-decoder')]),
TestCase(options={'-m_tr': ModelArg('text-recognition-resnet-fc'),
'-tr_pt_first': None}),
TestCase(options={'-m_tr': ModelArg('vitstr-small-patch16-224'),
'-tr_pt_first': None,
'-m_tr_ss': str(OMZ_DIR / 'models/public/vitstr-small-patch16-224/vocab.txt'),
'-start_index': '1',
'-pad': " "}),
]),
]
)),
PythonDemo(name='3d_segmentation_demo', device_keys=['-d'], test_cases=combine_cases(
TestCase(options={'-m': ModelArg('brain-tumor-segmentation-0001'),
'-o': '.'}),
single_option_cases('-i', *DATA_SEQUENCES['brain-tumor-nifti']),
)),
PythonDemo(name='action_recognition_demo', device_keys=['-d'],
model_keys=['-m_en', '-m_de'], test_cases=combine_cases(
TestCase(options={'--no_show': None, **MONITORS, '-i': DataPatternArg('action-recognition')}),
[
TestCase(options={'--architecture_type': 'i3d-rgb',
'-m_en': ModelArg('i3d-rgb-tf')}
),
*combine_cases(
TestCase(options={'--architecture_type': 'en-de'}),
[# TODO monitors_extension wasn't found
# TestCase(options={
# '-m_en': ModelArg('action-recognition-0001-encoder'),
# '-m_de': ModelArg('action-recognition-0001-decoder'),
# }),
TestCase(options={
'-m_en': ModelArg('driver-action-recognition-adas-0002-encoder'),
'-m_de': ModelArg('driver-action-recognition-adas-0002-decoder'),
}),
]
),
],
)),
PythonDemo(name='background_subtraction_demo', device_keys=['-d'], test_cases=combine_cases(
TestCase(options={'--no_show': None,
**MONITORS,
'-i': DataPatternArg('instance-segmentation'),
'--background': DataPatternArg('instance-segmentation'),
}),
single_option_cases('-m',
ModelArg('instance-segmentation-person-0007'),
ModelArg('robust-video-matting-mobilenetv3'),
ModelArg('background-matting-mobilenetv2'),
ModelArg('yolact-resnet50-fpn-pytorch')),
)),
PythonDemo(name='bert_question_answering_demo', device_keys=['-d'], test_cases=combine_cases(
TestCase(options={'-i': 'https://en.wikipedia.org/wiki/OpenVINO',
'--questions': ['What frameworks does OpenVINO support?', 'Who are developers?']}),
[
TestCase(options={
'-m': ModelArg('bert-small-uncased-whole-word-masking-squad-0001'),
'--input_names': 'input_ids,attention_mask,token_type_ids',
'--output_names': 'output_s,output_e',
'--vocab': ModelFileArg('bert-small-uncased-whole-word-masking-squad-0001', 'vocab.txt'),
}),
TestCase(options={
'-m': ModelArg('bert-small-uncased-whole-word-masking-squad-0002'),
'--input_names': 'input_ids,attention_mask,token_type_ids,position_ids',
'--output_names': 'output_s,output_e',
'--vocab': ModelFileArg('bert-small-uncased-whole-word-masking-squad-0002', 'vocab.txt'),
}),
TestCase(options={
'-m': ModelArg('bert-small-uncased-whole-word-masking-squad-int8-0002'),
'--input_names': 'input_ids,attention_mask,token_type_ids,position_ids',
'--output_names': 'output_s,output_e',
'--vocab': ModelFileArg('bert-small-uncased-whole-word-masking-squad-int8-0002', 'vocab.txt'),
}),
TestCase(options={
'-m': ModelArg('bert-large-uncased-whole-word-masking-squad-0001'),
'--input_names': 'input_ids,attention_mask,token_type_ids',
'--output_names': 'output_s,output_e',
'--vocab': ModelFileArg('bert-large-uncased-whole-word-masking-squad-0001', 'vocab.txt'),
}),
TestCase(options={
'-m': ModelArg('bert-large-uncased-whole-word-masking-squad-int8-0001'),
'--input_names': 'input_ids,attention_mask,token_type_ids',
'--output_names': 'output_s,output_e',
'--vocab': ModelFileArg('bert-large-uncased-whole-word-masking-squad-int8-0001', 'vocab.txt')
}),
]
)),
PythonDemo(name='bert_question_answering_embedding_demo', device_keys=['-d'],
model_keys=['-m_emb', '-m_qa'], test_cases=combine_cases(
TestCase(options={'-i': 'https://en.wikipedia.org/wiki/OpenVINO',
'--questions': ['What frameworks does OpenVINO support?', 'Who are developers?']}),
[
TestCase(options={
'-m_emb': ModelArg('bert-large-uncased-whole-word-masking-squad-emb-0001'),
'--input_names_emb': 'input_ids,attention_mask,token_type_ids,position_ids',
'--vocab': ModelFileArg('bert-large-uncased-whole-word-masking-squad-emb-0001', 'vocab.txt'),
'-m_qa': ModelArg('bert-small-uncased-whole-word-masking-squad-0001'),
'--input_names_qa': 'input_ids,attention_mask,token_type_ids',
'--output_names_qa': 'output_s,output_e',
}),
TestCase(options={
'-m_emb': ModelArg('bert-large-uncased-whole-word-masking-squad-emb-0001'),
'--input_names_emb': 'input_ids,attention_mask,token_type_ids,position_ids',
'--vocab': ModelFileArg('bert-large-uncased-whole-word-masking-squad-emb-0001', 'vocab.txt'),
}),
TestCase(options={
'-m_emb': ModelArg('bert-small-uncased-whole-word-masking-squad-emb-int8-0001'),
'--input_names_emb': 'input_ids,attention_mask,token_type_ids,position_ids',
'--vocab': ModelFileArg('bert-small-uncased-whole-word-masking-squad-emb-int8-0001', 'vocab.txt'),
}),
]
)),
PythonDemo(name='bert_named_entity_recognition_demo', device_keys=['-d'], test_cases=combine_cases(
TestCase(options={
'-nireq': '1', # launch demo in synchronous mode
'-i': 'https://en.wikipedia.org/wiki/OpenVINO',
'-m': ModelArg('bert-base-ner'),
'-v': ModelFileArg('bert-base-ner', 'bert-base-ner/vocab.txt')
}),
)),
PythonDemo(name='classification_demo',
device_keys=['-d'],
test_cases=combine_cases(
TestCase(options={
'--no_show': None,
'-i': DataDirectoryOrigFileNamesArg('classification'),
'--labels': str(OMZ_DIR / 'data/dataset_classes/imagenet_2012.txt')}),
[
*single_option_cases('-m',
ModelArg('alexnet'),
ModelArg('densenet-121-tf'),
ModelArg('googlenet-v1'),
ModelArg('googlenet-v1-tf'),
ModelArg('googlenet-v3'),
ModelArg('googlenet-v3-pytorch'),
ModelArg('mixnet-l'),
ModelArg('mobilenet-v2-pytorch'),
ModelArg('repvgg-a0'),
ModelArg('repvgg-b1'),
ModelArg('repvgg-b3')),
TestCase(options={'-m': ModelFileArg('efficientnet-b0-pytorch', 'efficientnet-b0.onnx'),
'--reverse_input_channels': None,
'--mean_values': ['123.675', '116.28', '103.53'],
'--scale_values': ['58.395', '57.12', '57.375']}),
]
)),
PythonDemo(name='colorization_demo', device_keys=['-d'], test_cases=combine_cases(
TestCase(options={
'--no_show': None,
**MONITORS,
'-i': DataPatternArg('classification'),
'-m': ModelArg('colorization-v2'),
})
)),
PythonDemo(name='deblurring_demo', device_keys=['-d'], test_cases=combine_cases(
TestCase(options={'-i': DataPatternArg('face-detection-adas'),
**MONITORS,
'--no_show': None,
'-m': ModelArg('deblurgan-v2')}),
)),
PythonDemo(name='face_detection_mtcnn_demo', device_keys=['-d'],
model_keys=['-m_p', '-m_r', '-m_o'], test_cases=combine_cases(
TestCase(options={'--no_show': None,
'-i': image_net_arg('00000002'),
'-m_p': ModelArg('mtcnn-p'),
'-m_r': ModelArg('mtcnn-r'),
'-m_o': ModelArg('mtcnn-o')}),
)),
PythonDemo(name='face_recognition_demo', device_keys=['-d_fd', '-d_lm', '-d_reid'],
model_keys=['-m_fd', '-m_lm', '-m_reid'], test_cases=combine_cases(
TestCase(options={'--no_show': None,
**MONITORS,
'-i': DataPatternArg('face-detection-adas'),
'-fg': DataDirectoryArg('face-recognition-gallery')
}),
single_option_cases('-m_fd',
ModelArg('face-detection-adas-0001'),
ModelArg('face-detection-retail-0004'),
ModelArg('face-detection-retail-0005'),
ModelArg('face-detection-retail-0044')),
single_option_cases('-m_lm', ModelArg('landmarks-regression-retail-0009')),
single_option_cases('-m_reid',
ModelArg('Sphereface'),
ModelArg('face-reidentification-retail-0095'),
ModelArg('face-recognition-resnet100-arcface-onnx'),
ModelArg('facenet-20180408-102900')),
)),
PythonDemo(name='formula_recognition_demo', device_keys=['-d'],
model_keys=['-m_encoder', '-m_decoder'], test_cases=combine_cases(
TestCase(options={'--no_show': None}),
[
TestCase(options={
'-i': str(OMZ_DIR / 'models/intel/formula-recognition-medium-scan-0001/'
'assets/formula-recognition-medium-scan-0001.png'),
'-m_encoder': ModelArg('formula-recognition-medium-scan-0001-im2latex-encoder'),
'-m_decoder': ModelArg('formula-recognition-medium-scan-0001-im2latex-decoder'),
'--vocab': ModelFileArg('formula-recognition-medium-scan-0001-im2latex-decoder', 'vocab.json'),
}),
TestCase(options={
'-i': str(OMZ_DIR / 'models/intel/formula-recognition-polynomials-handwritten-0001/'
'assets/formula-recognition-polynomials-handwritten-0001.png'),
'-m_encoder': ModelArg('formula-recognition-polynomials-handwritten-0001-encoder'),
'-m_decoder': ModelArg('formula-recognition-polynomials-handwritten-0001-decoder'),
'--vocab': ModelFileArg('formula-recognition-polynomials-handwritten-0001-decoder', 'vocab.json'),
})
],
)),
PythonDemo(name='gesture_recognition_demo', device_keys=['-d'],
model_keys=['-m_d', '-m_a'], test_cases=combine_cases(
TestCase(options={'--no_show': None,
'-i': TestDataArg('msasl/global_crops/_nz_sivss20/clip_0017/img_%05d.jpg'),
'-m_d': ModelArg('person-detection-asl-0001')}),
[
TestCase(options={'-m_a': ModelArg('asl-recognition-0004'), '-c': str(OMZ_DIR / 'data/dataset_classes/msasl100.json')}),
TestCase(options={'-m_a': ModelArg('common-sign-language-0001'),
'-c': str(OMZ_DIR / 'data/dataset_classes/jester27.json')}),
TestCase(options={'-m_a': ModelArg('common-sign-language-0002'),
'-c': str(OMZ_DIR / 'data/dataset_classes/common_sign_language12.json')}),
],
)),
PythonDemo(name='gpt2_text_prediction_demo', device_keys=['-d'], test_cases=combine_cases(
TestCase(options={
'-i': ['The poem was written by'],
'-m': ModelArg('gpt-2'),
'-v': ModelFileArg('gpt-2', 'gpt2/vocab.json'),
'--merges': ModelFileArg('gpt-2', 'gpt2/merges.txt'),
}),
)),
PythonDemo(name='handwritten_text_recognition_demo', device_keys=['-d'], test_cases=combine_cases(
[
TestCase(options={
'-i': str(OMZ_DIR / 'models/intel/handwritten-japanese-recognition-0001/assets/handwritten-japanese-recognition-0001.png'),
'-m': ModelArg('handwritten-japanese-recognition-0001'),
'-cl': str(OMZ_DIR / 'data/dataset_classes/kondate_nakayosi.txt')
}),
TestCase(options={
'-i': str(OMZ_DIR / 'models/intel/handwritten-simplified-chinese-recognition-0001/assets/handwritten-simplified-chinese-recognition-0001.png'),
'-m': ModelArg('handwritten-simplified-chinese-recognition-0001'),
'-cl': str(OMZ_DIR / 'data/dataset_classes/scut_ept.txt')
}),
],
)),
# TODO ImportError: Module 'pose_extractor' not found.
# PythonDemo(name='human_pose_estimation_3d_demo', device_keys=['-d'], test_cases=combine_cases(
# TestCase(options={'--no_show': None,
# **MONITORS,
# '-i': DataPatternArg('human-pose-estimation')}),
# TestCase(options={'-m': ModelArg('human-pose-estimation-3d-0001')}),
# )),
PythonDemo(name='human_pose_estimation_demo', device_keys=['-d'], test_cases=combine_cases(
        TestCase(options={'--no_show': None,
**MONITORS,
'-i': DataPatternArg('human-pose-estimation')}),
[
TestCase(options={'-at': 'openpose', '-m': ModelArg('human-pose-estimation-0001')}),
TestCase(options={'-at': 'higherhrnet', '-m': ModelArg('higher-hrnet-w32-human-pose-estimation')}),
*combine_cases(
TestCase(options={'-at': 'ae'}),
single_option_cases('-m',
ModelArg('human-pose-estimation-0005'),
ModelArg('human-pose-estimation-0006'),
ModelArg('human-pose-estimation-0007'))),
],
)),
PythonDemo(name='image_inpainting_demo', device_keys=['-d'], test_cases=combine_cases(
TestCase(options={'--no_show': None,
'-i': image_net_arg('00048311'),
'-m': ModelArg('gmcnn-places2-tf'),
'-ar': None})
)),
PythonDemo(name='image_retrieval_demo', device_keys=['-d'], test_cases=combine_cases(
TestCase(options={'--no_show': None,
**MONITORS,
'-m': ModelArg('image-retrieval-0001')}),
single_option_cases('-i', *DATA_SEQUENCES['image-retrieval-video']),
single_option_cases('-g', image_retrieval_arg('gallery.txt')),
)),
PythonDemo(name='instance_segmentation_demo', device_keys=['-d'], test_cases=combine_cases(
TestCase(options={'--no_show': None,
**MONITORS,
'-i': DataPatternArg('instance-segmentation'),
'--labels': str(OMZ_DIR / 'data/dataset_classes/coco_80cl_bkgr.txt')}),
single_option_cases('-m',
ModelArg('instance-segmentation-security-0002'),
ModelArg('instance-segmentation-security-0091'),
ModelArg('instance-segmentation-security-0228'),
ModelArg('instance-segmentation-security-1039'),
ModelArg('instance-segmentation-security-1040')),
)),
PythonDemo(name='machine_translation_demo', device_keys=[], test_cases=combine_cases(
[
TestCase(options={
'-m': ModelArg('machine-translation-nar-en-ru-0002'),
'--tokenizer-src': ModelFileArg('machine-translation-nar-en-ru-0002', 'tokenizer_src'),
'--tokenizer-tgt': ModelFileArg('machine-translation-nar-en-ru-0002', 'tokenizer_tgt'),
'-i': [
'The quick brown fox jumps over the lazy dog.',
'The five boxing wizards jump quickly.',
'Jackdaws love my big sphinx of quartz.'
],
}),
TestCase(options={
'-m': ModelArg('machine-translation-nar-ru-en-0002'),
'--tokenizer-src': ModelFileArg('machine-translation-nar-ru-en-0002', 'tokenizer_src'),
'--tokenizer-tgt': ModelFileArg('machine-translation-nar-ru-en-0002', 'tokenizer_tgt'),
'-i': [
                    'В чащах юга жил бы цитрус? Да, но фальшивый экземпляр!',
                    'Широкая электрификация южных губерний даст мощный толчок подъёму сельского хозяйства.',
                    'Съешь же ещё этих мягких французских булок да выпей чаю.'
],
}),
]
)),
PythonDemo(name='monodepth_demo', device_keys=['-d'], test_cases=combine_cases(
TestCase(options={'--no_show': None, **MONITORS,
'-i': DataPatternArg('object-detection-demo'),
'-m': ModelArg('midasnet')})
)),
PythonDemo(name='multi_camera_multi_target_tracking_demo', device_keys=['-d'],
model_keys=['-m', '--m_reid'], test_cases=combine_cases(
TestCase(options={'--no_show': None,
**MONITORS,
'-i': [DataPatternArg('multi-camera-multi-target-tracking'),
DataPatternArg('multi-camera-multi-target-tracking/repeated')],
'-m': ModelArg('person-detection-retail-0013')}),
single_option_cases('--m_reid',
ModelArg('person-reidentification-retail-0277'),
ModelArg('person-reidentification-retail-0286'),
ModelArg('person-reidentification-retail-0287'),
ModelArg('person-reidentification-retail-0288')
),
)),
PythonDemo(name='noise_suppression_demo', device_keys=['-d'], test_cases=combine_cases(
TestCase(options={'-i': TestDataArg('how_are_you_doing.wav')}),
single_option_cases('-m',
ModelArg('noise-suppression-denseunet-ll-0001'),
ModelArg('noise-suppression-poconetlike-0001'))
)),
PythonDemo(name='object_detection_demo', device_keys=['-d'], test_cases=combine_cases(
TestCase(options={'--no_show': None, **MONITORS, '-i': DataPatternArg('object-detection-demo')}),
[
*combine_cases(
TestCase(options={'--architecture_type': 'centernet'}),
[
*single_option_cases('-m',
ModelArg('ctdet_coco_dlav0_512'),
),
*combine_cases(
TestCase(options={
'--mean_values': ['104.04', '113.985', '119.85'],
'--scale_values': ['73.695', '69.87', '70.89']
}),
single_option_cases('-m',
ModelFileArg('ctdet_coco_dlav0_512', 'ctdet_coco_dlav0_512.onnx'),
),
),
]
),
*combine_cases(
TestCase(options={'--architecture_type': 'faceboxes'}),
[
TestCase(options={'-m': ModelArg('faceboxes-pytorch')}),
TestCase(options={'-m': ModelFileArg('faceboxes-pytorch', 'faceboxes-pytorch.onnx'),
'--mean_values': ['104.0', '117.0', '123.0']}),
]
),
TestCase(options={'--architecture_type': 'ctpn',
'-m': ModelArg('ctpn')}
),
*combine_cases(
TestCase(options={'--architecture_type': 'retinaface-pytorch'}),
[
TestCase(options={'-m': ModelArg('retinaface-resnet50-pytorch')}),
TestCase(options={'-m': ModelFileArg('retinaface-resnet50-pytorch', 'retinaface-resnet50-pytorch.onnx'),
'--mean_values': ['104.0', '117.0', '123.0']}),
]
),
*combine_cases(
TestCase(options={'--architecture_type': 'ssd'}),
[
*single_option_cases('-m',
ModelArg('efficientdet-d0-tf'),
ModelArg('efficientdet-d1-tf'),
ModelArg('face-detection-0200'),
ModelArg('face-detection-0202'),
ModelArg('face-detection-0204'),
ModelArg('face-detection-0205'),
ModelArg('face-detection-0206'),
ModelArg('face-detection-adas-0001'),
ModelArg('face-detection-retail-0004'),
ModelArg('face-detection-retail-0005'),
ModelArg('face-detection-retail-0044'),
ModelArg('faster-rcnn-resnet101-coco-sparse-60-0001'),
ModelArg('pedestrian-and-vehicle-detector-adas-0001'),
ModelArg('pedestrian-detection-adas-0002'),
ModelArg('person-detection-0200'),
ModelArg('person-detection-0201'),
ModelArg('person-detection-0202'),
ModelArg('person-detection-retail-0013'),
ModelArg('person-vehicle-bike-detection-2000'),
ModelArg('person-vehicle-bike-detection-2001'),
ModelArg('person-vehicle-bike-detection-2002'),
ModelArg('person-vehicle-bike-detection-2003'),
ModelArg('person-vehicle-bike-detection-2004'),
ModelArg('pelee-coco'),
ModelArg('product-detection-0001'),
ModelArg('rfcn-resnet101-coco-tf'),
ModelArg('retinanet-tf'),
ModelArg('ssd300'),
ModelArg('ssd512'),
ModelArg('ssd_mobilenet_v1_coco'),
ModelArg('ssd_mobilenet_v1_fpn_coco'),
ModelArg('ssd-resnet34-1200-onnx'),
ModelArg('ssdlite_mobilenet_v2'),
ModelArg('vehicle-detection-0200'),
ModelArg('vehicle-detection-0201'),
                        ModelArg('vehicle-detection-0202'),
ModelArg('vehicle-detection-adas-0002'),
ModelArg('vehicle-license-plate-detection-barrier-0106'),
ModelArg('person-detection-0106')),
TestCase(options={'-m': ModelFileArg('ssd-resnet34-1200-onnx', 'resnet34-ssd1200.onnx'),
'--reverse_input_channels': None,
'--mean_values': ['123.675', '116.28', '103.53'],
'--scale_values': ['58.395', '57.12', '57.375']}),
]
),
*combine_cases(
TestCase(options={'--architecture_type': 'ultra_lightweight_face_detection'}),
[
*single_option_cases('-m',
ModelArg('ultra-lightweight-face-detection-rfb-320'),
ModelArg('ultra-lightweight-face-detection-slim-320'),
),
*combine_cases(
TestCase(options={
'--mean_values': ['127.0', '127.0', '127.0'],
'--scale_values': ['128.0', '128.0', '128.0']
}),
single_option_cases('-m',
ModelFileArg('ultra-lightweight-face-detection-rfb-320', 'ultra-lightweight-face-detection-rfb-320.onnx'),
ModelFileArg('ultra-lightweight-face-detection-slim-320', 'ultra-lightweight-face-detection-slim-320.onnx'),
),
),
]
),
*combine_cases(
TestCase(options={'--architecture_type': 'yolo'}),
single_option_cases('-m',
ModelArg('mobilefacedet-v1-mxnet'),
ModelArg('mobilenet-yolo-v4-syg'),
ModelArg('person-vehicle-bike-detection-crossroad-yolov3-1020'),
ModelArg('yolo-v1-tiny-tf'),
ModelArg('yolo-v2-ava-0001'),
ModelArg('yolo-v2-ava-sparse-35-0001'),
ModelArg('yolo-v2-ava-sparse-70-0001'),
ModelArg('yolo-v2-tf'),
ModelArg('yolo-v2-tiny-ava-0001'),
ModelArg('yolo-v2-tiny-ava-sparse-30-0001'),
ModelArg('yolo-v2-tiny-ava-sparse-60-0001'),
ModelArg('yolo-v2-tiny-tf'),
ModelArg('yolo-v2-tiny-vehicle-detection-0001'),
ModelArg('yolo-v3-tf'),
ModelArg('yolo-v3-tiny-tf')),
),
TestCase(options={'-at': 'yolov3-onnx', '-m': ModelArg('yolo-v3-onnx')}),
TestCase(options={'-at': 'yolov3-onnx', '-m': ModelArg('yolo-v3-tiny-onnx')}),
TestCase(options={'-at': 'yolov4', '-m': ModelArg('yolo-v4-tf')}),
TestCase(options={'-at': 'yolov4', '-m': ModelArg('yolo-v4-tiny-tf')}),
TestCase(options={'-at': 'yolof', '-m': ModelArg('yolof')}),
*combine_cases(
TestCase(options={'--architecture_type': 'detr'}),
[
TestCase(options={'-m': ModelArg('detr-resnet50')}),
TestCase(options={'-m': ModelFileArg('detr-resnet50', 'detr-resnet50.onnx'),
'--reverse_input_channels': None,
'--mean_values': ['123.675', '116.28', '103.53'],
'--scale_values': ['58.395', '57.12', '57.375']}),
]
),
*combine_cases(
TestCase(options={'--architecture_type': 'yolox'}),
[
TestCase(options={'-m': ModelArg('yolox-tiny')}),
TestCase(options={'-m': ModelFileArg('yolox-tiny', 'yolox-tiny.onnx'),
'--reverse_input_channels': None,
'--mean_values': ['123.675', '116.28', '103.53'],
'--scale_values': ['58.395', '57.12', '57.375']}),
]
),
],
)),
PythonDemo(name='segmentation_demo', device_keys=['-d'], test_cases=combine_cases(
TestCase(options={'--no_show': None, **MONITORS}),
[
TestCase(options={
'-m': ModelArg('road-segmentation-adas-0001'),
'-i': DataPatternArg('road-segmentation-adas'),
'-at': 'segmentation',
}),
*combine_cases(
TestCase(options={
'-i': DataPatternArg('semantic-segmentation-adas'),
'-at': 'segmentation',
}),
single_option_cases('-m',
ModelArg('semantic-segmentation-adas-0001'),
ModelArg('fastseg-large'),
ModelArg('fastseg-small'),
ModelArg('hrnet-v2-c1-segmentation'),
ModelArg('icnet-camvid-ava-0001'),
ModelArg('icnet-camvid-ava-sparse-30-0001'),
ModelArg('icnet-camvid-ava-sparse-60-0001'),
ModelArg('unet-camvid-onnx-0001'),
ModelArg('deeplabv3'),
ModelArg('ocrnet-hrnet-w48-paddle'),
ModelArg('pspnet-pytorch'),
ModelArg('drn-d-38'))),
TestCase(options={
'-m': ModelArg('f3net'),
'-i': DataPatternArg('road-segmentation-adas'),
'-at': 'salient_object_detection',
}),
],
)),
PythonDemo(name='single_human_pose_estimation_demo', device_keys=['-d'],
model_keys=['-m_od', '-m_hpe'], test_cases=combine_cases(
TestCase(options={'--no_show': None, **MONITORS,
'-i': DataPatternArg('human-pose-estimation'),
'--person_label': '1'}),
[
*combine_cases(
TestCase(options={'-m_hpe': ModelArg('single-human-pose-estimation-0001')}),
single_option_cases('-m_od',
ModelArg('mobilenet-ssd'),
ModelArg('person-detection-retail-0013'),
ModelArg('ssd_mobilenet_v1_coco'))),
]
)),
PythonDemo(name='smartlab_demo', device_keys=['-d'],
model_keys=['-m_ta', '-m_tm', '-m_fa', '-m_fm', '-m_en', '-m_de'],
test_cases=combine_cases(
[
TestCase(options={'-tv': TestDataArg('data/test_data/videos/smartlab/stream_8_top.mp4'),
'-fv': TestDataArg('data/test_data/videos/smartlab/stream_8_front.mp4'),
'-m_ta': ModelArg('smartlab-object-detection-0001'),
'-m_tm': ModelArg('smartlab-object-detection-0002'),
'-m_fa': ModelArg('smartlab-object-detection-0003'),
'-m_fm': ModelArg('smartlab-object-detection-0004'),
'-m_en': ModelArg('i3d-rgb-tf'),
'-m_de': ModelArg('smartlab-sequence-modelling-0001')}),
],
)),
PythonDemo(name='sound_classification_demo', device_keys=['-d'], test_cases=combine_cases(
TestCase(options={'-i': TestDataArg('how_are_you_doing.wav'),
'-m': ModelArg('aclnet')}),
)),
# TODO: No module named 'ctcdecode_numpy'
# PythonDemo(name='speech_recognition_deepspeech_demo', device_keys=['-d'], test_cases=combine_cases(
# TestCase(options={'-i': TestDataArg('how_are_you_doing.wav')}),
# [
# TestCase(options={'-p': 'mds08x_en',
# '-m': ModelArg('mozilla-deepspeech-0.8.2'),
# # run_tests.py puts pre-converted files into dl_dir as
# # it always runs converter.py without --output_dir
# '-L': ModelFileArg('mozilla-deepspeech-0.8.2', 'deepspeech-0.8.2-models.kenlm')}),
# TestCase(options={'-p': 'mds06x_en',
# '-m': ModelArg('mozilla-deepspeech-0.6.1'),
# # lm.binary is really in dl_dir
# '-L': ModelFileArg('mozilla-deepspeech-0.6.1', 'deepspeech-0.6.1-models/lm.binary')}),
# TestCase(options={'-p': 'mds08x_en', # test online mode
# '-m': ModelArg('mozilla-deepspeech-0.8.2'),
# # run_tests.py puts pre-converted files into dl_dir as
# # it always runs converter.py without --output_dir
# '-L': ModelFileArg('mozilla-deepspeech-0.8.2', 'deepspeech-0.8.2-models.kenlm'),
# '--realtime': None}),
# TestCase(options={'-p': 'mds08x_en', # test without LM
# '-m': ModelArg('mozilla-deepspeech-0.8.2')}),
# ],
# )),
PythonDemo(name='speech_recognition_quartznet_demo', device_keys=['-d'], test_cases=combine_cases(
TestCase(options={'-i': TestDataArg('how_are_you_doing.wav')}),
single_option_cases('-m',
ModelArg('quartznet-15x5-en'),
ModelFileArg('quartznet-15x5-en', 'quartznet.onnx'))
)),
PythonDemo(name='speech_recognition_wav2vec_demo', device_keys=['-d'], test_cases=combine_cases(
TestCase(options={'-i': TestDataArg('how_are_you_doing.wav')}),
single_option_cases('-m', ModelArg('wav2vec2-base'))
)),
PythonDemo(name='text_spotting_demo', device_keys=['-d'],
model_keys=['-m_m', '-m_te', '-m_td'], test_cases=combine_cases(
TestCase(options={'--no_show': None, '--delay': '1', **MONITORS,
'-i': DataPatternArg('text-detection')}),
[
TestCase(options={
'-m_m': ModelArg('text-spotting-0005-detector'),
'-m_te': ModelArg('text-spotting-0005-recognizer-encoder'),
'-m_td': ModelArg('text-spotting-0005-recognizer-decoder'),
'--no_track': None
}),
]
)),
PythonDemo(name='text_to_speech_demo', device_keys=['-d'],
model_keys=['-m_duration', '-m_forward', '-m_upsample', '-m_rnn', '-m_melgan'], test_cases=combine_cases(
TestCase(options={'-i': [
'The quick brown fox jumps over the lazy dog.',
'The five boxing wizards jump quickly.'
]}),
[
TestCase(options={
'-m_duration': ModelArg('forward-tacotron-duration-prediction'),
'-m_forward': ModelArg('forward-tacotron-regression'),
'-m_upsample': ModelArg('wavernn-upsampler'),
'-m_rnn': ModelArg('wavernn-rnn')
}),
TestCase(options={
'-m_duration': ModelArg('text-to-speech-en-0001-duration-prediction'),
'-m_forward': ModelArg('text-to-speech-en-0001-regression'),
'-m_melgan': ModelArg('text-to-speech-en-0001-generation')
}),
TestCase(options={
'-m_duration': ModelArg('text-to-speech-en-multi-0001-duration-prediction'),
'-m_forward': ModelArg('text-to-speech-en-multi-0001-regression'),
'-m_melgan': ModelArg('text-to-speech-en-multi-0001-generation')
}),
]
)),
PythonDemo(name='time_series_forecasting_demo', device_keys=[],
model_keys=['-m'], test_cases=[TestCase(options={'-h': ''})]),
PythonDemo(name='whiteboard_inpainting_demo', device_keys=['-d'],
model_keys=['-m_i', '-m_s'], test_cases=combine_cases(
TestCase(options={'-i': TestDataArg('msasl/global_crops/_nz_sivss20/clip_0017/img_%05d.jpg'),
**MONITORS,
'--no_show': None}),
[
*single_option_cases('-m_i',
ModelArg('instance-segmentation-security-0002'),
# ModelArg('instance-segmentation-security-0091'), # Slow model
ModelArg('instance-segmentation-security-0228'),
ModelArg('instance-segmentation-security-1039'),
ModelArg('instance-segmentation-security-1040')),
TestCase(options={'-m_s': ModelArg('semantic-segmentation-adas-0001')}),
]
)),
]
BASE = { demo.subdirectory : demo for demo in DEMOS }
```
#### File: accuracy_checker/data_readers/numpy_readers.py
```python
import re
from pathlib import Path
import numpy as np
from numpy.lib.npyio import NpzFile
from ..config import StringField, BoolField, NumberField, ConfigError
from .data_reader import BaseReader, DataRepresentation
from ..utils import get_path
class NumPyReader(BaseReader):
__provider__ = 'numpy_reader'
@classmethod
def parameters(cls):
parameters = super().parameters()
parameters.update({
'keys': StringField(optional=True, default="", description='Comma-separated model input names.'),
'separator': StringField(optional=True,
description='Separator symbol between input identifier and file identifier.'),
'id_sep': StringField(
optional=True, default="_",
description='Separator symbol between input name and record number in input identifier.'
),
'block': BoolField(optional=True, default=False, description='Allows block mode.'),
'batch': NumberField(optional=True, default=1, description='Batch size'),
'records_mode': BoolField(optional=True, default=False, description='separate data on records'),
})
return parameters
def configure(self):
self.is_text = self.config.get('text_file', False)
self.multi_infer = self.get_value_from_config('multi_infer')
self.keys = self.get_value_from_config('keys')
self.keys = [t.strip() for t in self.keys.split(',')] if len(self.keys) > 0 else []
self.separator = self.get_value_from_config('separator')
self.id_sep = self.get_value_from_config('id_sep')
self.block = self.get_value_from_config('block')
self.batch = int(self.get_value_from_config('batch'))
self.record_mode = self.get_value_from_config('records_mode')
if self.separator and self.is_text:
            raise ConfigError('text file reading with numpy does not support separation')
if not self.data_source:
if not self._postpone_data_source:
raise ConfigError('data_source parameter is required to create "{}" '
'data reader and read data'.format(self.__provider__))
else:
self.data_source = get_path(self.data_source, is_directory=True)
self.keyRegex = {k: re.compile(k + self.id_sep) for k in self.keys}
self.valRegex = re.compile(r"([^0-9]+)([0-9]+)")
self.data_layout = self.get_value_from_config('data_layout')
def read(self, data_id):
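        # Identifiers may arrive as "<input_name><id_sep><record_no><separator><file_name>";
        # when a separator is configured, split off the input/record part so the matching
        # array (and, in records mode, the matching record) can be selected from the npz file.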
field_id = None
if self.separator:
field_id, data_id = str(data_id).split(self.separator)
data_path = self.data_source / data_id if self.data_source is not None else data_id
data_path = str(data_path).replace('label', 'input')
data = np.load(str(data_path))
if not isinstance(data, NpzFile):
return data
if field_id is not None:
key = [k for k, v in self.keyRegex.items() if v.match(field_id)]
if len(key) > 0:
if self.block:
res = data[key[0]]
else:
recno = field_id.split('_')[-1]
recno = int(recno)
start = Path(data_id).name.split('.')[0]
start = int(start)
res = data[key[0]][recno - start, :]
return res
key = next(iter(data.keys()))
data = data[key]
if self.record_mode and self.id_sep in field_id:
recno = field_id.split(self.id_sep)[-1]
recno = int(recno)
res = data[recno, :]
return res
if self.multi_infer:
return list(data)
return data
class NumpyTXTReader(BaseReader):
__provider__ = 'numpy_txt_reader'
def read(self, data_id):
return np.loadtxt(str(self.data_source / data_id))
class NumpyDictReader(BaseReader):
__provider__ = 'numpy_dict_reader'
def read(self, data_id):
data_path = self.data_source / data_id if self.data_source is not None else data_id
return np.load(str(data_path), allow_pickle=True)[()]
def read_item(self, data_id):
dict_data = self.read_dispatcher(data_id)
identifier = []
data = []
for key, value in dict_data.items():
identifier.append('{}.{}'.format(data_id, key))
data.append(value)
if len(data) == 1:
return DataRepresentation(data[0], identifier=data_id)
return DataRepresentation(data, identifier=identifier)
class NumpyBinReader(BaseReader):
__provider__ = 'numpy_bin_reader'
@classmethod
def parameters(cls):
params = super().parameters()
params.update({
"dtype": StringField(optional=True, default='float32', description='data type for reading'),
            'as_buffer': BoolField(optional=True, default=False, description='interpret binary data as buffer'),
'offset': NumberField(optional=True, default=0, value_type=int, min_value=0)
})
return params
def configure(self):
super().configure()
self.dtype = self.get_value_from_config('dtype')
self.as_buffer = self.get_value_from_config('as_buffer')
self.offset = self.get_value_from_config('offset')
self.data_layout = self.get_value_from_config('data_layout')
def read(self, data_id):
data_path = self.data_source / data_id if self.data_source is not None else data_id
if not self.as_buffer:
return np.fromfile(data_path, dtype=self.dtype)
buffer = Path(data_path).open('rb').read()
return np.frombuffer(buffer[self.offset:], dtype=self.dtype)
``` |
{
"source": "jikechong/employment_skill_set_analysis",
"score": 3
} |
#### File: code/data_api/process_request.py
```python
from os import walk
import csv
import sys
import json
import string
import urllib2
from BeautifulSoup import BeautifulSoup
import ConfigParser
LOG_PRODUCTION = 0
LOG_WARNING = 1
LOG_ALL_MESSAGE = 10
# LOGLEVEL = LOG_ALL_MESSAGE
LOGLEVEL = LOG_WARNING
class process_request:
def __init__(self):
self.load_shml("../../config/whitehouse.cfg")
self.load_onet_title_lookup("../../model/title_onet.tsv")
self.load_tree("../../model/")
def load_shml(self, filename):
self.CONFIG = ConfigParser.ConfigParser()
self.CONFIG.read(filename)
self.SHXML_pshid = self.CONFIG.get("simplyhired_xml_api", "pshid")
self.SHXML_auth = self.CONFIG.get("simplyhired_xml_api", "auth" )
self.SHXML_ssty = self.CONFIG.get("simplyhired_xml_api", "ssty" )
self.SHXML_cflg = self.CONFIG.get("simplyhired_xml_api", "cflg" )
def load_onet_title_lookup(self, filename):
# Load Title to ONet lookup
self.title_onet_lookup = {}
f = open(filename, "r")
for line in f:
tokens = (line.strip()).split("\t")
if len(tokens) != 2:
if LOGLEVEL >= LOG_WARNING:
print "WARNING: line \"%s\", %s fields detected, looking for two"%(line,len(tokens))
title = tokens[0]
onet = tokens[1]
self.title_onet_lookup[title] = onet
f.close()
if LOGLEVEL >= LOG_ALL_MESSAGE:
print "INFO: Check loaded title to ONet lookup"
print " %s"%self.title_onet_lookup
def load_tree(self, filepath):
# Load trees for all O*Net
self.trees = {}
self.clusters = {}
for (dirpath, dirnames, filenames) in walk(filepath):
# print "INFO: %s"%dirnames
if len(dirnames) == 0:
continue
onet = ""
if len(dirnames[0]) == 10:
onet = dirnames[0]
else:
continue
# Load tree structure
try:
f = open("%s/%s/tree.json"%(filepath,onet), "r")
j = json.load(f)
f.close()
except:
e = sys.exc_info()[0]
print "Error: in loading tree.json for %s %s" %(onet, e)
continue
self.trees[onet] = j
# Load cluster meta data
try:
f = open("%s/%s/cluster_meta_data.txt"%(filepath,onet), "r")
reader = csv.reader(f, delimiter='\t')
self.clusters[onet] = {"clusters":{}, "questions":{}}
for row in reader:
if len(row) != 3:
if LOGLEVEL >= LOG_WARNING:
print "WARNING: Reading cluster meta data for %s and found %s"%(onet, row)
continue
self.clusters[onet]["clusters"][row[0]] = {"question":row[1], "query":row[2]}
self.clusters[onet]["questions"][row[1]] = {"cluster":row[0], "query":row[2]}
f.close()
except:
e = sys.exc_info()[0]
print "Error: in loading cluster metadata for %s %s" %(onet,e)
continue
if LOGLEVEL >= LOG_ALL_MESSAGE:
print "\nINFO: Check loaded tree json"
print " %s"%self.trees
print "\nINFO: Check loaded cluster meta data"
print " %s"%self.clusters
def process(self, req):
request = json.loads(req)
# Look for Title ONets
keyword = request["keyword"]
if keyword not in self.title_onet_lookup:
msg = "ERROR: Keyword not recognized"
print msg
return msg
else:
onet = self.title_onet_lookup[keyword]
if LOGLEVEL >= LOG_ALL_MESSAGE:
print "INFO: %s matched to %s"%(keyword, onet)
# Process location string
location = request["location"]
if not location: location = ""
location_clean = self.clean_string(location)
if LOGLEVEL >= LOG_ALL_MESSAGE:
print "INFO: Cleaned location %s"%location_clean
context = request["context"]
# Go through decision tree
response = self.traverse_response(onet, keyword, location, location_clean, context)
return response
def traverse_response(self, onet, keyword, location, location_clean, context):
self.yes_list = []
self.no_list = []
if LOGLEVEL >= LOG_ALL_MESSAGE:
print "INFO: onet %s keyword %s loation %s context %s"%(onet,keyword, location_clean, context)
# Start from root node
curr_node = '0'
response = self.traverse_tree(curr_node, onet, keyword, location, location_clean, context)
return response
def traverse_tree(self, curr_node, onet, keyword, location, location_clean, context):
# Check inventory of search results - if less than 10, stop questioning now
# - Yes/No list implicitly passed as class state
n_jobs, jobs = self.check_job_market(onet, keyword, location_clean)
if n_jobs < 30:
response = self.generate_response_in_context(keyword, location, n_jobs=n_jobs, jobs=jobs)
if LOGLEVEL >= LOG_ALL_MESSAGE:
print "\nINFO: only %s jobs found, return %s"%(n_jobs,response)
return response
curr_cluster = str(self.trees[onet][curr_node]["feature"])
# Check for end of tree
if curr_cluster == '-2':
response = self.generate_response_in_context(keyword, location, n_jobs=n_jobs, jobs=jobs)
if LOGLEVEL >= LOG_ALL_MESSAGE:
print "\nINFO: Leaf node %s found, return %s"%(curr_node,response)
return response
# Otherwise continue recursive tree traversal
curr_question = self.clusters[onet]['clusters'][curr_cluster]['question']
curr_keyword = self.clusters[onet]['clusters'][curr_cluster]['query']
if LOGLEVEL >= LOG_ALL_MESSAGE:
print "\nINFO: Traverse Tree at node %s cluster %s question %s keyword %s"%(curr_node, curr_cluster, curr_question, curr_question)
if not context: context = []
context_hash = {}
for ele in context:
context_hash[ele["question"]] = ele["answer"]
if curr_question not in context_hash:
# Ask the question as response
# - Yes/No list implicitly passed as class state
response = self.generate_response_in_context(keyword, location, n_jobs=n_jobs, question = curr_question)
if LOGLEVEL >= LOG_ALL_MESSAGE:
print "\nINFO: Found next question to ask [%s]"%curr_question
print "Response: %s"%response
return response
curr_choice = context_hash[curr_question]
element = {"choice":curr_choice, "question":curr_question, "query":curr_keyword}
if str(curr_choice) == '1':
self.yes_list.append(element)
curr_node = str(self.trees[onet][curr_node]["right_child"])
elif str(curr_choice) == '0':
self.no_list.append(element)
            curr_node = str(self.trees[onet][curr_node]["left_child"])
response = self.traverse_tree(curr_node, onet, keyword, location, location_clean, context)
return response
# Check current market place
def check_job_market(self, onet, keyword, location_clean):
req = 'http://api.simplyhired.com/a/jobs-api/xml-v2/q-'
# Construct the query
keyword_clean = self.clean_string(keyword)
req = req+keyword_clean
for element in self.yes_list:
ele_clean = self.clean_string(element["query"])
req = req+'+AND+'+ele_clean
for element in self.no_list:
ele_clean = self.clean_string(element["query"])
req = req+'+AND+NOT+'+ele_clean
req = req+'/l-'+location_clean
req = req+'?pshid='+self.SHXML_pshid
req = req+'&ssty='+self.SHXML_ssty
req = req+'&cflg='+self.SHXML_cflg
req = req+'&auth='+self.SHXML_auth
if LOGLEVEL >= LOG_ALL_MESSAGE:
print "\nINFO: check API with %s"%req
response = urllib2.urlopen(req)
xml = response.read()
parsed_xml = BeautifulSoup(xml)
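        # The total number of matching jobs is reported in the <tr> element of the
        # SimplyHired XML response; strip the tags and parse the remaining count.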
line = str(parsed_xml.shrs.rq.tr)
line = line.replace('<tr>', "")
line = line.replace('</tr>', "")
n_jobs = string.atoi(line)
jobs = str(parsed_xml.shrs.rs)
if LOGLEVEL >= LOG_ALL_MESSAGE:
print "INFO: Total results %s"%n_jobs
return n_jobs,jobs
# Ask the question as response
def generate_response_in_context(self, keyword, location, n_jobs=-1, question="", jobs=[]):
# Build Response
response = {"keyword":keyword, "location":location, "context":[]}
# n_jobs reporting
if n_jobs != -1:
response["n_jobs"] = n_jobs
# Include "Yes" context
for element in self.yes_list:
response["context"].append({"question":element["question"], "answer":element["choice"]})
if str(element["choice"]) != "1":
print "ERROR: invariant failed in generate_response_in_context for [%s]"%element
exit(1)
# Include "No" context
for element in self.no_list:
response["context"].append({"question":element["question"], "answer":element["choice"]})
if str(element["choice"]) != "0":
print "ERROR: invariant failed in generate_response_in_context for [%s]"%element
exit(1)
# Include questions
if len(question) != 0:
response["question"] = question
return response
elif len(jobs) != 0:
response["jobs"] = jobs
# Todo: Jobs currently in XML format
return response
else:
if len(response["context"]) == 0:
response["error"] = "No %s jobs in %s found"%(keyword, location)
else:
response["error"] = "No %s jobs in %s found with current set of constraints"%(keyword, location)
return response
def clean_string(self, input=""):
output = input.replace(':','%3A')
output = output.replace(',','%2C')
output = output.replace(' ','+')
return output
def main():
pr = process_request()
# test = {"keyword": "retail sales","location":"Washington, DC","context": []}
# test = {"keyword": "retail sales","location":"Washington, DC","context": [{"question":"Can you use devices like scanners?","answer": 1}]}
test = {"keyword": "retail sales","location":"Modesto, CA","context": [{"question":"Can you use devices like scanners?","answer": 1}]}
print pr.process(json.dumps(test))
if __name__ == '__main__':
main()
```
#### File: data_api/unit_test/server_test.py
```python
import json
import urllib2
import requests
url = 'http://192.168.3.11:8080/post'
def main():
f = open("test_vector.json", "r")
test_vec = json.load(f)
f.close()
for d in test_vec:
print "\nRequest:\n %s"%d
data = json.dumps(d)
clen = len(data)
req = urllib2.Request(url, data, {'Content-Type': 'application/json', 'Content-Length': clen})
f = urllib2.urlopen(req)
response = f.read()
print "\nResponse:\n %s"%response
f.close()
if __name__ == '__main__':
main()
``` |
{
"source": "jikelab/jdp-package",
"score": 2
} |
#### File: layer-hive/reactive/hive.py
```python
from charmhelpers.core import hookenv
from charms.layer.apache_bigtop_base import get_layer_opts, get_package_version
from charms.layer.bigtop_hive import Hive
from charms.reactive import (
RelationBase,
is_state,
remove_state,
set_state,
when,
when_not,
)
from charms.reactive.helpers import data_changed
@when('bigtop.available')
def report_status():
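    # Reflect the current relation states in the Juju workload status, checking the
    # hard blocker (no hadoop relation) before the waiting and ready conditions.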
hadoop_joined = is_state('hadoop.joined')
hadoop_ready = is_state('hadoop.ready')
hbase_joined = is_state('hbase.joined')
hbase_ready = is_state('hbase.ready')
database_joined = is_state('database.connected')
database_ready = is_state('database.available')
hive_installed = is_state('hive.installed')
if not hadoop_joined:
hookenv.status_set('blocked',
'waiting for relation to hadoop plugin')
elif not hadoop_ready:
hookenv.status_set('waiting',
'waiting for hadoop to become ready')
elif database_joined and not database_ready:
hookenv.status_set('waiting',
'waiting for database to become ready')
elif hbase_joined and not hbase_ready:
hookenv.status_set('waiting',
'waiting for hbase to become ready')
elif hive_installed and not database_ready:
hookenv.status_set('active',
'ready (local metastore)')
elif hive_installed and database_ready:
hookenv.status_set('active',
'ready (remote metastore)')
@when('bigtop.available', 'hadoop.ready')
def install_hive(hadoop):
'''
Anytime our dependencies are available, check to see if we have a valid
reason to (re)install. These include:
- initial install
- HBase has joined/departed
'''
# Hive cannot handle - in the metastore db name and
# mysql uses the service name to name the db
if "-" in hookenv.service_name():
hookenv.status_set('blocked', "application name may not contain '-'; "
"redeploy with a different name")
return
# Get hbase connection dict if it's available
if is_state('hbase.ready'):
hbase = RelationBase.from_state('hbase.ready')
hbserver = hbase.hbase_servers()[0]
else:
hbserver = None
# Use this to determine if we need to reinstall
deployment_matrix = {
'hbase': hbserver,
}
# Handle nuances when installing versus re-installing
if not is_state('hive.installed'):
prefix = "installing"
# On initial install, prime our kv with the current deployment matrix.
# Subsequent calls will use this to determine if a reinstall is needed.
data_changed('deployment_matrix', deployment_matrix)
else:
prefix = "configuring"
# Return if our matrix has not changed
if not data_changed('deployment_matrix', deployment_matrix):
return
hookenv.status_set('maintenance', '{} hive'.format(prefix))
hookenv.log("{} hive with: {}".format(prefix, deployment_matrix))
hive = Hive()
hive.install(hbase=hbserver)
hive.restart()
hive.open_ports()
set_state('hive.installed')
report_status()
# set app version string for juju status output
hive_version = get_package_version('hive') or 'unknown'
hookenv.application_version_set(hive_version)
@when('hive.installed', 'config.changed.heap')
def config_changed():
hookenv.status_set('maintenance', 'configuring with new options')
hive = Hive()
hive.configure_hive()
hive.restart()
report_status()
@when('hive.installed', 'database.available')
@when_not('hive.db.configured')
def configure_with_remote_db(db):
hookenv.status_set('maintenance', 'configuring external database')
hive = Hive()
hive.configure_remote_db(db)
hive.restart()
set_state('hive.db.configured')
report_status()
@when('hive.installed', 'hive.db.configured')
@when_not('database.available')
def configure_with_local_db():
'''
Reconfigure Hive using a local metastore db.
The initial installation will configure Hive with a local metastore_db.
Once an external db becomes available, we reconfigure Hive to use it. If
that external db goes away, we'll use this method to set Hive back into
local mode.
'''
hookenv.status_set('maintenance', 'configuring local database')
hive = Hive()
hive.configure_local_db()
hive.restart()
remove_state('hive.db.configured')
report_status()
@when('hive.installed')
@when_not('hadoop.ready')
def stop_hive():
'''
Hive depends on Hadoop. If we are installed and hadoop goes away, shut down
services and remove our installed state.
'''
hive = Hive()
hive.close_ports()
hive.stop()
remove_state('hive.installed')
report_status()
@when('hive.installed', 'client.joined')
def serve_client(client):
'''
Inform clients when hive is ready to serve.
'''
port = get_layer_opts().port('hive-thrift')
client.send_port(port)
client.set_ready()
@when('client.joined')
@when_not('hive.installed')
def stop_serving_client(client):
'''
Inform connected clients that Hive is no longer ready. This can happen
if Hadoop goes away (the 'installed' state will be removed).
'''
client.clear_ready()
```
#### File: layer-pig/tests/01-deploy.py
```python
import amulet
import re
import unittest
class TestDeploy(unittest.TestCase):
"""
Deployment and smoke test for the Apache Bigtop Pig service.
"""
@classmethod
def setUpClass(cls):
cls.d = amulet.Deployment(series='xenial')
cls.d.add('pig')
cls.d.setup(timeout=1800)
cls.d.sentry.wait_for_messages({'pig': re.compile('ready')}, timeout=1800)
cls.pig = cls.d.sentry['pig'][0]
def test_pig(self):
"""
Validate Pig by running the smoke-test action.
"""
uuid = self.pig.action_do('smoke-test')
result = self.d.action_fetch(uuid)
# pig smoke-test sets outcome=success on success
if (result['outcome'] != "success"):
error = "Pig smoke-test failed"
amulet.raise_status(amulet.FAIL, msg=error)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "Jikol/terracotta",
"score": 2
} |
#### File: tests/scripts/test_optimize_rasters.py
```python
import os
import warnings
import traceback
import rasterio
import numpy as np
from click.testing import CliRunner
import pytest
def format_exception(result):
return ''.join(traceback.format_exception(*result.exc_info))
@pytest.fixture()
def tiny_raster_file(unoptimized_raster_file, tmpdir_factory):
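    # Crop the unoptimized fixture raster down to 100x100 pixels; rasters this small
    # pass COG validation even without overviews (see test_optimize_rasters_small).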
tmpdir = tmpdir_factory.mktemp('tiny-raster')
outfile = tmpdir / 'tiny.tif'
with rasterio.open(str(unoptimized_raster_file)) as src:
profile = src.profile.copy()
profile.update(
width=100,
height=100,
blockxsize=256,
blockysize=256
)
with rasterio.open(str(outfile), 'w', **profile) as dst:
            dst.write(src.read()[:, :100, :100])
yield outfile
@pytest.mark.parametrize('in_memory', [True, None, False])
@pytest.mark.parametrize('reproject', [True, False])
@pytest.mark.parametrize('compression', ['auto', 'lzw', 'none'])
@pytest.mark.parametrize('nproc', [None, 1, 2, -1])
def test_optimize_rasters(unoptimized_raster_file, tmpdir, in_memory,
reproject, compression, nproc):
from terracotta.cog import validate
from terracotta.scripts import cli
input_pattern = str(unoptimized_raster_file.dirpath('*.tif'))
outfile = tmpdir / unoptimized_raster_file.basename
runner = CliRunner()
flags = ['--compression', compression, '-q']
if in_memory is not None:
flags.append('--in-memory' if in_memory else '--no-in-memory')
if reproject:
flags.append('--reproject')
if nproc is not None:
flags.append(f'--nproc={nproc}')
result = runner.invoke(cli.cli, ['optimize-rasters', input_pattern, '-o', str(tmpdir), *flags])
assert result.exit_code == 0, format_exception(result)
assert outfile.check()
# validate files
assert not validate(str(unoptimized_raster_file))
assert validate(str(outfile))
if reproject:
return
# check for data integrity
with rasterio.open(str(unoptimized_raster_file)) as src1, rasterio.open(str(outfile)) as src2:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'invalid value encountered.*')
np.testing.assert_array_equal(src1.read(), src2.read())
def test_optimize_rasters_small(tiny_raster_file, tmpdir):
from terracotta.cog import validate
from terracotta.scripts import cli
input_pattern = str(tiny_raster_file)
outfile = tmpdir / tiny_raster_file.basename
runner = CliRunner()
result = runner.invoke(cli.cli, ['optimize-rasters', input_pattern, '-o', str(tmpdir)])
assert result.exit_code == 0, format_exception(result)
assert outfile.check()
# validate files
# (small rasters don't need overviews, so input file is valid, too)
assert validate(str(tiny_raster_file))
assert validate(str(outfile))
# check for data integrity
with rasterio.open(str(tiny_raster_file)) as src1, rasterio.open(str(outfile)) as src2:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'invalid value encountered.*')
np.testing.assert_array_equal(src1.read(), src2.read())
def test_optimize_rasters_nofiles(tmpdir):
from terracotta.scripts import cli
input_pattern = str(tmpdir.dirpath('*.tif'))
runner = CliRunner()
result = runner.invoke(cli.cli, ['optimize-rasters', input_pattern, '-o', str(tmpdir), '-q'])
assert result.exit_code == 0
assert 'No files given' in result.output
def test_optimize_rasters_invalid(tmpdir):
from terracotta.scripts import cli
runner = CliRunner()
result = runner.invoke(cli.cli, ['optimize-rasters', str(tmpdir), '-o', str(tmpdir), '-q'])
assert result.exit_code != 0
assert 'not a file' in result.output
result = runner.invoke(cli.cli, ['optimize-rasters', str(tmpdir), '-o', str(tmpdir),
'--overwrite', '--skip-existing'])
assert result.exit_code != 0
assert 'mutually exclusive' in result.output
def test_optimize_rasters_multiband(tmpdir, unoptimized_raster_file):
from terracotta.scripts import cli
import rasterio
with rasterio.open(str(unoptimized_raster_file)) as src:
profile = src.profile.copy()
data = src.read(1)
profile['count'] = 3
multiband_file = tmpdir.join(unoptimized_raster_file.basename)
with rasterio.open(str(multiband_file), 'w', **profile) as dst:
dst.write(data, 1)
dst.write(data, 2)
dst.write(data, 3)
input_pattern = str(multiband_file.dirpath('*.tif'))
outfile = tmpdir / 'co' / unoptimized_raster_file.basename
runner = CliRunner()
result = runner.invoke(
cli.cli,
['optimize-rasters', input_pattern, '-o', str(tmpdir / 'co')]
)
assert result.exit_code == 0
assert 'has more than one band' in result.output
with rasterio.open(str(unoptimized_raster_file)) as src1, rasterio.open(str(outfile)) as src2:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'invalid value encountered.*')
np.testing.assert_array_equal(src1.read(), src2.read())
@pytest.mark.parametrize('extra_flag', ['skip-existing', 'overwrite', None])
def test_reoptimize(tmpdir, unoptimized_raster_file, extra_flag):
from terracotta.scripts import cli
infile = str(unoptimized_raster_file.dirpath('*.tif'))
outfile = tmpdir / 'out.tif'
# first time
runner = CliRunner()
args = ['optimize-rasters', infile, '-o', str(outfile)]
result = runner.invoke(cli.cli, args)
assert result.exit_code == 0
ctime = os.path.getmtime(outfile)
# second time
args = ['optimize-rasters', infile, '-o', str(outfile)]
if extra_flag:
args.append(f'--{extra_flag}')
result = runner.invoke(cli.cli, args)
if extra_flag == 'skip-existing':
assert result.exit_code == 0
assert os.path.getmtime(outfile) == ctime
elif extra_flag == 'overwrite':
assert result.exit_code == 0
assert os.path.getmtime(outfile) != ctime
else:
assert result.exit_code == 2
def _throw(*args):
raise RuntimeError('A mock error is raised')
def test_exception_in_subprocess(unoptimized_raster_file, tmpdir, monkeypatch):
from terracotta.scripts import cli
monkeypatch.setattr(
'terracotta.scripts.optimize_rasters._optimize_single_raster',
_throw
)
args = [
'optimize-rasters', str(unoptimized_raster_file), '-o',
str(tmpdir / 'foo.tif'), '--nproc', 2
]
runner = CliRunner()
result = runner.invoke(cli.cli, args)
assert result.exit_code != 0
assert 'Error while optimizing file' in str(result.exception)
```
#### File: terracotta/tests/test_expressions.py
```python
import pytest
import sys
from textwrap import dedent
import numpy as np
OPERANDS = {
'v1': np.ma.masked_array(np.arange(1, 6), dtype='float64'),
'v2': np.ma.masked_array(2 * np.arange(1, 6), dtype='float64', mask=np.array([1, 1, 1, 0, 0])),
}
VALID_EXPR = (
# identity
('v1', OPERANDS['v1']),
# multiline
(
dedent('''
(
v1 +
v1
)
'''),
2 * OPERANDS['v1']
),
# negation
('-v1', -OPERANDS['v1']),
# abs
('abs(v1)', np.abs(OPERANDS['v1'])),
# sqrt and *
('sqrt(v1 * v1)', OPERANDS['v1']),
# sqrt and **
('sqrt(v1 ** 2)', OPERANDS['v1']),
# /
('v1 / v1', np.ones_like(OPERANDS['v1'])),
# %
('v1 % v1', np.zeros_like(OPERANDS['v1'])),
# simple index calculation
(
'(v2 - v1) / (v2 + v1)',
(OPERANDS['v2'] - OPERANDS['v1']) / (OPERANDS['v2'] + OPERANDS['v1'])
),
# conditionals
(
'where(v2 > v1, 100, 0)', np.where(OPERANDS['v2'] > OPERANDS['v1'], 100, 0)
),
# comparisons
('v1 == v2', OPERANDS['v1'] == OPERANDS['v2']),
('v1 != v2', OPERANDS['v1'] != OPERANDS['v2']),
('v1 > v2', OPERANDS['v1'] > OPERANDS['v2']),
('v1 < v2', OPERANDS['v1'] < OPERANDS['v2']),
('v1 >= v2', OPERANDS['v1'] >= OPERANDS['v2']),
('v1 <= v2', OPERANDS['v1'] <= OPERANDS['v2']),
('(v1 < 0.5) & (v2 > 0.5)', (OPERANDS['v1'] < 0.5) & (OPERANDS['v2'] > 0.5)),
('(v1 < 0.5) | (v2 > 0.5)', (OPERANDS['v1'] < 0.5) | (OPERANDS['v2'] > 0.5)),
('~(v1 < 0.5) & (v2 > 0.5)', ~(OPERANDS['v1'] < 0.5) & (OPERANDS['v2'] > 0.5)),
# maximum
(
'maximum(v1, v2)', np.maximum(OPERANDS['v1'], OPERANDS['v2'])
),
# minimum
(
'minimum(v1, v2)', np.minimum(OPERANDS['v1'], OPERANDS['v2'])
),
# sin / arcsin
(
'arcsin(sin(v1))', np.arcsin(np.sin(OPERANDS['v1']))
),
# trigonometry
(
'sin(pi * v1)', np.sin(np.pi * OPERANDS['v1'])
),
# mask operations
(
'setmask(v1, getmask(v2))', np.ma.masked_array(OPERANDS['v1'], mask=OPERANDS['v2'].mask)
),
(
'setmask(v2, nomask)', np.ma.masked_array(OPERANDS['v2'], mask=np.ma.nomask)
),
( # replaces mask
'setmask(v2, ~getmask(v2))', np.ma.masked_array(OPERANDS['v2'], mask=~OPERANDS['v2'].mask)
),
( # adds to mask
'masked_where(~getmask(v2), v2)', np.ma.masked_array(OPERANDS['v2'], mask=True)
),
# long expression
(
'+'.join(['v1'] * 1000), sum(OPERANDS['v1'] for _ in range(1000))
)
)
INVALID_EXPR = (
# haxx
('__builtins__["dir"]', 'not allowed in expressions'),
# uses list
('[0] * 1000000000', 'not allowed in expressions'),
# uses dict
('{}', 'not allowed in expressions'),
# uses string
('"general kenobi!"', 'not allowed in expressions'),
# if construct
('if True: v1', 'is not a valid expression'),
# inline comparison
('v1 if True else 0', 'not allowed in expressions'),
# unsupported unary operator
('not v1', 'unary operator Not'),
# unsupported binary operator
('v1 ^ v1', 'binary operator BitXor'),
# and
('v1 and v2', 'not allowed in expressions'),
# does not return an array
('0', 'does not return an array'),
# more than one expression
('v1; v1', 'is not a valid expression'),
# dunder method
('__name__', 'unrecognized name \'__name__\''),
# builtins
('dir(v1)', 'unrecognized name \'dir\''),
# method call
('v1.mean()', 'not allowed in expressions'),
# attribute access
('v1.size', 'not allowed in expressions'),
# chained comparisons
('where(v1 < v2 == v2, 0, 1)', 'not supported'),
# unknown operand
('v100', 'unrecognized name \'v100\''),
# wrong number of arguments
('maximum(v1, v1, v1, v1, v1)', 'got 5, expected 2'),
# not a valid expression
('k = v1', 'is not a valid expression'),
# internal numpy error (mismatching types)
('v1 & v2', 'unexpected error'),
# code injection (serious haxx)
(
dedent('''
(lambda fc=(
lambda n: [
c for c in
().__class__.__bases__[0].__subclasses__()
if c.__name__ == n
][0]
):
fc("function")(
fc("code")(
0,0,0,0,"KABOOM",(),(),(),"","",0,""
),{}
)()
)()
'''),
'not allowed in expressions'
)
)
@pytest.mark.parametrize('case', VALID_EXPR)
def test_valid_expression(case):
from terracotta.expressions import evaluate_expression
# make sure we have enough recursion depth for long expression
sys.setrecursionlimit(10_000)
expr, result = case
np.testing.assert_array_equal(
evaluate_expression(expr, OPERANDS),
result
)
@pytest.mark.parametrize('case', INVALID_EXPR)
def test_invalid_expression(case):
from terracotta.expressions import evaluate_expression
expr, exc_msg = case
with pytest.raises(ValueError) as raised_exc:
evaluate_expression(expr, OPERANDS)
assert exc_msg in str(raised_exc.value)
def test_invalid_compop(monkeypatch):
from terracotta.expressions import evaluate_expression, ExpressionParser
expr = 'v0 < v1'
exc_msg = 'comparison operator'
with monkeypatch.context() as m:
m.setattr(ExpressionParser, 'NODE_TO_COMPOP', {})
with pytest.raises(ValueError) as raised_exc:
evaluate_expression(expr, OPERANDS)
assert exc_msg in str(raised_exc.value)
def test_timeout():
from terracotta.expressions import evaluate_expression
with pytest.raises(RuntimeError) as raised_exc:
evaluate_expression('+'.join(['v1'] * 100), {'v1': np.ones((256, 256))}, timeout=0)
assert 'timeout' in str(raised_exc.value)
def test_mask_invalid():
from terracotta.expressions import evaluate_expression
res = evaluate_expression('where(v1 + v2 < 10, nan, 0)', OPERANDS)
mask = (OPERANDS['v1'] + OPERANDS['v2'] < 10) | OPERANDS['v1'].mask | OPERANDS['v2'].mask
assert isinstance(res, np.ma.MaskedArray)
assert np.all(res == 0)
assert np.array_equal(res.mask, mask)
def test_out_dtype():
from terracotta.expressions import evaluate_expression
operands = dict(v1=np.ones(10, dtype='int64'), v2=np.zeros(10, dtype='int32'))
res = evaluate_expression('v1 + v2', operands)
assert isinstance(res, np.ma.MaskedArray)
assert res.dtype == np.dtype('int64')
``` |
{
"source": "jikope/ytdl-wxpython",
"score": 3
} |
#### File: src/ui/action_button.py
```python
import wx
ACTION_START = 1
ACTION_PAUSE = 2
ACTION_DELETE = 3
class ActionButton(wx.Menu):
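    """Context menu offering Start/Pause/Delete actions for a single download entry."""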
def __init__(self, parent, obj):
super(ActionButton, self).__init__()
self.parent = parent
self.obj = obj
start = wx.MenuItem(self, ACTION_START, "&Start")
pause = wx.MenuItem(self, ACTION_PAUSE, "&Pause")
delete = wx.MenuItem(self, ACTION_DELETE, "&Delete")
self.Bind(wx.EVT_MENU, self.OnStart, id=ACTION_START)
self.Bind(wx.EVT_MENU, self.OnPause, id=ACTION_PAUSE)
self.Bind(wx.EVT_MENU, self.OnDelete, id=ACTION_DELETE)
self.Append(start)
self.Append(pause)
self.Append(delete)
def OnStart(self, e):
for item in self.parent.data["download_list"]:
if item["title"] == self.obj['title'].GetLabel() and item['status'] != 'Completed':
self.parent.CreateThread(item['video_url'], item['format_id'], self.obj)
break
elif item["title"] == self.obj['title'].GetLabel() and item['status'] == 'Completed':
wx.MessageBox('Item already downloaded', 'Info', wx.OK | wx.ICON_INFORMATION)
break
else:
print("test")
def OnPause(self, e):
self.parent.CleanUp(self.obj)
def OnDelete(self, e):
self.parent.remove_item(self.obj)
``` |