{
"source": "AakankshaAshok/pandas",
"score": 2
} |
#### File: pandas/core/apply.py
```python
import inspect
import numpy as np
from pandas._libs import reduction as libreduction
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
is_dict_like,
is_extension_array_dtype,
is_list_like,
is_sequence,
)
from pandas.core.dtypes.generic import ABCSeries
def frame_apply(
obj,
func,
axis=0,
raw=False,
result_type=None,
ignore_failures=False,
args=None,
kwds=None,
):
""" construct and return a row or column based frame apply object """
axis = obj._get_axis_number(axis)
if axis == 0:
klass = FrameRowApply
elif axis == 1:
klass = FrameColumnApply
return klass(
obj,
func,
raw=raw,
result_type=result_type,
ignore_failures=ignore_failures,
args=args,
kwds=kwds,
)
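# Usage sketch (illustrative; not part of the original module): DataFrame.apply
# delegates to this factory, roughly as
#   op = frame_apply(df, func=np.sqrt, axis=0, raw=False, result_type=None)
#   result = op.get_result()
# axis=0 (apply func to each column) selects FrameRowApply; axis=1 (apply to
# each row) selects FrameColumnApply.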
class FrameApply:
def __init__(self, obj, func, raw, result_type, ignore_failures, args, kwds):
self.obj = obj
self.raw = raw
self.ignore_failures = ignore_failures
self.args = args or ()
self.kwds = kwds or {}
if result_type not in [None, "reduce", "broadcast", "expand"]:
raise ValueError(
"invalid value for result_type, must be one "
"of {None, 'reduce', 'broadcast', 'expand'}"
)
self.result_type = result_type
# curry if needed
if (kwds or args) and not isinstance(func, (np.ufunc, str)):
def f(x):
return func(x, *args, **kwds)
else:
f = func
self.f = f
# results
self.result = None
self.res_index = None
self.res_columns = None
@property
def columns(self):
return self.obj.columns
@property
def index(self):
return self.obj.index
@cache_readonly
def values(self):
return self.obj.values
@cache_readonly
def dtypes(self):
return self.obj.dtypes
@property
def agg_axis(self):
return self.obj._get_agg_axis(self.axis)
def get_result(self):
""" compute the results """
# dispatch to agg
if is_list_like(self.f) or is_dict_like(self.f):
return self.obj.aggregate(self.f, axis=self.axis, *self.args, **self.kwds)
# all empty
if len(self.columns) == 0 and len(self.index) == 0:
return self.apply_empty_result()
# string dispatch
if isinstance(self.f, str):
# Support for `frame.transform('method')`
# Some methods (shift, etc.) require the axis argument, others
# don't, so inspect and insert if necessary.
func = getattr(self.obj, self.f)
sig = inspect.getfullargspec(func)
if "axis" in sig.args:
self.kwds["axis"] = self.axis
return func(*self.args, **self.kwds)
# ufunc
elif isinstance(self.f, np.ufunc):
with np.errstate(all="ignore"):
results = self.obj._data.apply("apply", func=self.f)
return self.obj._constructor(
data=results, index=self.index, columns=self.columns, copy=False
)
# broadcasting
if self.result_type == "broadcast":
return self.apply_broadcast()
# one axis empty
elif not all(self.obj.shape):
return self.apply_empty_result()
# raw
elif self.raw and not self.obj._is_mixed_type:
return self.apply_raw()
return self.apply_standard()
def apply_empty_result(self):
"""
we have an empty result; at least 1 axis is 0
we will try to apply the function to an empty
series in order to see if this is a reduction function
"""
# we are not asked to reduce or infer reduction
# so just return a copy of the existing object
if self.result_type not in ["reduce", None]:
return self.obj.copy()
# we may need to infer
should_reduce = self.result_type == "reduce"
from pandas import Series
if not should_reduce:
try:
r = self.f(Series([]))
except Exception:
pass
else:
should_reduce = not isinstance(r, Series)
if should_reduce:
if len(self.agg_axis):
r = self.f(Series([]))
else:
r = np.nan
return self.obj._constructor_sliced(r, index=self.agg_axis)
else:
return self.obj.copy()
def apply_raw(self):
""" apply to the values as a numpy array """
try:
result = libreduction.compute_reduction(self.values, self.f, axis=self.axis)
except ValueError as err:
if "Function does not reduce" not in str(err):
# catch only ValueError raised intentionally in libreduction
raise
result = np.apply_along_axis(self.f, self.axis, self.values)
# TODO: mixed type case
if result.ndim == 2:
return self.obj._constructor(result, index=self.index, columns=self.columns)
else:
return self.obj._constructor_sliced(result, index=self.agg_axis)
def apply_broadcast(self, target):
result_values = np.empty_like(target.values)
# axis which we want to compare compliance
result_compare = target.shape[0]
for i, col in enumerate(target.columns):
res = self.f(target[col])
ares = np.asarray(res).ndim
# must be a scalar or 1d
if ares > 1:
raise ValueError("too many dims to broadcast")
elif ares == 1:
# must match return dim
if result_compare != len(res):
raise ValueError("cannot broadcast result")
result_values[:, i] = res
# we *always* preserve the original index / columns
result = self.obj._constructor(
result_values, index=target.index, columns=target.columns
)
return result
def apply_standard(self):
# try to reduce first (by default)
# this only matters if the reduction in values is of different dtype
# e.g. if we want to apply to a SparseFrame, then can't directly reduce
# we cannot reduce using non-numpy dtypes,
# as demonstrated in gh-12244
if (
self.result_type in ["reduce", None]
and not self.dtypes.apply(is_extension_array_dtype).any()
# Disallow complex_internals since libreduction shortcut
# cannot handle MultiIndex
and not self.agg_axis._has_complex_internals
):
values = self.values
index = self.obj._get_axis(self.axis)
labels = self.agg_axis
empty_arr = np.empty(len(index), dtype=values.dtype)
# Preserve subclass for e.g. test_subclassed_apply
dummy = self.obj._constructor_sliced(
empty_arr, index=index, dtype=values.dtype
)
try:
result = libreduction.compute_reduction(
values, self.f, axis=self.axis, dummy=dummy, labels=labels
)
except ValueError as err:
if "Function does not reduce" not in str(err):
# catch only ValueError raised intentionally in libreduction
raise
except TypeError:
# e.g. test_apply_ignore_failures we just ignore
if not self.ignore_failures:
raise
except ZeroDivisionError:
# reached via numexpr; fall back to python implementation
pass
else:
return self.obj._constructor_sliced(result, index=labels)
# compute the result using the series generator
self.apply_series_generator()
# wrap results
return self.wrap_results()
def apply_series_generator(self):
series_gen = self.series_generator
res_index = self.result_index
i = None
keys = []
results = {}
if self.ignore_failures:
successes = []
for i, v in enumerate(series_gen):
try:
results[i] = self.f(v)
except Exception:
pass
else:
keys.append(v.name)
successes.append(i)
# so will work with MultiIndex
if len(successes) < len(res_index):
res_index = res_index.take(successes)
else:
for i, v in enumerate(series_gen):
results[i] = self.f(v)
keys.append(v.name)
self.results = results
self.res_index = res_index
self.res_columns = self.result_columns
def wrap_results(self):
results = self.results
# see if we can infer the results
if len(results) > 0 and 0 in results and is_sequence(results[0]):
return self.wrap_results_for_axis()
# dict of scalars
result = self.obj._constructor_sliced(results)
result.index = self.res_index
return result
class FrameRowApply(FrameApply):
axis = 0
def apply_broadcast(self):
return super().apply_broadcast(self.obj)
@property
def series_generator(self):
return (self.obj._ixs(i, axis=1) for i in range(len(self.columns)))
@property
def result_index(self):
return self.columns
@property
def result_columns(self):
return self.index
def wrap_results_for_axis(self):
""" return the results for the rows """
results = self.results
result = self.obj._constructor(data=results)
if not isinstance(results[0], ABCSeries):
if len(result.index) == len(self.res_columns):
result.index = self.res_columns
if len(result.columns) == len(self.res_index):
result.columns = self.res_index
return result
class FrameColumnApply(FrameApply):
axis = 1
def apply_broadcast(self):
result = super().apply_broadcast(self.obj.T)
return result.T
@property
def series_generator(self):
constructor = self.obj._constructor_sliced
return (
constructor(arr, index=self.columns, name=name)
for i, (arr, name) in enumerate(zip(self.values, self.index))
)
@property
def result_index(self):
return self.index
@property
def result_columns(self):
return self.columns
def wrap_results_for_axis(self):
""" return the results for the columns """
results = self.results
# we have requested to expand
if self.result_type == "expand":
result = self.infer_to_same_shape()
# we have a non-series and don't want inference
elif not isinstance(results[0], ABCSeries):
from pandas import Series
result = Series(results)
result.index = self.res_index
# we may want to infer results
else:
result = self.infer_to_same_shape()
return result
def infer_to_same_shape(self):
""" infer the results to the same shape as the input object """
results = self.results
result = self.obj._constructor(data=results)
result = result.T
# set the index
result.index = self.res_index
# infer dtypes
result = result.infer_objects()
return result
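# Illustration of the "expand" path handled above (assuming standard
# DataFrame.apply semantics): a list-like return value is expanded into columns,
#   df.apply(lambda row: [row.min(), row.max()], axis=1, result_type="expand")
# gives a DataFrame with columns 0 and 1, while the default result_type=None
# wraps list returns in a Series and only infers a same-shaped frame when the
# function returns a Series.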
``` |
{
"source": "aakanksha-gubbala/STHE_Optimization",
"score": 3
} |
#### File: aakanksha-gubbala/STHE_Optimization/theory.py
```python
import streamlit as st
def main():
st.subheader("Problem Solving Approach")
st.image('diagram.png')
st.image('optim-schematic.png', width=500)
st.write("Objective : Minimize total cost of STHE")
st.write("Constraints : ")
st.write(r"1. $\Delta P_s < \Delta P_{s, permissible}$")
st.write(r"2. $\Delta P_t < \Delta P_{t, permissible}$")
st.write(r"3. $A_{calculated} < A_{available}$")
st.write("Subject to variables : ")
st.write(r"1. Tube layout = [Triangular 30 deg, Rotated square 45 deg, Square 90 deg]")
st.write(r"2. Number of tube passes = [1, 2, 4]")
st.write(r"3. Baffle cut = 15% to 25%")
st.write(r"4. Length of tube = 2.5 to 6 m")
st.write(r"5. Outer diameter of tube = 0.75 in, 1 in, 1.25 in, 1.5 in, 2 in")
st.write(r"6. Shell-baffle clearance = 1 to 5 mm")
st.write(r"7. Tube-baffle clearance = 1 to 5 mm")
st.write("Assumptions")
st.write(r"1. Tube Thickness is 16 BWG")
st.write(r"2. Baffle spacing = 0.5D$_s$")
st.write(r"3. Constant specific heat capacity in the temperature range")
st.write("References")
st.write("1) <NAME>., <NAME>., & <NAME>. (2013). "
"Design of shell-and-tube heat exchangers using multiobjective optimization."
" International Journal of Heat and Mass Transfer, 60, 343–354.")
st.write("2) Hewitt, <NAME>., <NAME>, and <NAME> (1994) Process Heat Transfer. CRC Press.")
``` |
{
"source": "aakanksha-gubbala/unifac",
"score": 3
} |
#### File: aakanksha-gubbala/unifac/UNIFAC.py
```python
import numpy as np
#### Get nomenclature from Introduction to Chemical Engineering Thermodynamics by <NAME>, <NAME>, <NAME>, <NAME>
class UNIFAC:
def __init__(self):
self.x = np.array([[0.2, 0.8]]) # mol fractions
self.T = np.array([[330]]) # K
# Frequency of UNIFAC groups : each row denotes the subgroup and each column denotes the component
self.nu = np.array([[2, 0],
[2, 0],
[1, 0],
[0, 1]])
# R values
self.R = np.array([0.9011, 0.6744, 1.6764, 3.1680])
# Q values
self.Q = np.array([0.8480, 0.5400, 1.4200, 2.4840])
# a_mn values (energy contributions in residual)
self.a = np.array([[0.0000, 0.0000, 232.10, 354.55],
[0.0000, 0.0000, 232.10, 354.55],
[114.80, 114.80, 0.0000, 202.30],
[-25.31, -25.31, -146.3, 0.0000]])
self.r = np.matmul(self.R, self.nu)
self.q = np.matmul(self.Q, self.nu)
def get_gammaC(self):
# Get the combinatorial part of activity coefficient
# J = ri / sum(rj xj)
J = np.zeros((len(self.x), len(self.x[0])))
for i in range(len(self.x)):
J[i] = self.r / np.dot(self.x[i], self.r)
# L = qi / sum(qj xj)
L = np.zeros((len(self.x), len(self.x[0])))
for i in range(len(self.x)):
L[i] = self.q / np.dot(self.x[i], self.q)
lngammaC = 1 - J + np.log(J) - 5 * self.q * (1 - J / L + np.log(J / L))
return np.exp(lngammaC)
def get_gammaR(self):
# Get the residual part of activity coefficient
e = np.zeros(self.nu.transpose().shape)
for i in range(e.shape[0]):
e[i] = self.nu.transpose()[i] * self.Q / self.q[i]
e = e.transpose()
tau = np.exp(-self.a / self.T)
beta = np.matmul(e.transpose(), tau)
theta = np.zeros((len(self.x), len(self.nu)))
for i in range(len(self.x)):
for j in range(len(self.nu)):
theta[i][j] = np.sum(self.x[i] * self.q * e[j, :]) / np.dot(self.x[i], self.q)
s = np.matmul(theta, tau)
lngammaR = np.zeros((len(self.x), len(self.x[0])))
for i in range(len(self.x)):
lngammaR[i] = self.q * (1 -
(np.sum((theta[i, :] * beta / s[i, :]).transpose() -
np.log(beta / s[i, :]).transpose() * e, axis=0))
)
return np.exp(lngammaR)
def get_gamma(self):
# Get the activity coefficent
gammaC = self.get_gammaC()
gammaR = self.get_gammaR()
return gammaC * gammaR
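# Summary of the SVNA-style UNIFAC relations implemented above (added for
# readability; notation follows the reference cited at the top of this file):
#   J_i = r_i / sum_j(r_j * x_j),  L_i = q_i / sum_j(q_j * x_j)
#   ln(gammaC_i) = 1 - J_i + ln(J_i) - 5 * q_i * (1 - J_i/L_i + ln(J_i/L_i))
#   ln(gammaR_i) = q_i * [1 - sum_k(theta_k * beta_ik / s_k - e_ki * ln(beta_ik / s_k))]
# with tau_mk = exp(-a_mk / T), beta_ik = sum_m(e_mi * tau_mk) and
# s_k = sum_m(theta_m * tau_mk).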
# unifac = UNIFAC()
# print(unifac.get_gamma())
``` |
{
"source": "aakanksha-gubbala/vle-web-app",
"score": 2
} |
#### File: vle-web-app/models/wohls.py
```python
import streamlit as st
import scipy.constants as constants
import numpy as np
import scipy.optimize as opt
from scipy.special import xlogy
from sklearn import metrics
import matplotlib.pyplot as plt
from matplotlib import style
from volume import get_volume
from antoine import get_psat
class Wohls:
def __init__(self, s1, s2, T):
self.q1 = get_volume(s1, T)
self.q2 = get_volume(s2, T)
self.T = T
def Ge(self, x1, A):
x1 = np.asfarray(x1, float)
z1 = x1 * self.q1 / (x1 * self.q1 + (1 - x1) * self.q2)
z2 = (1 - x1) * self.q2 / (x1 * self.q1 + (1 - x1) * self.q2)
return constants.R * self.T * (x1 * self.q1 + (1 - x1) * self.q2) * (2 * A) * (z1 * z2)
def gamma1(self, z, A):
return np.exp(2 * A * self.q1 * (1 - z) ** 2)
def gamma2(self, z, A):
return np.exp(2 * A * self.q2 * z ** 2)
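# Note (added for readability; standard one-parameter Wohl expansion): with
# z1 = x1*q1 / (x1*q1 + x2*q2) and z2 = 1 - z1, Ge above reduces to
#   G^E = 2*A*R*T * q1*q2*x1*x2 / (x1*q1 + x2*q2)
# and differentiating n*G^E/(R*T) with respect to the mole numbers gives the
# expressions used in gamma1/gamma2:
#   ln(gamma1) = 2*A*q1*z2**2,  ln(gamma2) = 2*A*q2*z1**2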
# @st.cache(suppress_st_warning=True)
def get_parameter(self, x, G_e):
A, params_cov = opt.curve_fit(self.Ge, x, G_e, p0=1000, maxfev=10000)
return A
# @st.cache(suppress_st_warning=True)
def get_accuracy(self, G_e, x1):
A = self.get_parameter(x1, G_e)
Ge = self.Ge(x1, A)
return metrics.r2_score(G_e, Ge)
def main(x1, y1, P, G_e, T, s1, s2):
style.use('classic')
w = Wohls(s1, s2, T)
A = w.get_parameter(x1, G_e)
acc = w.get_accuracy(G_e, x1)
x = np.linspace(0, 1, 50)
fig4 = plt.figure(facecolor='white')
plt.title(r"$G^E-x$")
plt.xlim(0, 1)
plt.xlabel(r'$x_1$')
plt.ylabel(r'$G^E\ (J/mol)$')
plt.scatter(x1, G_e)
plt.plot(x, w.Ge(x, A), label=r"$Wohls\ model$", color='red')
plt.axhline(0, color='black')
q1 = get_volume(s1, T)
q2 = get_volume(s2, T)
z = x * q1 / (x * q1 + (1 - x) * q2)
p1_s = get_psat(s1, T)
p2_s = get_psat(s2, T)
P_Wohls = x * p1_s * w.gamma1(z, A) + (1 - x) * p2_s * w.gamma2(z, A)
y_Wohls = x * p1_s * w.gamma1(z, A) / P_Wohls
P_raoult = x * p1_s + (1 - x) * p2_s
fig5 = plt.figure(facecolor='white')
plt.title(r"$P-x$")
plt.xlim(0, 1)
plt.ylim(0, 1.2 * max(P_Wohls))
plt.xlabel(r'$x_1$')
plt.ylabel(r'$P\ (kPa)$')
plt.scatter(x1, P)
plt.plot(x, P_Wohls, label=r"$Wohls\ model$", color='red')
plt.plot(x, P_raoult, color='black', label=r"$Raoult's\ Law$")
plt.legend(loc='best', fontsize=10)
fig6 = plt.figure(facecolor='white')
plt.gca().set_aspect('equal', adjustable='box')
plt.title(r"$y-x$")
plt.xlim(0, 1)
plt.ylim(0, 1)
plt.xlabel(r'$x_1$')
plt.ylabel(r'$y_1$')
plt.scatter(x1, y1)
plt.plot(x, y_Wohls, label=r"$Wohls\ model$", color='red')
plt.plot(x, x, color='black')
plt.legend(loc='best', fontsize=10)
return A, acc, fig4, fig5, fig6
``` |
{
"source": "Aakanksha-Rana/HD-BET",
"score": 2
} |
#### File: HD-BET/HD_BET/Train.py
```python
import argparse
import os
import numpy as np
import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
import torch
import torch.autograd as autograd
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision.datasets as dsets
import torchvision.models as models
#from ipywidgets import IntProgress
import lrs
from data_loader import ScanDataset
from network_architecture import Network as net
def main(config):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
train_transform = transforms.Compose([
transforms.Scale(256),
transforms.RandomCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor()])
val_transform = transforms.Compose([
transforms.Scale(256),
transforms.RandomCrop(224),
transforms.ToTensor()])
test_transform = transforms.Compose([
transforms.ToTensor()])
trainset = ScanDataset(csv_file=config.train_csv_file, root_dir=config.train_img_path, transform=train_transform)
valset = ScanDataset(csv_file=config.val_csv_file, root_dir=config.val_img_path, transform=val_transform)
train_loader = torch.utils.data.DataLoader(trainset, batch_size=config.train_batch_size,
shuffle=True, num_workers=config.num_workers)
val_loader = torch.utils.data.DataLoader(valset, batch_size=config.val_batch_size,
shuffle=False, num_workers=config.num_workers)
# base_model = models.vgg16(pretrained=True)
base_model = models.resnet101(pretrained=True, progress = False)
# base_model = models.inception_v3(pretrained=True)
model = net(base_model)  # Network imported above as net; NIMA is not defined in this script
# model = net()
if config.warm_start:  # resume from a saved checkpoint when warm-starting
model.load_state_dict(torch.load(os.path.join(config.ckpt_path, 'epoch-%d.pkl' % config.warm_start_epoch)))
print('Successfully loaded model epoch-%d.pkl' % config.warm_start_epoch)
if config.multi_gpu:
model.features = torch.nn.DataParallel(model.features, device_ids=config.gpu_ids)
model = model.to(device)
else:
model = model.to(device)
conv_base_lr = config.conv_base_lr
dense_lr = config.dense_lr
optimizer = optim.SGD([
{'params': model.features.parameters(), 'lr': conv_base_lr},
{'params': model.classifier.parameters(), 'lr': dense_lr}],
momentum=0.9
)
# optimizer = optim.Adam( model.parameters(), lr = conv_base_lr, betas=(0.9,0.999))
# Loss functions
# criterion = torch.nn.L1Loss()
criterion = torch.nn.BCELoss()
# send hyperparams
lrs.send({
'title': 'EMD Loss',
'train_batch_size': config.train_batch_size,
'val_batch_size': config.val_batch_size,
'optimizer': 'SGD',
'conv_base_lr': config.conv_base_lr,
'dense_lr': config.dense_lr,
'momentum': 0.9
})
param_num = 0
for param in model.parameters():
param_num += int(np.prod(param.shape))
print('Trainable params: %.2f million' % (param_num / 1e6))
if config.train:
# for early stopping
count = 0
init_val_loss = float('inf')
train_losses = []
val_losses = []
for epoch in range(config.warm_start_epoch, config.epochs):
lrs.send('epoch', epoch)
batch_losses = []
for i, data in enumerate(train_loader):
images = data['image'].to(device)
# labels = data['annotations'].to(device).long()
# labels = labels.view(labels.shape[0])
labels = data['annotations'].to(device).float()
labels = labels.view(-1,2)
outputs = model(images)
outputs = outputs.view( -1, 2)
optimizer.zero_grad()
loss = criterion(outputs, labels)
# loss = emd_loss(labels, outputs)
batch_losses.append(loss.item())
loss.backward()
optimizer.step()
lrs.send('train_bce_loss', loss.item())
# print('Epoch: %d/%d | Step: %d/%d | Training EMD loss: %.4f' % (epoch + 1, config.epochs, i + 1, len(trainset) // config.train_batch_size + 1, loss.data[0]))
avg_loss = sum(batch_losses) / (len(trainset) // config.train_batch_size + 1)
train_losses.append(avg_loss)
print('Epoch %d averaged training EMD loss: %.4f' % (epoch + 1, avg_loss))
# exponential learning rate decay
if (epoch + 1) % 10 == 0:
conv_base_lr = conv_base_lr * config.lr_decay_rate ** ((epoch + 1) / config.lr_decay_freq)
dense_lr = dense_lr * config.lr_decay_rate ** ((epoch + 1) / config.lr_decay_freq)
optimizer = optim.SGD([
{'params': model.features.parameters(), 'lr': conv_base_lr},
{'params': model.classifier.parameters(), 'lr': dense_lr}],
momentum=0.9
)
# send decay hyperparams
lrs.send({
'lr_decay_rate': config.lr_decay_rate,
'lr_decay_freq': config.lr_decay_freq,
'conv_base_lr': config.conv_base_lr,
'dense_lr': config.dense_lr
})
# do validation after each epoch
batch_val_losses = []
for data in val_loader:
images = data['image'].to(device)
labels = data['annotations'].to(device).float()
labels = labels.view(-1,2)
with torch.no_grad():
outputs = model(images)
val_outputs = outputs.view(-1, 2)
val_loss = criterion(val_outputs, labels)
# val_loss = emd_loss(labels, outputs)
batch_val_losses.append(val_loss.item())
avg_val_loss = sum(batch_val_losses) / (len(valset) // config.val_batch_size + 1)
val_losses.append(avg_val_loss)
lrs.send('val_bce_loss', avg_val_loss)
print('Epoch %d completed. Averaged BCE loss on val set: %.4f. Initial val loss : %.4f.' % (epoch + 1, avg_val_loss, init_val_loss))
# Use early stopping to monitor training
if avg_val_loss < init_val_loss:
init_val_loss = avg_val_loss
# save model weights if val loss decreases
print('Saving model...')
torch.save(model.state_dict(), os.path.join(config.ckpt_path, 'epoch-%d.pkl' % (epoch + 1)))
print('Done.\n')
# reset count
count = 0
elif avg_val_loss >= init_val_loss:
count += 1
if count == config.early_stopping_patience:
print('Val BCE loss has not decreased in %d epochs. Training terminated.' % config.early_stopping_patience)
# break
print('Training completed.')
if config.save_fig:
# plot train and val loss
epochs = range(1, epoch + 2)
plt.plot(epochs, train_losses, 'b-', label='train loss')
plt.plot(epochs, val_losses, 'g-', label='val loss')
plt.title('BCE loss')
plt.legend()
plt.savefig('./loss.png')
if config.test:
# start.record()
print('Testing')
# compute mean score
test_transform = val_transform
testset = ScanDataset(csv_file=config.test_csv_file, root_dir=config.test_img_path, transform=val_transform)
test_loader = torch.utils.data.DataLoader(testset, batch_size=config.test_batch_size, shuffle=False, num_workers=config.num_workers)
mean_preds = np.zeros(45)
mean_labels = np.zeros(45)
# std_preds = []
count = 0
for data in test_loader:
im_id = data['img_id']
image = data['image'].to(device)
labels = data['annotations'].to(device).float()
output = model(image)
output = output.view(1, 1)
bpred = output.to(torch.device("cpu"))
cpred = bpred.data.numpy()
blabel = labels.to(torch.device("cpu"))
clabel = blabel.data.numpy()
# predicted_mean, predicted_std = 0.0, 0.0
# for i, elem in enumerate(output, 1):
# predicted_mean += i * elem
# for j, elem in enumerate(output, 1):
# predicted_std += elem * (i - predicted_mean) ** 2
mean_preds[count] = cpred
mean_labels[count] = clabel
print(im_id,mean_preds[count])
count= count+1
# std_preds.append(predicted_std)
# Do what you want with predicted and std...
# end.record()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# input parameters
parser.add_argument('--train_img_path', type=str, default='/path/to/train')
parser.add_argument('--val_img_path', type=str, default='/path/to/val')
parser.add_argument('--test_img_path', type=str, default='/path/to/test')
parser.add_argument('--train_csv_file', type=str, default='./Train_final.csv')
parser.add_argument('--val_csv_file', type=str, default='./Val_final.csv')
parser.add_argument('--test_csv_file', type=str, default='./Test_final.csv')
# training parameters
parser.add_argument('--train', type=bool, default = True)
parser.add_argument('--test', type=bool, default = False)
parser.add_argument('--conv_base_lr', type=float, default=.000001)
parser.add_argument('--dense_lr', type=float, default=.000001)
parser.add_argument('--lr_decay_rate', type=float, default=0.95)
parser.add_argument('--lr_decay_freq', type=int, default=10)
parser.add_argument('--train_batch_size', type=int, default=32)
parser.add_argument('--val_batch_size', type=int, default=16)
parser.add_argument('--test_batch_size', type=int, default=1)
parser.add_argument('--num_workers', type=int, default=1)
parser.add_argument('--epochs', type=int, default=100)
# misc
parser.add_argument('--ckpt_path', type=str, default='./ckpts/')
parser.add_argument('--multi_gpu', type=bool, default=False)
parser.add_argument('--gpu_ids', type=list, default=None)
parser.add_argument('--warm_start', type=bool, default=False)
parser.add_argument('--warm_start_epoch', type=int, default=0)
parser.add_argument('--early_stopping_patience', type=int, default=5)
parser.add_argument('--save_fig', type=bool, default=False)
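# Caveat (not part of the original script): argparse's type=bool does not parse
# "False" -- any non-empty command-line value is truthy, so flags like --train,
# --test and --warm_start are only reliable through their defaults;
# action='store_true' is the usual alternative.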
config = parser.parse_args()
main(config)
``` |
{
"source": "aakanksharathore/yoloTracker",
"score": 3
} |
#### File: yoloTracker/tracking/yoloTrackVid.py
```python
import numpy as np
import cv2, sys
import glob
import ntpath
sys.path.append("..")
from models.yolo_models import get_yolo_model
def _sigmoid(x):
return 1. / (1. + np.exp(-x))
def _interval_overlap(interval_a, interval_b):
x1, x2 = interval_a
x3, x4 = interval_b
if x3 < x1:
if x4 < x1:
return 0
else:
return min(x2,x4) - x1
else:
if x2 < x3:
return 0
else:
return min(x2,x4) - x3
def bbox_iou(box1, box2):
intersect_w = _interval_overlap([box1[0], box1[2]], [box2[0], box2[2]])
intersect_h = _interval_overlap([box1[1], box1[3]], [box2[1], box2[3]])
intersect = intersect_w * intersect_h
w1, h1 = box1[2]-box1[0], box1[3]-box1[1]
w2, h2 = box2[2]-box2[0], box2[3]-box2[1]
union = w1*h1 + w2*h2 - intersect
return float(intersect) / union
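# Worked example (added for clarity): for box1 = [0, 0, 2, 2] and
# box2 = [1, 1, 3, 3] the overlap is 1x1 = 1, the union is 4 + 4 - 1 = 7,
# so bbox_iou returns 1/7 ~= 0.14. Boxes with IoU >= obj_threshold are
# suppressed in the NMS loop further below.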
#import tkinter to use diaglogue window for movie name
import tkinter as tk
from tkinter.filedialog import askopenfilename
#Open the video file which needs to be processed
root = tk.Tk()
movieName = askopenfilename(initialdir='/media/aakanksha/f41d5ac2-703c-4b56-a960-cd3a54f21cfb/aakanksha/Documents/Backup/Phd/Analysis/Videos/',filetypes=[("Video files","*")])
cap = cv2.VideoCapture(movieName)
nframe =cap.get(cv2.CAP_PROP_FRAME_COUNT)
step=500
im_width = 3840 #1920#864
im_height = 2176#1088#864
obj_threshold=0.5; max_length=256;
weight_file = '/media/aakanksha/f41d5ac2-703c-4b56-a960-cd3a54f21cfb/aakanksha/Documents/Backup/Phd/Analysis/blackbuckML/yoloTracker/weights/compare-blackbucks-yolo.h5'
model = get_yolo_model(im_width, im_height, num_class=1)
model.load_weights(weight_file,by_name=True)
im_num=0
width=3840#1920
height=2176#1080
count=0
#video = cv2.VideoWriter('/media/aakanksha/f41d5ac2-703c-4b56-a960-cd3a54f21cfb/aakanksha/Documents/Backup/Phd/Analysis/blackbuckML/testOut/video.avi',-1,1,(im_width,im_height))
while(cap.isOpened()):
if (cv2.waitKey(1) & 0xFF == ord('q')) | (count > 32):
break
cap.set(cv2.CAP_PROP_POS_FRAMES,im_num)
ret, img = cap.read()
im_num+=step
image_h, image_w, _ = img.shape
img = cv2.resize(img, (im_width,im_height))
new_image = img[:,:,::-1]/255.
new_image = np.expand_dims(new_image, 0)
# get detections
preds = model.predict(new_image)
#print('yolo time: ', (stop-start)/batches)
new_boxes = np.zeros((0,5))
for i in range(3):
netout=preds[i][0]
grid_h, grid_w = netout.shape[:2]
xpos = netout[...,0]
ypos = netout[...,1]
wpos = netout[...,2]
hpos = netout[...,3]
objectness = netout[...,4]
indexes = (objectness > obj_threshold) & (wpos<max_length) & (hpos<max_length)
if np.sum(indexes)==0:
continue
corner1 = np.column_stack((xpos[indexes]-wpos[indexes]/2.0, ypos[indexes]-hpos[indexes]/2.0))
corner2 = np.column_stack((xpos[indexes]+wpos[indexes]/2.0, ypos[indexes]+hpos[indexes]/2.0))
new_boxes = np.append(new_boxes, np.column_stack((corner1, corner2, objectness[indexes])),axis=0)
# do nms
sorted_indices = np.argsort(-new_boxes[:,4])
boxes=new_boxes.tolist()
for i in range(len(sorted_indices)):
index_i = sorted_indices[i]
if new_boxes[index_i,4] == 0:
continue
for j in range(i+1, len(sorted_indices)):
index_j = sorted_indices[j]
if bbox_iou(boxes[index_i][0:4], boxes[index_j][0:4]) >= obj_threshold:
new_boxes[index_j,4] = 0
new_boxes = new_boxes[new_boxes[:,4]>0]
detection_list = []
for row in new_boxes:
stacker = (row[0],row[1],row[2],row[3], row[4])
detection_list.append(stacker)
#Display the detections
for detect in detection_list:
bbox = detect[0:4]
#if 1:
#iwarp = (full_warp)
#corner1 = np.expand_dims([bbox[0],bbox[1]], axis=0)
#corner1 = np.expand_dims(corner1,axis=0)
#corner1 = cv2.perspectiveTransform(corner1,iwarp)[0,0,:]
minx = bbox[0]
miny = bbox[1]
#corner2 = np.expand_dims([bbox[2],bbox[3]], axis=0)
#corner2 = np.expand_dims(corner2,axis=0)
#corner2 = cv2.perspectiveTransform(corner2,iwarp)[0,0,:]
maxx = bbox[2]
maxy = bbox[3]
cv2.rectangle(img, (int(minx)-2, int(miny)-2), (int(maxx)+2, int(maxy)+2),(0,0,0), 1)
#write output image
cv2.imwrite('/media/aakanksha/f41d5ac2-703c-4b56-a960-cd3a54f21cfb/aakanksha/Documents/Backup/Phd/Analysis/blackbuckML/testOut/'+ntpath.basename(movieName[0:len(movieName)-4])+'_'+str(im_num)+'.png',img)
count+=1
#video.write(img)
cv2.destroyAllWindows()
#video.release()
```
#### File: train/prepare_samples/makeTrain.py
```python
import numpy as np
import pandas as pd
import os,sys,glob
import cv2
import yaml
sys.path.append('../..')
sys.path.append('..')
from models.yolo_models import get_yolo_model
from utils.decoder import decode
from random import shuffle
def main(argv):
if(len(sys.argv) != 3):
print('Usage ./makeTrain.py [root_dir] [config.yml]')
sys.exit(1)
#Load data
root_dir = argv[1] + '/' #in case we forgot
print('Opening file ' + root_dir + argv[2])
with open(root_dir + argv[2], 'r') as configfile:
config = yaml.safe_load(configfile)
image_dir = root_dir + config['data_dir']
train_dir = root_dir + config['data_dir']
your_weights = root_dir + config['weights_dir'] + config['specific_weights']
trained_annotations_fname = train_dir + config['trained_annotations_fname']
train_files_regex = config['specific_train_files_regex']
train_images = glob.glob( train_dir + train_files_regex )
shuffle(train_images)
max_l=100
min_l=10
im_size=864 #size of training images for yolo
##################################################
#im_size=416 #size of training images for yolo
yolov3 = get_yolo_model(im_size,im_size,num_class=1,trainable=False)
yolov3.load_weights(your_weights)
########################################
im_num=1
all_imgs = []
for imagename in train_images:
img = cv2.imread(imagename)
print('processing image ' + imagename + ', ' + str(im_num) + ' of ' + str(len(train_images)) + '...')
im_num+=1
img_data = {'object':[]} # dictionary storing this image's annotation data
head, tail = os.path.split(imagename)
noext, ext = os.path.splitext(tail)
box_name = train_dir + '/bbox/' + tail
img_data['filename'] = tail
img_data['width'] = im_size
img_data['height'] = im_size
# use the trained yolov3 model to predict
# preprocess the image
image_h, image_w, _ = img.shape
new_image = img[:,:,::-1]/255.
new_image = np.expand_dims(new_image, 0)
# run the prediction
yolos = yolov3.predict(new_image)
boxes = decode(yolos, obj_thresh=0.2, nms_thresh=0.3)
for b in boxes:
xmin=int(b[0])
xmax=int(b[2])
ymin=int(b[1])
ymax=int(b[3])
obj = {}
obj['name'] = 'aoi'
if xmin<0: continue
if ymin<0: continue
if xmax>im_size: continue
if ymax>im_size: continue
if (xmax-xmin)<min_l: continue
if (xmax-xmin)>max_l: continue
if (ymax-ymin)<min_l: continue
if (ymax-ymin)>max_l: continue
obj['xmin'] = xmin
obj['ymin'] = ymin
obj['xmax'] = xmax
obj['ymax'] = ymax
img_data['object'] += [obj]
cv2.rectangle(img, (xmin,ymin), (xmax,ymax), (0,255,0), 2)
cv2.imwrite(box_name, img)
all_imgs += [img_data]
#print(all_imgs)
with open(trained_annotations_fname, 'w') as handle:
yaml.dump(all_imgs, handle)
if __name__ == '__main__':
main(sys.argv)
``` |
{
"source": "Aakansha99/AI-Therapist",
"score": 2
} |
#### File: AI-Therapist/main_app/views.py
```python
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.http import JsonResponse
from datetime import date
from django.contrib import messages
from django.contrib.auth.models import User , auth
from .models import patient , doctor , diseaseinfo , consultation ,rating_review
from chats.models import Chat,Feedback
from .models import SentimentModel
from .forms import SentimentForm
from code import SentimentAnalyzer
import text2emotion as te
# Create your views here.
#loading trained_model
import joblib as jb
model = jb.load('trained_model')
def home(request):
if request.method == 'GET':
if request.user.is_authenticated:
return render(request,'homepage/index.html')
else :
return render(request,'homepage/index.html')
def admin_ui(request):
if request.method == 'GET':
if request.user.is_authenticated:
auser = request.user
Feedbackobj = Feedback.objects.all()
return render(request,'admin/admin_ui/admin_ui.html' , {"auser":auser,"Feedback":Feedbackobj})
else :
return redirect('home')
if request.method == 'POST':
return render(request,'patient/patient_ui/profile.html')
def patient_ui(request):
if request.method == 'GET':
if request.user.is_authenticated:
patientusername = request.session['patientusername']
puser = User.objects.get(username=patientusername)
return render(request,'patient/patient_ui/profile.html' , {"puser":puser})
else :
return redirect('home')
if request.method == 'POST':
return render(request,'patient/patient_ui/profile.html')
def pviewprofile(request, patientusername):
if request.method == 'GET':
puser = User.objects.get(username=patientusername)
return render(request,'patient/view_profile/view_profile.html', {"puser":puser})
def checkdisease(request):
diseaselist=['Fungal infection','Allergy','GERD','Chronic cholestasis','Drug Reaction','Peptic ulcer diseae','AIDS','Diabetes ',
'Gastroenteritis','Bronchial Asthma','Hypertension ','Migraine','Cervical spondylosis','Paralysis (brain hemorrhage)',
'Jaundice','Malaria','Chicken pox','Dengue','Typhoid','hepatitis A', 'Hepatitis B', 'Hepatitis C', 'Hepatitis D',
'Hepatitis E', 'Alcoholic hepatitis','Tuberculosis', 'Common Cold', 'Pneumonia', 'Dimorphic hemmorhoids(piles)',
'Heart attack', 'Varicose veins','Hypothyroidism', 'Hyperthyroidism', 'Hypoglycemia', 'Osteoarthristis',
'Arthritis', '(vertigo) Paroymsal Positional Vertigo','Acne', 'Urinary tract infection', 'Psoriasis', 'Impetigo']
symptomslist=['itching','skin_rash','nodal_skin_eruptions','continuous_sneezing','shivering','chills','joint_pain',
'stomach_pain','acidity','ulcers_on_tongue','muscle_wasting','vomiting','burning_micturition','spotting_ urination',
'fatigue','weight_gain','anxiety','cold_hands_and_feets','mood_swings','weight_loss','restlessness','lethargy',
'patches_in_throat','irregular_sugar_level','cough','high_fever','sunken_eyes','breathlessness','sweating',
'dehydration','indigestion','headache','yellowish_skin','dark_urine','nausea','loss_of_appetite','pain_behind_the_eyes',
'back_pain','constipation','abdominal_pain','diarrhoea','mild_fever','yellow_urine',
'yellowing_of_eyes','acute_liver_failure','fluid_overload','swelling_of_stomach',
'swelled_lymph_nodes','malaise','blurred_and_distorted_vision','phlegm','throat_irritation',
'redness_of_eyes','sinus_pressure','runny_nose','congestion','chest_pain','weakness_in_limbs',
'fast_heart_rate','pain_during_bowel_movements','pain_in_anal_region','bloody_stool',
'irritation_in_anus','neck_pain','dizziness','cramps','bruising','obesity','swollen_legs',
'swollen_blood_vessels','puffy_face_and_eyes','enlarged_thyroid','brittle_nails',
'swollen_extremeties','excessive_hunger','extra_marital_contacts','drying_and_tingling_lips',
'slurred_speech','knee_pain','hip_joint_pain','muscle_weakness','stiff_neck','swelling_joints',
'movement_stiffness','spinning_movements','loss_of_balance','unsteadiness',
'weakness_of_one_body_side','loss_of_smell','bladder_discomfort','foul_smell_of urine',
'continuous_feel_of_urine','passage_of_gases','internal_itching','toxic_look_(typhos)',
'depression','irritability','muscle_pain','altered_sensorium','red_spots_over_body','belly_pain',
'abnormal_menstruation','dischromic _patches','watering_from_eyes','increased_appetite','polyuria','family_history','mucoid_sputum',
'rusty_sputum','lack_of_concentration','visual_disturbances','receiving_blood_transfusion',
'receiving_unsterile_injections','coma','stomach_bleeding','distention_of_abdomen',
'history_of_alcohol_consumption','fluid_overload','blood_in_sputum','prominent_veins_on_calf',
'palpitations','painful_walking','pus_filled_pimples','blackheads','scurring','skin_peeling',
'silver_like_dusting','small_dents_in_nails','inflammatory_nails','blister','red_sore_around_nose',
'yellow_crust_ooze']
alphabaticsymptomslist = sorted(symptomslist)
if request.method == 'GET':
return render(request,'patient/checkdisease/checkdisease.html', {"list2":alphabaticsymptomslist})
elif request.method == 'POST':
## access your data via the request.POST object
inputno = int(request.POST["noofsym"])
print(inputno)
if (inputno == 0 ) :
return JsonResponse({'predicteddisease': "none",'confidencescore': 0 })
else :
psymptoms = []
psymptoms = request.POST.getlist("symptoms[]")
print(psymptoms)
""" #main code start from here...
"""
testingsymptoms = []
#append zero in all column fields...
for x in range(0, len(symptomslist)):
testingsymptoms.append(0)
#set 1 where a symptom matches...
for k in range(0, len(symptomslist)):
for z in psymptoms:
if (z == symptomslist[k]):
testingsymptoms[k] = 1
inputtest = [testingsymptoms]
print(inputtest)
predicted = model.predict(inputtest)
print("predicted disease is : ")
print(predicted)
y_pred_2 = model.predict_proba(inputtest)
confidencescore=y_pred_2.max() * 100
print(" confidence score of : = {0} ".format(confidencescore))
confidencescore = format(confidencescore, '.0f')
predicted_disease = predicted[0]
#consult_doctor codes----------
# doctor_specialization = ["Rheumatologist","Cardiologist","ENT specialist","Orthopedist","Neurologist",
# "Allergist/Immunologist","Urologist","Dermatologist","Gastroenterologist"]
Rheumatologist = [ 'Osteoarthristis','Arthritis']
Cardiologist = [ 'Heart attack','Bronchial Asthma','Hypertension ']
ENT_specialist = ['(vertigo) Paroymsal Positional Vertigo','Hypothyroidism' ]
Orthopedist = []
Neurologist = ['Varicose veins','Paralysis (brain hemorrhage)','Migraine','Cervical spondylosis']
Allergist_Immunologist = ['Allergy','Pneumonia',
'AIDS','Common Cold','Tuberculosis','Malaria','Dengue','Typhoid']
Urologist = [ 'Urinary tract infection',
'Dimorphic hemmorhoids(piles)']
Dermatologist = [ 'Acne','Chicken pox','Fungal infection','Psoriasis','Impetigo']
Gastroenterologist = ['Peptic ulcer diseae', 'GERD','Chronic cholestasis','Drug Reaction','Gastroenteritis','Hepatitis E',
'Alcoholic hepatitis','Jaundice','hepatitis A',
'Hepatitis B', 'Hepatitis C', 'Hepatitis D','Diabetes ','Hypoglycemia']
if predicted_disease in Rheumatologist :
consultdoctor = "Rheumatologist"
elif predicted_disease in Cardiologist :
consultdoctor = "Cardiologist"
elif predicted_disease in ENT_specialist :
consultdoctor = "ENT specialist"
elif predicted_disease in Orthopedist :
consultdoctor = "Orthopedist"
elif predicted_disease in Neurologist :
consultdoctor = "Neurologist"
elif predicted_disease in Allergist_Immunologist :
consultdoctor = "Allergist/Immunologist"
elif predicted_disease in Urologist :
consultdoctor = "Urologist"
elif predicted_disease in Dermatologist :
consultdoctor = "Dermatologist"
elif predicted_disease in Gastroenterologist :
consultdoctor = "Gastroenterologist"
else :
consultdoctor = "other"
request.session['doctortype'] = consultdoctor
patientusername = request.session['patientusername']
puser = User.objects.get(username=patientusername)
#saving to database.....................
patient = puser.patient
diseasename = predicted_disease
no_of_symp = inputno
symptomsname = psymptoms
confidence = confidencescore
diseaseinfo_new = diseaseinfo(patient=patient,diseasename=diseasename,no_of_symp=no_of_symp,symptomsname=symptomsname,confidence=confidence,consultdoctor=consultdoctor)
diseaseinfo_new.save()
request.session['diseaseinfo_id'] = diseaseinfo_new.id
print("disease record saved sucessfully.............................")
return JsonResponse({'predicteddisease': predicted_disease ,'confidencescore':confidencescore , "consultdoctor": consultdoctor})
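# Illustration of the feature vector built above (added for readability, not part
# of the original view): with psymptoms = ['itching', 'cough'], testingsymptoms is
# a 0/1 list of length len(symptomslist) with 1s only at the positions of
# 'itching' and 'cough', and inputtest = [testingsymptoms] is the single-row
# matrix passed to the classifier's predict() and predict_proba().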
def pconsultation_history(request):
if request.method == 'GET':
patientusername = request.session['patientusername']
puser = User.objects.get(username=patientusername)
patient_obj = puser.patient
consultationnew = consultation.objects.filter(patient = patient_obj)
return render(request,'patient/consultation_history/consultation_history.html',{"consultation":consultationnew})
def dconsultation_history(request):
if request.method == 'GET':
doctorusername = request.session['doctorusername']
duser = User.objects.get(username=doctorusername)
doctor_obj = duser.doctor
consultationnew = consultation.objects.filter(doctor = doctor_obj)
return render(request,'doctor/consultation_history/consultation_history.html',{"consultation":consultationnew})
def doctor_ui(request):
if request.method == 'GET':
doctorid = request.session['doctorusername']
duser = User.objects.get(username=doctorid)
return render(request,'doctor/doctor_ui/profile.html',{"duser":duser})
def dviewprofile(request, doctorusername):
if request.method == 'GET':
duser = User.objects.get(username=doctorusername)
r = rating_review.objects.filter(doctor=duser.doctor)
return render(request,'doctor/view_profile/view_profile.html', {"duser":duser, "rate":r} )
def consult_a_doctor(request):
if request.method == 'GET':
doctortype = request.session['doctortype']
print(doctortype)
dobj = doctor.objects.all()
#dobj = doctor.objects.filter(specialization=doctortype)
return render(request,'patient/consult_a_doctor/consult_a_doctor.html',{"dobj":dobj})
def make_consultation(request, doctorusername):
if request.method == 'POST':
patientusername = request.session['patientusername']
puser = User.objects.get(username=patientusername)
patient_obj = puser.patient
#doctorusername = request.session['doctorusername']
duser = User.objects.get(username=doctorusername)
doctor_obj = duser.doctor
request.session['doctorusername'] = doctorusername
diseaseinfo_id = request.session['diseaseinfo_id']
diseaseinfo_obj = diseaseinfo.objects.get(id=diseaseinfo_id)
consultation_date = date.today()
status = "active"
consultation_new = consultation( patient=patient_obj, doctor=doctor_obj, diseaseinfo=diseaseinfo_obj, consultation_date=consultation_date,status=status)
consultation_new.save()
request.session['consultation_id'] = consultation_new.id
print("consultation record is saved sucessfully.............................")
return redirect('consultationview',consultation_new.id)
def consultationview(request,consultation_id):
if request.method == 'GET':
request.session['consultation_id'] = consultation_id
consultation_obj = consultation.objects.get(id=consultation_id)
return render(request,'consultation/consultation.html', {"consultation":consultation_obj })
# if request.method == 'POST':
# return render(request,'consultation/consultation.html' )
def rate_review(request,consultation_id):
if request.method == "POST":
consultation_obj = consultation.objects.get(id=consultation_id)
patient = consultation_obj.patient
doctor1 = consultation_obj.doctor
rating = request.POST.get('rating')
review = request.POST.get('review')
rating_obj = rating_review(patient=patient,doctor=doctor1,rating=rating,review=review)
rating_obj.save()
rate = int(rating_obj.rating_is)
doctor.objects.filter(pk=doctor1).update(rating=rate)
return redirect('consultationview',consultation_id)
def close_consultation(request,consultation_id):
if request.method == "POST":
consultation.objects.filter(pk=consultation_id).update(status="closed")
return redirect('home')
#-----------------------------chatting system ---------------------------------------------------
def post(request):
if request.method == "POST":
msg = request.POST.get('msgbox', None)
consultation_id = request.session['consultation_id']
consultation_obj = consultation.objects.get(id=consultation_id)
c = Chat(consultation_id=consultation_obj,sender=request.user, message=msg)
#msg = c.user.username+": "+msg
if msg != '':
c.save()
print("msg saved"+ msg )
return JsonResponse({ 'msg': msg })
else:
return HttpResponse('Request must be POST.')
def chat_messages(request):
if request.method == "GET":
consultation_id = request.session['consultation_id']
c = Chat.objects.filter(consultation_id=consultation_id)
return render(request, 'consultation/chat_body.html', {'chat': c})
def checksentiment(request):
form = SentimentForm(request.POST or None)
context = {}
if request.method == 'POST':
if form.is_valid():
sent = form.cleaned_data.get('Sentence') # got the sentence
if sent == "I am bored":
textAns = "Bored"
context['text'] = 'Bored'
else:
textAns = te.get_emotion(sent)
print(textAns)
for key,value in textAns.items():
if value>0:
print(key)
context['text'] = key
else:
form = SentimentForm()
context['form'] = form
return render(request, 'patient/checksentiment/checksentiment.html', context=context)
def solution1(request):
if request.method == "GET":
return render(request, 'patient/checksentiment/solution/solution1.html')
#-----------------------------chatting system ---------------------------------------------------
``` |
{
"source": "AakanshaDhawan/Algorithms-2",
"score": 4
} |
#### File: AakanshaDhawan/Algorithms-2/naive_string_matching.py
```python
def naive(word, text):
m = len(word)
n = len(text)
counter = 0
for i in range(0,n-m+1):
found = True
for j in range(0, m):
if word[j] != text[i+j]:
found = False
break
if found:
counter += 1
if counter == 0:
print "No match"
else:
print "The word appears: ",counter, "times!"
naive("ata","ata")
naive("ata","atata")
naive("ata","aaaaaaa")
naive("ata","")
naive("125","1259784651887125987894125")
``` |
{
"source": "aakar77/Docker-isolation-benchmarking",
"score": 3
} |
#### File: Docker-isolation-benchmarking/docker-python-sdk/DockerApiClass.py
```python
import docker
from itertools import izip
import json
import datetime
import time
import csv
class docker_sdk_abstraction():
def __init__(self):
'''
<Purpose>
Initializes the Docker API client object.
Initializes the Docker API container class object to None;
container_obj is set later by the container_create / container_run methods.
<Arguments>
None
'''
self.docker_api_obj = docker.from_env()
self.container_obj = None
self.process_set = set()
self.container_start_timestamp = None
self.stats_dict = None
# Following are the Getter methods for getting Docker Api Container Object attributes
def get_container_id(self):
'''
<Purpose>
Get method for the container object's id attribute (the full container ID).
<Arguments>
None
<Return>
Returns the ID of the container this object points to.
'''
return self.container_obj.id
def get_container_id_short(self):
'''
<Purpose>
Get method for the container object's short id attribute (truncated to 10 characters).
<Arguments>
None
<Return>
Returns the 10 charcter container ID to which the object is pointing to
'''
return self.container_obj.short_id
def get_container_name(self):
'''
<Purpose>
Get method for the container object's name attribute.
It is assigned automatically by Docker if not specified at docker run / docker create time.
<Arguments>
None
<Return>
Returns the name of the container this object points to.
'''
return self.container_obj.name
def get_container_image_name(self):
'''
<Purpose>
Get method for the container's object image attribute.
<Arguments>
None
<Return>
Returns the container image name for example <Image: 'python-prog:latest'>
'''
return str(self.container_obj.image)
def get_container_status(self):
return self.container_obj.status
def get_container_process(self):
return self.process_set
def set_container_process(self):
if(self.get_container_status() != "exited"):
# docker container object top method, it gives process ids currently running in the form of a list
process_dict = self.container_obj.top()
nested_list = process_dict.get("Processes")
for list_a in nested_list:
self.process_set.add(list_a[1]) # Process ID
self.process_set.add(list_a[2]) # Parent Process
print self.get_container_process()
"""
It gives the process ID of processes running inside the container in format like
{u'Processes': [[u'root', u'27138', u'27121', u'30', u'16:36', u'?', u'00:00:01',
u'mplayer -benchmark -vo null -ao null ./Sintel.mp4']],
u'Titles': [u'UID', u'PID', u'PPID', u'C', u'STIME', u'TTY', u'TIME', u'CMD']}
Made a set attribute that stores the process running inside the docker container.
"""
# Following are the class methods
def container_create(self, docker_image_tag_name, container_arguments):
'''
<Purpose>
Create a docker container using containers.create method.
Initializes the docker API container class object.
<Arguments>
1) Image name for which container is to created.
2) A dictionary of keyword arguments passed to the containers.create() method.
'''
print container_arguments
self.container_obj = self.docker_api_obj.containers.create(docker_image_tag_name, **container_arguments)
def container_start(self):
'''
<Purpose>
Invoke Docker API container class object start method.
Starts the docker container
<Arguments>
None
'''
self.container_obj.start()
def container_run(self, docker_image_tag_name, detach_mode):
if (detach_mode == False):
# Docker container will run on foreground
# Output = docker container logs
# Will not return untill container execution completes
container_run_log = self.docker_api_obj.containers.run(docker_image_tag_name, detach=detach_mode)
return container_run_log
else:
# Docker container won't run in foreground
# Output of the containers.run method = Container class object
self.container_obj = self.docker_api_obj.containers.run(docker_image_tag_name, detach=detach_mode)
def container_log(self):
'''
<Purpose>
This method is for getting the container log after container has stopped running.
It creates a log file with the filename as container short id + output-file.log
<Arguments>
None
<Return>
None
'''
#Updating container's processes
self.set_container_process()
#Calling container object logs method - stream is False and Follow is True
container_end_log = self.container_obj.logs(stdout = True, stderr = True, stream = False, follow = True)
# Formatting the log output
container_end_log.replace("\r", "\n")
# Creating file name
filename = self.get_container_id_short() +"-"+self.get_container_image_name()+"-output-file.log"
# Creating and writting into the log file
log_file_obj = open(filename, "w+")
log_file_obj.write(container_end_log)
log_file_obj.close()
def container_log_stream(self):
'''
<Purpose>
This method is for getting the container log throughout the container execution.
It creates a log file with the filename as container short id + image name + output-file.log.
This method will return back only after the container has completed its execution. i.e. status = exited.
<Arguments>
None
<Return>
None
'''
# Reloading the container object attributes, especially needed for the status
self.container_obj.reload()
# Creating file name for the log file
filename = self.get_container_id_short()+"-"+self.get_container_image_name()+"-output-file.log"
log_file_obj = open(filename, "w+")
#update container process set attribute
self.set_container_process()
# Gives generator stream object helper
log_stream = self.container_obj.logs(stdout = True, stderr = True, stream = True, follow = True)
for data in izip(log_stream):
# Reloading the container object atrributes, more concerned for container status = exited
self.container_obj.reload()
# Formatting the stream data tuple
data = "".join(data)
data.replace("\r", "\n")
# Dumping the data into file
json.dump(data, log_file_obj)
#update container process set attribute
self.set_container_process()
"""
Break the generator stream, once the container status turns exicted.
If not, it will produce 0 values for all the other fields.
"""
if(self.get_container_status() == "exited"):
log_file_obj.close()
break
def container_stats_stream(self):
'''
<Purpose>
This method is for getting the statistics stream during the container execution.
It creates a stats file with the filename as container short id + stat-file.log.
This method will return back only after the container has completed its execution.
i.e. status = exited
Next Task would be: Manually logging cpu and memory data and calculating average
over them.
A suggestion by Lukas to log these stats into a CSV file and parse the file after
completion.
<Arguments>
None
<Return>
None
'''
self.get_container_process()
# Updating the container object attributes
self.container_obj.reload()
# Creating file name.
filename = self.get_container_id_short() +"-"+self.get_container_image_name()+"-stats-file.csv"
#stat_file_obj = open(filename, "w+")
# Gives generator stream object helper
stats_stream = self.container_obj.stats(decode=True, stream = True)
writer = csv.writer(open(filename, 'w+'))
for stats_tuple in izip(stats_stream):
# Updating the container object attributes, especially the container status
self.container_obj.reload()
# Getting memory stat dictionary
read_timestamp = stats_tuple[0]['read']
preread_timestamp = stats_tuple[0]['preread']
memory_stat = stats_tuple[0]['memory_stats']
memory_usage = memory_stat.get('usage')
memory_limit = memory_stat.get('limit')
memory_max_usage = memory_stat.get('max_usage')
csv_row = [read_timestamp, preread_timestamp, memory_usage, memory_limit, memory_max_usage ]
#print csv_row
writer.writerow(csv_row)
# If the container has exited, break the stats-streaming loop
if(self.get_container_status() == "exited"):
break
"""
# Getting CPU stats dictonary
cpu_stat = stats_tuple[0]['cpu_stats']
# Dumping the stats stream data, in the file
json.dump(stats_tuple, stat_file_obj, indent = 4)
#update container procees set attribute
self.set_container_process()
"""
def list_containers(self, method_options):
# sending a dict of keyword arguments; containers.list() returns a list of containers
list_containers = self.docker_api_obj.containers.list(**method_options)
return list_containers
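# The container_stats_stream docstring above mentions parsing the logged CSV
# afterwards to average the recorded stats. A minimal sketch of that
# post-processing step (hypothetical helper, not part of the original class;
# it assumes the column order written by container_stats_stream: read
# timestamp, preread timestamp, memory usage, memory limit, memory max usage):
def average_memory_usage(stats_csv_path):
    usages = []
    with open(stats_csv_path) as stats_file:
        for row in csv.reader(stats_file):
            # column index 2 holds the sampled memory usage in bytes
            if len(row) > 2 and row[2] not in ("", "None"):
                usages.append(float(row[2]))
    return sum(usages) / len(usages) if usages else 0.0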
#############################
object1 = docker_sdk_abstraction()
"""
Important note here
-------------------------
cpuset_cpus Datatype = string, e.g. "1"
cpu_shares Datatype = int only
mem_limit = an int gives the memory limit in bytes; strings like 200m, 4000k or 1g are also accepted
More options available at https://docker-py.readthedocs.io/en/stable/containers.html
"""
container_arguments = { 'cpuset_cpus': "1", 'cpu_shares': 2000, 'mem_limit': "200m" }
object1.container_create("docker-mplayer-i", container_arguments)
object1.container_start()
#print object1.get_container_image_name()
#object1.container_log_stream()
object1.container_stats_stream()
object1.container_log()
"""
#print object1.get_container_image()
#while(object1.get_container_status == "running"):
# pass
# object1.start_container()
# print object1.get_container_name()
"""
``` |
{
"source": "aakardwivedi/MusicComposer_A-Machine-Learning-Model",
"score": 3
} |
#### File: MusicComposer_A-Machine-Learning-Model/python-scripts/model.py
```python
import os
import logging
import numpy as np
import tensorflow as tf
from tensorflow.models.rnn import rnn_cell
from tensorflow.models.rnn import rnn, seq2seq
import nottingham_util
class Model(object):
"""
Cross-Entropy Naive Formulation
A single time step may have multiple notes active, so a sigmoid cross entropy loss
is used to match targets.
seq_input: a [ T x B x D ] matrix, where T is the time steps in the batch, B is the
batch size, and D is the amount of dimensions
"""
def __init__(self, config, training=False):
self.config = config
self.time_batch_len = time_batch_len = config.time_batch_len
self.input_dim = input_dim = config.input_dim
hidden_size = config.hidden_size
num_layers = config.num_layers
dropout_prob = config.dropout_prob
input_dropout_prob = config.input_dropout_prob
cell_type = config.cell_type
self.seq_input = \
tf.placeholder(tf.float32, shape=[self.time_batch_len, None, input_dim])
if (dropout_prob <= 0.0 or dropout_prob > 1.0):
raise Exception("Invalid dropout probability: {}".format(dropout_prob))
if (input_dropout_prob <= 0.0 or input_dropout_prob > 1.0):
raise Exception("Invalid input dropout probability: {}".format(input_dropout_prob))
# setup variables
with tf.variable_scope("rnnlstm"):
output_W = tf.get_variable("output_w", [hidden_size, input_dim])
output_b = tf.get_variable("output_b", [input_dim])
self.lr = tf.constant(config.learning_rate, name="learning_rate")
self.lr_decay = tf.constant(config.learning_rate_decay, name="learning_rate_decay")
def create_cell(input_size):
if cell_type == "vanilla":
cell_class = rnn_cell.BasicRNNCell
elif cell_type == "gru":
cell_class = rnn_cell.BasicGRUCell
elif cell_type == "lstm":
cell_class = rnn_cell.BasicLSTMCell
else:
raise Exception("Invalid cell type: {}".format(cell_type))
cell = cell_class(hidden_size, input_size = input_size)
if training:
return rnn_cell.DropoutWrapper(cell, output_keep_prob = dropout_prob)
else:
return cell
if training:
self.seq_input_dropout = tf.nn.dropout(self.seq_input, keep_prob = input_dropout_prob)
else:
self.seq_input_dropout = self.seq_input
self.cell = rnn_cell.MultiRNNCell(
[create_cell(input_dim)] + [create_cell(hidden_size) for i in range(1, num_layers)])
batch_size = tf.shape(self.seq_input_dropout)[1]  # batch dimension of the [T x B x D] input
self.initial_state = self.cell.zero_state(batch_size, tf.float32)
inputs_list = tf.unpack(self.seq_input_dropout)
# rnn outputs a list of [batch_size x H] outputs
outputs_list, self.final_state = rnn.rnn(self.cell, inputs_list,
initial_state=self.initial_state)
outputs = tf.pack(outputs_list)
outputs_concat = tf.reshape(outputs, [-1, hidden_size])
logits_concat = tf.matmul(outputs_concat, output_W) + output_b
logits = tf.reshape(logits_concat, [self.time_batch_len, -1, input_dim])
# probabilities of each note
self.probs = self.calculate_probs(logits)
self.loss = self.init_loss(logits, logits_concat)
self.train_step = tf.train.RMSPropOptimizer(self.lr, decay = self.lr_decay) \
.minimize(self.loss)
def init_loss(self, outputs, _):
self.seq_targets = \
tf.placeholder(tf.float32, [self.time_batch_len, None, self.input_dim])
batch_size = tf.shape(self.seq_input_dropout)[1]  # batch dimension, matching the loss normalisation in the subclasses
cross_ent = tf.nn.sigmoid_cross_entropy_with_logits(outputs, self.seq_targets)
return tf.reduce_sum(cross_ent) / self.time_batch_len / tf.to_float(batch_size)
def calculate_probs(self, logits):
return tf.sigmoid(logits)
def get_cell_zero_state(self, session, batch_size):
return self.cell.zero_state(batch_size, tf.float32).eval(session=session)
class NottinghamModel(Model):
"""
Dual softmax formulation
A single time step should be a concatenation of two one-hot encoded binary vectors.
Loss function is a sum of two softmax loss functions over [:r] and [r:] respectively,
where r is the number of melody classes
"""
def init_loss(self, outputs, outputs_concat):
self.seq_targets = \
tf.placeholder(tf.int64, [self.time_batch_len, None, 2])
batch_size = tf.shape(self.seq_targets)[1]
with tf.variable_scope("rnnlstm"):
self.melody_coeff = tf.constant(self.config.melody_coeff)
r = nottingham_util.NOTTINGHAM_MELODY_RANGE
targets_concat = tf.reshape(self.seq_targets, [-1, 2])
melody_loss = tf.nn.sparse_softmax_cross_entropy_with_logits( \
outputs_concat[:, :r], \
targets_concat[:, 0])
harmony_loss = tf.nn.sparse_softmax_cross_entropy_with_logits( \
outputs_concat[:, r:], \
targets_concat[:, 1])
losses = tf.add(self.melody_coeff * melody_loss, (1 - self.melody_coeff) * harmony_loss)
return tf.reduce_sum(losses) / self.time_batch_len / tf.to_float(batch_size)
def calculate_probs(self, logits):
steps = []
for t in range(self.time_batch_len):
melody_softmax = tf.nn.softmax(logits[t, :, :nottingham_util.NOTTINGHAM_MELODY_RANGE])
harmony_softmax = tf.nn.softmax(logits[t, :, nottingham_util.NOTTINGHAM_MELODY_RANGE:])
steps.append(tf.concat(1, [melody_softmax, harmony_softmax]))
return tf.pack(steps)
def assign_melody_coeff(self, session, melody_coeff):
if melody_coeff < 0.0 or melody_coeff > 1.0:
raise Exception("Invalid melody coeffecient")
session.run(tf.assign(self.melody_coeff, melody_coeff))
class NottinghamSeparate(Model):
"""
Single softmax formulation
Regular single classification formulation, used to train baseline models
where the melody and harmony are trained separately
"""
def init_loss(self, outputs, outputs_concat):
self.seq_targets = \
tf.placeholder(tf.int64, [self.time_batch_len, None])
batch_size = tf.shape(self.seq_targets)[1]
with tf.variable_scope("rnnlstm"):
self.melody_coeff = tf.constant(self.config.melody_coeff)
targets_concat = tf.reshape(self.seq_targets, [-1])
losses = tf.nn.sparse_softmax_cross_entropy_with_logits( \
outputs_concat, targets_concat)
return tf.reduce_sum(losses) / self.time_batch_len / tf.to_float(batch_size)
def calculate_probs(self, logits):
steps = []
for t in range(self.time_batch_len):
softmax = tf.nn.softmax(logits[t, :, :])
steps.append(softmax)
return tf.pack(steps)
``` |
{
"source": "aakarsh2126/HacktoberFest_2021",
"score": 2
} |
#### File: phishingurldetector/phishingapp/views.py
```python
from django.shortcuts import render
import joblib,os
#pkl
def index(request):
phish_model = open('templates/phishing.pkl','rb')
phish_model_ls = joblib.load(phish_model)
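# The unpickled estimator (assumed to be a fitted scikit-learn text classifier)
# labels a URL 'bad' when it looks like phishing; any other label is treated as safe below.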
if request.method == 'GET':
X_predict = []
search_text = request.GET.get("search_box")
print(search_text)
X_predict.append(str(search_text))
y_Predict = phish_model_ls.predict(X_predict)
result=""
if y_Predict == 'bad':
result = "Not Secure"
else:
result = "Secure"
return render(request, "index.html", {'search_text':result,'text':search_text})
``` |
{
"source": "aakarshg/scribe",
"score": 3
} |
#### File: transcribe/scribe_modules/base_scribe_module.py
```python
from abc import ABCMeta, abstractmethod ## noqa
class ScribeModuleBaseClass(metaclass=ABCMeta):
# we can do something like this:
# def __init__(self, dictionary):
# for k, v in dictionary.items():
# setattr(self, k, v)
# if we are sure that there's no transformation needed on the
# input dict.
def __init__(self, input_dict=None, module_name=None, input_type=None,
host_name=None, scribe_uuid=None):
if module_name:
self.module = module_name
self.source_type = input_type
self.host = host_name
self.scribe_uuid = scribe_uuid
# self.value = list(input_dict.values())[0]
# Ideally we'd be using abstractmethod;
# however, since we don't want child classes to have to write their
# own methods, we'll not be using it.
# This also allows users to implement their own __iter__ function
# in their classes where they can make the object's entities iterable
# as they please
# @abstractmethod
def __iter__(self):
for attr, value in self.__dict__.items():
yield attr, value
```
#### File: transcribe/scribe_modules/cpu_vulnerabilities.py
```python
from . import ScribeModuleBaseClass
from . lib.util import format_url
class Cpu_vulnerabilities(ScribeModuleBaseClass):
def __init__(self, input_dict=None, module_name=None, host_name=None,
input_type=None, scribe_uuid=None):
ScribeModuleBaseClass.__init__(self, module_name=module_name,
input_dict=input_dict,
host_name=host_name,
input_type=input_type,
scribe_uuid=scribe_uuid)
if input_dict:
self._assign_properties(self._parse(input_dict))
def __iter__(self):
for attr, value in self.__dict__.items():
yield attr, value
def _assign_properties(self, initial_data):
for key in initial_data:
setattr(self, key, initial_data[key])
def _parse(self, input_parse):
output_dict = {}
if '/' in input_parse:
input_string = input_parse.split('/')[-1]
else:
input_string = input_parse
if ':' in input_string:
name_string = input_string.split(':')
output_dict['vulnerability_name'] = name_string[0]
if 'Vulnerable' in input_string:
output_dict['current_vulnerability_status'] = True
vulnerable_string = input_string.split('Vulnerable: ')
if len(vulnerable_string) >= 2:
output_dict['current_vulnerability_type'] = \
vulnerable_string[1]
else:
output_dict['current_vulnerability_status'] = False
if 'Mitigation' in input_string:
output_dict['mitigation_exists'] = True
mitigation_string = input_string.split('Mitigation: ')
if len(mitigation_string) >= 2:
output_dict['mitigation_type'] = \
mitigation_string[1]
else:
output_dict['mitigation_exists'] = False
return output_dict
```
#### File: transcribe/scribe_modules/yum_repos.py
```python
from . import ScribeModuleBaseClass
from . lib.util import format_url
base_url = "http://mirror.centos.org/centos/$releasever/{}/$basearch/"
# object_dict = {}
class Yum_repos(ScribeModuleBaseClass):
def __init__(self, input_dict=None, module_name=None, host_name=None,
input_type=None, scribe_uuid=None):
ScribeModuleBaseClass.__init__(self, module_name=module_name,
input_dict=input_dict,
host_name=host_name,
input_type=input_type,
scribe_uuid=scribe_uuid)
if input_dict:
# object_dict['repo_name'] = input_dict['repoid']
# object_dict['repo_state'] = self.update_repo_state(input_dict)
# object_dict['base_url'] = format_url(base_url, self.repo_name)
self.repo_name = input_dict['repoid']
self.repo_state = self.update_repo_state(input_dict)
# This is just for the sake of it
self.base_url = format_url(base_url, self.repo_name)
def update_repo_state(self, value):
if value['state'] == 'enabled':
return 1
return 0
def __iter__(self):
for attr, value in self.__dict__.items():
yield attr, value
``` |
{
"source": "aakarshg/sincgars",
"score": 2
} |
#### File: aakarshg/sincgars/deploy.py
```python
import argparse
import sys
import subprocess
import time
def _run(cmd):
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
stdout,stderr = process.communicate()
return stdout.strip(), process.returncode
def main():
parser = argparse.ArgumentParser(description='enable remotewrite',prog='sincgars')
parser.add_argument(
'-c', '--clustername', nargs=1,
help='clustername to be added as an externallabel')
parser.add_argument(
'-u', '--url', nargs=1,
help='the endpoint for the remotewrite')
args = parser.parse_args()
_json = '{ "data": { "config.yaml": "prometheusK8s:\\n externalLabels:\\n clustername: placeholder_name\\n remoteWrite:\\n - url: placeholder_url\\n" }}'
_count , _rc = _run('oc -n openshift-monitoring get configmap | grep cluster-monitoring-config -i -c')
if int(_count) == 0:
print("cluster-monitoring-config configmap doesn't already exist, so creating one")
print(subprocess.check_output('oc -n openshift-monitoring create configmap cluster-monitoring-config'.split(' ')))
_temp_array = 'oc patch configmap/cluster-monitoring-config -n openshift-monitoring --patch'.split(' ')
new_json = _json.replace("placeholder_name",str(args.clustername[0]).strip()).replace("placeholder_url",str(args.url[0]).strip())
_temp_array.append(new_json)
print(subprocess.check_output(_temp_array))
if __name__ == '__main__':
sys.exit(main())
``` |
{
"source": "aakarshg/touchstone",
"score": 2
} |
#### File: touchstone/benchmarks/pgbench.py
```python
import logging
from . import BenchmarkBaseClass
_logger = logging.getLogger("touchstone")
class Pgbench(BenchmarkBaseClass):
def _build_search(self):
_logger.debug("Building search array for PGBENCH")
return self._search_dict[self._source_type][self._harness_type]
def _build_compare_keys(self):
_logger.debug("Building compare map")
_temp_dict = {}
for index in self._search_map:
_temp_dict[index] = self._search_map[index]['compare']
return _temp_dict
def _build_compute(self):
_logger.debug("Building compute map")
_temp_dict = {}
for index in self._search_map:
_temp_dict[index] = self._search_map[index]['compute']
return _temp_dict
def __init__(self, source_type=None, harness_type=None):
_logger.debug("Initializing PGBENCH instance")
BenchmarkBaseClass.__init__(self, source_type=source_type,
harness_type=harness_type)
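# Search-map layout: source type -> harness -> index name -> {'compare': run
# metadata fields used to match runs, 'compute': filter/bucket/aggregation specs
# evaluated against that index}.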
self._search_dict = {
'elasticsearch': {
'ripsaw': {
'ripsaw-pgbench-summary': {
'compare': ['uuid', 'user', 'cluster_name',
'scaling_factor', 'query_mode',
'number_of_threads', 'number_of_clients',
'duration_seconds'
],
'compute': [{
'filter': {
'workload': 'pgbench'
},
'buckets': ['iteration'],
'aggregations': {},
'collate': ['tps_incl_con_est',
'number_of_transactions_actually_processed', # noqa
'latency_average_ms'
]
}, ]
},
'ripsaw-pgbench-results': {
'compare': ['transaction_type'],
'compute': [{
'filter': {
'workload': 'pgbench'
},
'buckets': ['iteration'],
'aggregations': {
'latency_ms': [{
'percentiles': {
'percents': [95]
}
}]
},
'collate': []
}, ]
}
}
}
}
self._search_map = self._build_search()
self._compute_map = self._build_compute()
self._compare_map = self._build_compare_keys()
_logger.debug("Finished initializing pgbench instance")
def emit_compute_map(self):
_logger.debug("Emitting built compute map ")
_logger.info("Compute map is {} in the database \
{}".format(self._compute_map, self._source_type))
return self._compute_map
def emit_compare_map(self):
_logger.debug("Emitting built compare map ")
_logger.info("compare map is {} in the database \
{}".format(self._compare_map, self._source_type))
return self._compare_map
def emit_indices(self):
return self._search_map.keys()
``` |
{
"source": "Aakarshit-Sharma19/django-quill-editor",
"score": 2
} |
#### File: django-quill-editor/django_quill/widgets.py
```python
from collections.abc import Mapping
from django import forms
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.serializers.json import DjangoJSONEncoder
from django.forms.renderers import get_default_renderer
from django.forms.utils import flatatt
from django.utils.encoding import force_text
from django.utils.functional import Promise
from django.utils.safestring import mark_safe
from .config import DEFAULT_CONFIG
__all__ = (
'LazyEncoder',
'QuillWidget',
)
class LazyEncoder(DjangoJSONEncoder):
def default(self, obj):
if isinstance(obj, Promise):
return force_text(obj)
return super(LazyEncoder, self).default(obj)
json_encode = LazyEncoder().encode
class QuillWidget(forms.Textarea):
class Media:
js = (
'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.1.1/highlight.min.js',
'django_quill/django_quill.js',
'https://cdn.quilljs.com/1.3.7/quill.min.js',
'https://unpkg.com/[email protected]/dist/quill.imageUploader.min.js'
)
css = {
'all': (
'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.1.1/styles/darcula.min.css',
'django_quill/django_quill.css',
'https://cdn.quilljs.com/1.3.7/quill.snow.css',
'https://unpkg.com/[email protected]/dist/quill.imageUploader.min.css'
)
}
def __init__(self, config_name='default', *args, **kwargs):
super().__init__(*args, **kwargs)
self.config = DEFAULT_CONFIG.copy()
self.image_upload_url = getattr(
settings, 'QUILL_IMAGE_UPLOAD_URL', None)
configs = getattr(settings, 'QUILL_CONFIGS', None)
if configs:
if isinstance(configs, Mapping):
if config_name in configs:
config = configs[config_name]
if not isinstance(config, Mapping):
raise ImproperlyConfigured(
'QUILL_CONFIGS["%s"] setting must be a Mapping object' % config_name)
self.config.update(config)
else:
raise ImproperlyConfigured(
'No configuration named "%s" found in your QUILL_CONFIGS' % config_name)
else:
raise ImproperlyConfigured(
'QUILL_CONFIGS settings must be a Mapping object')
def render(self, name, value, attrs=None, renderer=None):
if renderer is None:
renderer = get_default_renderer()
if value is None:
value = ''
attrs = attrs or {}
attrs['name'] = name
if hasattr(value, 'quill'):
attrs['quill'] = value.quill
else:
attrs['value'] = value
final_attrs = self.build_attrs(self.attrs, attrs)
return mark_safe(renderer.render('django_quill/widget.html', {
'final_attrs': flatatt(final_attrs),
'id': final_attrs['id'],
'name': final_attrs['name'],
'config': json_encode(self.config),
'imageUploadURL': self.image_upload_url,
'quill': final_attrs.get('quill', None),
'value': final_attrs.get('value', None),
}))
``` |
{
"source": "aakaruimahesh/tic-tac-toe",
"score": 4
} |
#### File: aakaruimahesh/tic-tac-toe/player.py
```python
class Player:
# declaring class variables
name = ''
# constructor: takes valid player name
def __init__(self, player_number):
is_valid_name = False
while not is_valid_name:
name = input('Enter the Player {0} name: '.format(player_number))
if name:
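# append the player number so that, e.g., "Mahesh" entered for player 1 is stored as "Mahesh1"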
name += '{0}'.format(player_number)
self.set_name(name)
is_valid_name = True
else:
print('Please enter a valid name.')
# getter method to return name from class variable
def get_name(self):
return self.name
# setter method to set name in class variable
def set_name(self, name):
self.name = name
``` |
{
"source": "aakash0017/BI-MOO",
"score": 2
} |
#### File: BI-MOO/examples/main.py
```python
import sys
from plotter import Plotter
from metrics.problems.zdt import ZDT3Metrics
import plotly.express as px
from nsga2.evolution import Evolution
from nsga2.problems.zdt import ZDT
from nsga2.problems.zdt.zdt3_definitions import ZDT3Definitions
import matplotlib.pyplot as plt
from plotter import Plotter
import examples.triclusteringPlusAffiramationScore as tr
from examples.triclusteringPlusAffiramationScore import Tricluster
def print_generation(population, generation_num):
print("Generation: {}".format(generation_num))
# def print_metrics(population, generation_num):
# pareto_front = population.fronts[0]
# metrics = ZDT3Metrics()
# # hv = metrics.HV(pareto_front)
# # hvr = metrics.HVR(pareto_front)
# # print("HV: {}".format(hv))
# print("HVR: {}".format(hvr))
collected_metrics = {}
# def collect_metrics(population, generation_num):
# pareto_front = population.fronts[0]
# metrics = ZDT3Metrics()
# hv = metrics.HV(pareto_front)
# hvr = metrics.HVR(pareto_front)
# collected_metrics[generation_num] = hv, hvr
# delta = 100
path1 = 'yeast_output.txt'
# names1 =[path1+'tp1',path1+'tp2', path1+'tp3',path1+'tp4']
#bcl dataste
data = tr.readFiles(path1)
# zdt_definitions = ZDT3Definitions(data, delta)
zdt_definitions = ZDT3Definitions(data)
# plotter = Plotter(zdt_definitions)
problem = ZDT(zdt_definitions)
evolution = Evolution(problem, 100, 10)
# evolution.register_on_new_generation(plotter.plot_population_best_front)
evolution.register_on_new_generation(print_generation)
# evolution.register_on_new_generation(print_metrics)
# evolution.register_on_new_generation(collect_metrics)
pareto_front = evolution.evolve()
# plotter = Plotter(problem)
# plotter.plot_x_y(collected_metrics.keys(), map(lambda x: x[1], collected_metrics.values()), 'generation', 'HVR', 'HVR metric for ZDT3 problem', 'hvr-zdt3')
# function1 = [i[0] for i in problem.min_objectives[0]]
# function2 = [i[1] for i in problem.min_objectives[1]]
# plt.xlabel('Function 1', fontsize=15)
# plt.ylabel('Function 2', fontsize=15)
# plt.scatter(function1, function2)
# plt.show()
print(problem.function1)
print(problem.function2)
plt.xlabel('MSR', fontsize=15)
plt.ylabel('Area', fontsize=15)
print(problem.function2)
print(problem.function1)
plt.scatter(problem.function1, problem.function2)
plt.show()
print("'generation 100 individual: 20 dataset: som_yeast ")
print('Lowest MSR is ' ,problem.min_objectives[0])
print('heighest msr is ', problem.max_objectives[0])
fig = px.scatter(x=problem.function1, y=problem.function2, )
fig2 = px.line(y=problem.function1)
fig3 = px.line(y=problem.function2)
fig.show()
fig2.show()
fig3.show()
``` |
{
"source": "aakash0017/covid-bot",
"score": 2
} |
#### File: covid-bot/data/default_dict.py
```python
def default_dict():
dict_ = {
'Name': '',
'Mobile': '',
'Email': '',
'City': '',
'State': '',
'Resources': '',
'Description': ''
}
return dict_
def default_chat_dict():
dict_ = {
'updateID': '',
'chatID': '',
'Text': ''
}
return dict_
```
#### File: aakash0017/covid-bot/user.py
```python
import numpy as np
from user_utility import user_utility
from data.res_list import res_list
from utility import _utility
class user:
def __init__(self, name, email_Id, mobile):
self.name = name
self.email_Id = email_Id
self.mobile = mobile
# user's categories
self.helper = False
self.aid = False
# user's location details
self.state = ''
self.city = ''
# list of resources
self.resource = []
# list of user_resources
self.user_res = []
self.description = ''
# remove later
# resource file path
self.file_path = 'data/res.npy'
# tokens
tokens = ['state', 'city', 'resources', 'blood_grp']
# user_providance for plasma
self.has_plasma = False
# blood_grp for plasma users
self.blood_grps = []
# load resource list saved under data/ directory
# self.load_res()
# initialize resource dict
self.create_res_dict()
def resource_provider(self):
self.helper = True
def help_required(self):
self.aid = True
def create_res_dict(self):
# list of zeros of the same length as the resources list
self.resource = user_utility.load_file(self.file_path)
# self.resource = res_list()
indicator = [0] * len(self.resource)
self.res_dict = dict(zip(self.resource, indicator))
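# res_dict maps each resource name to a 0/1 flag, e.g. {'Oxygen': 0, 'Plasma': 1};
# update_dict() flips a flag to 1 when the user selects that resource
# (the example keys are illustrative, the real list is loaded from res.npy)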
def get_details(self):
if self.has_plasma:
self.user_res = user_utility.user_res(self.res_dict)
details = [self.name, self.mobile, self.email_Id, self.city, self.state, self.user_res, '']
else:
self.factor_res()
details = [self.name, self.mobile, self.email_Id, self.city, self.state, self.user_res, '']
return details
# update user attributes
def update_attributes(self, token, update_key):
if token == 'state':
self.state = update_key
elif token == 'city':
self.city = update_key
elif token == 'resources':
self.res_dict = user_utility.update_dict(self.res_dict, update_key)
elif token == 'blood_grp':
self.blood_grps = user_utility.plasma_handler(self.res_dict, self.blood_grps, update_key)
# update user resource list based on mentioned resources.
# def factor_res(self):
# self.user_res = user_utility.user_res(self.res_dict)
# if user_utility.check_plasma(self.user_res):
# # concate blood groups
# user_utility.concat_grps(self.user_res, self.blood_grps)
def factor_res(self):
self.user_res = user_utility.user_res(self.res_dict)
# concatenate blood groups
user_utility.concat_grps(self.user_res, self.blood_grps)
```
#### File: covid-bot/user_utility/user_utility.py
```python
import numpy as np
import re
from cms_queries.queries import post_request
# load files from data directory
def load_file(file_path):
return np.load(file_path, allow_pickle=True)
# update resource dict
def update_dict(res_dict, update_key):
for key, value in res_dict.items():
if key == update_key:
res_dict[key] = 1
return res_dict
def validate_email(email):
regex = '^(\w|\.|\_|\-)+[@](\w|\_|\-|\.)+[.]\w{2,3}$'
if re.search(regex, email):
return True
else:
return False
def validate_mobile(mobile):
Pattern = re.compile("(0/91)?[7-9][0-9]{9}")
if (Pattern.match(mobile)):
return True
else :
return False
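# e.g. validate_mobile("9876543210") -> True, validate_mobile("12345") -> False
# (note that the optional group "(0/91)?" matches the literal text "0/91", not a country code)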
# def plasma_handler(res_dict, blood_grps, user_input):
# grps = user_input.split(' ')
# for bg in grps:
# if res_dict['Plasma'] == 1:
# blood_grps.append(bg.lower())
# return blood_grps
def plasma_handler(res_dict, blood_grps, user_input):
grps = user_input.split(' ')
for bg in grps:
blood_grps.append(bg.lower())
return blood_grps
def check_plasma(res_list):
if 'Plasma' in res_list:
return True
else:
return False
def user_res(res_dict):
return [key for key, val in res_dict.items() if val == 1]
def concat_grps(res_list, blood_grps):
# remove plasma
# res_list.remove('Plasma')
string = 'Plasma_{0}'
# concat blood grps to string
return [res_list.append(string.format(bg)) for bg in blood_grps]
def save_details(details_dict, chat_id, hasplasma_Flag):
details_dict['chat_id'] = chat_id
details_dict['has_plasma'] = hasplasma_Flag
details_dict['Resources'].remove("Plasma")
tmp_string = ''
for i in details_dict['Resources']:
tmp_string += i + ','
details_dict['Resources'] = tmp_string
print("67", details_dict)
dict_body = def_dict_2_post_mapping(details_dict)
url = 'https://covid-bot-cms.herokuapp.com'
res = post_request(endpoint='/Beta-objects', body=dict_body, url=url)
print(res)
return res
def save_details_app(details_dict, chat_id, hasplasma_Flag):
details_dict['chat_id'] = chat_id
details_dict['has_plasma'] = hasplasma_Flag
print("67", details_dict)
dict_body = def_dict_2_post_mapping(details_dict)
url = 'https://covid-bot-cms.herokuapp.com'
res = post_request(endpoint='/Beta-objects', body=dict_body, url=url)
print(res)
return res
def after_bg_save(details_dict, chat_id, hasplasma_Flag=False):
details_dict['chat_id'] = chat_id
details_dict['has_plasma'] = hasplasma_Flag
tmp_string = ''
for i in details_dict['Resources']:
tmp_string += i + ','
details_dict['Resources'] = tmp_string
print("60", details_dict)
dict_body = def_dict_2_post_mapping(details_dict)
url = 'https://covid-bot-cms.herokuapp.com'
res = post_request(endpoint='/Beta-objects', body=dict_body, url=url)
print(res)
return res
def def_dict_2_post_mapping(details_dict):
dict_ = {'name': '', 'mobile': '', 'email': '', 'city': '', 'state': '', 'resources': '', 'description': '', 'chatID': '', 'hasPlasma': ''}
dict_['name'] = details_dict['Name']
dict_['mobile'] = details_dict['Mobile']
dict_['email'] = details_dict['Email']
dict_['city'] = details_dict['City']
dict_['state'] = details_dict['State']
dict_['resources'] = details_dict['Resources']
dict_['description'] = details_dict['Description']
dict_['chatID'] = details_dict['chat_id']
dict_['hasPlasma'] = details_dict['has_plasma']
return dict_
``` |
{
"source": "aakash0017/DeepCon-Frontend",
"score": 2
} |
#### File: aakash0017/DeepCon-Frontend/app.py
```python
from concurrent.futures import process
import emails
import random
import string
from flask import Flask, render_template, request, redirect
import requests
from mail_generator import send_email
from utilities import upload_to_aws
import os
import asyncio
import httpx
from dotenv import load_dotenv
from database_handler import insert_values, get_result, authenticate_login
import jinja2
env = jinja2.Environment()
env.globals.update(zip=zip)
load_dotenv()
app = Flask(__name__)
app.config["MP3_UPLOADS"] = "static/images/uploads"
def generate_process_code():
x = ''.join(random.choice(string.ascii_uppercase +
string.ascii_lowercase + string.digits) for _ in range(16))
return x
@app.route('/')
def index():
return render_template('login.html')
@app.route('/success', methods=["POST", "GET"])
def success():
template = 'nologin.html'
if request.method == "POST":
email = request.form.get('email')
password = request.form.get('password')
print("email: ", email)
print("password: ", password)
auth = authenticate_login(email, password)
print('auth:', auth)
if auth:
template = 'home.html'
return render_template(template)
@app.route('/minutes', methods=["POST", "GET"])
async def result():
if request.method == "POST":
sender = "<EMAIL>"
receivers = request.form.get("email")
file = request.files["audio"]
translation = ' '.join(request.form.getlist('Translation'))
length = request.form.get('length')
num_speakers = request.form.get('num-speakers')
print('length', length)
print('translation checkbox ', translation)
file.save(os.path.join(app.config["MP3_UPLOADS"], file.filename))
file_path = "static/images/uploads/{}".format(file.filename)
print("-------------file path -------------- ", file_path)
receivers_name = request.form.get("name")
process_code = generate_process_code()
res = upload_to_aws(file_path, file_name=process_code)
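# hand the uploaded audio off to the processing microservice (assumed to be the
# DeepCon backend listening on localhost:8000); process_code later ties the
# upload to the notification e-mail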
try:
async with httpx.AsyncClient() as client:
params_dict = {'process_code': process_code,
'receiver_email': receivers,
'receiver_name': receivers_name,
'translation': translation,
'length': length,
'num_speakers': num_speakers
}
res = await asyncio.gather(
client.post('http://localhost:8000/getcode',
params=params_dict)
)
except:
print("microservice request not processed")
insert_values(process_code=process_code,
receiver_name=receivers_name,
receiver_email=receivers,
)
email_res = send_email(process_code=process_code,
receiver_email=receivers,
receivers_name=receivers_name,
sender=sender)
return render_template('greetings.html')
@app.route('/get-transcript')
def sample():
return render_template('result.html')
@app.route('/get-transcript', methods=["POST", "GET"])
def my_form_post():
if request.method == 'POST':
process_code = request.form.get('Process_code')
ziped_list, process_code, user_name = get_result(process_code)
return render_template('result.html', list=ziped_list, process_code=process_code, user_name=user_name)
# @app.route('/get-transcript', methods=["POST", "GET"])
if __name__ == '__main__':
app.run(host='0.0.0.0', port=3000)
``` |
{
"source": "aakash0017/DeepCon-Server",
"score": 2
} |
#### File: aakash0017/DeepCon-Server/isometric_translation.py
```python
import os
import boto3
import shutil
# from tqdm.notebook import tqdm
from tqdm import tqdm
import pandas as pd
from torch.utils.data import DataLoader
from datasets import load_dataset
import transformers
import subprocess
from transformers import MarianMTModel, MarianTokenizer, AutoTokenizer, AutoModel, MBartForConditionalGeneration
tqdm.pandas()
# utilities
def read_minute(path_to_file):
with open(path_to_file, 'r') as f:
minute = [line.strip() for line in f]
return minute
def translate_minutes(
minute: list,
tokenizer: transformers.models.marian.tokenization_marian.MarianTokenizer,
model: transformers.models.marian.modeling_marian.MarianMTModel
):
translated_minutes = []
for source_sentence in tqdm(minute, total=len(minute)):
translated = model.generate(**tokenizer(source_sentence, return_tensors="pt", padding=True))
translated_minutes.extend([tokenizer.decode(t, skip_special_tokens=True) for t in translated])
return translated_minutes
def save_2_text(translated_minutes: list, path_to_file: str):
with open(path_to_file, 'w') as filehandle:
for listitem in translated_minutes:
filehandle.write('%s\n' % listitem)
def check_for_verbosity(input_text, target_text):
if not input_text or not target_text:
return False
ts_ratio = len(target_text)/len(input_text)
if not (ts_ratio >= 0.90 and ts_ratio <= 1.10):
return True
return False
def append_paraphrase_prompt(input_text, target_text):
ts_ratio = len(target_text)/len(input_text)
prefix = None
if ts_ratio < 0.90:
prefix = "paraphrase long"
elif ts_ratio > 1.10:
prefix = "paraphrase short"
target_text = prefix + " " + target_text
return target_text
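# Worked example: a 100-character source with a 120-character translation gives
# ts_ratio = 1.2 > 1.10, so "paraphrase short " is prepended; an 80-character
# translation gives ts_ratio = 0.8 < 0.90 and gets "paraphrase long " instead.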
# -------------------------------------------------------- #
language_2_mt_model_mappings = {
'de': 'enimai/opus-mt-en-de-finetuned-en-to-de',
'fr': 'enimai/OPUS-mt-en-fr-finetuned-MUST-C',
'ru': 'enimai/opus-mt-en-ru-finetuned-en-to-ru',
'it': 'enimai/opus-mt-en-it-finetuned-en-to-it',
'hi': 'enimai/opus-mt-en-hi-finetuned-en-to-hi',
}
language_2_para_model_mappings = {
'de': 'enimai/mbart-large-50-paraphrase-finetuned-for-de',
'fr': 'enimai/mbart-large-50-paraphrase-finetuned-for-fr',
'ru': 'enimai/mbart-large-50-paraphrase-finetuned-for-ru'
}
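# language code -> Hugging Face checkpoint: MarianMT models handle translation and
# the mBART-50 paraphrasers handle the length-controlled rewriting (only de/fr/ru
# have a paraphrase model, so the other languages skip that step).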
def simple_translation(
text: str,
tokenizer: transformers.models.marian.tokenization_marian.MarianTokenizer,
model: transformers.models.marian.modeling_marian.MarianMTModel
):
output = []
translated = model.generate(**tokenizer(text, return_tensors="pt", padding=True))
output.extend([tokenizer.decode(t, skip_special_tokens=True) for t in translated])
return output
def translate_keywords(languages, process_code):
current_directory = os.getcwd()
path_to_output_folder = os.path.join(current_directory, "output")
path_to_keywords_folder = os.path.join(path_to_output_folder, 'processed-keywords')
path_to_translated_keywords_folder = os.path.join(path_to_output_folder, 'keywords-translated')
path_to_keyword_file = [os.path.join(path_to_keywords_folder, file) for file in os.listdir(path_to_keywords_folder) if file == f"{process_code}.csv"][0]
for language in languages:
df = pd.read_csv(path_to_keyword_file)
model_name = language_2_mt_model_mappings[language]
tokenizer = MarianTokenizer.from_pretrained(model_name)
model = MarianMTModel.from_pretrained(model_name)
translated_texts = []
for index, row in df.iterrows():
translated_texts.extend(
simple_translation(row['text'], tokenizer, model))
translated_df = pd.DataFrame({'text': translated_texts})
path_to_translated_keyword_file = os.path.join(path_to_translated_keywords_folder, f"translated_{language}_{process_code}.csv")
translated_df.to_csv(path_to_translated_keyword_file, index=False)
def process_transcripts_translation(languages, process_code):
current_directory = os.getcwd()
path_to_output_folder = os.path.join(current_directory, "output")
path_to_transcript_folder = os.path.join(path_to_output_folder, 'processed-transcripts')
path_to_translated_transcripts_folder = os.path.join(path_to_output_folder, 'transcripts-translated')
path_to_transcripts_file = [os.path.join(path_to_transcript_folder, file) for file in os.listdir(path_to_transcript_folder) if file == f"{process_code}.txt"][0]
for language in languages:
transcripts = read_minute(path_to_transcripts_file)
model_name = language_2_mt_model_mappings[language]
tokenizer = MarianTokenizer.from_pretrained(model_name)
model = MarianMTModel.from_pretrained(model_name)
translated = translate_minutes(transcripts, tokenizer, model)
path_to_translated_file = os.path.join(path_to_translated_transcripts_folder, f"translated_{language}_{process_code}.txt")
save_2_text(translated, path_to_translated_file)
def process_translation(languages, process_code):
current_directory = os.getcwd()
path_to_output_folder = os.path.join(current_directory, "output")
path_to_minutes_folder = os.path.join(path_to_output_folder, 'meeting-minutes')
path_to_translated_minutes_folder = os.path.join(path_to_output_folder, 'meeting-minutes-translated')
path_to_minute_file = [os.path.join(path_to_minutes_folder, file) for file in os.listdir(path_to_minutes_folder) if file == f"{process_code}.txt"][0]
for language in languages:
minutes = read_minute(path_to_minute_file)
model_name = language_2_mt_model_mappings[language]
tokenizer = MarianTokenizer.from_pretrained(model_name)
model = MarianMTModel.from_pretrained(model_name)
translated = translate_minutes(minutes, tokenizer, model)
path_to_translated_file = os.path.join(path_to_translated_minutes_folder, f"translated_{language}_{process_code}.txt")
save_2_text(translated, path_to_translated_file)
def process_paraphrase(languages, process_code):
para_languages = list(language_2_para_model_mappings.keys())
for language in languages:
if language not in para_languages:
continue
current_directory = os.getcwd()
path_to_output_folder = os.path.join(current_directory, "output")
path_to_minutes_folder = os.path.join(path_to_output_folder, 'meeting-minutes')
path_to_translated_minutes_folder = os.path.join(path_to_output_folder, 'meeting-minutes-translated')
path_to_minute_file = [os.path.join(path_to_minutes_folder, file) for file in os.listdir(path_to_minutes_folder) if file == f"{process_code}.txt"][0]
path_to_translated_file = [os.path.join(path_to_translated_minutes_folder, file) for file in os.listdir(path_to_translated_minutes_folder) if file == f"translated_{language}_{process_code}.txt"][0]
minutes = read_minute(path_to_minute_file)
translated = read_minute(path_to_translated_file)
model_name = language_2_para_model_mappings[language]
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = MBartForConditionalGeneration.from_pretrained(model_name)
pred_df = pd.DataFrame({
'input_text': minutes,
'target_text': translated
})
# check if mt_prediction -> input length ratio is normal
pred_df["is_normal"] = pred_df.progress_apply(
lambda row: check_for_verbosity(row['input_text'], row['target_text']),
axis=1
)
not_normal_seq_index = pred_df.index[pred_df['is_normal'] == True].to_list()
columns = ["input_text", "target_text"]
pred_normal_df = pred_df[~pred_df.index.isin(not_normal_seq_index)][columns]
pred_not_normal_df = pred_df[pred_df.index.isin(not_normal_seq_index)][columns]
# apply paraphrase prompt
pred_not_normal_df["target_text"] = pred_not_normal_df.progress_apply(
lambda row: append_paraphrase_prompt(row['input_text'], row['target_text']),
axis=1
)
# temp folder to store files
path_to_temp_folder = os.path.join(current_directory, f"temp_{process_code}")
if not os.path.exists(path_to_temp_folder):
os.makedirs(path_to_temp_folder)
path_to_not_normal_file = os.path.join(path_to_temp_folder, "test_not_normal.csv")
path_to_normal_file = os.path.join(path_to_temp_folder, "test_normal.csv")
pred_not_normal_df.to_csv(path_to_not_normal_file, index=False)
pred_normal_df.to_csv(path_to_normal_file, index=False)
processed_raw_test_dataset = load_dataset('csv', data_files={"test": path_to_not_normal_file})
test_dataloader = DataLoader(processed_raw_test_dataset["test"], batch_size=1, num_workers=0)
predictions = []
for batch in tqdm(test_dataloader):
translated = model.generate(**tokenizer(batch['target_text'], return_tensors="pt", padding=True))
predictions.extend([tokenizer.decode(t, skip_special_tokens=True) for t in translated])
pred_not_normal_df["target_text"] = predictions
processed_pred_df = pd.concat([pred_normal_df, pred_not_normal_df]).sort_index()
shutil.rmtree(path_to_temp_folder)
processed_translated = processed_pred_df["target_text"].to_list()
save_2_text(processed_translated, path_to_translated_file)
def process_transcripts_paraphrase(languages, process_code):
para_languages = list(language_2_para_model_mappings.keys())
for language in languages:
if language not in para_languages:
continue
current_directory = os.getcwd()
path_to_output_folder = os.path.join(current_directory, "output")
path_to_transcript_folder = os.path.join(path_to_output_folder, 'processed-transcripts')
path_to_translated_transcripts_folder = os.path.join(path_to_output_folder, 'transcripts-translated')
path_to_transcripts_file = [os.path.join(path_to_transcript_folder, file) for file in os.listdir(path_to_transcript_folder) if file == f"{process_code}.txt"][0]
path_to_translated_file = [os.path.join(path_to_translated_transcripts_folder, file) for file in os.listdir(path_to_translated_transcripts_folder) if file == f"translated_{language}_{process_code}.txt"][0]
transcripts = read_minute(path_to_transcripts_file)
translated = read_minute(path_to_translated_file)
model_name = language_2_para_model_mappings[language]
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = MBartForConditionalGeneration.from_pretrained(model_name)
pred_df = pd.DataFrame({
'input_text': transcripts,
'target_text': translated
})
# check if mt_prediction -> input length ratio is normal
pred_df["is_normal"] = pred_df.progress_apply(
lambda row: check_for_verbosity(row['input_text'], row['target_text']),
axis=1
)
not_normal_seq_index = pred_df.index[pred_df['is_normal'] == True].to_list()
columns = ["input_text", "target_text"]
pred_normal_df = pred_df[~pred_df.index.isin(not_normal_seq_index)][columns]
pred_not_normal_df = pred_df[pred_df.index.isin(not_normal_seq_index)][columns]
# apply paraphrase prompt
pred_not_normal_df["target_text"] = pred_not_normal_df.progress_apply(
lambda row: append_paraphrase_prompt(row['input_text'], row['target_text']),
axis=1
)
# temp folder to store files
path_to_temp_folder = os.path.join(current_directory, f"temp_{process_code}")
if not os.path.exists(path_to_temp_folder):
os.makedirs(path_to_temp_folder)
path_to_not_normal_file = os.path.join(path_to_temp_folder, "test_not_normal.csv")
path_to_normal_file = os.path.join(path_to_temp_folder, "test_normal.csv")
pred_not_normal_df.to_csv(path_to_not_normal_file, index=False)
pred_normal_df.to_csv(path_to_normal_file, index=False)
processed_raw_test_dataset = load_dataset('csv', data_files={"test": path_to_not_normal_file})
test_dataloader = DataLoader(processed_raw_test_dataset["test"], batch_size=1, num_workers=0)
predictions = []
for batch in tqdm(test_dataloader):
translated = model.generate(**tokenizer(batch['target_text'], return_tensors="pt", padding=True))
predictions.extend([tokenizer.decode(t, skip_special_tokens=True) for t in translated])
pred_not_normal_df["target_text"] = predictions
processed_pred_df = pd.concat([pred_normal_df, pred_not_normal_df]).sort_index()
shutil.rmtree(path_to_temp_folder)
processed_translated = processed_pred_df["target_text"].to_list()
save_2_text(processed_translated, path_to_translated_file)
def generate_translated_document(languages, process_code, process_type="min"):
if process_type == "min":
# generate translation
process_translation(languages=languages, process_code=process_code)
# rephrase translation for isometric
process_paraphrase(languages=languages, process_code=process_code)
elif process_type == "trans":
# generate translation
process_transcripts_translation(languages=languages, process_code=process_code)
# rephrase translation for isometric
process_transcripts_paraphrase(languages=languages, process_code=process_code)
```
#### File: aakash0017/DeepCon-Server/mailing_module.py
```python
import random, string
import emails
import os
EMAIL_USER = os.getenv("EMAIL_USER")
EMAIL_USER_PASSWORD = os.getenv("EMAIL_USER_PASSWORD")
def generate_process_code():
x = ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits) for _ in range(16))
return x
def generate_mail(transcript_link, translated_link, minutes_link,user_name="user", process_code=""):
# process_code = generate_process_code()
SUBJECT = "[ALERT] DeepCon Processing Complete."
TEXT = f"""Hello {user_name}, We're glad that you've chosen DeepCon. your processing of request is completed and you can download files from our website. Please find the information below: \n
1. Your Request Number: {process_code}\n
2. Transcript Link: {transcript_link}\n
3. Minutes Link: {minutes_link}\n
4. Translated Minuted Link: {translated_link}"""
return SUBJECT, TEXT
def send_email(receivers_name, process_code, receiver_email, sender, minutes_link, translated_link, transcript_link):
print(receivers_name)
print(receiver_email)
print(sender)
subject, text = generate_mail(
transcript_link=transcript_link,
translated_link= translated_link,
minutes_link= minutes_link,
user_name = receivers_name,
process_code= process_code)
message = emails.html(
text=text,
subject=subject,
mail_from=sender,
)
r = message.send(
to=receiver_email,
smtp={
"host": "email-smtp.ap-south-1.amazonaws.com",
"port": 587,
"timeout": 5,
"user": EMAIL_USER,
"password": <PASSWORD>,
"tls": True,
}
)
return r
``` |
{
"source": "Aakash10399/ebBiller",
"score": 2
} |
#### File: Aakash10399/ebBiller/main.py
```python
from flask import Flask, render_template, request, url_for, flash, redirect, session, send_file
import pyodbc
app = Flask(__name__)
@app.route('/')
def index():
try:
id_arg = request.args.get('id',type=str)
mobile = request.args.get('mobile',type=str)
server = ''
database = ''
username = ''
password = ''
driver= '{ODBC Driver 13 for SQL Server}'
cnxn = pyodbc.connect('DRIVER='+driver+';PORT=1433;SERVER='+server+';PORT=1443;DATABASE='+database+';UID='+username+';PWD='+ password)
cursor = cnxn.cursor()
cursor.execute("SELECT * FROM billTable WHERE id="+id_arg)
row = cursor.fetchone()
GSTIN = str(row[1])
products = row[2].split(";")
sgst = row[3].split(";")
cgst = row[4].split(";")
price = row[5].split(";")
quantity = row[6].split(";")
mobile_sql = str(row[7])
prod_f = []
price_f = []
cgst_f = []
sgst_f = []
quantity_f = []
if mobile==mobile_sql:
for i in range(0,len(quantity)):
if float(quantity[i]) != 0:  # quantities are read from SQL as strings
prod_f.append(products[i])
cgst_f.append(cgst[i])
sgst_f.append(sgst[i])
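# line total = (unit price + CGST share + SGST share) * quantity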
temp = float(price[i]) + ((float(cgst[i])/100.0)*float(price[i])) + ((float(sgst[i])/100.0)*float(price[i]))
price_f.append(temp*float(quantity[i]))
quantity_f.append(quantity[i])
return render_template("index.html",len=len(prod_f),GSTIN=GSTIN,prod_f=prod_f,cgst_f=cgst_f,sgst_f=sgst_f,price_f=price_f,quantity_f=quantity_f)
else:
return "error"
except Exception as e:
return str(e)
if __name__ == '__main__':
app.run()
``` |
{
"source": "Aakash10399/simple-health-glucheck",
"score": 2
} |
#### File: kivy/lib/mtdev.py
```python
import os
from ctypes import cdll, Structure, c_ulong, c_int, c_ushort, \
c_void_p, pointer, POINTER, byref
# load library
if 'KIVY_DOC' not in os.environ:
libmtdev = cdll.LoadLibrary('libmtdev.so.1')
# from linux/input.h
MTDEV_CODE_SLOT = 0x2f # MT slot being modified
MTDEV_CODE_TOUCH_MAJOR = 0x30 # Major axis of touching ellipse
MTDEV_CODE_TOUCH_MINOR = 0x31 # Minor axis (omit if circular)
MTDEV_CODE_WIDTH_MAJOR = 0x32 # Major axis of approaching ellipse
MTDEV_CODE_WIDTH_MINOR = 0x33 # Minor axis (omit if circular)
MTDEV_CODE_ORIENTATION = 0x34 # Ellipse orientation
MTDEV_CODE_POSITION_X = 0x35 # Center X ellipse position
MTDEV_CODE_POSITION_Y = 0x36 # Center Y ellipse position
MTDEV_CODE_TOOL_TYPE = 0x37 # Type of touching device
MTDEV_CODE_BLOB_ID = 0x38 # Group a set of packets as a blob
MTDEV_CODE_TRACKING_ID = 0x39 # Unique ID of initiated contact
MTDEV_CODE_PRESSURE = 0x3a # Pressure on contact area
MTDEV_CODE_ABS_X = 0x00
MTDEV_CODE_ABS_Y = 0x01
MTDEV_CODE_ABS_Z = 0x02
MTDEV_CODE_BTN_DIGI = 0x140
MTDEV_CODE_BTN_TOOL_PEN = 0x140
MTDEV_CODE_BTN_TOOL_RUBBER = 0x141
MTDEV_CODE_BTN_TOOL_BRUSH = 0x142
MTDEV_CODE_BTN_TOOL_PENCIL = 0x143
MTDEV_CODE_BTN_TOOL_AIRBRUSH = 0x144
MTDEV_CODE_BTN_TOOL_FINGER = 0x145
MTDEV_CODE_BTN_TOOL_MOUSE = 0x146
MTDEV_CODE_BTN_TOOL_LENS = 0x147
MTDEV_CODE_BTN_TOUCH = 0x14a
MTDEV_CODE_BTN_STYLUS = 0x14b
MTDEV_CODE_BTN_STYLUS2 = 0x14c
MTDEV_CODE_BTN_TOOL_DOUBLETAP = 0x14d
MTDEV_CODE_BTN_TOOL_TRIPLETAP = 0x14e
MTDEV_CODE_BTN_TOOL_QUADTAP = 0x14f # Four fingers on trackpad
MTDEV_TYPE_EV_ABS = 0x03
MTDEV_TYPE_EV_SYN = 0x00
MTDEV_TYPE_EV_KEY = 0x01
MTDEV_TYPE_EV_REL = 0x02
MTDEV_TYPE_EV_ABS = 0x03
MTDEV_TYPE_EV_MSC = 0x04
MTDEV_TYPE_EV_SW = 0x05
MTDEV_TYPE_EV_LED = 0x11
MTDEV_TYPE_EV_SND = 0x12
MTDEV_TYPE_EV_REP = 0x14
MTDEV_TYPE_EV_FF = 0x15
MTDEV_TYPE_EV_PWR = 0x16
MTDEV_TYPE_EV_FF_STATUS = 0x17
MTDEV_ABS_TRACKING_ID = 9
MTDEV_ABS_POSITION_X = 5
MTDEV_ABS_POSITION_Y = 6
MTDEV_ABS_TOUCH_MAJOR = 0
MTDEV_ABS_TOUCH_MINOR = 1
MTDEV_ABS_WIDTH_MAJOR = 2
MTDEV_ABS_WIDTH_MINOR = 3
MTDEV_ABS_ORIENTATION = 4
MTDEV_ABS_SIZE = 11
class timeval(Structure):
_fields_ = [
('tv_sec', c_ulong),
('tv_usec', c_ulong)
]
class input_event(Structure):
_fields_ = [
('time', timeval),
('type', c_ushort),
('code', c_ushort),
('value', c_int)
]
class input_absinfo(Structure):
_fields_ = [
('value', c_int),
('minimum', c_int),
('maximum', c_int),
('fuzz', c_int),
('flat', c_int),
('resolution', c_int)
]
class mtdev_caps(Structure):
_fields_ = [
('has_mtdata', c_int),
('has_slot', c_int),
('has_abs', c_int * MTDEV_ABS_SIZE),
('slot', input_absinfo),
('abs', input_absinfo * MTDEV_ABS_SIZE)
]
class mtdev(Structure):
_fields_ = [
('caps', mtdev_caps),
('state', c_void_p)
]
# binding
if 'KIVY_DOC' not in os.environ:
mtdev_open = libmtdev.mtdev_open
mtdev_open.argtypes = [POINTER(mtdev), c_int]
mtdev_get = libmtdev.mtdev_get
mtdev_get.argtypes = [POINTER(mtdev), c_int, POINTER(input_event), c_int]
mtdev_idle = libmtdev.mtdev_idle
mtdev_idle.argtypes = [POINTER(mtdev), c_int, c_int]
mtdev_close = libmtdev.mtdev_close
mtdev_close.argtypes = [POINTER(mtdev)]
class Device:
def __init__(self, filename):
self._filename = filename
self._fd = -1
self._device = mtdev()
self._fd = os.open(filename, os.O_NONBLOCK | os.O_RDONLY)
ret = mtdev_open(pointer(self._device), self._fd)
if ret != 0:
os.close(self._fd)
self._fd = -1
raise Exception('Unable to open device')
def close(self):
'''Close the mtdev converter
'''
if self._fd == -1:
return
mtdev_close(POINTER(self._device))
os.close(self._fd)
self._fd = -1
def idle(self, ms):
'''Check state of kernel device
:Parameters:
`ms`: int
Number of milliseconds to wait for activity
:Return:
Return True if the device is idle, i.e., there are no fetched events
in the pipe and there is nothing to fetch from the device.
'''
if self._fd == -1:
raise Exception('Device closed')
return bool(mtdev_idle(pointer(self._device), self._fd, ms))
def get(self):
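# Fetch one converted multitouch event from the mtdev state machine;
# returns None when no event is currently available to read.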
if self._fd == -1:
raise Exception('Device closed')
ev = input_event()
if mtdev_get(pointer(self._device), self._fd, byref(ev), 1) <= 0:
return None
return ev
def has_mtdata(self):
'''Return True if the device has multitouch data.
'''
if self._fd == -1:
raise Exception('Device closed')
return bool(self._device.caps.has_mtdata)
def has_slot(self):
'''Return True if the device has slot information.
'''
if self._fd == -1:
raise Exception('Device closed')
return bool(self._device.caps.has_slot)
def has_abs(self, index):
'''Return True if the device has abs data.
:Parameters:
`index`: int
One of const starting with a name ABS_MT_
'''
if self._fd == -1:
raise Exception('Device closed')
if index < 0 or index >= MTDEV_ABS_SIZE:
raise IndexError('Invalid index')
return bool(self._device.caps.has_abs[index])
def get_max_abs(self):
'''Return the maximum number of abs information available.
'''
return MTDEV_ABS_SIZE
def get_slot(self):
'''Return the slot data.
'''
if self._fd == -1:
raise Exception('Device closed')
if self._device.caps.has_slot == 0:
return
return self._device.caps.slot
def get_abs(self, index):
'''Return the abs data.
:Parameters:
`index`: int
One of const starting with a name ABS_MT_
'''
if self._fd == -1:
raise Exception('Device closed')
if index < 0 or index >= MTDEV_ABS_SIZE:
raise IndexError('Invalid index')
return self._device.caps.abs[index]
```
#### File: kivy/storage/jsonstore.py
```python
__all__ = ('JsonStore', )
from os.path import exists
from kivy.compat import iteritems
from kivy.storage import AbstractStore
from json import loads, dump
class JsonStore(AbstractStore):
'''Store implementation using a json file for storing the key-value pairs.
See the :mod:`kivy.storage` module documentation for more information.
'''
def __init__(self, filename, indent=None, sort_keys=False, **kwargs):
self.filename = filename
self.indent = indent
self.sort_keys = sort_keys
self._data = {}
self._is_changed = True
super(JsonStore, self).__init__(**kwargs)
def store_load(self):
if not exists(self.filename):
return
with open(self.filename) as fd:
data = fd.read()
if len(data) == 0:
return
self._data = loads(data)
def store_sync(self):
if not self._is_changed:
return
with open(self.filename, 'w') as fd:
dump(self._data, fd, indent=self.indent, sort_keys=self.sort_keys)
self._is_changed = False
def store_exists(self, key):
return key in self._data
def store_get(self, key):
return self._data[key]
def store_put(self, key, value):
self._data[key] = value
self._is_changed = True
return True
def store_delete(self, key):
del self._data[key]
self._is_changed = True
return True
def store_find(self, filters):
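# yield (key, entry) pairs whose entry contains every filter key with an equal value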
for key, values in iteritems(self._data):
found = True
for fkey, fvalue in iteritems(filters):
if fkey not in values:
found = False
break
if values[fkey] != fvalue:
found = False
break
if found:
yield key, values
def store_count(self):
return len(self._data)
def store_keys(self):
return self._data.keys()
```
#### File: uix/behaviors/drag.py
```python
__all__ = ('DragBehavior', )
from kivy.clock import Clock
from kivy.properties import NumericProperty, ReferenceListProperty
from kivy.config import Config
from kivy.metrics import sp
from functools import partial
# When we are generating documentation, Config doesn't exist
_scroll_timeout = _scroll_distance = 0
if Config:
_scroll_timeout = Config.getint('widgets', 'scroll_timeout')
_scroll_distance = Config.getint('widgets', 'scroll_distance')
class DragBehavior(object):
'''
The DragBehavior `mixin <https://en.wikipedia.org/wiki/Mixin>`_ provides
Drag behavior. When combined with a widget, dragging in the rectangle
defined by :attr:`drag_rectangle` will drag the widget. Please see
the :mod:`drag behaviors module <kivy.uix.behaviors.drag>` documentation
for more information.
.. versionadded:: 1.8.0
'''
drag_distance = NumericProperty(_scroll_distance)
'''Distance to move before dragging the :class:`DragBehavior`, in pixels.
As soon as the distance has been traveled, the :class:`DragBehavior` will
start to drag, and no touch event will be dispatched to the children.
It is advisable that you base this value on the dpi of your target device's
screen.
:attr:`drag_distance` is a :class:`~kivy.properties.NumericProperty` and
defaults to the `scroll_distance` as defined in the user
:class:`~kivy.config.Config` (20 pixels by default).
'''
drag_timeout = NumericProperty(_scroll_timeout)
'''Timeout allowed to trigger the :attr:`drag_distance`, in milliseconds.
If the user has not moved :attr:`drag_distance` within the timeout,
dragging will be disabled, and the touch event will be dispatched to the
children.
:attr:`drag_timeout` is a :class:`~kivy.properties.NumericProperty` and
defaults to the `scroll_timeout` as defined in the user
:class:`~kivy.config.Config` (55 milliseconds by default).
'''
drag_rect_x = NumericProperty(0)
'''X position of the axis aligned bounding rectangle where dragging
is allowed (in window coordinates).
:attr:`drag_rect_x` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0.
'''
drag_rect_y = NumericProperty(0)
'''Y position of the axis aligned bounding rectangle where dragging
is allowed (in window coordinates).
:attr:`drag_rect_Y` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0.
'''
drag_rect_width = NumericProperty(100)
'''Width of the axis aligned bounding rectangle where dragging is allowed.
:attr:`drag_rect_width` is a :class:`~kivy.properties.NumericProperty` and
defaults to 100.
'''
drag_rect_height = NumericProperty(100)
'''Height of the axis aligned bounding rectangle where dragging is allowed.
:attr:`drag_rect_height` is a :class:`~kivy.properties.NumericProperty` and
defaults to 100.
'''
drag_rectangle = ReferenceListProperty(drag_rect_x, drag_rect_y,
drag_rect_width, drag_rect_height)
'''Position and size of the axis aligned bounding rectangle where dragging
is allowed.
:attr:`drag_rectangle` is a :class:`~kivy.properties.ReferenceListProperty`
of (:attr:`drag_rect_x`, :attr:`drag_rect_y`, :attr:`drag_rect_width`,
:attr:`drag_rect_height`) properties.
'''
def __init__(self, **kwargs):
self._drag_touch = None
super(DragBehavior, self).__init__(**kwargs)
def _get_uid(self, prefix='sv'):
return '{0}.{1}'.format(prefix, self.uid)
def on_touch_down(self, touch):
xx, yy, w, h = self.drag_rectangle
x, y = touch.pos
if not self.collide_point(x, y):
touch.ud[self._get_uid('svavoid')] = True
return super(DragBehavior, self).on_touch_down(touch)
if self._drag_touch or ('button' in touch.profile and
touch.button.startswith('scroll')) or\
not ((xx < x <= xx + w) and (yy < y <= yy + h)):
return super(DragBehavior, self).on_touch_down(touch)
# no mouse scrolling, so the user is going to drag with this touch.
self._drag_touch = touch
uid = self._get_uid()
touch.grab(self)
touch.ud[uid] = {
'mode': 'unknown',
'dx': 0,
'dy': 0}
Clock.schedule_once(self._change_touch_mode,
self.drag_timeout / 1000.)
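# if the touch has not travelled drag_distance within drag_timeout ms,
# _change_touch_mode hands the touch back to the child widgets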
return True
def on_touch_move(self, touch):
if self._get_uid('svavoid') in touch.ud or\
self._drag_touch is not touch:
return super(DragBehavior, self).on_touch_move(touch) or\
self._get_uid() in touch.ud
if touch.grab_current is not self:
return True
uid = self._get_uid()
ud = touch.ud[uid]
mode = ud['mode']
if mode == 'unknown':
ud['dx'] += abs(touch.dx)
ud['dy'] += abs(touch.dy)
if ud['dx'] > sp(self.drag_distance):
mode = 'drag'
if ud['dy'] > sp(self.drag_distance):
mode = 'drag'
ud['mode'] = mode
if mode == 'drag':
self.x += touch.dx
self.y += touch.dy
return True
def on_touch_up(self, touch):
if self._get_uid('svavoid') in touch.ud:
return super(DragBehavior, self).on_touch_up(touch)
if self._drag_touch and self in [x() for x in touch.grab_list]:
touch.ungrab(self)
self._drag_touch = None
ud = touch.ud[self._get_uid()]
if ud['mode'] == 'unknown':
super(DragBehavior, self).on_touch_down(touch)
Clock.schedule_once(partial(self._do_touch_up, touch), .1)
else:
if self._drag_touch is not touch:
super(DragBehavior, self).on_touch_up(touch)
return self._get_uid() in touch.ud
def _do_touch_up(self, touch, *largs):
super(DragBehavior, self).on_touch_up(touch)
# don't forget about grab event!
for x in touch.grab_list[:]:
touch.grab_list.remove(x)
x = x()
if not x:
continue
touch.grab_current = x
super(DragBehavior, self).on_touch_up(touch)
touch.grab_current = None
def _change_touch_mode(self, *largs):
if not self._drag_touch:
return
uid = self._get_uid()
touch = self._drag_touch
ud = touch.ud[uid]
if ud['mode'] != 'unknown':
return
touch.ungrab(self)
self._drag_touch = None
super(DragBehavior, self).on_touch_down(touch)
return
```
#### File: core/tests/test_arrayprint.py
```python
from __future__ import division, absolute_import, print_function
import sys
import numpy as np
from numpy.testing import (
run_module_suite, assert_, assert_equal, assert_raises, assert_warns
)
import textwrap
class TestArrayRepr(object):
def test_nan_inf(self):
x = np.array([np.nan, np.inf])
assert_equal(repr(x), 'array([nan, inf])')
def test_subclass(self):
class sub(np.ndarray): pass
# one dimensional
x1d = np.array([1, 2]).view(sub)
assert_equal(repr(x1d), 'sub([1, 2])')
# two dimensional
x2d = np.array([[1, 2], [3, 4]]).view(sub)
assert_equal(repr(x2d),
'sub([[1, 2],\n'
' [3, 4]])')
# two dimensional with flexible dtype
xstruct = np.ones((2,2), dtype=[('a', 'i4')]).view(sub)
assert_equal(repr(xstruct),
"sub([[(1,), (1,)],\n"
" [(1,), (1,)]], dtype=[('a', '<i4')])"
)
def test_self_containing(self):
arr0d = np.array(None)
arr0d[()] = arr0d
assert_equal(repr(arr0d),
'array(array(..., dtype=object), dtype=object)')
arr1d = np.array([None, None])
arr1d[1] = arr1d
assert_equal(repr(arr1d),
'array([None, array(..., dtype=object)], dtype=object)')
first = np.array(None)
second = np.array(None)
first[()] = second
second[()] = first
assert_equal(repr(first),
'array(array(array(..., dtype=object), dtype=object), dtype=object)')
def test_containing_list(self):
# printing square brackets directly would be ambiguous
arr1d = np.array([None, None])
arr1d[0] = [1, 2]
arr1d[1] = [3]
assert_equal(repr(arr1d),
'array([list([1, 2]), list([3])], dtype=object)')
def test_void_scalar_recursion(self):
# gh-9345
repr(np.void(b'test')) # RecursionError ?
class TestComplexArray(object):
def test_str(self):
rvals = [0, 1, -1, np.inf, -np.inf, np.nan]
cvals = [complex(rp, ip) for rp in rvals for ip in rvals]
dtypes = [np.complex64, np.cdouble, np.clongdouble]
actual = [str(np.array([c], dt)) for c in cvals for dt in dtypes]
wanted = [
'[0.+0.j]', '[0.+0.j]', '[0.+0.j]',
'[0.+1.j]', '[0.+1.j]', '[0.+1.j]',
'[0.-1.j]', '[0.-1.j]', '[0.-1.j]',
'[0.+infj]', '[0.+infj]', '[0.+infj]',
'[0.-infj]', '[0.-infj]', '[0.-infj]',
'[0.+nanj]', '[0.+nanj]', '[0.+nanj]',
'[1.+0.j]', '[1.+0.j]', '[1.+0.j]',
'[1.+1.j]', '[1.+1.j]', '[1.+1.j]',
'[1.-1.j]', '[1.-1.j]', '[1.-1.j]',
'[1.+infj]', '[1.+infj]', '[1.+infj]',
'[1.-infj]', '[1.-infj]', '[1.-infj]',
'[1.+nanj]', '[1.+nanj]', '[1.+nanj]',
'[-1.+0.j]', '[-1.+0.j]', '[-1.+0.j]',
'[-1.+1.j]', '[-1.+1.j]', '[-1.+1.j]',
'[-1.-1.j]', '[-1.-1.j]', '[-1.-1.j]',
'[-1.+infj]', '[-1.+infj]', '[-1.+infj]',
'[-1.-infj]', '[-1.-infj]', '[-1.-infj]',
'[-1.+nanj]', '[-1.+nanj]', '[-1.+nanj]',
'[inf+0.j]', '[inf+0.j]', '[inf+0.j]',
'[inf+1.j]', '[inf+1.j]', '[inf+1.j]',
'[inf-1.j]', '[inf-1.j]', '[inf-1.j]',
'[inf+infj]', '[inf+infj]', '[inf+infj]',
'[inf-infj]', '[inf-infj]', '[inf-infj]',
'[inf+nanj]', '[inf+nanj]', '[inf+nanj]',
'[-inf+0.j]', '[-inf+0.j]', '[-inf+0.j]',
'[-inf+1.j]', '[-inf+1.j]', '[-inf+1.j]',
'[-inf-1.j]', '[-inf-1.j]', '[-inf-1.j]',
'[-inf+infj]', '[-inf+infj]', '[-inf+infj]',
'[-inf-infj]', '[-inf-infj]', '[-inf-infj]',
'[-inf+nanj]', '[-inf+nanj]', '[-inf+nanj]',
'[nan+0.j]', '[nan+0.j]', '[nan+0.j]',
'[nan+1.j]', '[nan+1.j]', '[nan+1.j]',
'[nan-1.j]', '[nan-1.j]', '[nan-1.j]',
'[nan+infj]', '[nan+infj]', '[nan+infj]',
'[nan-infj]', '[nan-infj]', '[nan-infj]',
'[nan+nanj]', '[nan+nanj]', '[nan+nanj]']
for res, val in zip(actual, wanted):
assert_equal(res, val)
class TestArray2String(object):
def test_basic(self):
"""Basic test of array2string."""
a = np.arange(3)
assert_(np.array2string(a) == '[0 1 2]')
assert_(np.array2string(a, max_line_width=4, legacy='1.13') == '[0 1\n 2]')
assert_(np.array2string(a, max_line_width=4) == '[0\n 1\n 2]')
def test_format_function(self):
"""Test custom format function for each element in array."""
def _format_function(x):
if np.abs(x) < 1:
return '.'
elif np.abs(x) < 2:
return 'o'
else:
return 'O'
x = np.arange(3)
if sys.version_info[0] >= 3:
x_hex = "[0x0 0x1 0x2]"
x_oct = "[0o0 0o1 0o2]"
else:
x_hex = "[0x0L 0x1L 0x2L]"
x_oct = "[0L 01L 02L]"
assert_(np.array2string(x, formatter={'all':_format_function}) ==
"[. o O]")
assert_(np.array2string(x, formatter={'int_kind':_format_function}) ==
"[. o O]")
assert_(np.array2string(x, formatter={'all':lambda x: "%.4f" % x}) ==
"[0.0000 1.0000 2.0000]")
assert_equal(np.array2string(x, formatter={'int':lambda x: hex(x)}),
x_hex)
assert_equal(np.array2string(x, formatter={'int':lambda x: oct(x)}),
x_oct)
x = np.arange(3.)
assert_(np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) ==
"[0.00 1.00 2.00]")
assert_(np.array2string(x, formatter={'float':lambda x: "%.2f" % x}) ==
"[0.00 1.00 2.00]")
s = np.array(['abc', 'def'])
assert_(np.array2string(s, formatter={'numpystr':lambda s: s*2}) ==
'[abcabc defdef]')
# check for backcompat that using FloatFormat works and emits warning
with assert_warns(DeprecationWarning):
fmt = np.core.arrayprint.FloatFormat(x, 9, 'maxprec', False)
assert_equal(np.array2string(x, formatter={'float_kind': fmt}),
'[0. 1. 2.]')
def test_structure_format(self):
dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt)
assert_equal(np.array2string(x),
"[('Sarah', [8., 7.]) ('John', [6., 7.])]")
np.set_printoptions(legacy='1.13')
try:
# for issue #5692
A = np.zeros(shape=10, dtype=[("A", "M8[s]")])
A[5:].fill(np.datetime64('NaT'))
assert_equal(
np.array2string(A),
textwrap.dedent("""\
[('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('NaT',) ('NaT',)
('NaT',) ('NaT',) ('NaT',)]""")
)
finally:
np.set_printoptions(legacy=False)
# same again, but with non-legacy behavior
assert_equal(
np.array2string(A),
textwrap.dedent("""\
[('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
('1970-01-01T00:00:00',) ( 'NaT',)
( 'NaT',) ( 'NaT',)
( 'NaT',) ( 'NaT',)]""")
)
# and again, with timedeltas
A = np.full(10, 123456, dtype=[("A", "m8[s]")])
A[5:].fill(np.datetime64('NaT'))
assert_equal(
np.array2string(A),
textwrap.dedent("""\
[(123456,) (123456,) (123456,) (123456,) (123456,) ( 'NaT',) ( 'NaT',)
( 'NaT',) ( 'NaT',) ( 'NaT',)]""")
)
# See #8160
struct_int = np.array([([1, -1],), ([123, 1],)], dtype=[('B', 'i4', 2)])
assert_equal(np.array2string(struct_int),
"[([ 1, -1],) ([123, 1],)]")
struct_2dint = np.array([([[0, 1], [2, 3]],), ([[12, 0], [0, 0]],)],
dtype=[('B', 'i4', (2, 2))])
assert_equal(np.array2string(struct_2dint),
"[([[ 0, 1], [ 2, 3]],) ([[12, 0], [ 0, 0]],)]")
# See #8172
array_scalar = np.array(
(1., 2.1234567890123456789, 3.), dtype=('f8,f8,f8'))
assert_equal(np.array2string(array_scalar), "(1., 2.12345679, 3.)")
def test_unstructured_void_repr(self):
a = np.array([27, 91, 50, 75, 7, 65, 10, 8,
27, 91, 51, 49,109, 82,101,100], dtype='u1').view('V8')
assert_equal(repr(a[0]), r"void(b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08')")
assert_equal(str(a[0]), r"b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'")
assert_equal(repr(a),
r"array([b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'," "\n"
r" b'\x1B\x5B\x33\x31\x6D\x52\x65\x64'], dtype='|V8')")
assert_equal(eval(repr(a), vars(np)), a)
assert_equal(eval(repr(a[0]), vars(np)), a[0])
def test_edgeitems_kwarg(self):
# previously the global print options would take precedence over the kwarg
arr = np.zeros(3, int)
assert_equal(
np.array2string(arr, edgeitems=1, threshold=0),
"[0 ... 0]"
)
def test_summarize_1d(self):
A = np.arange(1001)
strA = '[ 0 1 2 ... 998 999 1000]'
assert_equal(str(A), strA)
reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])'
assert_equal(repr(A), reprA)
def test_summarize_2d(self):
A = np.arange(1002).reshape(2, 501)
strA = '[[ 0 1 2 ... 498 499 500]\n' \
' [ 501 502 503 ... 999 1000 1001]]'
assert_equal(str(A), strA)
reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \
' [ 501, 502, 503, ..., 999, 1000, 1001]])'
assert_equal(repr(A), reprA)
def test_linewidth(self):
a = np.full(6, 1)
def make_str(a, width, **kw):
return np.array2string(a, separator="", max_line_width=width, **kw)
assert_equal(make_str(a, 8, legacy='1.13'), '[111111]')
assert_equal(make_str(a, 7, legacy='1.13'), '[111111]')
assert_equal(make_str(a, 5, legacy='1.13'), '[1111\n'
' 11]')
assert_equal(make_str(a, 8), '[111111]')
assert_equal(make_str(a, 7), '[11111\n'
' 1]')
assert_equal(make_str(a, 5), '[111\n'
' 111]')
b = a[None,None,:]
assert_equal(make_str(b, 12, legacy='1.13'), '[[[111111]]]')
assert_equal(make_str(b, 9, legacy='1.13'), '[[[111111]]]')
assert_equal(make_str(b, 8, legacy='1.13'), '[[[11111\n'
' 1]]]')
assert_equal(make_str(b, 12), '[[[111111]]]')
assert_equal(make_str(b, 9), '[[[111\n'
' 111]]]')
assert_equal(make_str(b, 8), '[[[11\n'
' 11\n'
' 11]]]')
def test_wide_element(self):
a = np.array(['xxxxx'])
assert_equal(
np.array2string(a, max_line_width=5),
"['xxxxx']"
)
assert_equal(
np.array2string(a, max_line_width=5, legacy='1.13'),
"[ 'xxxxx']"
)
class TestPrintOptions(object):
"""Test getting and setting global print options."""
def setup(self):
self.oldopts = np.get_printoptions()
def teardown(self):
np.set_printoptions(**self.oldopts)
def test_basic(self):
x = np.array([1.5, 0, 1.234567890])
assert_equal(repr(x), "array([1.5 , 0. , 1.23456789])")
np.set_printoptions(precision=4)
assert_equal(repr(x), "array([1.5 , 0. , 1.2346])")
def test_precision_zero(self):
np.set_printoptions(precision=0)
for values, string in (
([0.], "0."), ([.3], "0."), ([-.3], "-0."), ([.7], "1."),
([1.5], "2."), ([-1.5], "-2."), ([-15.34], "-15."),
([100.], "100."), ([.2, -1, 122.51], " 0., -1., 123."),
([0], "0"), ([-12], "-12"), ([complex(.3, -.7)], "0.-1.j")):
x = np.array(values)
assert_equal(repr(x), "array([%s])" % string)
def test_formatter(self):
x = np.arange(3)
np.set_printoptions(formatter={'all':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1, 0, 1])")
def test_formatter_reset(self):
x = np.arange(3)
np.set_printoptions(formatter={'all':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1, 0, 1])")
np.set_printoptions(formatter={'int':None})
assert_equal(repr(x), "array([0, 1, 2])")
np.set_printoptions(formatter={'all':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1, 0, 1])")
np.set_printoptions(formatter={'all':None})
assert_equal(repr(x), "array([0, 1, 2])")
np.set_printoptions(formatter={'int':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1, 0, 1])")
np.set_printoptions(formatter={'int_kind':None})
assert_equal(repr(x), "array([0, 1, 2])")
x = np.arange(3.)
np.set_printoptions(formatter={'float':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1.0, 0.0, 1.0])")
np.set_printoptions(formatter={'float_kind':None})
assert_equal(repr(x), "array([0., 1., 2.])")
def test_0d_arrays(self):
unicode = type(u'')
assert_equal(unicode(np.array(u'café', np.unicode_)), u'café')
if sys.version_info[0] >= 3:
assert_equal(repr(np.array('café', np.unicode_)),
"array('café', dtype='<U4')")
else:
assert_equal(repr(np.array(u'café', np.unicode_)),
"array(u'caf\\xe9', dtype='<U4')")
assert_equal(str(np.array('test', np.str_)), 'test')
a = np.zeros(1, dtype=[('a', '<i4', (3,))])
assert_equal(str(a[0]), '([0, 0, 0],)')
assert_equal(repr(np.datetime64('2005-02-25')[...]),
"array('2005-02-25', dtype='datetime64[D]')")
assert_equal(repr(np.timedelta64('10', 'Y')[...]),
"array(10, dtype='timedelta64[Y]')")
# repr of 0d arrays is affected by printoptions
x = np.array(1)
np.set_printoptions(formatter={'all':lambda x: "test"})
assert_equal(repr(x), "array(test)")
# str is unaffected
assert_equal(str(x), "1")
# check `style` arg raises
assert_warns(DeprecationWarning, np.array2string,
np.array(1.), style=repr)
# but not in legacy mode
np.array2string(np.array(1.), style=repr, legacy='1.13')
def test_float_spacing(self):
x = np.array([1., 2., 3.])
y = np.array([1., 2., -10.])
z = np.array([100., 2., -1.])
w = np.array([-100., 2., 1.])
assert_equal(repr(x), 'array([1., 2., 3.])')
assert_equal(repr(y), 'array([ 1., 2., -10.])')
assert_equal(repr(np.array(y[0])), 'array(1.)')
assert_equal(repr(np.array(y[-1])), 'array(-10.)')
assert_equal(repr(z), 'array([100., 2., -1.])')
assert_equal(repr(w), 'array([-100., 2., 1.])')
assert_equal(repr(np.array([np.nan, np.inf])), 'array([nan, inf])')
assert_equal(repr(np.array([np.nan, -np.inf])), 'array([ nan, -inf])')
x = np.array([np.inf, 100000, 1.1234])
y = np.array([np.inf, 100000, -1.1234])
z = np.array([np.inf, 1.1234, -1e120])
np.set_printoptions(precision=2)
assert_equal(repr(x), 'array([ inf, 1.00e+05, 1.12e+00])')
assert_equal(repr(y), 'array([ inf, 1.00e+05, -1.12e+00])')
assert_equal(repr(z), 'array([ inf, 1.12e+000, -1.00e+120])')
def test_bool_spacing(self):
assert_equal(repr(np.array([True, True])),
'array([ True, True])')
assert_equal(repr(np.array([True, False])),
'array([ True, False])')
assert_equal(repr(np.array([True])),
'array([ True])')
assert_equal(repr(np.array(True)),
'array(True)')
assert_equal(repr(np.array(False)),
'array(False)')
def test_sign_spacing(self):
a = np.arange(4.)
b = np.array([1.234e9])
assert_equal(repr(a), 'array([0., 1., 2., 3.])')
assert_equal(repr(np.array(1.)), 'array(1.)')
assert_equal(repr(b), 'array([1.234e+09])')
assert_equal(repr(np.array([0.])), 'array([0.])')
np.set_printoptions(sign=' ')
assert_equal(repr(a), 'array([ 0., 1., 2., 3.])')
assert_equal(repr(np.array(1.)), 'array( 1.)')
assert_equal(repr(b), 'array([ 1.234e+09])')
np.set_printoptions(sign='+')
assert_equal(repr(a), 'array([+0., +1., +2., +3.])')
assert_equal(repr(np.array(1.)), 'array(+1.)')
assert_equal(repr(b), 'array([+1.234e+09])')
np.set_printoptions(legacy='1.13')
assert_equal(repr(a), 'array([ 0., 1., 2., 3.])')
assert_equal(repr(b), 'array([ 1.23400000e+09])')
assert_equal(repr(-b), 'array([ -1.23400000e+09])')
assert_equal(repr(np.array(1.)), 'array(1.0)')
assert_equal(repr(np.array([0.])), 'array([ 0.])')
assert_raises(TypeError, np.set_printoptions, wrongarg=True)
def test_float_overflow_nowarn(self):
# make sure internal computations in FloatingFormat don't
# warn about overflow
repr(np.array([1e4, 0.1], dtype='f2'))
def test_sign_spacing_structured(self):
a = np.ones(2, dtype='f,f')
assert_equal(repr(a),
"array([(1., 1.), (1., 1.)], dtype=[('f0', '<f4'), ('f1', '<f4')])")
assert_equal(repr(a[0]), "(1., 1.)")
def test_floatmode(self):
x = np.array([0.6104, 0.922, 0.457, 0.0906, 0.3733, 0.007244,
0.5933, 0.947, 0.2383, 0.4226], dtype=np.float16)
y = np.array([0.2918820979355541, 0.5064172631089138,
0.2848750619642916, 0.4342965294660567,
0.7326538397312751, 0.3459503329096204,
0.0862072768214508, 0.39112753029631175],
dtype=np.float64)
z = np.arange(6, dtype=np.float16)/10
# also make sure 1e23 prints correctly (its value falls between two representable fp numbers)
w = np.array(['1e{}'.format(i) for i in range(25)], dtype=np.float64)
# note: we construct w from the strings `1eXX` instead of doing
# `10.**arange(24)` because it turns out the two are not equivalent in
# python. On some architectures `1e23 != 10.**23`.
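# (illustrative, platform-dependent: where the two differ it is typically by a
# single ulp, which is enough to change the printed value)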
wp = np.array([1.234e1, 1e2, 1e123])
# unique mode
np.set_printoptions(floatmode='unique')
assert_equal(repr(x),
"array([0.6104 , 0.922 , 0.457 , 0.0906 , 0.3733 , 0.007244,\n"
" 0.5933 , 0.947 , 0.2383 , 0.4226 ], dtype=float16)")
assert_equal(repr(y),
"array([0.2918820979355541 , 0.5064172631089138 , 0.2848750619642916 ,\n"
" 0.4342965294660567 , 0.7326538397312751 , 0.3459503329096204 ,\n"
" 0.0862072768214508 , 0.39112753029631175])")
assert_equal(repr(z),
"array([0. , 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)")
assert_equal(repr(w),
"array([1.e+00, 1.e+01, 1.e+02, 1.e+03, 1.e+04, 1.e+05, 1.e+06, 1.e+07,\n"
" 1.e+08, 1.e+09, 1.e+10, 1.e+11, 1.e+12, 1.e+13, 1.e+14, 1.e+15,\n"
" 1.e+16, 1.e+17, 1.e+18, 1.e+19, 1.e+20, 1.e+21, 1.e+22, 1.e+23,\n"
" 1.e+24])")
assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
# maxprec mode, precision=8
np.set_printoptions(floatmode='maxprec', precision=8)
assert_equal(repr(x),
"array([0.6104 , 0.922 , 0.457 , 0.0906 , 0.3733 , 0.007244,\n"
" 0.5933 , 0.947 , 0.2383 , 0.4226 ], dtype=float16)")
assert_equal(repr(y),
"array([0.2918821 , 0.50641726, 0.28487506, 0.43429653, 0.73265384,\n"
" 0.34595033, 0.08620728, 0.39112753])")
assert_equal(repr(z),
"array([0. , 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)")
assert_equal(repr(w[::5]),
"array([1.e+00, 1.e+05, 1.e+10, 1.e+15, 1.e+20])")
assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
# fixed mode, precision=4
np.set_printoptions(floatmode='fixed', precision=4)
assert_equal(repr(x),
"array([0.6104, 0.9219, 0.4570, 0.0906, 0.3733, 0.0072, 0.5933, 0.9468,\n"
" 0.2383, 0.4226], dtype=float16)")
assert_equal(repr(y),
"array([0.2919, 0.5064, 0.2849, 0.4343, 0.7327, 0.3460, 0.0862, 0.3911])")
assert_equal(repr(z),
"array([0.0000, 0.1000, 0.2000, 0.3000, 0.3999, 0.5000], dtype=float16)")
assert_equal(repr(w[::5]),
"array([1.0000e+00, 1.0000e+05, 1.0000e+10, 1.0000e+15, 1.0000e+20])")
assert_equal(repr(wp), "array([1.2340e+001, 1.0000e+002, 1.0000e+123])")
assert_equal(repr(np.zeros(3)), "array([0.0000, 0.0000, 0.0000])")
# for larger precision, representation error becomes more apparent:
np.set_printoptions(floatmode='fixed', precision=8)
assert_equal(repr(z),
"array([0.00000000, 0.09997559, 0.19995117, 0.30004883, 0.39990234,\n"
" 0.50000000], dtype=float16)")
# maxprec_equal mode, precision=8
np.set_printoptions(floatmode='maxprec_equal', precision=8)
assert_equal(repr(x),
"array([0.610352, 0.921875, 0.457031, 0.090576, 0.373291, 0.007244,\n"
" 0.593262, 0.946777, 0.238281, 0.422607], dtype=float16)")
assert_equal(repr(y),
"array([0.29188210, 0.50641726, 0.28487506, 0.43429653, 0.73265384,\n"
" 0.34595033, 0.08620728, 0.39112753])")
assert_equal(repr(z),
"array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)")
assert_equal(repr(w[::5]),
"array([1.e+00, 1.e+05, 1.e+10, 1.e+15, 1.e+20])")
assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
def test_legacy_mode_scalars(self):
# in legacy mode, str of floats get truncated, and complex scalars
# use * for non-finite imaginary part
np.set_printoptions(legacy='1.13')
assert_equal(str(np.float64(1.123456789123456789)), '1.12345678912')
assert_equal(str(np.complex128(complex(1, np.nan))), '(1+nan*j)')
np.set_printoptions(legacy=False)
assert_equal(str(np.float64(1.123456789123456789)),
'1.1234567891234568')
assert_equal(str(np.complex128(complex(1, np.nan))), '(1+nanj)')
def test_legacy_stray_comma(self):
np.set_printoptions(legacy='1.13')
assert_equal(str(np.arange(10000)), '[ 0 1 2 ..., 9997 9998 9999]')
np.set_printoptions(legacy=False)
assert_equal(str(np.arange(10000)), '[ 0 1 2 ... 9997 9998 9999]')
def test_dtype_linewidth_wrapping(self):
np.set_printoptions(linewidth=75)
assert_equal(repr(np.arange(10,20., dtype='f4')),
"array([10., 11., 12., 13., 14., 15., 16., 17., 18., 19.], dtype=float32)")
assert_equal(repr(np.arange(10,23., dtype='f4')), textwrap.dedent("""\
array([10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22.],
dtype=float32)"""))
styp = '<U4' if sys.version_info[0] >= 3 else '|S4'
assert_equal(repr(np.ones(3, dtype=styp)),
"array(['1', '1', '1'], dtype='{}')".format(styp))
assert_equal(repr(np.ones(12, dtype=styp)), textwrap.dedent("""\
array(['1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1'],
dtype='{}')""".format(styp)))
def test_linewidth_repr(self):
a = np.full(7, fill_value=2)
np.set_printoptions(linewidth=17)
assert_equal(
repr(a),
textwrap.dedent("""\
array([2, 2, 2,
2, 2, 2,
2])""")
)
np.set_printoptions(linewidth=17, legacy='1.13')
assert_equal(
repr(a),
textwrap.dedent("""\
array([2, 2, 2,
2, 2, 2, 2])""")
)
a = np.full(8, fill_value=2)
np.set_printoptions(linewidth=18, legacy=False)
assert_equal(
repr(a),
textwrap.dedent("""\
array([2, 2, 2,
2, 2, 2,
2, 2])""")
)
np.set_printoptions(linewidth=18, legacy='1.13')
assert_equal(
repr(a),
textwrap.dedent("""\
array([2, 2, 2, 2,
2, 2, 2, 2])""")
)
def test_linewidth_str(self):
a = np.full(18, fill_value=2)
np.set_printoptions(linewidth=18)
assert_equal(
str(a),
textwrap.dedent("""\
[2 2 2 2 2 2 2 2
2 2 2 2 2 2 2 2
2 2]""")
)
np.set_printoptions(linewidth=18, legacy='1.13')
assert_equal(
str(a),
textwrap.dedent("""\
[2 2 2 2 2 2 2 2 2
2 2 2 2 2 2 2 2 2]""")
)
def test_edgeitems(self):
np.set_printoptions(edgeitems=1, threshold=1)
a = np.arange(27).reshape((3, 3, 3))
assert_equal(
repr(a),
textwrap.dedent("""\
array([[[ 0, ..., 2],
...,
[ 6, ..., 8]],
...,
[[18, ..., 20],
...,
[24, ..., 26]]])""")
)
b = np.zeros((3, 3, 1, 1))
assert_equal(
repr(b),
textwrap.dedent("""\
array([[[[0.]],
...,
[[0.]]],
...,
[[[0.]],
...,
[[0.]]]])""")
)
# 1.13 had extra trailing spaces, and was missing newlines
np.set_printoptions(legacy='1.13')
assert_equal(
repr(a),
textwrap.dedent("""\
array([[[ 0, ..., 2],
...,
[ 6, ..., 8]],
...,
[[18, ..., 20],
...,
[24, ..., 26]]])""")
)
assert_equal(
repr(b),
textwrap.dedent("""\
array([[[[ 0.]],
...,
[[ 0.]]],
...,
[[[ 0.]],
...,
[[ 0.]]]])""")
)
def test_unicode_object_array():
import sys
if sys.version_info[0] >= 3:
expected = "array(['é'], dtype=object)"
else:
expected = "array([u'\\xe9'], dtype=object)"
x = np.array([u'\xe9'], dtype=object)
assert_equal(repr(x), expected)
if __name__ == "__main__":
run_module_suite()
```
#### File: Aakash10399/simple-health-glucheck/main.py
```python
import numpy as np
import cv2
import kivy
from kivy.app import App
from kivy.uix.label import Label
from kivy.core.window import Window
from kivy.config import Config
from kivy.uix.widget import Widget
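# The three weighting factors below are the Rec. 709 / sRGB relative-luminance
# coefficients; this script uses them to collapse mean (B, G, R) values into a
# single luminance value for each calibration image and for the captured frame.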
b_cfactor = 0.0722
g_cfactor = 0.7152
r_cfactor = 0.2126
b_0 = 0
g_0 = 0
r_0 = 0
img_0 = cv2.imread("0.jpg")
height,width,channels = img_0.shape
for i in range(0,height):
for j in range(0,width):
(b,g,r) = img_0[i,j]
b_0 = b_0 + b
g_0 = g_0 + g
r_0 = r_0 + r
b_0 = b_0/(height*width)
g_0 = g_0/(height*width)
r_0 = r_0/(height*width)
#print b_0
#print g_0
#print r_0
#print "*************************"
b_100 = 0
g_100 = 0
r_100 = 0
img_100 = cv2.imread("100.jpg")
height,width,channels = img_100.shape
for i in range(0,height):
for j in range(0,width):
(b,g,r) = img_100[i,j]
b_100 = b_100 + b
g_100 = g_100 + g
r_100 = r_100 + r
b_100 = b_100/(height*width)
g_100 = g_100/(height*width)
r_100 = r_100/(height*width)
#print b_100
#print g_100
#print r_100
#print "*************************"
b_300 = 0
g_300 = 0
r_300 = 0
img_300 = cv2.imread("300.jpg")
height,width,channels = img_300.shape
for i in range(0,height):
for j in range(0,width):
(b,g,r) = img_300[i,j]
b_300 = b_300 + b
g_300 = g_300 + g
r_300 = r_300 + r
b_300 = b_300/(height*width)
g_300 = g_300/(height*width)
r_300 = r_300/(height*width)
#print b_300
#print g_300
#print r_300
#print "*************************"
b_1000 = 0
g_1000 = 0
r_1000 = 0
img_1000 = cv2.imread("1000.jpg")
height,width,channels = img_1000.shape
for i in range(0,height):
for j in range(0,width):
(b,g,r) = img_1000[i,j]
b_1000 = b_1000 + b
g_1000 = g_1000 + g
r_1000 = r_1000 + r
b_1000 = b_1000/(height*width)
g_1000 = g_1000/(height*width)
r_1000 = r_1000/(height*width)
#print b_1000
#print g_1000
#print r_1000
#print "*************************"
b_3000 = 0
g_3000 = 0
r_3000 = 0
img_3000 = cv2.imread("3000.jpg")
height,width,channels = img_3000.shape
for i in range(0,height):
for j in range(0,width):
(b,g,r) = img_3000[i,j]
b_3000 = b_3000 + b
g_3000 = g_3000 + g
r_3000 = r_3000 + r
b_3000 = b_3000/(height*width)
g_3000 = g_3000/(height*width)
r_3000 = r_3000/(height*width)
#print b_3000
#print g_3000
#print r_3000
#print "*************************"
cam = cv2.VideoCapture(0)
while True:
if cam.grab():
flag, frame = cam.retrieve()
if not flag:
continue
else:
cv2.imshow('INITIAL', frame)
gray_counter = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray_counter2 = cv2.bitwise_not(cv2.Canny(gray_counter,100,200))
ret,thresh = cv2.threshold(gray_counter2,127,255,1)
_,contours,_ = cv2.findContours(thresh,1,2)
if len(contours)==1:
captured_frame = frame
cv2.destroyAllWindows()
break
key = cv2.waitKey(1) & 0xFF
if key == ord("c"):
captured_frame = frame
cv2.destroyAllWindows()
break
#captured_frame = cv2.imread("test.jpg") #testing
canny = cv2.bitwise_not(cv2.Canny(captured_frame,100,200))
height,width = canny.shape
count = 0
centroid_x = 0
centroid_y = 0
for i in range(0,height):
for j in range(0,width):
if canny[i,j] == 0:
count = count + 1
centroid_x = centroid_x + i
centroid_y = centroid_y + j
centroid_x = centroid_x / count
centroid_y = centroid_y / count
#print str(centroid_x) + " , " + str(centroid_y)
#print str(height) + " , " + str(width)
#cv2.imshow("Captured Frame",captured_frame)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
b_var,g_var,r_var = captured_frame[centroid_x,centroid_y]
#print str(r_var) + " , " + str(g_var) + " , " + str(b_var)
x0 = (b_cfactor*b_0)+(g_cfactor*g_0)+(r_cfactor*r_0)
y0 = 0
x1 = (b_cfactor*b_100)+(g_cfactor*g_100)+(r_cfactor*r_100)
y1 = 100
x2 = (b_cfactor*b_300)+(g_cfactor*g_300)+(r_cfactor*r_300)
y2 = 300
x3 = (b_cfactor*b_1000)+(g_cfactor*g_1000)+(r_cfactor*r_1000)
y3 = 1000
x4 = (b_cfactor*b_3000)+(g_cfactor*g_3000)+(r_cfactor*r_3000)
y4 = 3000
x_tofind = (b_cfactor*b_var)+(g_cfactor*g_var)+(r_cfactor*r_var)
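# Lagrange interpolation: evaluate the degree-4 polynomial through the five
# calibration points (x0, y0) .. (x4, y4) at the measured luminance x_tofind
# to estimate the glucose level.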
term1 = (((x_tofind-x1)*(x_tofind-x2)*(x_tofind-x3)*(x_tofind-x4)*y0)/((x0-x1)*(x0-x2)*(x0-x3)*(x0-x4)))
term2 = (((x_tofind-x0)*(x_tofind-x2)*(x_tofind-x3)*(x_tofind-x4)*y1)/((x1-x0)*(x1-x2)*(x1-x3)*(x1-x4)))
term3 = (((x_tofind-x0)*(x_tofind-x1)*(x_tofind-x3)*(x_tofind-x4)*y2)/((x2-x0)*(x2-x1)*(x2-x3)*(x2-x4)))
term4 = (((x_tofind-x0)*(x_tofind-x1)*(x_tofind-x2)*(x_tofind-x4)*y3)/((x3-x0)*(x3-x1)*(x3-x2)*(x3-x4)))
term5 = (((x_tofind-x0)*(x_tofind-x1)*(x_tofind-x2)*(x_tofind-x3)*y4)/((x4-x0)*(x4-x1)*(x4-x2)*(x4-x3)))
y_tofind = term1 + term2 + term3 + term4 + term5
#print y_tofind
#app = tk.Tk()
#text1 = tk.Label(app,text="Glucose Test Level",font=("Helvetica", 16))
#text1.pack()
#text2 = tk.Label(app,text=str(y_tofind)+" mg/dL",font=("Helvetica", 16))
#text2.pack()
#app.minsize(500,80)
#app.geometry("500x80")
#app.mainloop()
Config.set('graphics', 'width', '500')
Config.set('graphics', 'height', '80')
Config.write()
class Layout(Widget):
def draw(self):
with self.canvas:
Label(text="Glucose Test Level",font=("Helvetica", 16),pos=(200,0))
Label(text=str(y_tofind)+" mg/dL",font=("Helvetica", 16),pos=(200,-20))
class GlucoseLevel(App):
def build(self):
app = Layout()
app.draw()
return app
GlucoseLevel().run()
``` |
{
"source": "Aakash10975/Kafka_Avro",
"score": 3
} |
#### File: Aakash10975/Kafka_Avro/consume_records.py
```python
from confluent_kafka.avro import AvroConsumer
from argparse import ArgumentParser
def parse_cmd_line_args():
arg_parser = ArgumentParser()
arg_parser.add_argument("--topic", required=True, help="Topic Name")
arg_parser.add_argument("--bootstrap-servers", required=False, default="localhost:9092", help="Bootstrap server address")
arg_parser.add_argument("--schema-registry", required=False, default="http://localhost:8081", help="Schema Registry URL")
arg_parser.add_argument("--schema-file", required=False, help="File name of Avro schema to use")
arg_parser.add_argument("--record-key", required=False, type=str, help="Record key. If not provided, will be a random UUID")
arg_parser.add_argument("--record-value", required=False, help="Record value")
return arg_parser.parse_args()
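# Example invocation (topic name is hypothetical; the bootstrap server and
# schema registry fall back to the localhost defaults declared above):
# python consume_records.py --topic test-avro-topic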
def consume_record(args):
default_group_name = "default-consumer-group"
consumer_config = {"bootstrap.servers": args.bootstrap_servers,
"schema.registry.url": args.schema_registry,
"group.id": default_group_name,
"auto.offset.reset": "earliest"}
consumer = AvroConsumer(consumer_config)
consumer.subscribe([args.topic])
try:
message = consumer.poll(5)
except Exception as e:
print("Exception while trying to poll messages - {}".format(e))
else:
if message:
print("Successfully poll a record from Kafka topic: {}, partition: {}, \
offset: {}\nmessage key: {} || message value: {}"\
.format(message.topic(), message.partition(), message.offset(),
message.key(), message.value()))
consumer.commit()
else:
print("No new messages at this point. Try again later.")
consumer.close()
if __name__ == "__main__":
consume_record(parse_cmd_line_args())
```
#### File: Aakash10975/Kafka_Avro/send_records.py
```python
import json, uuid, ast
from confluent_kafka.avro import AvroProducer
from confluent_kafka import avro
from argparse import ArgumentParser
# from util.load_avro_schema_from_file import load_avro_schema_from_file
# from util.parse_command_line_args import parse_cmd_line_args
def load_avro_schema_from_file(schema_file):
key_schema_string = """
{"type": "string"}
"""
key_schema = avro.loads(key_schema_string)
value_schema = avro.load("./avro/" + schema_file)
return key_schema, value_schema
def parse_cmd_line_args():
arg_parser = ArgumentParser()
arg_parser.add_argument("--topic", required=True, help="Topic Name")
arg_parser.add_argument("--bootstrap-servers", required=False, default="localhost:9092", help="Bootstrap server address")
arg_parser.add_argument("--schema-registry", required=False, default="http://localhost:8081", help="Schema Registry URL")
arg_parser.add_argument("--schema-file", required=False, help="File name of Avro schema to use")
arg_parser.add_argument("--record-key", required=False, type=str, help="Record key. If not provided, will be a random UUID")
arg_parser.add_argument("--record-value", required=False, help="Record value")
return arg_parser.parse_args()
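# Example invocation (topic, schema file and record fields are hypothetical and
# must match an Avro schema placed under ./avro/):
# python send_records.py --topic test-avro-topic --schema-file user.avsc \
# --record-value "{'name': 'Jane', 'age': 30}"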
def send_records(args):
if args.record_value is None:
raise AttributeError("--record-value is not provided.")
if args.schema_file is None:
raise AttributeError("--schema-file is not provided.")
key_schema, value_schema = load_avro_schema_from_file(args.schema_file)
producer_config = { "bootstrap.servers": args.bootstrap_servers,
"schema.registry.url": args.schema_registry}
# producer = AvroProducer(producer_config, default_key_schema=key_schema, default_value_schema=value_schema)
producer = AvroProducer(producer_config)
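# note: constructed here without default schemas; built this way, produce() may
# need key_schema/value_schema passed explicitly, whereas the commented-out
# constructor above would register them as defaults.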
key = args.record_key if args.record_key else str(uuid.uuid4())
# value = json.loads(args.record_value)
value = ast.literal_eval(args.record_value)
try:
producer.produce(topic=args.topic, key=key, value=value)
except Exception as e:
print("Exception while producing record key = {} value = {} to topic = {}: {}"\
.format(key, value, args.topic, e))
else:
print("Successfully producing record value - {} to topic - {}"\
.format(value, args.topic))
producer.flush()
if __name__ == "__main__":
send_records(parse_cmd_line_args())
``` |
{
"source": "Aakash1312/appleseed",
"score": 2
} |
#### File: many light sampling/generators/grid_point_lights.py
```python
from __future__ import print_function
import colorsys
import math
import random
import signal
import sys
import time
import threading
import os
import numpy as np
import appleseed as asr
# Initial parameters for generating grid light scene
grid_lights_count = 20
color = "white"
plane_size = 100
output_scene_name = "{0}x{0}_{1}_point_lights".format(grid_lights_count, color)
def build_project():
# Create an empty project.
project = asr.Project("grid-point-lights-generator")
paths = project.get_search_paths()
paths.append("data")
project.set_search_paths(paths)
# Add default configurations to the project.
project.add_default_configurations()
# Set the number of samples. This is basically the quality parameter: the higher the number
# of samples, the smoother the image but the longer the rendering time.
# todo: fix.
conf = project.configurations()["final"]
conf.insert_path("uniform_pixel_renderer.samples", 1)
# Create a scene.
scene = asr.Scene()
# Create an assembly.
assembly = asr.Assembly("assembly")
# Prepare the orientation of all the objects in the scene.
orientation = asr.Matrix4d.make_rotation(asr.Vector3d(1.0, 0.0, 0.0), math.radians(-90.0))
#------------------------------------------------------------------------
# Materials
#------------------------------------------------------------------------
# Create a material called "01 - Default_mat" and insert it into the assembly.
assembly.materials().insert(asr.Material(
"disney_material",
"01 - Default_mat",
{
"alpha_map": "1",
"layer1": {
"anisotropic": "0",
"base_color": "[1, 1, 1]",
"clearcoat": "0",
"clearcoat_gloss": "0",
"layer_name": "layer1",
"layer_number": "0",
"mask": "1.0",
"metallic": "0",
"roughness": "1",
"sheen": "0",
"sheen_tint": "0",
"specular": "0",
"specular_tint": "0",
"subsurface": "0.0"
}
}))
#------------------------------------------------------------------------
# Geometry
#------------------------------------------------------------------------
# Load the scene geometry from disk.
objects = asr.MeshObjectReader.read(project.get_search_paths(), "plane", {"filename": "Plane001.binarymesh"})
# Insert all the objects into the assembly.
for object in objects:
# Create an instance of this object and insert it into the assembly.
instance_name = object.get_name() + "_inst"
material_name = {"material_slot_0": "01 - Default_mat"}
mat = orientation * asr.Matrix4d.make_translation(asr.Vector3d(0.0, 0.0, 0.0))
instance = asr.ObjectInstance(
instance_name,
{"visibility":
{
"camera": "true",
"diffuse": "true",
"glossy": "true",
"light": "true",
"probe": "true",
"shadow": "true",
"specular": "true",
"subsurface": "true",
"transparency": "true"
}},
object.get_name(),
asr.Transformd(mat),
material_name,
material_name)
assembly.object_instances().insert(instance)
# Insert this object into the scene.
assembly.objects().insert(object)
#------------------------------------------------------------------------
# Lights
#------------------------------------------------------------------------
light_z_distance = 1.0
if color == "white":
assembly.colors().insert(asr.ColorEntity("white",
{
"color_space": "linear_rgb",
"multiplier": 1.0
},
[1.0, 1.0, 1.0]))
step = float(plane_size) / grid_lights_count
light_count = 0
grid_range = np.linspace(-plane_size / 2 + step, plane_size / 2 - step, grid_lights_count)
for j in grid_range:
for i in grid_range:
# Create a point light called "light" and insert it into the assembly.
light_name = "light_" + str(light_count)
light_count = light_count + 1
light = asr.Light("point_light", light_name, {
"intensity": "white",
"intensity_multiplier": "3"
})
light_position = asr.Vector3d(i, j, light_z_distance)
mat = orientation * asr.Matrix4d.make_translation(light_position)
light.set_transform(asr.Transformd(mat))
assembly.lights().insert(light)
elif color == "mix":
for i in xrange(0, grid_lights_count * grid_lights_count):
s = random.uniform(0, 1)
if s < 0.65:
ran = random.gauss(1, 0.01)
elif s < 0.9:
ran = random.gauss(0.3, 0.1)
else:
ran = random.gauss(0.7, 0.01)
random_color = list(colorsys.hls_to_rgb(ran, 0.5, 1.0))
assembly.colors().insert(asr.ColorEntity("color_" + str(i),
{
"color_space": "linear_rgb",
"multiplier": 1.0
},
random_color))
step = float(plane_size) / grid_lights_count
light_count = 0
grid_range = np.linspace(-plane_size / 2 + step, plane_size / 2 - step, grid_lights_count)
for j in grid_range:
for i in grid_range:
# Create a point light called "light" and insert it into the assembly.
light_name = "light_" + str(light_count)
color_name = "color_" + str(light_count)
light_count = light_count + 1
light = asr.Light("point_light", light_name, {
"intensity": color_name,
"intensity_multiplier": "3"
})
light_position = asr.Vector3d(i, j, light_z_distance)
mat = orientation * asr.Matrix4d.make_translation(light_position)
light.set_transform(asr.Transformd(mat))
assembly.lights().insert(light)
else:
print("Unknown color: {0}".format(color))
return
#------------------------------------------------------------------------
# Assembly instance
#------------------------------------------------------------------------
# Create an instance of the assembly and insert it into the scene.
assembly_inst = asr.AssemblyInstance("assembly_inst", {}, assembly.get_name())
assembly_inst.transform_sequence().set_transform(0.0, asr.Transformd(asr.Matrix4d.identity()))
scene.assembly_instances().insert(assembly_inst)
# Insert the assembly into the scene.
scene.assemblies().insert(assembly)
#------------------------------------------------------------------------
# Environment
#------------------------------------------------------------------------
# Create an environment called "env" and bind it to the scene.
scene.set_environment(asr.Environment("env", {}))
#------------------------------------------------------------------------
# Camera
#------------------------------------------------------------------------
# Create an orthographic camera.
params = {
"controller_target": "0 0 0",
"film_dimensions": "128 128",
"near_z": "-0.1",
"shutter_close_time": "1.0",
"shutter_open_time": "0.0"
}
camera = asr.Camera("orthographic_camera", "camera", params)
# Place and orient the camera.
mat = orientation * asr.Matrix4d.make_translation(asr.Vector3d(0.0, 0.0, 0.0))
camera.transform_sequence().set_transform(0.0, asr.Transformd(mat))
# Bind the camera to the scene.
scene.cameras().insert(camera)
#------------------------------------------------------------------------
# Frame
#------------------------------------------------------------------------
# Create a frame and bind it to the project.
params = {
"camera": "camera",
"clamping": "false",
"color_space": "srgb",
"filter": "box",
"filter_size": "0.5",
"gamma_correction": "1.0",
"pixel_format": "float",
"premultiplied_alpha": "true",
"resolution": "512 512",
"tile_size": "64 64"}
project.set_frame(asr.Frame("beauty", params))
# Bind the scene to the project.
project.set_scene(scene)
return project
def main():
# Build the project.
project = build_project()
# Save the project to disk.
asr.ProjectFileWriter().write(project, output_scene_name + ".appleseed")
if __name__ == "__main__":
main()
```
#### File: appleseed.python/test/testentitymap.py
```python
import unittest
import appleseed as asr
class TestEntityMap(unittest.TestCase):
"""
Basic entity map tests.
"""
def setUp(self):
self.scn = asr.Scene()
self.assembly_map = self.scn.assemblies()
def test_get_by_name(self):
ass = asr.Assembly("assembly", {})
self.assembly_map.insert(ass)
ass = asr.Assembly("another_assembly", {})
self.assembly_map.insert(ass)
a = self.assembly_map.get_by_name("assembly")
self.assertEqual(a.get_name(), "assembly")
a = self.assembly_map.get_by_name("another_assembly")
self.assertEqual(a.get_name(), "another_assembly")
a = self.assembly_map.get_by_name("no_such_assembly")
self.assertEqual(a, None)
def test_get_by_uuid(self):
ass = asr.Assembly("assembly")
uid1 = ass.get_uid()
self.assembly_map.insert(ass)
ass = asr.Assembly("another_assembly")
uid2 = ass.get_uid()
self.assembly_map.insert(ass)
a = self.assembly_map.get_by_uid(uid1)
self.assertEqual(a.get_name(), "assembly")
a = self.assembly_map.get_by_uid(uid2)
self.assertEqual(a.get_name(), "another_assembly")
a = self.assembly_map.get_by_uid(77567)
self.assertEqual(a, None)
def test_get_item(self):
ass = asr.Assembly("assembly")
uid1 = ass.get_uid()
self.assembly_map.insert(ass)
ass = asr.Assembly("another_assembly")
uid2 = ass.get_uid()
self.assembly_map.insert(ass)
self.assertEqual(self.assembly_map["assembly"].get_uid(), uid1)
self.assertEqual(self.assembly_map["another_assembly"].get_uid(), uid2)
def test_insert_remove_by_uid(self):
ass = asr.Assembly("assembly")
self.assembly_map.insert(ass)
self.assertEqual(len(self.assembly_map), 1)
a = self.assembly_map.get_by_name("assembly")
ass = self.assembly_map.remove_by_uid(a.get_uid())
self.assertEqual(len(self.assembly_map), 0)
self.assembly_map.insert(ass)
self.assertEqual(len(self.assembly_map), 1)
def test_keys(self):
self.assertEqual(self.assembly_map.keys(), [])
ass = asr.Assembly("assembly")
self.assembly_map.insert(ass)
ass = asr.Assembly("another_assembly")
self.assembly_map.insert(ass)
self.assertEqual(self.assembly_map.keys(), ["assembly", "another_assembly"])
def test_values(self):
ass = asr.Assembly("assembly")
uid1 = ass.get_uid()
self.assembly_map.insert(ass)
ass = asr.Assembly("another_assembly")
uid2 = ass.get_uid()
self.assembly_map.insert(ass)
values = self.assembly_map.values()
self.assertEqual(len(values), 2)
self.assertEqual(values[0].get_uid(), uid1)
self.assertEqual(values[1].get_uid(), uid2)
def test_iters(self):
names = ['assembly', 'assembly2', 'assembly3']
uids = []
for name in names:
ass = asr.Assembly(name)
uids.append(ass.get_uid())
self.assembly_map.insert(ass)
result_names = []
result_uids = []
for ass in self.assembly_map:
result_names.append(ass)
result_uids.append(self.assembly_map[ass].get_uid())
self.assertEqual(sorted(names), sorted(result_names))
self.assertEqual(sorted(uids), sorted(result_uids))
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "aakash140799/ScreenCapture",
"score": 2
} |
#### File: aakash140799/ScreenCapture/capture_scripts.py
```python
import key_scripts
import video_scripts
import log_scripts
from vidgear.gears import WriteGear
import win32api as wapi
import os
import threading
import time
"""
press f11 to start capturing frames
press f11 again to stop capturing
press f10 to delete current capture, and restart immediately
press f9 to stop program immediately
"""
output_params = {"-vcodec":"MPEG", "-input_framerate": 25}
capture_state = 0
del_it = 0
start = len(os.listdir('data'))//2 + 1
def capture():
global capture_state
global del_it
global start
# run until exit is requested (capture_state == 2)
while capture_state != 2:
out_file = str(start)
# capture mode
if capture_state == 1:
log_scripts.log_msg('capturing : '+out_file)
out_file = 'data\\'+out_file
vid_out = WriteGear(out_file+'.avi',compression_mode=False,
custom_ffmpeg='C:\Program Files (x86)\ffmpeg\bin',**output_params)
txt_out = open(out_file+'.txt', 'w')
# capture 512 frames, or stop if altered
cnt = 0
while cnt <= 512 and not del_it:
vid_out.write(video_scripts.get_state())
txt_out.write(key_scripts.get_state())
cnt = cnt + 1
vid_out.close()
txt_out.close()
# if delete
if del_it:
os.remove(out_file+'.avi')
os.remove(out_file+'.txt')
del_it = 0
capture_state = 0
log_scripts.log_msg('deleting : '+out_file)
log_scripts.log_msg('state : False')
log_scripts.log_msg('Capturing : Stop')
else:
log_scripts.log_msg('saving : '+out_file)
start = start + 1
else:
log_scripts.log_msg('at hold')
time.sleep(2)
log_scripts.log_msg('capture thread exited')
exit()
try:
capture_thread = threading.Thread(target=capture)
capture_thread.start()
f11_state = 0
f10_state = 0
f9_state = 0
while capture_state != 2:
f11 = wapi.GetAsyncKeyState(0x7A)
f10 = wapi.GetAsyncKeyState(0x79)
f9 = wapi.GetAsyncKeyState(0x78)
if f11 and f11_state == 0:
print('alter')
capture_state = 1 if capture_state == 0 else 0
if f10 and f10_state == 0:
del_it = 1
if f9 and f9_state == 0:
capture_state = 2
f11_state = f11
f10_state = f10
f9_state = f9
log_scripts.log_msg('exiting')
capture_thread.join()
except KeyboardInterrupt:
exit()
```
#### File: aakash140799/ScreenCapture/key_scripts.py
```python
import win32api as wapi
import threading
import time
"""
press f11 to start capture
press f11 again to stop capture
press f9 to end
"""
key_str = "<KEY>".lower()
keys_state = {k : 0 for k in key_str}
mouse_state = [0, 0, 0, 0]
def get_state():
kstate = ""
for k in key_str:
kstate = kstate + str(keys_state[k]) + ", "
kstate = kstate + str(mouse_state[2]) + ", "
kstate = kstate + str(mouse_state[3]) + "\n"
return kstate
def update_state():
while True:
for k in key_str:
keys_state[k] = wapi.GetAsyncKeyState(ord(k))
(x, y) = wapi.GetCursorPos()
mouse_state[2:4] = [x-mouse_state[0],y-mouse_state[1]]
mouse_state[0:2] = [x,y]
time.sleep(0.01)
key_thread = threading.Thread(target=update_state)
key_thread.start()
print('key_script done')
``` |
{
"source": "aakash2007/CodeExtract-CodeChef",
"score": 3
} |
#### File: aakash2007/CodeExtract-CodeChef/codechef.py
```python
from util import *
from bs4 import BeautifulSoup
class Profile(object):
"""Profile Class For User"""
def __init__(self, handle=""):
super(Profile, self).__init__()
self.handle = handle
self.domain_url = "https://www.codechef.com"
def get_user_handle(self):
usr_name = ''
first_time = True
while len(usr_name) == 0:
if first_time:
first_time = False
else:
print("You haven't entered any handle. Please try again.")
usr_name = input("Enter Your CodeChef Handle: ").strip()
self.handle = usr_name
def get_solved_problems(self):
"""Function Processes the user page and return a dictionary object containing all successfully solved problems"""
"""Return format { problem_code : problem_link }"""
domain_url = self.domain_url
handle = self.handle
user_url = domain_url + "/users/" + handle
user_page = get_url_data(user_url)
soup = BeautifulSoup(user_page,"lxml")
# Segregate problems table
sp = soup.find_all('table')[2]
prob_list = {}
for cell in sp.find_all('td'):
if cell.text == "Problems Successfully Solved:":
n_soup = cell.nextSibling.nextSibling
for s in n_soup.find_all('a'):
ques_link = (str(s.get('href'))).strip()
ques_url = ques_link
prob_list[s.text.strip()] = ques_url
return prob_list
def get_AC_submission(self, sub_link):
"""Function extracts the correct submission from list of user submissions for a problem"""
domain_url = self.domain_url
sub_page_url = domain_url + sub_link
sub_page = get_url_data(sub_page_url)
soup = BeautifulSoup(sub_page, "lxml")
obj = soup.find('tbody')
code_link = ""
for s in obj.find_all('tr'):
if s.find('img').get('src') == "/misc/tick-icon.gif" :
code_link = s.find_all('a')[-1].get('href')
break
code_url = code_link
return code_url
def extract_code(self, code_link):
"""Function extracts the user submitted code from submission page"""
"""Return format @tuple (code_lang, code_str)"""
domain_url = self.domain_url
code_page_url = domain_url + code_link
code_page = get_url_data(code_page_url)
soup = BeautifulSoup(code_page,"lxml")
lang = (soup.find('pre')).get("class")[-1]
code = soup.find_all('ol')[-1]
# in some cases the code contains a stray \xa0 (non-breaking space) character
# it needs to be removed
cod_str = str(code)
cod_str = cod_str.replace(u'\xa0',u' ')
# replacing </li> with </li>\n for pretty-printing
# then recreate a bs4 object
cod_str = cod_str.replace("</li>", "</li>\n")
code = BeautifulSoup(cod_str,"lxml")
code_str = code.text
return (lang, code_str)
```
#### File: aakash2007/CodeExtract-CodeChef/util.py
```python
from urllib.request import urlopen
import urllib.request
def get_url_data(url1):
req = urllib.request.Request(url1, headers={'User-Agent': 'Mozilla/5.0'})
html_pg = urllib.request.urlopen(req)
return html_pg
def write_to_file(code, location, prob_code, lang):
file_name = "./" + location + "/" + prob_code + "." + lang
with open(file_name,'w') as fl:
fl.write(code)
``` |
{
"source": "aakash2007/Laboratory-Inventory",
"score": 3
} |
#### File: Laboratory-Inventory/inventory/models.py
```python
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Laboratory(models.Model):
lab_name = models.CharField("Laboratory Name", max_length=200)
lab_incharge = models.CharField("Laboratory Incharge", max_length=200)
def __str__(self):
return self.lab_name
class Meta:
verbose_name = "Laboratory"
verbose_name_plural = "Laboratories"
class Item(models.Model):
lab = models.ForeignKey(Laboratory, on_delete=models.CASCADE)
item_name = models.CharField("Item Name", max_length=200)
item_count = models.IntegerField("Count", default=0)
item_cost = models.DecimalField("Cost (In INR)", max_digits=20, decimal_places=2)
total_cost = models.DecimalField("Total Cost (In INR)", max_digits=10, decimal_places=2)
def save(self, *args, **kwargs):
self.total_cost = self.item_count*self.item_cost
return super(Item, self).save(*args, **kwargs)
def __str__(self):
return self.item_name
``` |
{
"source": "aakash2007/Rapid-Visual-Survey",
"score": 2
} |
#### File: Rapid-Visual-Survey/survey/rc_score.py
```python
def RC_score(bd):
# Plan Irregularities
if bd.ir_plc is 1 and bd.re_crn is 1:
bd.pl_irr = 2
elif (bd.ir_plc is 1 and bd.re_crn is 0) or (bd.ir_plc is 0 and bd.re_crn is 1):
bd.pl_irr = 1
else:
bd.pl_irr = 0
# Soft Storey
if bd.op_prk is 1 or bd.ab_prt is 1 or bd.st_shp is 1 or bd.tl_htg is 1:
bd.soft_st = 1
else:
bd.soft_st = 0
# Vertical Irregularity
if bd.pr_stb is 1 or bd.bl_slp is 1:
bd.vrt_irr = 1
else:
bd.vrt_irr = 0
# Heavy Overhangs
if bd.md_hrp is 1 or bd.sb_hrp is 1:
bd.hvy_ovh = 1
else:
bd.hvy_ovh = 0
# Apparent Quality
if bd.ql_mat is 2 and bd.maintc is 2:
bd.ap_qlt = 2
elif bd.ql_mat is 0 and bd.maintc is 0:
bd.ap_qlt = 0
else:
bd.ap_qlt = 1
# Pounding
if bd.un_flr is 1 or bd.pr_qlt is 1:
bd.pnding = 1
else:
bd.pnding = 0
buil_flr = int(bd.no_floor)
if buil_flr is 2:
flr = 1
elif buil_flr > 5:
flr = 6
else:
flr = buil_flr
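# Base score lookup: the outer key appears to be the floor-count bucket
# (1 covers 1-2 floors, 6 covers 6 or more) and the inner key the seismic
# zone; the per-floor dictionaries that follow hold the score modifiers for
# each vulnerability factor.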
base_table = {
1: {1:150, 2:130, 3:100},
3: {1:140, 2:120, 3:90},
4: {1:120, 2:100, 3:75},
5: {1:100, 2:85, 3:65},
6: {1:90, 2:80, 3:60}
}
base_score = base_table[flr][bd.s_zone]
soft_st_f = {1:0, 3:-15, 4:-20, 5:-25, 6:-30}
hvy_ovh_f = {1:-5, 3:-10, 4:-10, 5:-15, 6:-15}
ap_qlt_f = {1:-5, 3:-10, 4:-10, 5:-15, 6:-15}
pnding_f = {1:0, 3:-2, 4:-3, 5:-5, 6:-5}
bas_prsnt_f = {1:0, 3:3, 4:4, 5:5, 6:5}
sft = bd.soft_st*soft_st_f[flr]
vrt = bd.vrt_irr*(-10)
plir = bd.pl_irr*(-5)
hvyov = bd.hvy_ovh*hvy_ovh_f[flr]
apqlty = bd.ap_qlt*ap_qlt_f[flr]
shrt_clm = bd.shr_col*(-5)
pound = bd.pnding*2*pnding_f[flr]
soilcn = bd.soil_cn*10
frmact = bd.frm_act*10
bsmt = bd.bas_prsnt*bas_prsnt_f[flr]
vs = sft + vrt + plir + hvyov + apqlty + shrt_clm + pound + soilcn + frmact + bsmt
perf_sc = base_score + vs
return perf_sc
``` |
{
"source": "aakash2016/UI-PRMD-Python",
"score": 3
} |
#### File: aakash2016/UI-PRMD-Python/uiprmd_data.py
```python
import math as m
import numpy as np
from numpy import genfromtxt
import matplotlib.pyplot as plt
import matplotlib.animation as animation
num_kp = 22
num_axes = 3
def Rotx(theta):
# Rotation about X axis
return np.matrix([[1, 0, 0],
[0, m.cos(theta), -m.sin(theta)],
[0, m.sin(theta), m.cos(theta)]])
def Roty(theta):
# Rotation about Y axis
return np.matrix([[m.cos(theta), 0, m.sin(theta)],
[0, 1, 0],
[-m.sin(theta), 0, m.cos(theta)]])
def Rotz(theta):
# Rotation about Z axis
return np.matrix([[m.cos(theta), -m.sin(theta), 0],
[m.sin(theta), m.cos(theta), 0],
[0, 0, 1]])
def eulers_2_rot_matrix(x):
"""
eulers_2_rot_matrix transforms a set of Euler angles into a rotation matrix
input: vector of Euler angles [gamma_x, beta_y, alpha_z] (ZYX Euler angles, in radians)
"""
gamma_x = x[0]
beta_y = x[1]
alpha_z = x[2]
return Rotz(alpha_z) * Roty(beta_y) * Rotx(gamma_x)
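# Illustrative sanity check (angles are expected in radians here; the callers
# in rel2abs convert from degrees first):
# np.allclose(eulers_2_rot_matrix([0.0, 0.0, 0.0]), np.eye(3)) -> True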
def rel2abs(p, a, num_frames):
# convert the data from relative coordinates to absolute coordinates
skel = np.zeros((num_kp, num_axes, num_frames))
for i in range(num_frames):
"""
1 Waist (absolute)
2 Spine
3 Chest
4 Neck
5 Head
6 Head tip
7 Left collar
8 Left upper arm
9 Left forearm
10 Left hand
11 Right collar
12 Right upper arm
13 Right forearm
14 Right hand
15 Left upper leg
16 Left lower leg
17 Left foot
18 Left leg toes
19 Right upper leg
20 Right lower leg
21 Right foot
22 Right leg toes
"""
joint = p[:,:,i]
joint_ang = a[:,:,i]
# chest, neck, head
rot_1 = eulers_2_rot_matrix(joint_ang[0,:]*np.pi/180)
joint[1,:] = rot_1@joint[1,:] + joint[0,:]
rot_2 = rot_1*eulers_2_rot_matrix(joint_ang[1,:]*np.pi/180)
joint[2,:] = rot_2@joint[2,:] + joint[1,:]
rot_3 = rot_2*eulers_2_rot_matrix(joint_ang[2,:]*np.pi/180)
joint[3,:] = rot_3@joint[3,:] + joint[2,:]
rot_4 = rot_3*eulers_2_rot_matrix(joint_ang[3,:]*np.pi/180)
joint[4,:] = rot_4@joint[4,:] + joint[3,:]
rot_5 = rot_4*eulers_2_rot_matrix(joint_ang[4,:]*np.pi/180)
joint[5,:] = rot_5@joint[5,:] + joint[4,:]
# left-arm
rot_6 = eulers_2_rot_matrix(joint_ang[2,:]*np.pi/180)
joint[6,:] = rot_6@joint[6,:] + joint[2,:]
rot_7 = rot_6*eulers_2_rot_matrix(joint_ang[6,:]*np.pi/180)
joint[7,:] = rot_7@joint[7,:] + joint[6,:]
rot_8 = rot_7*eulers_2_rot_matrix(joint_ang[7,:]*np.pi/180)
joint[8,:] = rot_8@joint[8,:] + joint[7,:]
rot_9 = rot_8*eulers_2_rot_matrix(joint_ang[8,:]*np.pi/180)
joint[9,:] = rot_9@joint[9,:] + joint[8,:]
# right-arm
rot_10 = eulers_2_rot_matrix(joint_ang[2,:]*np.pi/180)
joint[10,:] = rot_10@joint[10,:] + joint[2,:]
rot_11 = rot_10*eulers_2_rot_matrix(joint_ang[10,:]*np.pi/180)
joint[11,:] = rot_11@joint[11,:] + joint[10,:]
rot_12 = rot_11*eulers_2_rot_matrix(joint_ang[11,:]*np.pi/180)
joint[12,:] = rot_12@joint[12,:] + joint[11,:]
rot_13 = rot_12*eulers_2_rot_matrix(joint_ang[12,:]*np.pi/180)
joint[13,:] = rot_13@joint[13,:] + joint[12,:]
# left-leg
rot_14 = eulers_2_rot_matrix(joint_ang[0,:]*np.pi/180)
joint[14,:] = rot_14@joint[14,:] + joint[0,:]
rot_15 = rot_14*eulers_2_rot_matrix(joint_ang[14,:]*np.pi/180)
joint[15,:] = rot_15@joint[15,:] + joint[14,:]
rot_16 = rot_15*eulers_2_rot_matrix(joint_ang[15,:]*np.pi/180)
joint[16,:] = rot_16@joint[16,:] + joint[15,:]
rot_17 = rot_16*eulers_2_rot_matrix(joint_ang[16,:]*np.pi/180)
joint[17,:] = rot_17@joint[17,:] + joint[16,:]
# right-leg
rot_18 = eulers_2_rot_matrix(joint_ang[0,:]*np.pi/180)
joint[18,:] = rot_18@joint[18,:] + joint[0,:]
rot_19 = rot_18*eulers_2_rot_matrix(joint_ang[18,:]*np.pi/180)
joint[19,:] = rot_19@joint[19,:] + joint[18,:]
rot_20 = rot_19*eulers_2_rot_matrix(joint_ang[19,:]*np.pi/180)
joint[20,:] = rot_20@joint[20,:] + joint[19,:]
rot_21 = rot_20*eulers_2_rot_matrix(joint_ang[20,:]*np.pi/180)
joint[21,:] = rot_21@joint[21,:] + joint[20,:]
skel[:,:,i] = joint
return skel
def main():
pos_path = "/Users/aakash_agrawal/Downloads/Movements/Kinect/Positions/m01_s01_positions.txt"
ang_path = "/Users/aakash_agrawal/Downloads/Movements/Kinect/Angles/m01_s01_angles.txt"
pos_data = genfromtxt(pos_path)
ang_data = genfromtxt(ang_path)
num_frames = pos_data.shape[0]
p_data = pos_data.T.reshape(num_kp, num_axes, -1)
a_data = ang_data.T.reshape(num_kp, num_axes, -1)
skel = rel2abs(p_data, a_data, num_frames)
# order of joint connections
J = np.array([[3, 5, 4, 2, 1, 2, 6, 7, 8, 2, 10, 11, 12, 0, 14, 15, 16, 0, 18, 19, 20],
[2, 4, 2, 1, 0, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]])
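# each column of J pairs a start and end joint index; these pairs are the
# bones drawn as line segments by get_plot below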
# find maximum and minimum values (for plotting)
a = p_data.transpose(2, 0, 1).reshape(-1, 3)
max_x = max(a[:, 0])
min_x = min(a[:, 0])
max_y = max(a[:, 1])
min_y = min(a[:, 1])
max_z = max(a[:, 2])
min_z = min(a[:, 2])
# Visualization
fig = plt.figure(figsize=(12, 10))
ax = fig.add_subplot(111)
def get_plot(i):
ax.cla()
ax.set_title('2D plot using transformed data')
ax.set_xlabel('x')
ax.set_ylabel('y')
joint = skel[:, :, i]
for j in range(J.shape[1]):
p1 = joint[J[0, j], :]
p2 = joint[J[1, j], :]
ax.plot([p1[0], p2[0]], [p1[1], p2[1]], 'o-')
plt.rcParams['animation.html'] = 'html5'
anim = animation.FuncAnimation(fig, get_plot, blit=False, frames=num_frames)
plt.close()
if __name__ == "__main__":
main()
``` |
{
"source": "aakash21696/c2c2017",
"score": 3
} |
#### File: c2c2017/src/check_hackerrank_problems.py
```python
import urllib, json
import os
from datetime import datetime
from time import sleep
import sys
HR_URL = "https://www.hackerrank.com/rest/hackers/{user}/recent_challenges?cursor={cursor}&limit=100&response_version=v2"
HR_FILE = "hackerrank_c2c2017.json"
SLEEP_TIME_IN_SECONDS = 5
class HackerRankProblem(object):
def __init__(self, url, created_at, ch_slug, name, con_slug):
self.url = url
self.created_at = created_at
self.ch_slug = ch_slug
self.name = name
self.con_slug = con_slug
def update_json(username):
"""Update our local JSON file with any new problems that user may have solved"""
last_known_prob = get_last_known_solved(username)
last_known_prob_name = None
if last_known_prob:
last_known_prob_name = last_known_prob['name']
# print("Last known solved problem is " + last_known_prob_name)
new_solved = []
for prob in get_problems_from_hackerrank(username):
if prob.name == last_known_prob_name:
break
new_solved.append(prob.__dict__)
# print("{user} has solved {new_probs} new problems since last time!".format(user=username, new_probs=len(new_solved)))
user_data = read_json_file()
if username not in user_data:
user_data[username] = []
user_data[username] = new_solved + user_data[username]
write_json_file(user_data)
return (last_known_prob_name, len(new_solved))
def get_last_known_solved(username):
"""Read the last entry from a JSON file for the user"""
solved_problems = get_all_known_solved(username)
if solved_problems:
return solved_problems[0]
def get_all_known_solved(username):
"""Read all entries from a JSON file for the user"""
all_user_data = read_json_file()
if username not in all_user_data:
return []
user_data = all_user_data[username]
if not user_data:
return []
return user_data
def get_problems_from_hackerrank(username):
"""Generator function that returns solved problems in descending order"""
cursor = "null"
while True:
problems, cursor = _get_next_set_of_problems(username, cursor)
for p in problems:
hr_prob = HackerRankProblem(p['url'], p['created_at'], p['ch_slug'], p['name'], p['con_slug'])
yield hr_prob
if not cursor:
return
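# The helper below fetches one page of results from the recent_challenges
# endpoint (HR_URL above) and returns that page's problems together with the
# next cursor; an empty cursor marks the last page.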
def _get_next_set_of_problems(username, cursor):
url = HR_URL.format(user=username, cursor=cursor)
start_time = datetime.now()
response = urllib.urlopen(url)
end_time = datetime.now()
total_seconds = (end_time - start_time).total_seconds()
print("URL call took {} seconds".format(total_seconds))
try:
data = json.load(response)
except ValueError as ve:
print("ERROR: " + str(ve))
print("We will try again later for {user}".format(user=username))
return [], None
problems = data['models']
cursor = data['cursor']
return problems, cursor
def read_json_file():
if not os.path.exists(HR_FILE):
return {}
with open(HR_FILE, 'r') as json_file:
user_data = json.load(json_file)
return user_data
def write_json_file(user_data):
"""Write out the data to the json file"""
with open(HR_FILE, 'w') as json_file:
user_json = json.dumps(user_data)
json_file.truncate()
json_file.write(user_json)
def pretty_print_status_table(user_status):
print("Username".ljust(25) + "\t|" +
"Last Solved".ljust(50) + "\t|" +
"#New Solved".ljust(10) + "\t|" +
"Total Solved")
sorted_user_status = sorted(user_status.items(), key=lambda x: x[1]["no_of_solved_total"])
for username, status in sorted_user_status:
last_known_prob_name = status["last_known_prob_name"].ljust(50)
new_solved = str(status["new_solved"]).ljust(10)
no_of_solved_total = str(status["no_of_solved_total"])
print("{}\t|{}\t|{}\t|{}".format(username.ljust(25), last_known_prob_name, str(new_solved).ljust(10), no_of_solved_total))
def main():
usernames = ["shahdhrumil1060", "kiran_jawahiran1", "urvi_gadda", "masiraansari62", "akshay_kamath", "cpt01", "parikhsagar073", "hvaidsain26", "vikrant51296", "revati0616", "deep20", "knaik", "a_agarwal_14", "sahil_mankad", "niharika_nahar", "suresh_bp", "bhakti12", "melvita_a", "Wazirabahnan3", "adi99ss", "ashanka_bhilare1", "dev69", "Bhakti_C", "priyanka_agarkar", "saketdmano", "rugnesh_k", "aakash216961", "AdiPadi3", "quickgun_ak47", "DevikaShanbhag", "Aishriya", "niyati_js", "gautami_shelar", "adepudivya96", "mihir_jethwa", "Havan"]
user_status = {}
if len(sys.argv) > 1:
usernames = sys.argv[1:]
for username in usernames:
print("Checking status for {}".format(username))
last_known_prob_name, new_solved = update_json(username)
if last_known_prob_name is not None:
last_known_prob_name = last_known_prob_name.ljust(40)
else:
last_known_prob_name = "None".ljust(40)
no_of_solved_total = len(get_all_known_solved(username))
user_status[username] = {
"last_known_prob_name": last_known_prob_name,
"no_of_solved_total": no_of_solved_total,
"new_solved": new_solved
}
print("Sleeping for {} seconds".format(SLEEP_TIME_IN_SECONDS))
sleep(SLEEP_TIME_IN_SECONDS)
pretty_print_status_table(user_status)
if __name__ == '__main__':
main()
``` |
{
"source": "aakash257/tool_for_checking_if_website_allows_text_scraping",
"score": 3
} |
#### File: aakash257/tool_for_checking_if_website_allows_text_scraping/tool_for_checking_web_scraping_terms.py
```python
import os
from bs4 import BeautifulSoup
import urllib, urllib.request, requests
import tkinter
from tkinter import *
#folder_path = os.getcwd()
folder_path = r'C:\Users\aasa9247\Desktop\Research-Current\Buildsystem\Automatic Tools\GitHub-AutomationTools\tool_for_checking_if_website_allows_text_scraping'
# data-tags are the tags used for explaining the conditions of using the
# content in the weblink
file_name = '\\data_tags.txt'
file_path = folder_path + file_name
with open(file_path, encoding="utf-8") as data_file:
content = data_file.readlines()
data_tags = [l.strip() for l in content]
def check_for_data_tags(url):
    '''return the links on the page whose href paths match any of the
    data tags (an empty list if no match is found)'''
with open(file_path, encoding="utf-8") as data_file:
content = data_file.readlines()
data_tags = [l.strip() for l in content]
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
imp_links = []
for link in soup.find_all('a', href=True):
ref_link = link.get('href')
num_tag = len(set(ref_link.split('/')).intersection(set(data_tags)))
if num_tag > 0:
imp_links.append(ref_link)
else:
continue
return imp_links
def urls2text(links):
'''extract text from multiple links to one string'''
all_text = ''
for link in links:
#print(link)
page = requests.get(link)
soup = BeautifulSoup(page.content, 'html.parser')
paras = soup.find_all('p')
for para in paras:
#print(para)
all_text = all_text + ' ' + para.getText()
return all_text
def clean_text(text):
import re
from nltk.corpus import stopwords
text = text.lower()
punc = '!"#$%&\'()*+,-/:;<=>?@[\\]^_`{|}~'
exclud = set(punc) #list of punctuation
text = re.sub(r'[0-9]+', r' ', text)
text = re.sub(' +', ' ', text)
text = re.sub('\. +', '. ', text)
text = ''.join(ch for ch in text if ch not in exclud)
text = re.sub('( \. )+', ' ', text)
text = re.sub('\.+ ', '. ', text)
text = re.sub(' \. ', ' ', text)
return text
def text2binary_for_scrape(text):
'''extract sentence that contains important information about data usage'''
full_text = clean_text(text)
full_text_sent = full_text.split('. ')
full_tok_sent = []
for sent in full_text_sent:
full_tok_sent.append(sent.split(' '))
file_path = folder_path + '\\data_usage_terms_tags.txt'
with open(file_path, encoding="utf-8") as data_file:
content = data_file.readlines()
data_usage_tags = [l.strip() for l in content]
    max_num_common_terms = 0
    imp_tok_sent = ''
    msg = ('Web scraping terms are unclear to our system. '
           'Please check the terms manually')
    for tok_sent in full_tok_sent:
        num_common_terms = len(set(tok_sent).intersection(set(data_usage_tags)))
        #print(num_common_terms)
        if num_common_terms > max_num_common_terms:
            max_num_common_terms = num_common_terms
            imp_tok_sent = tok_sent
    if not imp_tok_sent:
        # no sentence matched the data-usage tags; keep the default message
        return msg
    # base the 'no'/'not' check on the most relevant sentence found above
    num_no_term = len(set(imp_tok_sent).intersection(set(['no', 'not'])))
    if num_no_term >= 1:
        can_scrape = 'no'
        sent = ' '.join(imp_tok_sent)
        msg = ('You cannot scrape text from the website. Here are more '
               'details from the url: "' + sent + '"')
    else:
        can_scrape = 'not sure'
        msg = ('Please check the terms as they are complicated to '
               'understand by our system')
    return msg
def start_gui():
window = tkinter.Tk()
window.title('Check Web Scraping Terms Tool')
window.geometry('400x100')
label_tag = Label(window, text = 'URL')
label_tag.pack(side = LEFT)
input_button = Entry(window, width = 70, bd = 5)
input_button.pack(expand = True, side = RIGHT)
def tk_store():
global url
url = input_button.get()
from tkinter import messagebox
msgbox = messagebox.showinfo( 'Check Web Scraping Terms Tool',\
'Please wait, while we analyze the url entered')
def tk_close_window ():
window.destroy()
check_button = Button(window, text = 'Check',\
command = lambda:[tk_store(), tk_close_window()])
check_button.place(x = 175,y = 70)
window.mainloop()
def close_gui(final_msg):
window = tkinter.Tk()
window.title('Check Web Scraping Terms Tool')
var = StringVar()
msg_label = Message(window, textvariable = var, relief = RAISED )
var.set(final_msg)
msg_label.pack()
    # schedule the window to close itself before entering the event loop
    window.after(30000, lambda: window.destroy())
    window.mainloop()
start_gui()
links = check_for_data_tags(url)
text = urls2text(links)
final_msg = text2binary_for_scrape(text)
close_gui(final_msg)
``` |
{
"source": "Aakash310/A-Hackers-AI-Voice-Assistant",
"score": 3
} |
#### File: wakeword/scripts/create_wakeword_jsons.py
```python
import os
import argparse
import json
import random
def main(args):
zeros = os.listdir(args.zero_label_dir)
ones = os.listdir(args.one_label_dir)
percent = args.percent
data = []
for z in zeros:
data.append({
"key": os.path.join(args.zero_label_dir, z),
"label": 0
})
for o in ones:
data.append({
"key": os.path.join(args.one_label_dir, o),
"label": 1
})
random.shuffle(data)
    with open(args.save_json_path + "/" + "train.json", "w") as f:
d = len(data)
i=0
while(i<d-d/percent):
r=data[i-1]
line = json.dumps(r)
f.write(line + "\n")
i = i+1
    with open(args.save_json_path + "/" + "test.json", "w") as f:
d = len(data)
i=int(d-d/percent)
while(i<d):
r=data[i-1]
line = json.dumps(r)
f.write(line + "\n")
i = i+1
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="""
Utility script to create training json file for wakeword.
    There should be two directories: one that has all of the 0 labels
    and one with all the 1 labels.
"""
)
parser.add_argument('--zero_label_dir', type=str, default=None, required=True,
help='directory of clips with zero labels')
parser.add_argument('--one_label_dir', type=str, default=None, required=True,
help='directory of clips with one labels')
parser.add_argument('--save_json_path', type=str, default=None, required=True,
help='path to save json file')
parser.add_argument('--percent', type=int, default=10, required=False,
help='percent of clips put into test.json instead of train.json')
args = parser.parse_args()
main(args)
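# Hedged usage sketch (not from the original repo; directory paths are illustrative):
#   python create_wakeword_jsons.py --zero_label_dir data/0 --one_label_dir data/1 \
#       --save_json_path data/jsons --percent 10
# With percent=10, roughly one tenth of the shuffled clips land in test.json.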
``` |
{
"source": "aakash4525/py_link_preview",
"score": 3
} |
#### File: py_link_preview/link_preview/link_preview.py
```python
import urllib.request as req
import re
def generate_dict(url):
'''
returns dictionary containing elements of link_preview:
dict_keys :
'title' : '',
'description': '',
'image': '',
'website': ''
    If an exception occurs, the underlying urllib.request exception is re-raised.
'''
return_dict = {}
try:
html = req.urlopen(url).read().decode('utf-8')
meta_elems = re.findall('<[\s]*meta[^<>]+og:(?:title|image|description)(?!:)[^<>]+>', html)
og_map = map(return_og, meta_elems)
og_dict = dict(list(og_map))
# title
try:
return_dict['title'] = og_dict['og.title']
except KeyError:
return_dict['title'] = find_title(html)
# description
try:
return_dict['description'] = og_dict['og.description']
except KeyError:
return_dict['description'] = find_meta_desc(html)
# website
return_dict['website'] = find_host_website(url)
# Image
try:
return_dict['image'] = og_dict['og.image']
except KeyError:
image_path = find_image(html)
if 'http' not in image_path:
image_path = 'http://' + return_dict['website'] + image_path
return_dict['image'] = image_path
return return_dict
except Exception as e:
        # re-raise the original exception
raise e
def return_og(elem):
'''
returns content of og_elements
'''
content = re.findall('content[\s]*=[\s]*"[^<>"]+"', elem)[0]
p = re.findall('"[^<>]+"', content)[0][1:-1]
if 'og:title' in elem:
return ("og.title", p)
elif 'og:image' in elem and 'og:image:' not in elem:
return ("og.image", p)
elif 'og:description' in elem:
return ("og.description", p)
def find_title(html):
'''
returns the <title> of html
'''
try:
title_elem = re.findall('<[\s]*title[\s]*>[^<>]+<[\s]*/[\s]*title[\s]*>', html)[0]
title = re.findall('>[^<>]+<', title_elem)[0][1:-1]
except:
title = ''
return title
def find_meta_desc(html):
'''
returns the description (<meta name="description") of html
'''
try:
meta_elem = re.findall('<[\s]*meta[^<>]+name[\s]*=[\s]*"[\s]*description[\s]*"[^<>]*>', html)[0]
content = re.findall('content[\s]*=[\s]*"[^<>"]+"', meta_elem)[0]
description = re.findall('"[^<>]+"', content)[0][1:-1]
except:
description = ''
return description
def find_image(html):
'''
returns the favicon of html
'''
try:
favicon_elem = re.findall('<[\s]*link[^<>]+rel[\s]*=[\s]*"[\s]*shortcut icon[\s]*"[^<>]*>', html)[0]
href = re.findall('href[\s]*=[\s]*"[^<>"]+"', favicon_elem)[0]
image = re.findall('"[^<>]+"', href)[0][1:-1]
except:
image = ''
return image
def find_host_website(url):
'''
returns host website from the url
'''
return list(filter(lambda x: '.' in x, url.split('/')))[0]
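# Hedged usage sketch (illustrative URL; requires network access):
#   preview = generate_dict("https://example.com/some-article")
#   print(preview['title'], preview['description'], preview['website'], preview['image'])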
``` |
{
"source": "Aakasha01Agarwal/Python",
"score": 5
} |
#### File: Python/conversions/angle_conversions.py
```python
def degree_to_radian(degree):
'''
will take an integer degree and convert it to radians
>>> degree_to_radian(180)
3.14
>>> degree_to_radian(360)
6.28
    >>> degree_to_radian('a')
    Traceback (most recent call last):
        ...
    TypeError: can't multiply sequence by non-int of type 'float'
'''
return (3.14/180)*degree
def radian_to_degree(radian):
'''
will take an integer radian angle and return the converted into degree form
>>>radian_to_degree(3.14)
180.0
>>>radian_to_degree(6.28)
360
    >>> radian_to_degree('a')
    Traceback (most recent call last):
        ...
    TypeError: can't multiply sequence by non-int of type 'float'
'''
return(180/3.14)*radian
``` |
{
"source": "aakashaarya/cefkivy",
"score": 2
} |
#### File: cefkivy/tests/fps.py
```python
from kivy.app import App
from kivy.garden.cefpython import CEFBrowser
CEFBrowser.update_flags({'enable-fps': True})
if __name__ == '__main__':
class SimpleBrowserApp(App):
def build(self):
return CEFBrowser(url="https://www.vsynctester.com")
SimpleBrowserApp().run()
``` |
{
"source": "aakashananth/covid-prediction",
"score": 3
} |
#### File: covid-prediction/api/app.py
```python
import os
import pickle
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.preprocessing import image
import numpy as np
from flask import Flask, request, render_template
from werkzeug.utils import secure_filename
app = Flask(__name__)
print(" * Loading models...")
cnn_model = load_model("models/cnn_model.h5")
feature_selector = Sequential()
for layer in cnn_model.layers[:-2]:
feature_selector.add(layer)
scaler = pickle.load(open('models/scaler.pkl', 'rb'))
vcf_clf = pickle.load(open('models/vcf_clf.sav', 'rb'))
print(" * Loaded models")
@app.route('/', methods=['GET'])
def index():
return render_template('index.html')
@app.route('/predict', methods=['GET', 'POST'])
def upload():
if request.method == 'POST':
# Get the file from post request
f = request.files['file']
# Save the file to ./uploads
basepath = os.path.dirname(__file__)
file_path = os.path.join(
basepath, 'uploads', secure_filename(f.filename))
f.save(file_path)
img = image.load_img(file_path, target_size=(299,299,3))
img_arr = (np.expand_dims(image.img_to_array(img), axis=0))/255.0
fs = feature_selector.predict(img_arr)
X = scaler.transform(fs)
result = vcf_clf.predict_proba(X.reshape(1,-1))
normal, covid = result[0][0], result[0][1]
# Delete the file
os.remove(file_path)
return str(round(normal*100, 2))+","+str(round(covid*100, 2))
return None
if __name__ == '__main__':
app.run(debug=False)
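# Hedged usage sketch (illustrative file name; Flask serves on port 5000 by default):
#   curl -F "file=@sample_xray.png" http://127.0.0.1:5000/predict
# The response body is "<normal %>,<covid %>", e.g. "87.5,12.5".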
``` |
{
"source": "aakashbajaj/Youngun-Campaign-Tracking",
"score": 2
} |
#### File: apps/authentication/tasks.py
```python
import boto3
from django.conf import settings
from datetime import timedelta
from django.utils import timezone
from django_q.tasks import async_task, schedule
from django_q.models import Schedule
def send_otp_mail(email, otptkn):
msg = "Hey!\n\nThe OTP for your login on Youngun Campaign Tracker is {0}".format(
otptkn)
subj = "Login OTP for Youngun Portal"
opts = {'group': "otp-email"}
async_task('django.core.mail.send_mail', subj,
msg, "<EMAIL>", [email], q_options=opts)
def senf_otp_sms(mobile, otptkn):
client = boto3.client(
"sns",
aws_access_key_id=settings.AWS_ACCESS_KEY,
aws_secret_access_key=settings.AWS_SECRET,
region_name=""
)
client.publish(
PhoneNumber=mobile,
msg="Youngun Portal OTP: {0}".format(otptkn),
MessageAttributes={
'AWS.SNS.SMS.SenderID': {
'DataType': 'String',
'StringValue': settings.SENDER_ID
}
}
)
```
#### File: apps/authentication/views.py
```python
import random
import re
from datetime import datetime
import pytz
from rest_framework import status
from rest_framework.generics import RetrieveAPIView
from rest_framework.views import APIView
from rest_framework.permissions import IsAuthenticated, AllowAny
from rest_framework.response import Response
from .tasks import send_otp_mail
from .models import User
from .serializers import UserInfoSerializer
from .renderers import UserInfoJSONRenderer
from youngun.apps.core.models import MasterLogger
# Create your views here.
def generate_otp():
return random.randint(100000, 999999)
def generate_tempid():
return str(random.randint(1000, 9999))
class UserInfoRetrieveAPIView(RetrieveAPIView):
permission_classes = (IsAuthenticated, )
serializer_class = UserInfoSerializer
renderer_classes = (UserInfoJSONRenderer, )
def retrieve(self, request, *args, **kwargs):
serializer = self.serializer_class(request.user)
m_logger, created = MasterLogger.objects.get_or_create(
user=request.user)
if created:
m_logger.email = request.user.email
m_logger.login_cnt = m_logger.login_cnt + 1
dt_str = datetime.now(pytz.timezone('Asia/Kolkata')
).strftime("%Y-%m-%d %H:%M:%S")
m_logger.history_log = dt_str + "\n" + m_logger.history_log
m_logger.last_login = datetime.now(pytz.timezone('Asia/Kolkata'))
m_logger.save()
return Response(serializer.data, status=status.HTTP_200_OK)
class InitiateLogin(APIView):
permission_classes = (AllowAny, )
def post(self, request):
try:
email = request.data["email"].lower()
except KeyError:
return Response({"response": "Invalid Email"}, status.HTTP_400_BAD_REQUEST)
# verify email exists
try:
user_obj = User.objects.get(email=email)
except User.DoesNotExist:
return Response({"response": "No user found with provided email"}, status.HTTP_401_UNAUTHORIZED)
# generate temp id for user
# generate OTP
# authInProgress = true
tempid = generate_tempid()
tempotp = generate_otp()
if email == "<EMAIL>":
tempid = "1111"
tempotp = 836882
user_obj.tempid = tempid
user_obj.tempotp = tempotp
user_obj.authInProgress = True
user_obj.last_requested = datetime.now(pytz.timezone('Asia/Kolkata'))
# save instance
user_obj.save()
# send sms, mail
send_otp_mail(user_obj.email, tempotp)
# return tempid, masked email/mobile
masked_email = re.sub(r"([A-Za-z0-9])(.*)@([A-Za-z])(.*)\.(.*)$", lambda x: r"{}{}@{}{}.{}".format(
x.group(1), "*"*len(x.group(2)), x.group(3), "*"*len(x.group(4)), x.group(5)), user_obj.email)
masked_mobile = None
if not user_obj.mobile == "":
masked_mobile = user_obj.mobile[3:5] + \
"*******" + user_obj.mobile[-2:]
payload = {
"tempid": tempid,
"masked_email": masked_email,
"masked_mobile": masked_mobile
}
return Response(payload, status=status.HTTP_200_OK)
class VerifyLogin(APIView):
permission_classes = (AllowAny, )
def post(self, request):
try:
tempid = request.data["tempid"]
inpotp = request.data["inpotp"]
except KeyError:
return Response({"response": "Invalid Data"}, status.HTTP_400_BAD_REQUEST)
# get user obj from temp id and email
try:
user_obj = User.objects.get(tempid=tempid)
except User.DoesNotExist:
return Response({"response": "Not Allowed. Invalid Request"}, status.HTTP_400_BAD_REQUEST)
# verify if otp matches
if inpotp == user_obj.tempotp and user_obj.authInProgress == True:
# authInProgress = false
# destroy otp and tempid
user_obj.authInProgress = False
user_obj.tempid = None
user_obj.tempotp = None
user_obj.save()
else:
return Response({"response": "Incorrect OTP"}, status.HTTP_401_UNAUTHORIZED)
# return token
payload = {
"token": user_obj.token_string
}
return Response(payload, status=status.HTTP_200_OK)
```
#### File: apps/campaigns/tasks.py
```python
import requests
import re
from django_q.tasks import async_task, schedule
from django.db.models import Count, Sum
from youngun.apps.campaigns.models import Campaign
from youngun.apps.content.models import Post
from youngun.apps.content.models import InstagramStory, FacebookStory, TwitterStory
# def update_v2_active_camp_metrics():
# opts = {'group': 'update_v2_active_camp_metrics'}
# for camp in Campaign.objects.all():
# if camp.status == "active" and camp.campaign_module == "v2":
# async_task("youngun.apps.content.tasks.update_v2_active_camp_metrics",
# camp.pk, camp.name, q_options=opts)
def trigger_update_campaign_report_metrics():
opts = {'group': 'update_campaign_report_metrics'}
for camp in Campaign.objects.all():
if camp.status == "active" and camp.campaign_module == "v2":
async_task("youngun.apps.campaigns.tasks.update_campaign_report_metrics",
camp.pk, camp.name, q_options=opts)
def update_all_active_camp_engagement_data():
# opts = {'group': 'update_all_active_camp_metrics'}
for camp in Campaign.objects.all():
if camp.status == "active":
camp_reach = camp.posts.all().aggregate(Sum('post_reach'))[
'post_reach__sum'] + camp.posts.all().aggregate(Sum('total_views'))['total_views__sum']
camp_engagement = camp.posts.all().aggregate(
Sum('post_engagement'))['post_engagement__sum']
if camp.posts.filter(platform="in").count() > 0:
in_reach = camp.posts.filter(platform="in").aggregate(Sum('post_reach'))[
'post_reach__sum'] + camp.posts.filter(platform="in").aggregate(Sum('total_views'))['total_views__sum']
in_engagement = camp.posts.filter(platform="in").aggregate(
Sum('post_engagement'))['post_engagement__sum']
camp.in_engagement = in_engagement
camp.in_reach = in_reach
if camp.posts.filter(platform="tw").count() > 0:
tw_reach = camp.posts.filter(platform="tw").aggregate(Sum('post_reach'))[
'post_reach__sum'] + camp.posts.filter(platform="tw").aggregate(Sum('total_views'))['total_views__sum']
tw_engagement = camp.posts.filter(platform="tw").aggregate(
Sum('post_engagement'))['post_engagement__sum']
camp.tw_engagement = tw_engagement
camp.tw_reach = tw_reach
if camp.posts.filter(platform="fb").count() > 0:
fb_reach = camp.posts.filter(platform="fb").aggregate(Sum('post_reach'))[
'post_reach__sum'] + camp.posts.filter(platform="fb").aggregate(Sum('total_views'))['total_views__sum']
fb_engagement = camp.posts.filter(platform="fb").aggregate(
Sum('post_engagement'))['post_engagement__sum']
camp.fb_engagement = fb_engagement
camp.fb_reach = fb_reach
camp.total_post_engagement = camp_engagement
camp.total_campaign_reach = camp_reach
camp.num_posts = camp.posts.all().count()
camp.save()
def update_campaign_report_metrics(camp_pk, camp_name):
print(f"Processing {camp_name}")
camp = Campaign.objects.get(pk=camp_pk)
camp.num_posts = camp.posts.all().count()
engagement = 0
shares = 0
saves = 0
video_views = 0
reach = 0
for post in camp.posts.all():
engagement = engagement + post.post_engagement
shares = shares + post.post_shares
saves = saves + post.post_saves
video_views = video_views + post.total_views
reach = reach + post.post_reach
camp.post_engagement = engagement
camp.post_shares = shares
camp.post_saves = saves
camp.video_views = video_views
camp.post_reach = reach
camp.save()
return f"Success: {camp.name}"
def bulk_upload_csv(posts_list, campaign_id):
opts = {'group': "csv-bulk-post-upload"}
async_task('youngun.apps.campaigns.tasks.upload_posts_lists',
posts_list, campaign_id, q_options=opts)
def upload_posts_lists(posts_list, campaign_id):
cnt = 0
camp_to_add = Campaign.objects.get(id=campaign_id)
for post in posts_list:
try:
p_obj, created = Post.objects.get_or_create(
campaign=camp_to_add, url=post)
if created:
cnt = cnt + 1
if "facebook.com" in post:
p_obj.platform = "fb"
if "/video" in post:
p_obj.post_type = "video"
else:
p_obj.post_type = "post"
elif "instagram.com" in post:
p_obj.platform = "in"
p_obj.embed_code = ""
elif "twitter.com" in post:
p_obj.platform = "tw"
p_obj.embed_code = ""
p_obj.save()
except:
print("Failed to add " + post + " in " + str(camp_to_add.name))
return posts_list
# def update_live_cnts():
# for camp in Campaign.objects.all():
# if camp:
# camp.live_fb_posts = camp.get_facebook_posts.count()
# camp.live_in_posts = camp.get_instagram_posts.count()
# camp.live_tw_posts = camp.get_twitter_posts.count()
# camp.live_fb_stories = camp.get_facebook_stories.count()
# camp.live_in_stories = camp.get_instagram_stories.count()
# camp.live_tw_stories = camp.get_twitter_stories.count()
# camp.save()
def fetch_campaign_stories():
for camp in Campaign.objects.all():
if camp:
try:
reg_str = r'\["(https:\/\/lh3\.googleusercontent\.com\/[a-zA-Z0-9\-_]*)"'
resp = requests.get(camp.in_stories_google_photos_album_url)
resp_str = resp.text
matches = re.findall(reg_str, resp_str)
camp.get_instagram_stories.delete()
for photo_url in matches:
obj, new_create = InstagramStory.objects.get_or_create(
campaign=camp, url=photo_url)
resp = requests.get(camp.fb_stories_google_photos_album_url)
resp_str = resp.text
matches = re.findall(reg_str, resp_str)
camp.get_facebook_stories.delete()
for photo_url in matches:
obj, new_create = FacebookStory.objects.get_or_create(
campaign=camp, url=photo_url)
resp = requests.get(camp.tw_stories_google_photos_album_url)
resp_str = resp.text
matches = re.findall(reg_str, resp_str)
camp.get_twitter_stories.delete()
for photo_url in matches:
obj, new_create = TwitterStory.objects.get_or_create(
campaign=camp, url=photo_url)
except Exception as e:
print(str(e))
```
#### File: helpers/graphapi/iginsights.py
```python
import requests
from django.conf import settings
def get_ig_media_insights_data(media_id, media_type):
access_token = settings.INSTA_GRAPH_LL_TOKEN
metrics = ""
if media_type == "a":
metrics = "carousel_album_engagement,carousel_album_impressions,carousel_album_reach,carousel_album_saved,carousel_album_video_views"
elif media_type == "v":
metrics = "engagement,video_views,saved,impressions,reach"
elif media_type == "p":
metrics = "engagement,saved,impressions,reach"
params = {
"metric": metrics,
}
headers = {
"Authorization": f"Bearer {access_token}"
}
host_url = f"https://graph.facebook.com/v8.0/{media_id}/insights"
data = requests.get(host_url, params=params, headers=headers)
resp = data.json()
print(resp)
data = dict()
for metric in resp["data"]:
data[metric["name"]] = metric["values"][0]["value"]
if media_type == "a":
cleaned_data = dict()
for k in data:
if k.startswith("carousel_album_"):
cleaned_data[k[len("carousel_album_"):]] = data[k]
else:
cleaned_data[k] = data[k]
# del data[k]
data = cleaned_data
try:
body = data
return body
except Exception as e:
return {"error": str(e)}
```
#### File: content/helpers/tw_post.py
```python
import requests
import json
from datetime import datetime, timedelta
import pytz
import re
import dateutil.parser
from pprint import pprint
from django.conf import settings
class TwitterPostScraper:
def __init__(self, post_link, resp):
self.resp = resp
self.data = {"link": post_link}
def get_username(self):
return self.resp.get('includes').get('users')[0].get('username')
def get_profile_img_url(self):
return self.resp.get('includes').get('users')[0].get('profile_image_url')
def get_account_name(self):
return self.resp.get('includes').get('users')[0].get('name')
def get_timestamp(self):
dt = dateutil.parser.parse(self.resp.get('data')[0].get('created_at'))
return datetime.strftime(dt.astimezone(pytz.timezone("Asia/Kolkata")), "%Y-%m-%d %H:%M:%S")
def get_caption(self):
return self.resp['data'][0].get('text')
def get_likes(self):
return self.resp['data'][0].get('public_metrics').get('like_count')
def get_retweet(self):
return self.resp['data'][0].get('public_metrics').get('retweet_count')
def get_replies(self):
return self.resp['data'][0].get('public_metrics').get('reply_count')
def media_exists(self):
return self.resp.get('includes').get('media')
def get_media(self):
url = []
if self.resp.get('includes').get('media')[0].get('type') == 'video':
views = self.resp.get('includes').get('media')[0].get(
'public_metrics').get('view_count')
media_obj = {
'media_url': self.resp.get('includes').get('media')[0].get('preview_image_url'),
'media_key': self.resp.get('includes').get('media')[0].get('media_key'),
'is_video': True,
'view_count': views
}
url.append(media_obj)
else:
views = None
for i in self.resp.get('includes').get('media'):
media_obj = {
'media_url': i.get('url'),
'media_key': i.get('media_key'),
'is_video': False
}
url.append(media_obj)
return views, url
def get_data(self):
X = {
'username': self.get_username(),
'account_name': self.get_account_name(),
'profile_image_url': self.get_profile_img_url(),
'timestamp': self.get_timestamp(),
'likes': self.get_likes(),
'caption': self.get_caption(),
'comments': self.get_replies(),
'retweets': self.get_retweet(),
'total_views': self.get_media()[0] if self.media_exists() else None,
'urls': self.get_media()[1] if self.media_exists() else [],
}
self.data = {**self.data, **X}
return self.data
def tw_headers_and_params():
headers = {
'Authorization': 'Bearer ' + settings.TWITTER_AUTH_TOKEN,
}
params = (
('expansions', 'author_id,attachments.media_keys'),
('tweet.fields', 'public_metrics,created_at'),
('user.fields', 'username,verified,profile_image_url'),
('media.fields', 'public_metrics,preview_image_url,url'),
)
return headers, params
def get_tw_post_details(post_link):
num = int(post_link.strip('/').split('/')[-1])
try:
resp = requests.get(f'https://api.twitter.com/2/tweets?ids={num}', headers=tw_headers_and_params()[
0], params=tw_headers_and_params()[1]).json()
# pprint(resp)
tw = TwitterPostScraper(post_link, resp)
data = tw.get_data()
return {"error": None, "result": data}
except Exception as e:
# print(e)
return {"error": "An error occurred!!", "result": None, "link": post_link, "msg": str(e)}
```
#### File: content/mixins/campaignlistfilter.py
```python
from django.contrib.admin import SimpleListFilter
from youngun.apps.campaigns.models import Campaign
class CampaignNameFilter(SimpleListFilter):
title = 'Campaign'
parameter_name = 'campaign__name'
def lookups(self, request, model_admin):
qs = Campaign.objects.all()
if not request.user.is_superuser or request.user.groups.filter(name="MasterAdmin").exists():
qs = qs.filter(staff_profiles=request.user.profile)
output_list = [(camp.id, camp.name) for camp in qs]
return output_list
def queryset(self, request, queryset):
if self.value() is None:
return queryset
return queryset.filter(campaign__id=self.value())
```
#### File: apps/core/models.py
```python
from django.db import models
from youngun.apps.authentication.models import User
from django.utils.translation import gettext as _
# Create your models here.
class MasterLogger(models.Model):
user = models.OneToOneField("authentication.User", verbose_name=_(
"User"), on_delete=models.CASCADE, related_name="masterlogger")
email = models.EmailField(_("User Email"), max_length=254)
# full_name = models.CharField(
# _("User Name"), max_length=255, default="", blank=True)
login_cnt = models.IntegerField(_("Total Login Count"), default=0)
last_login = models.DateTimeField(
_("Last Login"), auto_now=False, auto_now_add=False, blank=True, null=True)
history_log = models.TextField(_("Login History"))
def __str__(self):
return self.email
# class LoginLogger(models.Model):
# master_user = models.ForeignKey(MasterLogger, verbose_name=_(
# "User Master Logger"), on_delete=models.DO_NOTHING)
# login_time = models.DateTimeField(_("Login Time"), auto_now_add=True)
# class CampaignLogger(models.Model):
```
#### File: apps/usermanager/forms.py
```python
from django import forms
from django.contrib import admin
from youngun.apps.authentication.models import User
from django.contrib.auth.forms import ReadOnlyPasswordHashField
# class UserCreationForm(forms.ModelForm):
# password1 = forms.CharField(label='Password', widget=forms.PasswordInput)
# password2 = forms.CharField(
# label='Password confirmation', widget=forms.PasswordInput)
# class Meta:
# model = User
# fields = ('email', 'password1', 'password2',
# 'is_active', 'is_superuser')
# def clean_password2(self):
# # Check that the two password entries match
# password1 = self.cleaned_data.get("password1")
# password2 = self.cleaned_data.get("password2")
# if password1 and password2 and password1 != password2:
# raise forms.ValidationError("Passwords don't match")
# return password2
# def save(self, commit=True):
# # Save the provided password in hashed format
# user = super().save(commit=False)
# user.set_password(self.cleaned_data["<PASSWORD>"])
# if commit:
# user.save()
# return user
# class UserChangeForm(forms.ModelForm):
# password = forms.CharField(
# label='Password (only edit if you want to change)', widget=forms.PasswordInput, required=False)
# class Meta:
# model = User
# fields = ('email', 'password',
# 'is_active', 'is_staff', 'is_superuser', 'groups')
# def clean_password(self):
# print(self.initial)
# return self.cleaned_data.get("password")
# def save(self, commit=True):
# # Save the provided password in hashed format
# user = super().save(commit=False)
# if not self.cleaned_data["password"] == "":
# user.set_password(self.cleaned_data["password"])
# if commit:
# user.save()
# return user
class StaffUserCreationForm(forms.ModelForm):
password1 = forms.CharField(label='Password', widget=forms.PasswordInput)
password2 = forms.CharField(
label='Password confirmation', widget=forms.PasswordInput)
class Meta:
model = User
fields = ('email', 'password1', 'password2',
'is_active', 'is_superuser')
def clean_password2(self):
# Check that the two password entries match
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError("Passwords don't match")
return password2
def save(self, commit=True):
# Save the provided password in hashed format
user = super().save(commit=False)
user.set_password(self.cleaned_data["password1"])
user.is_staff = True
if commit:
user.save()
return user
class StaffUserChangeForm(forms.ModelForm):
password = forms.CharField(
label='Password (only edit if you want to change)', widget=forms.PasswordInput, required=False)
class Meta:
model = User
fields = ('email', 'password',
'is_active', 'is_staff', 'is_superuser', 'groups')
def clean_password(self):
print(self.initial)
return self.cleaned_data.get("password")
def save(self, commit=True):
# Save the provided password in hashed format
user = super().save(commit=False)
if not self.cleaned_data["password"] == "":
user.set_password(self.cleaned_data["password"])
user.is_staff = True
if commit:
user.save()
return user
class ClientUserCreationForm(forms.ModelForm):
password1 = forms.CharField(label='Password', widget=forms.PasswordInput)
password2 = forms.CharField(
label='Password confirmation', widget=forms.PasswordInput)
class Meta:
model = User
fields = ('email', 'password1', 'password2',
'is_active', 'is_superuser')
def clean_password2(self):
# Check that the two password entries match
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError("Passwords don't match")
return password2
def save(self, commit=True):
# Save the provided password in hashed format
user = super().save(commit=False)
user.set_password(self.cleaned_data["password1"])
user.is_staff = False
if commit:
user.save()
return user
class ClientUserChangeForm(forms.ModelForm):
password = forms.CharField(
label='Password (only edit if you want to change)', widget=forms.PasswordInput, required=False)
class Meta:
model = User
fields = ('email', 'password',
'is_active', 'is_staff', 'is_superuser', 'groups')
def clean_password(self):
print(self.initial)
return self.cleaned_data.get("password")
def save(self, commit=True):
# Save the provided password in hashed format
user = super().save(commit=False)
if not self.cleaned_data["password"] == "":
user.set_password(self.cleaned_data["password"])
user.is_staff = False
if commit:
user.save()
return user
``` |
{
"source": "aakashbilly/python3-ransomeware",
"score": 3
} |
#### File: python3-ransomeware/ransomeeewareee/dec.py
```python
from cryptography.fernet import Fernet
from loadkey import load_key
import os
def decrypt(filename,key):
f = Fernet(key)
with open(filename,"rb") as content:
content = content.read()
decrypted_data = f.decrypt(content)
with open(filename+".dec","wb") as content:
content.write(decrypted_data)
# path = "/home/pi/Documents/ransomware/text.txt.enc"
# key = load_key()
# decrypt(path,key)
```
#### File: python3-ransomeware/ransomeeewareee/loadkey.py
```python
def load_key():
with open("key.key","rb") as key:
key = key.read()
return key
``` |
{
"source": "Aakash-Devadiga/Machine-Learning-Model-Scoring-and-Monitoring",
"score": 3
} |
#### File: Machine-Learning-Model-Scoring-and-Monitoring/src/ingestion.py
```python
import os
import sys
import logging
import pandas as pd
from datetime import datetime
from config import DATA_PATH, INPUT_FOLDER_PATH
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
def merge_multiple_dataframe():
"""
    Function for data ingestion. Checks for datasets, combines them,
    drops duplicates, and writes the metadata to ingestedfiles.txt and
    the ingested data to finaldata.csv
"""
df = pd.DataFrame()
file_names = []
logging.info(f"Reading files from {INPUT_FOLDER_PATH}")
for file in os.listdir(INPUT_FOLDER_PATH):
file_path = os.path.join(INPUT_FOLDER_PATH, file)
df_tmp = pd.read_csv(file_path)
file = os.path.join(*file_path.split(os.path.sep)[-3:])
file_names.append(file)
df = df.append(df_tmp, ignore_index=True)
logging.info("Dropping duplicates")
df = df.drop_duplicates().reset_index(drop=1)
logging.info("Saving ingested metadata")
with open(os.path.join(DATA_PATH, 'ingestedfiles.txt'), "w") as file:
file.write(
f"Ingestion date: {datetime.now().strftime('%d/%m/%Y %H:%M:%S')}\n")
file.write("\n".join(file_names))
logging.info("Saving ingested data")
df.to_csv(os.path.join(DATA_PATH, 'finaldata.csv'), index=False)
if __name__ == '__main__':
logging.info("Running ingestion.py")
merge_multiple_dataframe()
```
#### File: Machine-Learning-Model-Scoring-and-Monitoring/src/training.py
```python
import os
import sys
import pickle
import logging
import pandas as pd
from sklearn.linear_model import LogisticRegression
from config import MODEL_PATH, DATA_PATH
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
def train_model():
"""
    Train a logistic regression model on the ingested data
    and save the trained model
"""
logging.info("Loading and preparing finaldata.csv")
data_df = pd.read_csv(os.path.join(DATA_PATH, 'finaldata.csv'))
y_df = data_df.pop('exited')
X_df = data_df.drop(['corporation'], axis=1)
model = LogisticRegression(
C=1.0,
class_weight=None,
dual=False,
fit_intercept=True,
intercept_scaling=1,
l1_ratio=None,
max_iter=100,
multi_class='auto',
n_jobs=None,
penalty='l2',
random_state=0,
solver='liblinear',
tol=0.0001,
verbose=0,
warm_start=False)
logging.info("Training model")
model.fit(X_df, y_df)
logging.info("Saving trained model")
pickle.dump(
model,
open(
os.path.join(
MODEL_PATH,
'trainedmodel.pkl'),
'wb'))
if __name__ == '__main__':
logging.info("Running training.py")
train_model()
``` |
{
"source": "Aakashdgupta/Python-How-To",
"score": 4
} |
#### File: Algorithms/Searching/binarySearch_ittr.py
```python
def binarySearch(array,leftIndex,rightIndex,toFind):
# a =[1,2,3,4,5,6,7,8]
# print(binarySearch(a,0,7,5))
# 0 < 7
# mid = 0 + (7 -0)//2 = 3
# a[mid] = 4 < 5
# left = 4
#a = [5,6,7,8]
# repeat
while leftIndex <= rightIndex:
midIndex = leftIndex + (rightIndex - leftIndex)//2
if array[midIndex]==toFind:
return toFind
elif array[midIndex]>toFind:
rightIndex = midIndex - 1
else:
leftIndex = midIndex + 1
return -1
a =[1,2,3,4,5,6,7,8]
print(binarySearch(a,0,7,5))
```
#### File: Datastructures/Tree/BST.py
```python
class node:
def __init__(self, key, name="no name yet"):
self.key = key
self.name = name
self.left = None
self.right = None
# bst class by default root node is set to none
class BST:
def __init__(self):
self.root = None
# method for in order traversal
# gives visit nodes from smaller to greater
def inorderTraversal(self, currentNode):
# method takes start node
# to start traversal currentNode
if currentNode != None:
# if current node is not none
# call inorderTraversal to its left child
# print currentNodes key
# call inorderTraversal to its right child
self.inorderTraversal(currentNode.left)
print(currentNode.key)
self.inorderTraversal(currentNode.right)
# similar to inorder traversal
# just we print key before calling method recursiveliy
# to both of its child
def preorderTraversal(self, currentNode):
if currentNode != None:
print(currentNode.key)
self.preorderTraversal(currentNode.left)
self.preorderTraversal(currentNode.right)
# similar to in order traversal just
# we print key at last after calling
# method recursively to both of its child
def postorderTraversal(self, currentNode):
if currentNode != None:
self.postorderTraversal(currentNode.left)
self.postorderTraversal(currentNode.right)
print(currentNode.key)
# method to add node
def addNodes(self, key):
# create new node using key passed
n = node(key)
# if root is none which means tree is empty
# set n as root
if self.root == None:
self.root = n
# else traverse tree to find appropriate position for
# node
else:
# at start currentNode = root
currentNode = self.root
# while current node is true
while currentNode:
# set parent to current node
parent = currentNode
# and if key of node to be added is smaller
# than current node
# set currentnode to its left child
# if current node is none
# set parents left to newnode
# and return
if key < currentNode.key:
currentNode = currentNode.left
if currentNode == None:
parent.left = n
return
# and if key of node to be added is greater
# than current node
# set currentnode to its right child
# if current node is none
# set parents right to newnode
# and return
else:
currentNode = currentNode.right
if currentNode == None:
parent.right = n
return
def remove(self, key):
currentNode = self.root
parent = self.root
isAtLeft = True
# Finding Phase
        while currentNode.key != key:
            parent = currentNode
            if key < currentNode.key:
                isAtLeft = True
                currentNode = currentNode.left
            else:
                isAtLeft = False
                currentNode = currentNode.right
            # if we walked off the tree, the key is not present
            if currentNode == None:
                return False
# Removing Phase behind
# If item to be removed found
# and have no children
# ( Leaf Format )
if currentNode.left == None and currentNode.right == None:
# if its root node
if currentNode == self.root:
# delete root
self.root = None
# if found node is not root
# and its a left child
elif isAtLeft:
# delete found node
parent.left = None
else:
# if its not a left child
# its a right child
# delete the node
parent.right = None
# if found node has a left child
# but no right child
elif currentNode.right == None:
            # if it's root node
if currentNode == self.root:
# delete it and replace
# it with its left child
self.root = currentNode.left
# else if it's a left child
elif isAtLeft:
parent.left = currentNode.left
# else it's a right child
else:
parent.right = currentNode.left
# else if it has a right child
# but no left child
        elif currentNode.left == None:
            # if it's root node
            if currentNode == self.root:
                # delete it and replace
                # it with its right child
                self.root = currentNode.right
            # else if it's a left child
            elif isAtLeft:
                parent.left = currentNode.right
            # else it's a right child
            else:
                parent.right = currentNode.right
# else have both left and right child
else:
replacement = self.getReplacement(currentNode)
# if it's root
if currentNode == self.root:
self.root = replacement
# if it's a left child
elif isAtLeft:
parent.left = replacement
else:
parent.right = replacement
# finally set replacements
# left child
# to current left child
replacement.left = currentNode.left
return True
def getReplacement(self, toReplace):
replacementParent = toReplace
replacement = toReplace
cur = toReplace.right
while cur != None:
replacementParent = replacement
replacement = cur
cur = cur.left
if replacement != toReplace.right:
replacementParent.left = replacement.right
replacement.right = toReplace.right
return replacement
bst = BST()
bst.addNodes(1)
bst.addNodes(5)
bst.addNodes(3)
bst.addNodes(7)
bst.remove(5)
bst.addNodes(10)
bst.addNodes(4)
bst.inorderTraversal(bst.root)
print()
print()
bst.preorderTraversal(bst.root)
print()
print()
bst.postorderTraversal(bst.root)
``` |
{
"source": "Aakash-Ez/indicLP",
"score": 2
} |
#### File: indicLP/indicLP/preprocessing.py
```python
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
from torchtext.data.functional import sentencepiece_tokenizer
from torchtext.data.functional import load_sp_model
from gensim.models import Word2Vec
import snowballstemmer
import pickle
import os.path
import re
punctuation = """[,.;@#?!&$।|()]+\ *"""
package_directory = os.path.dirname(os.path.abspath(__file__))
supported_lang = ["hi","ta"]
langDict = {"hi":"hindi","ta":"tamil"}
class TextNormalizer:
def __init__(self, lang = "hi"):
assert lang in supported_lang, "Language is not supported. Currently we support the following: "+str(supported_lang)
self.lang = lang
self.stemmer = snowballstemmer.stemmer(langDict[lang])
def tokenizer(self, inp_list, stem = True):
inp = [re.sub(punctuation, " ", i) for i in inp_list]
# Fetch the model from assets
file = os.path.join(package_directory, "assets","tokenizer_models",self.lang,"token.model")
sp_model = load_sp_model(file)
sp_tokens_generator = sentencepiece_tokenizer(sp_model)
tokens = list(sp_tokens_generator(inp))
if not stem:
return tokens
out = []
for i in tokens:
out.append(self.stemmer.stemWords(i))
return out
def stem(self, word):
assert type(word) == str, "ERROR: Expected a "+str(str)+" but received a "+str(type(word))
return self.stemmer.stemWord(word)
def remove_stop_words(self,wordList):
file = open(os.path.join(package_directory, "assets","stopwords",self.lang,"stopwords.pkl"),'rb')
stopWords = pickle.load(file)
file.close()
out = [i for i in wordList if i not in stopWords]
return out
class Embedding:
def __init__(self, lang = "hi"):
assert lang in supported_lang, "Language is not supported. Currently we support the following: "+str(supported_lang)
self.lang = lang
self.file_path = os.path.join(package_directory, "assets","embed_models","word2vec."+lang+".model")
self.model = Word2Vec.load(self.file_path)
self.stemmer = snowballstemmer.stemmer(langDict[lang])
def get_vector(self, word):
assert type(word) == str, "ERROR: Expected a "+str(str)+" but received a "+str(type(word))
word = word.replace("▁","")
try:
return self.model.wv[word]
except KeyError:
try:
return self.model.wv[self.stemmer.stemWord(word)]
except:
raise KeyError("The given word is not in vocabulary.")
def get_closest(self, word, n = 10):
assert type(word) == str, "ERROR: Expected a "+str(str)+" but received a "+str(type(word))
assert type(n) == int, "ERROR: Expected a "+str(int)+" but received a "+str(type(n))
word = word.replace("▁","")
try:
return self.model.wv.most_similar(word, topn=n)
except KeyError:
try:
return self.model.wv.most_similar(self.stemmer.stemWord(word), topn=n)
except:
raise KeyError("The given word is not in vocabulary.")
``` |
{
"source": "aakashgandham-nucleussoftware/ansible-lint",
"score": 3
} |
#### File: lib/ansiblelint/errors.py
```python
class Match(object):
"""Rule violation detected during linting."""
def __init__(self, linenumber, line, filename, rule, message=None):
"""Initialize a Match instance."""
self.linenumber = linenumber
self.line = line
self.filename = filename
self.rule = rule
self.message = message or rule.shortdesc
def __repr__(self):
"""Return a Match instance representation."""
formatstr = u"[{0}] ({1}) matched {2}:{3} {4}"
return formatstr.format(self.rule.id, self.message,
self.filename, self.linenumber, self.line)
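# Hedged illustration (assumes `some_rule` exposes .id and .shortdesc as lint rules do):
#   match = Match(14, "- command: echo hi", "playbook.yml", some_rule)
#   repr(match)  # -> "[<rule id>] (<rule shortdesc>) matched playbook.yml:14 - command: echo hi"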
```
#### File: lib/ansiblelint/utils.py
```python
from collections import OrderedDict
import glob
import importlib
import logging
import os
from pathlib import Path
import pprint
import subprocess
import yaml
from yaml.composer import Composer
from yaml.representer import RepresenterError
from ansible import constants
from ansible.errors import AnsibleError
from ansible.errors import AnsibleParserError
from ansible.parsing.dataloader import DataLoader
from ansible.parsing.mod_args import ModuleArgsParser
from ansible.parsing.splitter import split_args
from ansible.parsing.yaml.constructor import AnsibleConstructor
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.parsing.yaml.objects import AnsibleSequence
from ansible.plugins.loader import module_loader
from ansible.template import Templar
# ansible-lint doesn't need/want to know about encrypted secrets, so we pass a
# string as the password to enable such yaml files to be opened and parsed
# successfully.
DEFAULT_VAULT_PASSWORD = 'x'
PLAYBOOK_DIR = os.environ.get('ANSIBLE_PLAYBOOK_DIR', None)
INVALID_CONFIG_RC = 2
ANSIBLE_FAILURE_RC = 3
_logger = logging.getLogger(__name__)
def initialize_logger(level=0):
"""Set up the global logging level based on the verbosity number."""
VERBOSITY_MAP = {
0: logging.NOTSET,
1: logging.INFO,
2: logging.DEBUG
}
handler = logging.StreamHandler()
formatter = logging.Formatter('%(levelname)-8s %(message)s')
handler.setFormatter(formatter)
logger = logging.getLogger(__package__)
logger.addHandler(handler)
# Unknown logging level is treated as DEBUG
logging_level = VERBOSITY_MAP.get(level, logging.DEBUG)
logger.setLevel(logging_level)
# Use module-level _logger instance to validate it
_logger.debug("Logging initialized to level %s", logging_level)
def parse_yaml_from_file(filepath):
dl = DataLoader()
if hasattr(dl, 'set_vault_password'):
dl.set_vault_password(DEFAULT_VAULT_PASSWORD)
return dl.load_from_file(filepath)
def path_dwim(basedir, given):
dl = DataLoader()
dl.set_basedir(basedir)
return dl.path_dwim(given)
def ansible_template(basedir, varname, templatevars, **kwargs):
dl = DataLoader()
dl.set_basedir(basedir)
templar = Templar(dl, variables=templatevars)
return templar.template(varname, **kwargs)
LINE_NUMBER_KEY = '__line__'
FILENAME_KEY = '__file__'
VALID_KEYS = [
'name', 'action', 'when', 'async', 'poll', 'notify',
'first_available_file', 'include', 'include_tasks', 'import_tasks', 'import_playbook',
'tags', 'register', 'ignore_errors', 'delegate_to',
'local_action', 'transport', 'remote_user', 'sudo',
'sudo_user', 'sudo_pass', 'when', 'connection', 'environment', 'args', 'always_run',
'any_errors_fatal', 'changed_when', 'failed_when', 'check_mode', 'delay',
'retries', 'until', 'su', 'su_user', 'su_pass', 'no_log', 'run_once',
'become', 'become_user', 'become_method', FILENAME_KEY,
]
BLOCK_NAME_TO_ACTION_TYPE_MAP = {
'tasks': 'task',
'handlers': 'handler',
'pre_tasks': 'task',
'post_tasks': 'task',
'block': 'meta',
'rescue': 'meta',
'always': 'meta',
}
def load_plugins(directory):
result = []
for pluginfile in glob.glob(os.path.join(directory, '[A-Za-z]*.py')):
pluginname = os.path.basename(pluginfile.replace('.py', ''))
spec = importlib.util.spec_from_file_location(pluginname, pluginfile)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
obj = getattr(module, pluginname)()
result.append(obj)
return result
def tokenize(line):
tokens = line.lstrip().split(" ")
if tokens[0] == '-':
tokens = tokens[1:]
if tokens[0] == 'action:' or tokens[0] == 'local_action:':
tokens = tokens[1:]
command = tokens[0].replace(":", "")
args = list()
kwargs = dict()
nonkvfound = False
for arg in tokens[1:]:
if "=" in arg and not nonkvfound:
kv = arg.split("=", 1)
kwargs[kv[0]] = kv[1]
else:
nonkvfound = True
args.append(arg)
return (command, args, kwargs)
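# Hedged illustration of the tokenizer (example input is not from the original source):
#   tokenize("yum: name=httpd state=present")
#   # -> ("yum", [], {"name": "httpd", "state": "present"})
# key=value pairs are collected into kwargs only until the first bare argument is seen.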
def _playbook_items(pb_data):
if isinstance(pb_data, dict):
return pb_data.items()
elif not pb_data:
return []
else:
return [item for play in pb_data for item in play.items()]
def find_children(playbook, playbook_dir):
if not os.path.exists(playbook[0]):
return []
if playbook[1] == 'role':
playbook_ds = {'roles': [{'role': playbook[0]}]}
else:
try:
playbook_ds = parse_yaml_from_file(playbook[0])
except AnsibleError as e:
raise SystemExit(str(e))
results = []
basedir = os.path.dirname(playbook[0])
items = _playbook_items(playbook_ds)
for item in items:
for child in play_children(basedir, item, playbook[1], playbook_dir):
if "$" in child['path'] or "{{" in child['path']:
continue
valid_tokens = list()
for token in split_args(child['path']):
if '=' in token:
break
valid_tokens.append(token)
path = ' '.join(valid_tokens)
results.append({
'path': path_dwim(basedir, path),
'type': child['type']
})
return results
def template(basedir, value, vars, fail_on_undefined=False, **kwargs):
try:
value = ansible_template(os.path.abspath(basedir), value, vars,
**dict(kwargs, fail_on_undefined=fail_on_undefined))
# Hack to skip the following exception when using to_json filter on a variable.
# I guess the filter doesn't like empty vars...
except (AnsibleError, ValueError, RepresenterError):
# templating failed, so just keep value as is.
pass
return value
def play_children(basedir, item, parent_type, playbook_dir):
delegate_map = {
'tasks': _taskshandlers_children,
'pre_tasks': _taskshandlers_children,
'post_tasks': _taskshandlers_children,
'block': _taskshandlers_children,
'include': _include_children,
'import_playbook': _include_children,
'roles': _roles_children,
'dependencies': _roles_children,
'handlers': _taskshandlers_children,
'include_tasks': _include_children,
'import_tasks': _include_children,
}
(k, v) = item
play_library = os.path.join(os.path.abspath(basedir), 'library')
_load_library_if_exists(play_library)
if k in delegate_map:
if v:
v = template(os.path.abspath(basedir),
v,
dict(playbook_dir=PLAYBOOK_DIR or os.path.abspath(basedir)),
fail_on_undefined=False)
return delegate_map[k](basedir, k, v, parent_type)
return []
def _include_children(basedir, k, v, parent_type):
# handle include: filename.yml tags=blah
(command, args, kwargs) = tokenize("{0}: {1}".format(k, v))
result = path_dwim(basedir, args[0])
if not os.path.exists(result) and not basedir.endswith('tasks'):
result = path_dwim(os.path.join(basedir, '..', 'tasks'), v)
return [{'path': result, 'type': parent_type}]
def _taskshandlers_children(basedir, k, v, parent_type):
results = []
for th in v:
if 'include' in th:
append_children(th['include'], basedir, k, parent_type, results)
elif 'include_tasks' in th:
append_children(th['include_tasks'], basedir, k, parent_type, results)
elif 'import_playbook' in th:
append_children(th['import_playbook'], basedir, k, parent_type, results)
elif 'import_tasks' in th:
append_children(th['import_tasks'], basedir, k, parent_type, results)
elif 'include_role' in th or 'import_role' in th:
th = normalize_task_v2(th)
module = th['action']['__ansible_module__']
if "name" not in th['action']:
raise RuntimeError(
"Failed to find required 'name' key in %s" % module)
if not isinstance(th['action']["name"], str):
raise RuntimeError(
"Value assigned to 'name' key on '%s' is not a string." %
module)
results.extend(_roles_children(basedir, k, [th['action'].get("name")],
parent_type,
main=th['action'].get('tasks_from', 'main')))
elif 'block' in th:
results.extend(_taskshandlers_children(basedir, k, th['block'], parent_type))
if 'rescue' in th:
results.extend(_taskshandlers_children(basedir, k, th['rescue'], parent_type))
if 'always' in th:
results.extend(_taskshandlers_children(basedir, k, th['always'], parent_type))
return results
def append_children(taskhandler, basedir, k, parent_type, results):
# when taskshandlers_children is called for playbooks, the
# actual type of the included tasks is the section containing the
# include, e.g. tasks, pre_tasks, or handlers.
if parent_type == 'playbook':
playbook_section = k
else:
playbook_section = parent_type
results.append({
'path': path_dwim(basedir, taskhandler),
'type': playbook_section
})
def _roles_children(basedir, k, v, parent_type, main='main'):
results = []
for role in v:
if isinstance(role, dict):
if 'role' in role or 'name' in role:
if 'tags' not in role or 'skip_ansible_lint' not in role['tags']:
results.extend(_look_for_role_files(basedir,
role.get('role', role.get('name')),
main=main))
elif k != 'dependencies':
raise SystemExit('role dict {0} does not contain a "role" '
'or "name" key'.format(role))
else:
results.extend(_look_for_role_files(basedir, role, main=main))
return results
def _load_library_if_exists(path):
if os.path.exists(path):
module_loader.add_directory(path)
def _rolepath(basedir, role):
role_path = None
possible_paths = [
# if included from a playbook
path_dwim(basedir, os.path.join('roles', role)),
path_dwim(basedir, role),
# if included from roles/[role]/meta/main.yml
path_dwim(
basedir, os.path.join('..', '..', '..', 'roles', role)
),
path_dwim(basedir, os.path.join('..', '..', role)),
]
if constants.DEFAULT_ROLES_PATH:
search_locations = constants.DEFAULT_ROLES_PATH
if isinstance(search_locations, str):
search_locations = search_locations.split(os.pathsep)
for loc in search_locations:
loc = os.path.expanduser(loc)
possible_paths.append(path_dwim(loc, role))
possible_paths.append(path_dwim(basedir, ''))
for path_option in possible_paths:
if os.path.isdir(path_option):
role_path = path_option
break
if role_path:
_load_library_if_exists(os.path.join(role_path, 'library'))
return role_path
def _look_for_role_files(basedir, role, main='main'):
role_path = _rolepath(basedir, role)
if not role_path:
return []
results = []
for th in ['tasks', 'handlers', 'meta']:
current_path = os.path.join(role_path, th)
for dir, subdirs, files in os.walk(current_path):
for file in files:
file_ignorecase = file.lower()
if file_ignorecase.endswith(('.yml', '.yaml')):
thpath = os.path.join(dir, file)
results.append({'path': thpath, 'type': th})
return results
def rolename(filepath):
idx = filepath.find('roles/')
if idx < 0:
return ''
role = filepath[idx + 6:]
role = role[:role.find('/')]
return role
def _kv_to_dict(v):
(command, args, kwargs) = tokenize(v)
return (dict(__ansible_module__=command, __ansible_arguments__=args, **kwargs))
def normalize_task_v2(task):
"""Ensure tasks have an action key and strings are converted to python objects."""
result = dict()
mod_arg_parser = ModuleArgsParser(task)
try:
action, arguments, result['delegate_to'] = mod_arg_parser.parse()
except AnsibleParserError as e:
try:
task_info = "%s:%s" % (task[FILENAME_KEY], task[LINE_NUMBER_KEY])
del task[FILENAME_KEY]
del task[LINE_NUMBER_KEY]
except KeyError:
task_info = "Unknown"
pp = pprint.PrettyPrinter(indent=2)
task_pprint = pp.pformat(task)
_logger.critical("Couldn't parse task at %s (%s)\n%s", task_info, e.message, task_pprint)
raise SystemExit(ANSIBLE_FAILURE_RC)
# denormalize shell -> command conversion
if '_uses_shell' in arguments:
action = 'shell'
del(arguments['_uses_shell'])
for (k, v) in list(task.items()):
if k in ('action', 'local_action', 'args', 'delegate_to') or k == action:
# we don't want to re-assign these values, which were
# determined by the ModuleArgsParser() above
continue
else:
result[k] = v
result['action'] = dict(__ansible_module__=action)
if '_raw_params' in arguments:
result['action']['__ansible_arguments__'] = arguments['_raw_params'].split(' ')
del(arguments['_raw_params'])
else:
result['action']['__ansible_arguments__'] = list()
if 'argv' in arguments and not result['action']['__ansible_arguments__']:
result['action']['__ansible_arguments__'] = arguments['argv']
del(arguments['argv'])
result['action'].update(arguments)
return result
def normalize_task_v1(task):
result = dict()
for (k, v) in task.items():
if k in VALID_KEYS or k.startswith('with_'):
if k == 'local_action' or k == 'action':
if not isinstance(v, dict):
v = _kv_to_dict(v)
v['__ansible_arguments__'] = v.get('__ansible_arguments__', list())
result['action'] = v
else:
result[k] = v
else:
if isinstance(v, str):
v = _kv_to_dict(k + ' ' + v)
elif not v:
v = dict(__ansible_module__=k)
else:
if isinstance(v, dict):
v.update(dict(__ansible_module__=k))
else:
if k == '__line__':
# Keep the line number stored
result[k] = v
continue
else:
# Tasks that include playbooks (rather than task files)
# can get here
# https://github.com/ansible/ansible-lint/issues/138
raise RuntimeError("Was not expecting value %s of type %s for key %s\n"
"Task: %s. Check the syntax of your playbook using "
"ansible-playbook --syntax-check" %
(str(v), type(v), k, str(task)))
v['__ansible_arguments__'] = v.get('__ansible_arguments__', list())
result['action'] = v
if 'module' in result['action']:
# this happens when a task uses
# local_action:
# module: ec2
# etc...
result['action']['__ansible_module__'] = result['action']['module']
del(result['action']['module'])
if 'args' in result:
result['action'].update(result.get('args'))
del(result['args'])
return result
def normalize_task(task, filename):
ansible_action_type = task.get('__ansible_action_type__', 'task')
if '__ansible_action_type__' in task:
del(task['__ansible_action_type__'])
task = normalize_task_v2(task)
task[FILENAME_KEY] = filename
task['__ansible_action_type__'] = ansible_action_type
return task
def task_to_str(task):
name = task.get("name")
if name:
return name
action = task.get("action")
args = " ".join([u"{0}={1}".format(k, v) for (k, v) in action.items()
if k not in ["__ansible_module__", "__ansible_arguments__"]] +
action.get("__ansible_arguments__"))
return u"{0} {1}".format(action["__ansible_module__"], args)
def extract_from_list(blocks, candidates):
results = list()
for block in blocks:
for candidate in candidates:
if isinstance(block, dict) and candidate in block:
if isinstance(block[candidate], list):
results.extend(add_action_type(block[candidate], candidate))
elif block[candidate] is not None:
raise RuntimeError(
"Key '%s' defined, but bad value: '%s'" %
(candidate, str(block[candidate])))
return results
def add_action_type(actions, action_type):
results = list()
for action in actions:
action['__ansible_action_type__'] = BLOCK_NAME_TO_ACTION_TYPE_MAP[action_type]
results.append(action)
return results
def get_action_tasks(yaml, file):
tasks = list()
if file['type'] in ['tasks', 'handlers']:
tasks = add_action_type(yaml, file['type'])
else:
tasks.extend(extract_from_list(yaml, ['tasks', 'handlers', 'pre_tasks', 'post_tasks']))
# Add sub-elements of block/rescue/always to tasks list
tasks.extend(extract_from_list(tasks, ['block', 'rescue', 'always']))
# Remove block/rescue/always elements from tasks list
block_rescue_always = ('block', 'rescue', 'always')
tasks[:] = [task for task in tasks if all(k not in task for k in block_rescue_always)]
return [task for task in tasks if
set(['include', 'include_tasks',
'import_playbook', 'import_tasks']).isdisjoint(task.keys())]
def get_normalized_tasks(yaml, file):
tasks = get_action_tasks(yaml, file)
res = []
for task in tasks:
# An empty `tags` block causes `None` to be returned if
# the `or []` is not present - `task.get('tags', [])`
# does not suffice.
if 'skip_ansible_lint' in (task.get('tags') or []):
            # No need to normalize_task if we are skipping it.
continue
res.append(normalize_task(task, file['path']))
return res
def parse_yaml_linenumbers(data, filename):
"""Parse yaml as ansible.utils.parse_yaml but with linenumbers.
The line numbers are stored in each node's LINE_NUMBER_KEY key.
"""
def compose_node(parent, index):
# the line number where the previous token has ended (plus empty lines)
line = loader.line
node = Composer.compose_node(loader, parent, index)
node.__line__ = line + 1
return node
def construct_mapping(node, deep=False):
mapping = AnsibleConstructor.construct_mapping(loader, node, deep=deep)
if hasattr(node, '__line__'):
mapping[LINE_NUMBER_KEY] = node.__line__
else:
mapping[LINE_NUMBER_KEY] = mapping._line_number
mapping[FILENAME_KEY] = filename
return mapping
try:
import inspect
kwargs = {}
if 'vault_password' in inspect.getargspec(AnsibleLoader.__init__).args:
kwargs['vault_password'] = <PASSWORD>_VAULT_PASSWORD
loader = AnsibleLoader(data, **kwargs)
loader.compose_node = compose_node
loader.construct_mapping = construct_mapping
data = loader.get_single_data()
except (yaml.parser.ParserError, yaml.scanner.ScannerError) as e:
raise SystemExit("Failed to parse YAML in %s: %s" % (filename, str(e)))
return data
def get_first_cmd_arg(task):
try:
if 'cmd' in task['action']:
first_cmd_arg = task['action']['cmd'].split()[0]
else:
first_cmd_arg = task['action']['__ansible_arguments__'][0]
except IndexError:
return None
return first_cmd_arg
def normpath(path):
"""
Normalize a path in order to provide a more consistent output.
Currently it generates a relative path but in the future we may want to
make this user configurable.
"""
    # conversion to string in order to allow receiving non string objects
return os.path.relpath(str(path))
def is_playbook(filename):
"""
Check if the file is a playbook.
Given a filename, it should return true if it looks like a playbook. The
function is not supposed to raise exceptions.
"""
    # we assume it is a playbook if we loaded a sequence of dictionaries where
    # at least one of these keys is present:
playbooks_keys = {
"gather_facts",
"hosts",
"import_playbook",
"post_tasks",
"pre_tasks",
"roles"
"tasks",
}
# makes it work with Path objects by converting them to strings
if not isinstance(filename, str):
filename = str(filename)
try:
f = parse_yaml_from_file(filename)
except Exception as e:
        _logger.warning(
            "Failed to load %s with %s, assuming it is not a playbook.",
            filename, e)
else:
if (
isinstance(f, AnsibleSequence) and
hasattr(f, 'keys') and
playbooks_keys.intersection(next(iter(f), {}).keys())
):
return True
return False
def get_yaml_files(options):
"""Find all yaml files."""
# git is preferred as it also considers .gitignore
git_command = ['git', 'ls-files', '*.yaml', '*.yml']
_logger.info("Discovering files to lint: %s", ' '.join(git_command))
out = None
try:
out = subprocess.check_output(
git_command,
stderr=subprocess.STDOUT,
universal_newlines=True
).split()
except subprocess.CalledProcessError as exc:
_logger.warning(
"Failed to discover yaml files to lint using git: %s",
exc.output.rstrip('\n')
)
except FileNotFoundError as exc:
if options.verbosity:
_logger.warning(
"Failed to locate command: %s", exc
)
if out is None:
out = [
os.path.join(root, name)
for root, dirs, files in os.walk('.')
for name in files
if name.endswith('.yaml') or name.endswith('.yml')
]
return OrderedDict.fromkeys(sorted(out))
def get_playbooks_and_roles(options=None):
"""Find roles and playbooks."""
if options is None:
options = {}
files = get_yaml_files(options)
playbooks = []
role_dirs = []
role_internals = {
'defaults',
'files',
'handlers',
'meta',
'tasks',
'templates',
'vars',
}
# detect role in repository root:
if 'tasks/main.yml' in files or 'tasks/main.yaml' in files:
role_dirs.append('.')
for p in map(Path, files):
try:
for file_path in options.exclude_paths:
if str(p.resolve()).startswith(str(file_path)):
raise FileNotFoundError(
f'File {file_path} matched exclusion entry: {p}')
except FileNotFoundError as e:
_logger.debug('Ignored %s due to: %s', p, e)
continue
if (next((i for i in p.parts if i.endswith('playbooks')), None) or
'playbook' in p.parts[-1]):
playbooks.append(normpath(p))
continue
# ignore if any folder ends with _vars
if next((i for i in p.parts if i.endswith('_vars')), None):
continue
elif 'roles' in p.parts or '.' in role_dirs:
if 'tasks' in p.parts and p.parts[-1] in ['main.yaml', 'main.yml']:
role_dirs.append(str(p.parents[1]))
elif role_internals.intersection(p.parts):
continue
elif 'tests' in p.parts:
playbooks.append(normpath(p))
if 'molecule' in p.parts:
if p.parts[-1] != 'molecule.yml':
playbooks.append(normpath(p))
continue
# hidden files are clearly not playbooks, likely config files.
if p.parts[-1].startswith('.'):
continue
if is_playbook(p):
playbooks.append(normpath(p))
continue
_logger.info('Unknown file type: %s', normpath(p))
_logger.info('Found roles: %s', ' '.join(role_dirs))
_logger.info('Found playbooks: %s', ' '.join(playbooks))
return role_dirs + playbooks
def expand_path_vars(path):
"""Expand the environment or ~ variables in a path string."""
path = path.strip()
path = os.path.expanduser(path)
path = os.path.expandvars(path)
return path
def expand_paths_vars(paths):
"""Expand the environment or ~ variables in a list."""
paths = [expand_path_vars(p) for p in paths]
return paths
``` |
{
"source": "AakashGfude/MyST-Parser",
"score": 2
} |
#### File: tests/test_html/test_html_to_nodes.py
```python
from pathlib import Path
from unittest.mock import Mock
import pytest
from docutils import nodes
from markdown_it.utils import read_fixture_file
from myst_parser.html_to_nodes import html_to_nodes
FIXTURE_PATH = Path(__file__).parent
@pytest.fixture()
def mock_renderer():
def _run_directive(name: str, first_line: str, content: str, position: int):
node = nodes.Element(name=name, first=first_line, position=position)
node += nodes.Text(content)
return [node]
return Mock(
config={"myst_extensions": ["html_image", "html_admonition"]},
document={"source": "source"},
reporter=Mock(
warning=Mock(return_value=nodes.system_message("warning")),
error=Mock(return_value=nodes.system_message("error")),
),
run_directive=_run_directive,
)
@pytest.mark.parametrize(
"line,title,text,expected",
read_fixture_file(FIXTURE_PATH / "html_to_nodes.md"),
ids=[
f"{i[0]}-{i[1]}" for i in read_fixture_file(FIXTURE_PATH / "html_to_nodes.md")
],
)
def test_html_to_nodes(line, title, text, expected, mock_renderer):
output = nodes.container()
output += html_to_nodes(text, line_number=0, renderer=mock_renderer)
try:
assert output.pformat().rstrip() == expected.rstrip()
except AssertionError:
print(output.pformat())
raise
``` |
{
"source": "AakashGupta1993/P4-Advanced-Lane_lines",
"score": 4
} |
#### File: P4-Advanced-Lane_lines/Submit/Final.py
```python
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
#get_ipython().run_line_magic('matplotlib', 'qt')
#get_ipython().run_line_magic('matplotlib', 'inline')
from mpl_toolkits.axes_grid1 import ImageGrid
# preparing the object points, like (0,0,0), (1,0,0), (2,0,0) ....,(8,5,0)
objp = np.zeros((6*9,3), np.float32)
objp[:,:2] = np.mgrid[0:9, 0:6].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
# List of calibration images
images = glob.glob('camera_cal/calibration*.jpg')
i = 0
#fig = plt.figure(1, (30, 30))
'''grid = ImageGrid(fig, 111, # similar to subplot(111)
nrows_ncols=(6, 3), # creates 6x3 grid of axes
axes_pad=0.2, # pad between axes in inch.
)'''
print("Total images :" , len(images))
# Step through the list and search for chessboard corners
for idx, fname in enumerate(images):
img = cv2.imread(fname)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Find the chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (9,6), None)
# If found, add object points, image points
if ret == True:
objpoints.append(objp)
imgpoints.append(corners)
# Draw and display the corners
cv2.drawChessboardCorners(img, (9,6), corners, ret)
i = i +1
plt.show()
print("Done")
# Now I have objpoints and imgpoints needed for camera calibration. I can thus calculate distortion coefficients, and test undistortion on an image!
# In[2]:
import pickle
#get_ipython().run_line_magic('matplotlib', 'inline')
# Undistortion on an image
img = cv2.imread('camera_cal/test_image.jpg')
img_size = (img.shape[1], img.shape[0])
# Do camera calibration given object points and image points
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size,None,None)
dst = cv2.undistort(img, mtx, dist, None, mtx)
cv2.imwrite('camera_cal/test_undist11.jpg',dst)
# Save the camera calibration result for later use (we won't worry about rvecs / tvecs)
dist_pickle = {}
dist_pickle["mtx"] = mtx
dist_pickle["dist"] = dist
pickle.dump( dist_pickle, open( "camera_cal/wide_dist_pickle.p", "wb" ) )
#dst = cv2.cvtColor(dst, cv2.COLOR_BGR2RGB)
# Visualize undistortion
'''f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
ax1.imshow(img)
ax1.set_title('Original Image', fontsize=30)
ax2.imshow(dst)
ax2.set_title('Undistorted Image', fontsize=30)'''
# ### The image was distorted successfully
# ## Perspective transform
#
# Why are we using a perspective transform? The reason behind using the perspective transform is that when we see the lane lines from the normal camera view they appear to converge like a triangle, whereas they are actually parallel. Also, a bird-eye view of the same image gives us a better visualization of the lines in terms of turns etc. So here I am performing the perspective transform to the bird-eye view, or top view, of the lane lines.
#
# I have two main functions defined in the block below.
#
# The **get_src_destination_points_perspective_transform** function has 3 sets of source and destination points. Why 3 sets? Isn't one enough? One is sufficient, but during the project I tried various sets, so I have put them in a function for testing which gives better results.
#
# The **corners_unwarp** function is used to undistort the image and give a top view of the image. For this it takes various parameters as input like the camera matrix (calculated using chessboard images), source and destination points, and distortion coefficients.
# In[3]:
#import pickle
#import cv2
#import numpy as np
#import matplotlib.pyplot as plt
#import matplotlib.image as mpimg
def get_src_destination_points_perspective_transform(index):
if index == 1 :
#set 1
offset=10
src_bottom_left = [260,680]
src_bottom_right = [1040,680]
src_top_left = [581,460]
src_top_right = [700,460]
destination_bottom_left = [100,700]
destination_bottom_right = [1000,700]
destination_top_left = [100,50]
destination_top_right = [1000,50]
src = np.float32([[src_top_left,src_top_right,src_bottom_right,src_bottom_left]])
dst_points = np.float32([[destination_top_left,destination_top_right,destination_bottom_right,destination_bottom_left]])
elif index == 2 :
#set 2
src = np.float32([[(200, 720), (570, 470), (720, 470), (1130, 720)]])
dst_points = np.float32([[(350, 720), (350, 0), (980, 0), (980, 720)]])
elif index == 3 :
#set 3
offset_x = 400 # offset for dst points
offset_y = 50
src = np.float32([[610,440], [670, 440], [1040, 680], [260, 680]])
dst_points = np.float32([[offset_x, offset_y], [img_size[0]-offset_x, offset_y],
[img_size[0]-offset_x, img_size[1]-offset_y],
[offset_x, img_size[1]-offset_y]])
else :
src = np.float32([[(200, 720), (570, 470), (720, 470), (1130, 720)]])
dst_points = np.float32([[(350, 720), (350, 0), (980, 0), (980, 720)]])
return src, dst_points
def camera_parameters():
# Read in the saved camera matrix and distortion coefficients
# These are the arrays calculated using cv2.calibrateCamera()
dist_pickle = pickle.load( open( "camera_cal/wide_dist_pickle.p", "rb" ) )
mtx = dist_pickle["mtx"]
dist = dist_pickle["dist"]
return mtx,dist
def corners_unwarp(img, src, dst_points, print_statements=False):
    mtx, dist_coeff = camera_parameters()
    # Undistort using mtx and dist_coeff.
    undis = cv2.undistort(img, mtx, dist_coeff, None, mtx)
if print_statements == True :
print('Calibration matrix :' , mtx)
print('Image shape : ',img.shape)
M = cv2.getPerspectiveTransform(src, dst_points)
warped = cv2.warpPerspective(undis, M, (img.shape[1],img.shape[0]), flags=cv2.INTER_LINEAR)
dist_pickle = {}
dist_pickle["mtx"] = mtx
dist_pickle["dist"] = dist
dist_pickle["M"] = M
pickle.dump( dist_pickle, open( "camera_cal/wide_dist_pickle.p", "wb" ) )
return warped, M, undis
print('Done')
# In[4]:
def print_undistorted_and_warped_images(image1, image2, text1= 'Original Image', text2 = 'Undistorted and Warped Image'):
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
ax1.imshow(image1)
ax1.set_title(text1, fontsize=50)
ax2.imshow(image2)
ax2.set_title(text2, fontsize=50)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
def print_4_images(img1,img2,img3,img4,txt1,txt2,txt3,txt4):
f, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, figsize=(30, 30))
f.tight_layout()
ax1.imshow(img1,cmap='gray')
ax1.set_title(txt1, fontsize=50)
ax2.imshow(img2,cmap='gray')
ax2.set_title(txt2, fontsize=50)
ax3.imshow(img3,cmap='gray')
ax3.set_title(txt3, fontsize=50)
ax4.imshow(img4,cmap='gray')
ax4.set_title(txt4, fontsize=50)
def print_2_images (img1,img2,txt1='',txt2=''):
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(30, 30))
f.tight_layout()
ax1.imshow(img1,cmap='gray')
ax1.set_title(txt1, fontsize=50)
ax2.imshow(img2,cmap='gray')
ax2.set_title(txt2, fontsize=50)
# ## Color channels
# Printing various color channels for visualization to check which channel detects what.
#
# Printing BGR, HSV and HLS color channels.
# ### BGR Color channels
# In[5]:
def print_bgr_color_images(img) :
#grayscale image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#b,g,r image
b_img = img[:,:,0]
g_img = img[:,:,1]
r_img = img[:,:,2]
f, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, figsize=(30, 30))
f.tight_layout()
ax1.imshow(gray,cmap='gray')
ax1.set_title('gray', fontsize=50)
ax2.imshow(b_img,cmap='gray')
ax2.set_title('b_img', fontsize=50)
ax3.imshow(g_img,cmap='gray')
ax3.set_title('g_img', fontsize=50)
ax4.imshow(r_img,cmap='gray')
ax4.set_title('r_img', fontsize=50)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
# ### HLS Color channels
# In[6]:
def print_hls_color_images(img) :
#RGB to HLS
hls_image = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
h_img = hls_image[:,:,0]
l_img = hls_image[:,:,1]
s_img = hls_image[:,:,2]
f, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, figsize=(30, 30))
f.tight_layout()
ax1.imshow(hls_image)
ax1.set_title('hls_image', fontsize=50)
ax2.imshow(h_img,cmap='gray')
ax2.set_title('h_img', fontsize=50)
ax3.imshow(l_img,cmap='gray')
ax3.set_title('l_img', fontsize=50)
ax4.imshow(s_img,cmap='gray')
ax4.set_title('s_img', fontsize=50)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
# ## HSV Color Channels
# In[7]:
def print_hsv_color_images(img) :
#RGB to HLS
hsv_image = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
hsv_h_img = hsv_image[:,:,0]
hsv_s_img = hsv_image[:,:,1]
hsv_v_img = hsv_image[:,:,2]
f, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, figsize=(30, 30))
f.tight_layout()
ax1.imshow(hsv_image)
ax1.set_title('hsv_image', fontsize=50)
ax2.imshow(hsv_h_img,cmap='gray')
ax2.set_title('hsv_h_img', fontsize=50)
ax3.imshow(hsv_s_img,cmap='gray')
ax3.set_title('hsv_s_img', fontsize=50)
ax4.imshow(hsv_v_img,cmap='gray')
ax4.set_title('hsv_v_img', fontsize=50)
# ## Threshold functions for sobel
#
# Defining various sobel threshold functions. These are visualized later to help in choosing the best combination for lane line detection.
# In[8]:
def abs_sobel_thresh(image, orient='x', sobel_kernel=3, thresh=(0, 255)):
# Steps fro abs sobel
# 1) Converting to grayscale
image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
thresh_min = thresh[0]
thresh_max = thresh[1]
# 2) Taking the derivative in x or y given orient = 'x' or 'y'
if orient == 'x' :
sobelx = cv2.Sobel(image, cv2.CV_64F, 1, 0)
else :
sobely = cv2.Sobel(image, cv2.CV_64F, 0, 1)
# 3) Taking the absolute value of the derivative or gradient
if orient == 'x' :
absSobel = abs(sobelx)
else :
absSobel = abs(sobely)
# 4) Scaling to 8-bit (0 - 255) then convert to type = np.uint8
scaled_sobel = np.uint8(255*absSobel/(np.max(absSobel)))
# 5) Create a mask of 1's where the scaled gradient magnitude
masked_output = np.zeros_like(scaled_sobel)
masked_output [(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1
binary_output = np.copy(masked_output)
return binary_output
def mag_thresh(image, sobel_kernel=3, mag_thresh=(0, 255)):
# 1) Converting to grayscale
gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
# 2) Taking the gradient in x and y separately
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1)
# 3) Calculating the magnitude
mag = np.sqrt(sobelx*sobelx+sobely*sobely)
# 4) Scaling to 8-bit (0 - 255) and convert to type = np.uint8
scaled_sobel = np.uint8(255*mag/(np.max(mag)))
# 5) Creating a binary mask where mag thresholds are met
masked_sobel = np.zeros_like(scaled_sobel)
masked_sobel[ (scaled_sobel>=mag_thresh[0]) & (scaled_sobel<= mag_thresh[1]) ] = 1
binary_output = np.copy(masked_sobel)
return binary_output
def dir_threshold(image, sobel_kernel=3, thresh=(0, np.pi/2)):
# 1) Converting to grayscale
gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
# 2) Taking the gradient in x and y separately
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
# 3) Taking the absolute value of the x and y gradients
abs_sobelx = np.absolute(sobelx)
abs_sobely = np.absolute(sobely)
# 4) Using np.arctan2(abs_sobely, abs_sobelx) to calculate the direction of the gradient
direction_gradient = np.arctan2(abs_sobely,abs_sobelx)
# 5) Creating a binary mask where direction thresholds are met
masked_sobel = np.zeros_like(direction_gradient)
masked_sobel[ (direction_gradient >= thresh[0]) & (direction_gradient <= thresh[1]) ] = 1
# 6) Return this mask as masked_sobel image
return masked_sobel
# ## Threshold for color spaces
# After visualizing the color channel results, I have decided to use the red binary, green binary and S binary from the HLS color space.
# In[9]:
def red_threshold(bird_eye_view_image, thresh = (200,255)):
r_img = bird_eye_view_image[:,:,2]
binary_red = np.zeros_like(r_img)
binary_red[(r_img > thresh[0]) & (r_img <= thresh[1])] = 1
return binary_red
def green_threshold(bird_eye_view_image, thresh = (200,255)):
g_img = bird_eye_view_image[:,:,1]
binary_green = np.zeros_like(g_img)
binary_green[(g_img > thresh[0]) & (g_img <= thresh[1])] = 1
return binary_green
def hls_s_threshold(bird_eye_view_image, thresh = (90, 255)):
hls_image = cv2.cvtColor(bird_eye_view_image, cv2.COLOR_BGR2HLS)
s_img = hls_image[:,:,2]
binary_s_hls = np.zeros_like(s_img)
binary_s_hls[(s_img > thresh[0]) & (s_img <= thresh[1])] = 1
return binary_s_hls
# ## Process the image - test block (Can be ignored as used for testing purposes)
#
# This block is the testing block to check the pipeline process to get a good binary image after applying sobel and color binaries.
# In[10]:
nx = 9
ny = 6
img = cv2.imread('test_images/test4.jpg')
ksize = 3
bird_eye_view_image = []
def process_the_image(img):
#img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
src, dst_points = get_src_destination_points_perspective_transform(index=1)
    mtx, dist_coeff = camera_parameters()
top_down, perspective_M, undist = corners_unwarp(img, src, dst_points, print_statements=False)
#print_undistorted_and_warped_images(cv2.cvtColor(img,cv2.COLOR_BGR2RGB),cv2.cvtColor(top_down,cv2.COLOR_BGR2RGB))
#print_2_images(cv2.cvtColor(img,cv2.COLOR_BGR2RGB),cv2.cvtColor(top_down,cv2.COLOR_BGR2RGB), 'Original Image', 'Undistorted and Warped Image')
cv2.imwrite('test_images/bird_eye_test5.jpg',top_down)
bird_eye_view_image = np.copy(top_down)
#print_bgr_color_images(bird_eye_view_image)
#print_hls_color_images(bird_eye_view_image)
#print_hsv_color_images(bird_eye_view_image)
gradx = abs_sobel_thresh(bird_eye_view_image, orient='x', sobel_kernel=ksize, thresh=(20, 100))
grady = abs_sobel_thresh(bird_eye_view_image, orient='y', sobel_kernel=ksize, thresh=(20, 100))
mag_binary = mag_thresh(bird_eye_view_image, sobel_kernel=ksize, mag_thresh=(20, 100))
dir_binary = dir_threshold(bird_eye_view_image, sobel_kernel=ksize, thresh=(0, 0.2))
#print_4_images(gradx,grady,mag_binary,dir_binary,'Gradient x','Gradient y','mag_binary','dir_binary')
#Combined threshols sobel
combined = np.zeros_like(dir_binary)
combined[((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1))] = 1
#==========================================================#
combined_2 = np.zeros_like(dir_binary)
combined_2[((gradx == 1) & (grady == 1)) | ((mag_binary == 1))] = 1
#==========================================================#
#print_2_images(combined,combined_2,'Combined 1','Combined 2')
#==========================================================#
#Color thresholding
#=======================================================#
r_img = bird_eye_view_image[:,:,2]
thresh = (200, 255)
binary_red = np.zeros_like(r_img)
binary_red[(r_img > thresh[0]) & (r_img <= thresh[1])] = 1
#=======================================================#
#=======================================================#
hls_image = cv2.cvtColor(bird_eye_view_image, cv2.COLOR_BGR2HLS)
s_img = hls_image[:,:,2]
thresh = (90, 255)
binary_s_hls = np.zeros_like(s_img)
binary_s_hls[(s_img > thresh[0]) & (s_img <= thresh[1])] = 1
#=======================================================#
#print_2_images(binary_red,binary_s_hls,'binary_red','binary_s_hls')
#==========================================================#
combined_try1 = np.zeros_like(mag_binary)
combined_try1[((gradx == 1) & (grady == 1)) | ((mag_binary == 1)) | (binary_red==1) & (binary_s_hls==1)] = 1
#plt.figure()
#plt.imshow(combined_try1,cmap='gray')
return combined_try1, bird_eye_view_image, undist
processed_image, bird_eye_view_image, undist = process_the_image(img)
# ## Histogram and Minv
#
# Here I am getting the histogram of the combined binary image. The histogram helps us find the lane lines by detecting the starting points of the lane lines. After detecting the initial points I have used the sliding window method to get the other points of the lane lines.
#
#
# Minv is needed to warp the detected driving portion back so that it overlaps with the original image.
# In[11]:
def get_histogram(combined_binay, print_statements=False):
histogram = np.sum(combined_binay[combined_binay.shape[0]//2:,:], axis=0)
if print_statements==True:
plt.plot(histogram,linewidth=2.5,color='yellow')
return histogram
def get_Minv(src,dst_points):
Minv = cv2.getPerspectiveTransform(dst_points, src)
return Minv
# ## Pipeline for the lane line detection
# Get the starting points of the lane lines using the histogram. Get the lane line points using the sliding window and then fit the polynomial. Once we have the starting points of the lane lines and the polynomial function, we can skip the sliding window part for the next frame until we encounter a bad frame.
#
# Skipping the sliding window part saves a lot of computation. It also shows the polynomial on the lane lines. However, the output is shown when the function is called and not after this block.
#
# Using a count variable to check when to use the sliding window and when to skip it. Use the sliding window in the first frame and then use the previous polynomial for the next frame. If there is an error or too few points are detected for the next frame, shift back to the sliding window.
#
# In[12]:
def lane_line(count,left_fitx, left_fit, right_fitx, right_fit, Minv, bird_eye_view_image, undist, histogram, processed_image, print_image_statment=False) :
#print(count)
#using count to check when to use sliding window and when to skip.
if count==0 :
# Create an output image to draw on and visualize the result
out_img = np.dstack((processed_image, processed_image, processed_image))*255
if print_image_statment == True :
plt.imshow(out_img)
print("out_img :", out_img.shape)
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
midpoint = np.int(histogram.shape[0]/2)
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:]) + midpoint
# Choose the number of sliding windows
nwindows = 9
# Set height of windows
window_height = np.int(processed_image.shape[0]/nwindows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = processed_image.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated for each window
leftx_current = leftx_base
rightx_current = rightx_base
# Set the width of the windows +/- margin
margin = 100
# Set minimum number of pixels found to recenter window
minpix = 50
# Create empty lists to receive left and right lane pixel indices
left_lane_inds = []
right_lane_inds = []
# Step through the windows one by one
for window in range(nwindows):
# Identify window boundaries in x and y (and right and left)
win_y_low = processed_image.shape[0] - (window+1)*window_height
win_y_high = processed_image.shape[0] - window*window_height
win_xleft_low = leftx_current - margin
win_xleft_high = leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
# Draw the windows on the visualization image
cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),
(0,255,0), 2)
cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),
(0,255,0), 2)
# Identify the nonzero pixels in x and y within the window
good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
# Append these indices to the lists
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
# If you found > minpix pixels, recenter next window on their mean position
if len(good_left_inds) > minpix:
leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
if len(good_right_inds) > minpix:
rightx_current = np.int(np.mean(nonzerox[good_right_inds]))
# Concatenate the arrays of indices
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
# Extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
# Fit a second order polynomial to each
left_fit = np.polyfit(lefty, leftx, 2)
right_fit = np.polyfit(righty, rightx, 2)
if print_image_statment == True :
print(leftx_current)
print(rightx_current)
print(left_lane_inds)
print(right_lane_inds)
print(len(left_lane_inds))
print(len(right_lane_inds))
#=====================================================#
ploty = np.linspace(0, processed_image.shape[0]-1, processed_image.shape[0] )
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
if print_image_statment == True :
plt.figure()
plt.imshow(out_img)
plt.plot(left_fitx, ploty, color='yellow')
plt.plot(right_fitx, ploty, color='yellow')
plt.xlim(0, 1280)
plt.ylim(720, 0)
result = out_img
else :
#===================================================#
# Assume you now have a new warped binary image
# from the next frame of video (also called "binary_warped")
# It's now much easier to find line pixels!
nonzero = processed_image.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
margin = 100
left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy +
left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) +
left_fit[1]*nonzeroy + left_fit[2] + margin)))
right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy +
right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) +
right_fit[1]*nonzeroy + right_fit[2] + margin)))
# Again, extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
# Fit a second order polynomial to each
left_fit = np.polyfit(lefty, leftx, 2)
right_fit = np.polyfit(righty, rightx, 2)
# Generate x and y values for plotting
ploty = np.linspace(0, processed_image.shape[0]-1, processed_image.shape[0] )
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
#=======================================================#
# Create an image to draw on and an image to show the selection window
out_img = np.dstack((processed_image, processed_image, processed_image))*255
window_img = np.zeros_like(out_img)
# Color in left and right line pixels
out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
# Generate a polygon to illustrate the search window area
# And recast the x and y points into usable format for cv2.fillPoly()
left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])
left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin,
ploty])))])
left_line_pts = np.hstack((left_line_window1, left_line_window2))
right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])
right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin,
ploty])))])
right_line_pts = np.hstack((right_line_window1, right_line_window2))
# Draw the lane onto the warped blank image
cv2.fillPoly(window_img, np.int_([left_line_pts]), (0,255, 0))
cv2.fillPoly(window_img, np.int_([right_line_pts]), (0,255, 0))
result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
if print_image_statment == True :
plt.figure()
plt.imshow(result)
plt.plot(left_fitx, ploty, color='yellow')
plt.plot(right_fitx, ploty, color='yellow')
plt.xlim(0, 1280)
plt.ylim(720, 0)
return left_fitx, left_fit, leftx, right_fitx, right_fit, rightx, ploty, result
#left_fitx, left_fit, right_fit = lane_line(count, left_fitx, left_fit, right_fit, Minv, bird_eye_view_image, undist)
#count= count+1
# ## Unwarp and fill the polygon
#
# Here we pass the x and y arrays for left lane and right lane, plot the polygon, and then do a perspective transform to plot it on the original image.
# In[13]:
def unwarp_and_fillPoly(image, combined_binary, left_fitx, right_fitx, ploty, Minv, undist, print_image_statments=False) :
# Create an image to draw the lines on
warp_zero = np.zeros_like(combined_binary).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
if print_image_statments == True :
print("color warp shape", color_warp.shape)
print("warp zero shape", warp_zero.shape)
print(type(color_warp))
#plt.imshow(color_warp)
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
pts = np.hstack((pts_left, pts_right))
# Draw the lane onto the warped blank image
cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
# Warp the blank back to original image space using inverse perspective matrix (Minv)
newwarp = cv2.warpPerspective(color_warp, Minv, (image.shape[1], image.shape[0]))
# Combine the result with the original image
result = cv2.addWeighted(undist, 1, newwarp, 0.3, 0)
if print_image_statments == True :
plt.figure()
plt.imshow(result)
return result
# ## Radius of curvature and center offset
# Finding the radius of curvature and the center offset here. The offset is the difference between the center of the image and the center of the lane lines.
# In[14]:
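# A quick note on the formula used below: for a second-order fit x = A*y**2 + B*y + C,
# the radius of curvature evaluated at y is
#   R = (1 + (2*A*y + B)**2)**1.5 / |2*A|
# set_radius_curvature computes exactly this after rescaling pixels to meters.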
def set_radius_curvature(ploty, left_fitx, right_fitx, final_image):
y_eval = np.max(ploty)
# Define conversions in x and y from pixels space to meters
ym_per_pix = 30/720 # meters per pixel in y dimension
xm_per_pix = 3.7/700 # meters per pixel in x dimension
# Fit new polynomials to x,y in world space
left_fit_cr = np.polyfit(ploty*ym_per_pix, left_fitx*xm_per_pix, 2)
right_fit_cr = np.polyfit(ploty*ym_per_pix, right_fitx*xm_per_pix, 2)
# Calculate the new radii of curvature
left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(final_image, 'Radius of Curvature left curved : '+ str(left_curverad)[:6]+ 'm', (20,40), font, 1, (255,255,255), 2)
cv2.putText(final_image, 'Radius of Curvature right curved : '+ str(right_curverad)[:6]+ 'm', (20,80), font, 1, (255,255,255), 2)
return final_image
def set_center(left_line_x, right_line_x,final_image) :
#y_max = np.max(ploty)
offset = 640 - (right_line_x + left_line_x)/2
offset_meter = offset*3.7/700
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(final_image, 'Center '+ str(offset_meter)[:6]+ 'm', (20,120), font, 1, (255,255,255), 2)
return final_image
# ### Convert 1-D array to 3-D for stacking images together
# In[15]:
def binary_to_stack_fit(image) :
return (np.dstack((image, image, image))*255).astype(np.uint8)
# ## Process the image to get the final output
# This is the main function from which all the other functions are called to generate the output. I am using various global variables to keep track of values from the first frame.
#
# ## Output images:
#
# In[25]:
import cv2
img = cv2.imread('test_images/test4.jpg')
#img = cv2.imread('testing/test_img_3_20.jpg')
count = 0
left_fitx = 0
right_fitx = 0
left_fit = 0
right_fit = 0
ploty = 0
leftx = 0
rightx = 0
def process_image_for_video(img) :
global count
print_logs = False
img = cv2.cvtColor(img,cv2.COLOR_RGB2BGR)
cv2.imwrite('data/data_road/training/image_2/um_00000'+str(count)+'.jpg',img)
global left_fitx
global left_fit
global right_fitx
global right_fit
global ploty
global leftx
global rightx
src, dst_points = get_src_destination_points_perspective_transform(index=1)
top_down, perspective_M, undist = corners_unwarp(img, src, dst_points, print_statements=False)
bird_eye_view_image = np.copy(top_down)
gradx = abs_sobel_thresh(bird_eye_view_image, orient='x', sobel_kernel=ksize, thresh=(20, 100))
grady = abs_sobel_thresh(bird_eye_view_image, orient='y', sobel_kernel=ksize, thresh=(20, 100))
mag_binary = mag_thresh(bird_eye_view_image, sobel_kernel=ksize, mag_thresh=(20, 100))
dir_binary = dir_threshold(bird_eye_view_image, sobel_kernel=ksize, thresh=(0, 0.2))
r_binary = red_threshold(bird_eye_view_image, thresh = (200, 255))#red binary from cv2.imread(), bgr
g_binary = green_threshold(bird_eye_view_image, thresh = (200, 255))
hls_s_binary = hls_s_threshold(bird_eye_view_image, thresh = (90, 255))#s binary from cv2.imread(), hls
combined_binary = np.zeros_like(mag_binary)
#combined_binary[((gradx == 1) & (grady == 1)) | ((mag_binary == 1)) | (r_binary==1) & (hls_s_binary==1)] = 1
combined_binary[((gradx == 1) & (grady == 1))| ((g_binary == 1)) | ((mag_binary == 1)) | (r_binary==1) & (hls_s_binary==1)] = 1
histogram = get_histogram(combined_binary, print_statements=False)
Minv = get_Minv(src,dst_points)
left_fitx, left_fit, leftx, right_fitx, right_fit, rightx, ploty, image_search_window = lane_line(count,left_fitx, left_fit, right_fitx, right_fit, Minv, bird_eye_view_image, undist, histogram,
combined_binary, print_image_statment=False)
final_image = unwarp_and_fillPoly(img, combined_binary, left_fitx, right_fitx, ploty,
Minv, undist, print_image_statments=False)
#count = count
#print('Count in updation' , count)
if print_logs == True :
#print_undistorted_and_warped_images(cv2.cvtColor(img,cv2.COLOR_BGR2RGB),cv2.cvtColor(top_down,cv2.COLOR_BGR2RGB))
print_2_images(img,top_down, 'Original Image', 'Undistorted and Warped Image')
print_bgr_color_images(bird_eye_view_image)
print_hls_color_images(bird_eye_view_image)
print_hsv_color_images(bird_eye_view_image)
print_4_images(gradx,grady,mag_binary,dir_binary,'Gradient x','Gradient y','mag_binary','dir_binary')
print_2_images(r_binary,hls_s_binary,'binary_red','binary_s_hls')
plt.figure()
plt.imshow(combined_binary,cmap='gray')
cv2.imwrite('data/data_road/training/gt_image_2/um_lane_00000'+str(count)+'.jpg',final_image)
    final_image = set_radius_curvature(ploty, left_fitx, right_fitx, final_image)
final_image = set_center(left_fitx[719],right_fitx[719],final_image)
final_image = cv2.cvtColor(final_image,cv2.COLOR_RGB2BGR)
bird_eye_view_image = cv2.cvtColor(bird_eye_view_image,cv2.COLOR_RGB2BGR)
combined_binary_stack_fit = binary_to_stack_fit(combined_binary)
stack_image1 = np.hstack((final_image,image_search_window))
stack_image2 = np.hstack((combined_binary_stack_fit,bird_eye_view_image))
stack_image3 = np.vstack((stack_image1,stack_image2))
count = count + 1
print(count)
return stack_image3
#final_image = process_image_for_video(img)
#plt.figure()
#final_image = cv2.cvtColor(final_image,cv2.COLOR_BGR2RGB)
#plt.imshow(final_image)
# ## Video pipeline
# It calls the image pipeline by passing each frame and then appends each frame. Global variables are initialized here.
# ## Choose a method for processing image
# ### Method 1:process_image_for_video
# * Compute the camera calibration matrix and distortion coefficients given a set of chessboard images.
# * Apply a distortion correction to raw images.
# * ** Apply a perspective transform to raw images ("birds-eye view").**
# * ** Use color transforms, gradients, etc., to create a thresholded binary image from bird-eye view.**
# * Detect lane pixels and fit to find the lane boundary.
# * Determine the curvature of the lane and vehicle position with respect to center.
# * Warp the detected lane boundaries back onto the original image.
# * Output visual display of the lane boundaries and numerical estimation of lane curvature and vehicle position.
#
# ### Method 2: process_image_for_video2
# * Compute the camera calibration matrix and distortion coefficients given a set of chessboard images.
# * **Apply a distortion correction to raw images.**
# * **Use color transforms, gradients, etc., to create a thresholded binary image.**
# * Apply a perspective transform to rectify binary image ("birds-eye view").
# * Detect lane pixels and fit to find the lane boundary.
# * Determine the curvature of the lane and vehicle position with respect to center.
# * Warp the detected lane boundaries back onto the original image.
# * Output visual display of the lane boundaries and numerical estimation of lane curvature and vehicle position.
# In[26]:
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
white_output = 'project_video_full8.mp4'
count = 0
left_fitx = 0
right_fitx = 0
left_fit = 0
right_fit = 0
ploty = 0
clip1 = VideoFileClip("project_video.mp4")#.subclip(0,20)
white_clip = clip1.fl_image(process_image_for_video) #NOTE: this function expects color images!!
#get_ipython().run_line_magic('time', 'white_clip.write_videofile(white_output, audio=False)')
# ## Output of the video
# In[ ]:
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(white_output))
# ## Problems
#
# There are many problems that I am facing. Some of them are:
#
# 1) It is not able to handle the challenge video. It requires averaging.
# 2) No sanity checks have been done. Sanity checks are needed to decide when to start searching again with the sliding window.
# 3) I want to apply the thresholds first and then take the top-down view.
#
# These are some of the problems. I will try to work on them so that the project looks good.
# In[ ]:
import cv2
img = cv2.imread('test_images/test4.jpg')
#img = cv2.imread('testing/test_img_3_20.jpg')
count = 0
left_fitx = 0
right_fitx = 0
left_fit = 0
right_fit = 0
ploty = 0
leftx = 0
rightx = 0
def process_image_for_video2(img) :
global count
print_logs = False
img = cv2.cvtColor(img,cv2.COLOR_RGB2BGR)
#cv2.imwrite('data/data_road/training/image_2/um_00000'+str(count)+'.png',img)
global left_fitx
global left_fit
global right_fitx
global right_fit
global ploty
global leftx
global rightx
src, dst_points = get_src_destination_points_perspective_transform(index=1)
top_down1, perspective_M1, undist = corners_unwarp(img, src, dst_points, print_statements=False)
bird_eye_view_image = np.copy(img)
gradx = abs_sobel_thresh(bird_eye_view_image, orient='x', sobel_kernel=ksize, thresh=(20, 100))
grady = abs_sobel_thresh(bird_eye_view_image, orient='y', sobel_kernel=ksize, thresh=(20, 100))
mag_binary = mag_thresh(bird_eye_view_image, sobel_kernel=ksize, mag_thresh=(20, 100))
dir_binary = dir_threshold(bird_eye_view_image, sobel_kernel=ksize, thresh=(0, 0.2))
r_binary = red_threshold(bird_eye_view_image, thresh = (200, 255))#red binary from cv2.imread(), bgr
g_binary = green_threshold(bird_eye_view_image, thresh = (200, 255))
hls_s_binary = hls_s_threshold(bird_eye_view_image, thresh = (90, 255))#s binary from cv2.imread(), hls
combined_binary = np.zeros_like(mag_binary)
#combined_binary[((gradx == 1) & (grady == 1)) | ((mag_binary == 1)) | (r_binary==1) & (hls_s_binary==1)] = 1
combined_binary[((gradx == 1) & (grady == 1))| ((g_binary == 1)) | ((mag_binary == 1)) | (r_binary==1) & (hls_s_binary==1)] = 1
top_down, perspective_M, undist1 = corners_unwarp(combined_binary, src, dst_points, print_statements=False)
combined_binary = np.copy(top_down)
histogram = get_histogram(combined_binary, print_statements=False)
Minv = get_Minv(src,dst_points)
left_fitx, left_fit, leftx, right_fitx, right_fit, rightx, ploty, image_search_window = lane_line(count,left_fitx, left_fit, right_fitx, right_fit, Minv, bird_eye_view_image, undist, histogram,
combined_binary, print_image_statment=False)
final_image = unwarp_and_fillPoly(img, combined_binary, left_fitx, right_fitx, ploty,
Minv, undist, print_image_statments=False)
#count = count
#print('Count in updation' , count)
if print_logs == True :
#print_undistorted_and_warped_images(cv2.cvtColor(img,cv2.COLOR_BGR2RGB),cv2.cvtColor(top_down,cv2.COLOR_BGR2RGB))
print_2_images(img,top_down, 'Original Image', 'Undistorted and Warped Image')
print_bgr_color_images(bird_eye_view_image)
print_hls_color_images(bird_eye_view_image)
print_hsv_color_images(bird_eye_view_image)
print_4_images(gradx,grady,mag_binary,dir_binary,'Gradient x','Gradient y','mag_binary','dir_binary')
print_2_images(r_binary,hls_s_binary,'binary_red','binary_s_hls')
plt.figure()
plt.imshow(combined_binary,cmap='gray')
cv2.imwrite('data/data_road/training/gt_image_2/um_lane_00000'+str(count)+'.png',img)
    final_image = set_radius_curvature(ploty, left_fitx, right_fitx, final_image)
final_image = set_center(left_fitx[719],right_fitx[719],final_image)
final_image = cv2.cvtColor(final_image,cv2.COLOR_RGB2BGR)
bird_eye_view_image = cv2.cvtColor(bird_eye_view_image,cv2.COLOR_RGB2BGR)
combined_binary_stack_fit = binary_to_stack_fit(combined_binary)
stack_image1 = np.hstack((final_image,image_search_window))
stack_image2 = np.hstack((combined_binary_stack_fit,bird_eye_view_image))
stack_image3 = np.vstack((stack_image1,stack_image2))
count = count + 1
return stack_image3
#final_image = process_image_for_video(img)
#plt.figure()
#final_image = cv2.cvtColor(final_image,cv2.COLOR_BGR2RGB)
#plt.imshow(final_image)
``` |
{
"source": "aakashgusain1990/Text-Encryption",
"score": 2
} |
#### File: Text-Encryption/home/views.py
```python
from django.shortcuts import render,redirect
from django.http import HttpResponse
from django.contrib import messages
from .models import AllData,ImgData
def home(request):
return render(request,'home.html')
def encrypt(request):
if request.method == 'POST':
allData=AllData()
allData.passkey = request.POST['passkey']
pwd = request.POST['passkey']
text = request.POST['enctext']
encryptext=""
j=0
for i in range(len(text)):
if j==len(pwd):
j=0
val = ((ord(text[i])-33)+(ord(pwd[j])-33))%89
val+=33
char = chr(val)
encryptext+=char
j+=1
allData.encrytext = encryptext
allData.save()
request.session['resp'] = encryptext
return render(request,'encrypted.html',{'enc':encryptext})
return render(request, 'encrypt.html')
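# A brief sketch of the character cipher implemented by encrypt() above and decrypt() below
# (each character is shifted within an 89-symbol range starting at ASCII 33):
#   encrypt: c' = chr(((ord(c) - 33) + (ord(k) - 33)) % 89 + 33)
#   decrypt: c  = chr(((ord(c') - 33) - (ord(k) - 33) + 89) % 89 + 33)
# so running decrypt with the same repeating passkey recovers the original text.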
def decrypt(request):
if request.method == 'POST':
try:
pwd = request.POST['pwdd']
text = request.POST['etext']
j=0
decryptext=""
q = AllData.objects.filter(passkey = request.POST["pwdd"], encrytext=request.POST["etext"]).first()
print(q)
for i in text:
if j == len(pwd):
j=0
val = ((ord(i)-33)-(ord(pwd[j])-33)+89)%89
val+=33
char = chr(val)
decryptext+=char
j+=1
print(decryptext)
return render(request,'decrypted.html',{'decryptext':decryptext})
except:
messages.warning(request, "INCORRECT PASSWORD OR TEXT")
return render(request, 'decrypt.html')
def encrypted(request):
q = AllData.objects.filter(passkey = "sds").first()
return render(request, 'encrypted.html', {'resp': q})
def decrypted(request):
return render(request, 'decrypted.html')
def imgencrypt(request):
if request.method=='POST':
key=request.POST['passkey']
image=request.FILES['image']
imgdata=ImgData()
imgdata.imgpasskey=key
imgdata.encryptimg=image
imgdata.save()
imgObj = ImgData.objects.filter(imgpasskey=key).last()
passwd=0
j=1
for i in key:
passwd+=j*(ord(i))
j+=1
key=(passwd)%256
print(imgObj.encryptimg.path)
fi=open(imgObj.encryptimg.path,'rb')
image=fi.read()
fi.close()
image=bytearray(image)
# print(image)
for index, values in enumerate(image):
image[index]=values^int(key)
fi1=open(imgObj.encryptimg.path,'wb')
fi1.write(image)
fi1.close()
return render(request, 'imgencrypted.html',{'imgdata':imgdata})
return render(request,'imgencrypt.html')
def imgdecrypt(request):
if request.method=='POST':
key=request.POST['passkey']
image=request.FILES['image']
imgObj = ImgData.objects.filter(imgpasskey=key).last()
imgObj.decryptimg=image
imgObj.save()
passwd=0
j=1
for i in key:
passwd+=j*(ord(i))
j+=1
key=(passwd)%256
print(imgObj.decryptimg.path)
fi=open(imgObj.decryptimg.path,'rb')
image=fi.read()
fi.close()
image=bytearray(image)
# print(image)
for index, values in enumerate(image):
image[index]=values^int(key)
fi1=open(imgObj.decryptimg.path,'wb')
fi1.write(image)
fi1.close()
return render(request, 'imgdecrypted.html',{'imgdata':imgObj})
return render(request,'imgdecrypt.html')
def imgdecrypted(request):
return render(request,'imgdecrypted.html')
def imgencrypted(request):
return render(request,'imgencrypted.html')
``` |
{
"source": "aakashhemadri/courses",
"score": 4
} |
#### File: 15XW23 - Data Structures and Algorithms/dijkstra/dijkstra.py
```python
import sys
import numpy
class dijkstra:
"""Dijkstra's algorithm"""
def __init__(self, data=[numpy.zeros((2,2),numpy.int8),1,1,'A']):
self.graph = data[0]
self.n_vertices = data[1]
self.n_edges = data[2]
self.source = data[3]
self.dist = [sys.maxsize] * self.n_vertices
        self.dist[ord(self.source) - 65] = 0
self.spanning_tree = [False] * self.n_vertices
        # Auto-assign each vertex an incremental letter, starting from 'A'
self.vertices = []
for iter in range(self.n_vertices):
self.vertices.append(chr(65+iter))
def min_distance(self):
min = sys.maxsize
min_index = 0
for vertex in range(self.n_vertices):
if(self.dist[vertex] < min and self.spanning_tree[vertex] == False):
min = self.dist[vertex]
min_index = vertex
return min_index
def algorithm(self):
for iterator in range(self.n_vertices):
u = self.min_distance()
self.spanning_tree[u] = True
for v in range(self.n_vertices):
if self.graph[u][v] > 0 and self.spanning_tree[v] == False and self.dist[v] > self.dist[u] + self.graph[u][v]:
self.dist[v] = self.dist[u] + self.graph[u][v]
def print_dist(self):
"""Print's distance from Source"""
print("[Vertex]\t-\t[Distance from Source]")
for iterator in range(self.n_vertices):
print(self.vertices[iterator],"\t\t-\t\t",self.dist[iterator])
def print_member(self):
"""Print function"""
print(self.graph)
print(self.n_vertices)
print(self.n_edges)
print(self.source)
print(self.vertices)
def test():
    # Illustrative sample run on a tiny 3-vertex graph (vertices are auto-named A, B, C).
    graph = numpy.array([[0, 4, 1], [4, 0, 2], [1, 2, 0]])
    d = dijkstra([graph, 3, 3, 'A'])
    d.algorithm()
    d.print_dist()
if __name__ == "__main__":
test()
``` |
{
"source": "aakashhemadri/mc-serve",
"score": 3
} |
#### File: mc-serve/mcserve/serve.py
```python
class MCServe(object):
def __init__(self, config=None, args=None):
self.config = config
self.args = args
def run(self):
print("Initializing mc-serve")
if self.args.action == 'start':
self.start()
elif self.args.action == 'stop':
self.stop()
        # the action names for logs/exec are assumed from the method names below
        elif self.args.action == 'logs':
            self.logs()
        elif self.args.action == 'exec':
            self.execute()
print("bye bye")
def execute(self):
print("Executing command on minecraft-server instance")
def start(self):
print("Starting minecraft-server instance")
def stop(self):
print("Stopping minecraft-server instance")
def logs(self):
print("Following logs of the minecraft-server instance")
```
#### File: aakashhemadri/mc-serve/setup.py
```python
import setuptools
import sys
import os
import re
if sys.version_info < (3, 0):
print('MCServe requires at least Python 3 to run.')
sys.exit(1)
PY3 = sys.version_info[0] == 3
# Global functions
##################
with open(os.path.join('mcserve', '__init__.py'), encoding='utf-8') as f:
version = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", f.read(), re.M).group(1)
if not version:
raise RuntimeError('Cannot find MCServe version information.')
with open("README.md", "r") as fh:
long_description = fh.read()
def get_data_files():
data_files = [
('share/doc/compose-templates', ['docs/compose-templates/bukkit/bukkit-docker-compose.yml', 'docs/compose-templates/paper/paper-docker-compose.yml', 'docs/compose-templates/vanilla/hardcore-docker-compose.yml'] ),
( 'share/doc/mcserve',['AUTHORS.md', 'README.md','CONTRIBUTING.md', 'conf/mcserve.conf']),
('share/man/man1', ['docs/man/mcserve.1'])
]
return data_files
setuptools.setup(
name="mcserve",
version=version,
author="<NAME>",
author_email="<EMAIL>",
description="docker-minecraft-server manager.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/aakashhemadri/mcserve",
packages=setuptools.find_packages(),
data_files=get_data_files(),
entry_points={"console_scripts": ['mcserve=mcserve:main']},
classifiers=[
'Environment :: Console :: Curses',
'Intended Audience :: End Users/Desktop',
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3',
)
``` |
{
"source": "aakashhemadri/python",
"score": 3
} |
#### File: python/tcp_finite_state/client.py
```python
from tcp_statemachine import TCPStateMachine, Data
import socket
class Client:
"""
Client class
"""
    def __init__(self, statemachine, data):
        self.ME = 'client'
        self.HOST = '127.0.0.1'
        self.PORT = 22085
        self.ADDRESS = (self.HOST, self.PORT)
        self.statemachine = statemachine
        self.data = data
        self.client_socket = socket.socket()
    def run(self):
        self.client_socket.connect(self.ADDRESS)
        self.statemachine.send_syn(self.client_socket, self.statemachine)
        self.statemachine.conn_estb_client()
        self.data.receive(self.client_socket, self.statemachine)
        while self.statemachine.is_established:
            self.data.send(self.client_socket, self.statemachine)
            self.data.receive(self.client_socket, self.statemachine)
        self.client_socket.close()
def whoami(self):
return self.ME
def main():
    tcp_fsm = TCPStateMachine()
data = Data(12345)
client = Client(tcp_fsm, data)
if __name__ == '__main__':
main()
```
#### File: python/validate_ip/class_identification.py
```python
from validate import validate,request
def identify(ip):
if(ip == False):
return False
else:
#ip = '.'.join(ip)
netid = format(int(ip[0]),'08b')
if(netid[:1] == '0'):
cls = 'A'
cls_ip = [255,0,0,0]
elif(netid[:2] == '10'):
cls = 'B'
cls_ip = [255,255,0,0]
elif(netid[:3] == '110'):
cls = 'C'
cls_ip = [255,255,255,0]
elif(netid[:4] == '1110'):
cls = 'D'
cls_ip = [255,255,255,255]
elif(netid[:4] == '1111'):
cls = 'E'
cls_ip = [255,255,255,255]
return (ip, cls, cls_ip)
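# Worked example (assuming validate() returns the address as a list of octet strings,
# which is what the indexing above expects):
#   identify(['10', '0', '0', '1']) -> (['10', '0', '0', '1'], 'A', [255, 0, 0, 0])
# because 10 is 0b00001010 and its leading bit '0' marks a class A address.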
def test():
print('Running Test..')
message = identify(validate(request()))
if(message != False):
print(message)
else:
print('Invalid IP Address! Try Again!')
if __name__=='__main__':
test()
``` |
{
"source": "AakashKath/location_service",
"score": 2
} |
#### File: app/location/tests.py
```python
from unittest.mock import patch
from django.core.management import call_command
from django.db.utils import OperationalError
from django.test import TestCase
from location.models import Locality, City
def sample_city(name='city'):
"""Creates and returns sample city"""
return City.objects.create(name=name)
def sample_locality(**params):
"""Creates and returns sample locality"""
defaults = {
'name': 'local',
'latitude': 0,
'longitude': 0,
'city': sample_city()
}
defaults.update(params)
return Locality.objects.create(**defaults)
class CommandTests(TestCase):
def test_wait_for_db_ready(self):
"""Test waiting for db when db available"""
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.return_value = True
call_command('wait_for_db')
self.assertEqual(gi.call_count, 1)
@patch('time.sleep', return_value=True)
def test_wait_for_db(self, ts):
"""Test waiting for db"""
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.side_effect = [OperationalError] * 5 + [True]
call_command('wait_for_db')
self.assertEqual(gi.call_count, 6)
class ModelTests(TestCase):
    def test_city_str(self):
"""Test city string representation"""
city = sample_city(name='city1')
self.assertEqual(str(city), city.name)
def test_locality_str(self):
"""Test city string representation"""
locality = sample_locality(name='local1')
self.assertEqual(str(locality), locality.name)
```
#### File: app/location/views.py
```python
from django.db.models import F
from rest_framework import viewsets
from ratelimit.mixins import RatelimitMixin
from location.models import Locality, City
from location.serializers import LocalitySerializer, CitySerializer
class CityViewSet(viewsets.ModelViewSet):
"""Manage City objects"""
queryset = City.objects.all()
serializer_class = CitySerializer
class LocalityNameViewSet(viewsets.ModelViewSet, RatelimitMixin):
"""Manage locality objects by name"""
ratelimit_key = 'ip'
ratelimit_rate = '5/m'
ratelimit_block = True
queryset = Locality.objects.all()
serializer_class = LocalitySerializer
def get_queryset(self):
city = self.request.query_params.get('name')
lat_str = self.request.query_params.get('lat') or '0'
long_str = self.request.query_params.get('long') or '0'
lat = float(lat_str)
long = float(long_str)
queryset = self.queryset
if city:
queryset = queryset.filter(name__istartswith=city)\
.order_by((F('latitude')-lat)**2+(F('longitude')-long)**2)[:5]
return queryset
class LocalityDistanceViewSet(viewsets.ModelViewSet, RatelimitMixin):
"""Manage locality objects by distance"""
ratelimit_key = 'ip'
ratelimit_rate = '5/m'
ratelimit_block = True
queryset = Locality.objects.all()
serializer_class = LocalitySerializer
def get_queryset(self):
lat_str = self.request.query_params.get('lat') or '0'
long_str = self.request.query_params.get('long') or '0'
lat = float(lat_str)
long = float(long_str)
queryset = self.queryset
if lat_str and long_str:
queryset = queryset.annotate(distance=(F('latitude')-lat)**2+(F('longitude')-long)**2)\
.filter(distance__lte=10).order_by((F('latitude')-lat)**2+(F('longitude')-long)**2)
return queryset
``` |
{
"source": "Aakash-kaushik/aibox",
"score": 3
} |
#### File: aibox/aibox/utils.py
```python
import os
import io
from PIL import Image
def fetch(url):
# efficient loading of URLs with an on-disk cache
import tempfile, hashlib, requests  # os is already imported at module level
fp = os.path.join(tempfile.gettempdir(), hashlib.md5(url.encode("utf-8")).hexdigest())
if os.path.isfile(fp) and os.stat(fp).st_size > 0:
with open(fp, "rb") as f:
dat = f.read()
else:
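# Download once, write to a .tmp file and rename, so a partial download never ends up in the cache.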
dat = requests.get(url).content
with open(fp + ".tmp", "wb") as f:
f.write(dat)
os.rename(fp + ".tmp", fp)
return dat
def get_image(file_path_or_url):
if os.path.exists(file_path_or_url):
return Image.open(file_path_or_url)
else:
return Image.open(io.BytesIO(fetch(file_path_or_url)))
def folder(x):
# get the folder of this file path; os is already imported at module level
return os.path.split(os.path.abspath(x))[0]
``` |
{
"source": "Aakash-kaushik/robert_bot",
"score": 3
} |
#### File: Aakash-kaushik/robert_bot/main.py
```python
import os, re, codecs, csv, random
import unicodedata,itertools, torch
import torch.nn as nn
from torch import optim
corpus_name="cornell_movie_dialogs_corpus"
corpus=os.path.join("data",corpus_name)
def print_lines(file,n=10):
"""Shows some lines ftom the text file"""
with open(file,'rb') as datafile:
lines=datafile.readlines()
for line in lines[:n]:
print(line)
device=torch.device("cuda" if torch.cuda.is_available() else "cpu")
def load_lines(file,fields):
"""loads lines and splits then into fields and return a dict"""
lines={}
with open(file,"r",encoding="iso-8859-1") as f:
for line in f:
values=line.split(" +++$+++ ")
line_obj={}
for idx,field in enumerate(fields):
line_obj[field]=values[idx]
lines[line_obj[fields[0]]]=line_obj
return lines
def load_conv(file,lines,fields):
convs=[]
with open(file,"r",encoding="iso-8859-1") as f:
for line in f:
values=line.split(" +++$+++ ")
conv_obj={}
for idx,field in enumerate(fields):
conv_obj[field]=values[idx]
line_id_pattern=re.compile('L[0-9]+')
line_ids=line_id_pattern.findall(conv_obj[fields[-1]])
conv_obj["lines"]=[]
for line_id in line_ids:
conv_obj["lines"].append(lines[line_id])
convs.append(conv_obj)
return convs
def sentence_pair_extract(convs):
qa_pairs=[]
for conv in convs:
for i in range(len(conv["lines"])-1):
input_line=conv["lines"][i]["text"].strip()
target_line=conv["lines"][i+1]["text"].strip()
if input_line and target_line:
qa_pairs.append([input_line,target_line])
return qa_pairs
#paths to files
movie_file=os.path.join(corpus,"movie_lines.txt")
character_file=os.path.join(corpus,"movie_conversations.txt")
formatted_file=os.path.join(corpus,"formatted_movie_lines.txt")
delimiter="\t"
delimiter=str(codecs.decode(delimiter,"unicode_escape"))
lines=load_lines(movie_file,["lineID", "characterID", "movieID", "character", "text"])
convs=load_conv(character_file,lines,["character1ID", "character2ID", "movieID", "utteranceIDs"])
with open(formatted_file, "w", encoding="utf-8") as outputfile:
writer=csv.writer(outputfile,delimiter=delimiter,lineterminator="\n")
for pair in sentence_pair_extract(convs):
writer.writerow(pair)
#text_manip part
formatted_file=os.path.join(corpus,"formatted_movie_lines.txt")
pad_token=0
sos_token=1
eos_token=2
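# Voc maps words to indices (and back), tracking counts so rare words can be trimmed before training.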
class Voc:
def __init__(self,name):
self.name=name
self.trimmed=False
self.word2idx={}
self.word2count={}
self.idx2word={pad_token:"PAD",sos_token:"SOS",eos_token:"EOS"}
self.num_words=3
def add_sentence(self,sentence):
for word in sentence.split(" "):
self.add_word(word)
def add_word(self,word):
if word not in self.word2idx:
self.word2idx[word]=self.num_words
self.word2count[word]=1
self.idx2word[self.num_words]=word
self.num_words+=1
else:
self.word2count[word]+=1
def trim(self,min_thr):
if self.trimmed:
return
self.trimmed=True
keep_word=[]
for k,v in self.word2count.items():
if v>=min_thr:
keep_word.append(k)
self.word2idx={}
self.word2count={}
self.idx2word={pad_token:"PAD",sos_token:"SOS",eos_token:"EOS"}
self.num_words=3
for word in keep_word:
self.add_word(word)
max_length=10
def unicode_to_ascii(s):
return "".join(c for c in unicodedata.normalize('NFD',s) if unicodedata.category(c)!="Mn")
def normalize_string(s):
s=unicode_to_ascii(s.lower().strip())
s=re.sub(r"([.!?])",r" \1",s)
s=re.sub(r"[^a-zA-Z.!?]+",r" ",s)
s=re.sub(r"\s+",r" ",s).strip()
return s
def read_vocs(formatted_file,corpus_name):
lines=open(formatted_file,encoding="utf-8").read().strip().split("\n")
pairs=[[normalize_string(s) for s in l.split("\t")] for l in lines]
voc=Voc(corpus_name)
return voc, pairs
def filter_pair(p):
"""Returns True if pairs are smaller than max_length"""
return len(p[0].split(" ")) <= max_length and len(p[1].split(" ")) <= max_length
def filter_pairs(pairs):
"""returns pairs of with filter_pair()==True"""
return [pair for pair in pairs if filter_pair(pair)]
def load_prepare_data(corpus,corpus_name,formatted_file,save_dir):
voc,pairs=read_vocs(formatted_file,corpus_name)
pairs=filter_pairs(pairs)
for pair in pairs:
voc.add_sentence(pair[0])
voc.add_sentence(pair[1])
return voc, pairs
save_dir=os.path.join("data","save")
voc,pairs=load_prepare_data(corpus,corpus_name,formatted_file,save_dir)
def trim_rare_words(voc,pairs,min_count):
voc.trim(min_count)
keep_pairs=[]
for pair in pairs:
input_sentence=pair[0]
output_sentence=pair[1]
keep_input=True
keep_output=True
for word in input_sentence.split(" "):
if word not in voc.word2idx:
keep_input=False
break
for word in output_sentence.split(" "):
if word not in voc.word2idx:
keep_output=False
break
if keep_output and keep_input:
keep_pairs.append(pair)
return keep_pairs
min_count=3 #pairs containing any word seen fewer than min_count times will be removed by trim_rare_words()
pairs=trim_rare_words(voc,pairs,min_count)
def indexes_from_sentence(voc,sentence):
return [voc.word2idx[word] for word in sentence.split(" ")]+[eos_token]
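# zero_padding transposes a batch of index lists to (max_len, batch) shape, padding short sentences with pad_token.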
def zero_padding(l,fillval=pad_token):
return list(itertools.zip_longest(*l,fillvalue=fillval))
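# binary_matrix marks real tokens as 1 and padding as 0; it later becomes the Boolean loss mask.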
def binary_matrix(l,val=pad_token):
m=[]
for i, seq in enumerate(l):
m.append([])
for token in seq:
if token==pad_token:
m[i].append(0)
else:
m[i].append(1)
return m
def input_var(l,voc):
indexes_batch=[indexes_from_sentence(voc,sentence) for sentence in l]
lengths=torch.tensor([len(indexes) for indexes in indexes_batch])
pad_list=zero_padding(indexes_batch)
pad_var=torch.LongTensor(pad_list)
return pad_var,lengths
def output_var(l,voc):
indexes_batch=[indexes_from_sentence(voc,sentence) for sentence in l]
max_target_length=max([len(indexes) for indexes in indexes_batch])
pad_list=zero_padding(indexes_batch)
mask=binary_matrix(pad_list)
mask=torch.BoolTensor(mask)
pad_var=torch.LongTensor(pad_list)
return pad_var,mask,max_target_length
def batch2train_data(voc,pair_batch):
pair_batch.sort(key=lambda x: len(x[0].split(" ")),reverse=True)
input_batch,output_batch=[],[]
for pair in pair_batch:
input_batch.append(pair[0])
output_batch.append(pair[1])
inp,lengths=input_var(input_batch,voc)
output,mask,max_target_length=output_var(output_batch,voc)
return inp,lengths,output,mask,max_target_length
#model part
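# encoder_rnn: embedding followed by a bidirectional GRU; forward and backward outputs are summed so the output stays hidden_size wide.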
class encoder_rnn(nn.Module):
def __init__(self,hidden_size,embedding,n_layers=1,dropout=0):
super(encoder_rnn,self).__init__()
self.n_layers=n_layers
self.hidden_size=hidden_size
self.embedding=embedding
self.gru=nn.GRU(hidden_size,hidden_size, n_layers,dropout=(0 if n_layers==1 else dropout),bidirectional=True)
def forward(self,input_seq,input_lengths,hidden=None):
embedded=self.embedding(input_seq)
packed=nn.utils.rnn.pack_padded_sequence(embedded,input_lengths)
outputs,hidden=self.gru(packed,hidden)
outputs,_=nn.utils.rnn.pad_packed_sequence(outputs)
outputs=outputs[:,:,:self.hidden_size]+outputs[:,:,self.hidden_size:]
return outputs,hidden
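# Attn implements Luong-style global attention with dot, general and concat scoring variants.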
class Attn(nn.Module):
def __init__(self,method,hidden_size):
super(Attn,self).__init__()
self.method=method
if self.method not in ["dot","general","concat"]:
raise ValueError(self.method,"is not an appropriate attention method")
self.hidden_size=hidden_size
if self.method=="general":
self.attn=nn.Linear(self.hidden_size,hidden_size)
elif self.method=="concat":
self.attn=nn.Linear(self.hidden_size*2,hidden_size)
self.v=nn.Parameter(torch.FloatTensor(hidden_size))
def dot_score(self,hidden,encoder_output):
return torch.sum(hidden*encoder_output,dim=2)
def general_score(self,hidden,encoder_output):
energy=self.attn(encoder_output)
return torch.sum(energy*hidden,dim=2)
def concat_score(self,hidden,encoder_output):
energy=self.attn(torch.cat((hidden.expand(encoder_output.size(0),-1,-1),encoder_output),2)).tanh()
return torch.sum(self.v*energy,dim=2)
def forward(self,hidden,encoder_outputs):
if self.method=="general":
attn_energies=self.general_score(hidden,encoder_outputs)
elif self.method=="concat":
attn_energies=self.concat_score(hidden,encoder_outputs)
elif self.method=="dot":
attn_energies=self.dot_score(hidden,encoder_outputs)
attn_energies=attn_energies.t()
return nn.functional.softmax(attn_energies,dim=1).unsqueeze(1)
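# attn_decoder_rnn decodes one step at a time: the GRU output is combined with the attention context, then projected to vocabulary probabilities.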
class attn_decoder_rnn(nn.Module):
def __init__(self,attn_model,embedding,hidden_size,output_size,n_layers=1,dropout=0.1):
super(attn_decoder_rnn,self).__init__()
self.attn_model=attn_model
self.hidden_size=hidden_size
self.output_size=output_size
self.n_layers=n_layers
self.dropout=dropout
self.embedding=embedding
self.embedding_dropout=nn.Dropout(dropout)
self.gru=nn.GRU(hidden_size,hidden_size,n_layers,dropout=(0 if n_layers==1 else dropout))
self.concat=nn.Linear(hidden_size*2,hidden_size)
self.out=nn.Linear(hidden_size,output_size)
self.attn=Attn(attn_model,hidden_size)
def forward(self,input_step,last_hidden,encoder_outputs):
embedded=self.embedding(input_step)
embedded=self.embedding_dropout(embedded)
rnn_output,hidden=self.gru(embedded,last_hidden)
attn_weights=self.attn(rnn_output,encoder_outputs)
context=attn_weights.bmm(encoder_outputs.transpose(0,1))
rnn_output=rnn_output.squeeze(0)
context=context.squeeze(1)
concat_input=torch.cat((rnn_output,context),1)
concat_output=torch.tanh(self.concat(concat_input))
output=self.out(concat_output)
output=nn.functional.softmax(output,dim=1)
return output,hidden
#train part
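# mask_nll_loss: negative log-likelihood averaged only over non-padded target positions; also returns the count of real tokens.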
def mask_nll_loss(inp,target,mask):
n_total=mask.sum()
cross_entropy = -torch.log(torch.gather(inp,1,target.view(-1,1)).squeeze(1))
loss=cross_entropy.masked_select(mask).mean()
loss=loss.to(device)
return loss, n_total.item()
def train(input_variable,lengths,target_variable, mask,max_target_length,encoder,decoder,embedding,
encoder_optimizer,decoder_optimizer,batch_size,clip,max_length=15):
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
input_variable=input_variable.to(device)
lengths=lengths.to(device)
target_variable=target_variable.to(device)
mask=mask.to(device)
loss=0
print_losses=[]
n_totals=0
encoder_outputs, encoder_hidden = encoder(input_variable,lengths)
decoder_input=torch.LongTensor([[sos_token for _ in range(batch_size)]])
decoder_input=decoder_input.to(device)
decoder_hidden=encoder_hidden[:decoder.n_layers]
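# Teacher forcing feeds the ground-truth token as the next decoder input instead of the decoder's own prediction.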
use_teacher_forcing= True if random.random() < teacher_forcing_ratio else False
if use_teacher_forcing:
for t in range(max_target_length):
decoder_output,decoder_hidden=decoder(decoder_input,decoder_hidden,encoder_outputs)
decoder_input=target_variable[t].view(1,-1)
mask_loss,n_total=mask_nll_loss(decoder_output,target_variable[t],mask[t])
loss+=mask_loss
print_losses.append(mask_loss.item()*n_total)
n_totals += n_total
else:
for t in range(max_target_length):
decoder_output,decoder_hidden=decoder(decoder_input,decoder_hidden,encoder_outputs)
_,topi=decoder_output.topk(1)
decoder_input=torch.LongTensor([[topi[i][0] for i in range(batch_size)]])
decoder_input=decoder_input.to(device)
mask_loss,n_total=mask_nll_loss(decoder_output,target_variable[t],mask[t])
loss+=mask_loss
print_losses.append(mask_loss.item()*n_total)
n_totals+=n_total
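# Backpropagate the accumulated masked loss, clip gradients, then step both optimizers.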
loss.backward()
_=torch.nn.utils.clip_grad_norm_(encoder.parameters(),clip)
_=torch.nn.utils.clip_grad_norm_(decoder.parameters(),clip)
encoder_optimizer.step()
decoder_optimizer.step()
return sum(print_losses)/n_totals
def train_iters(model_name, voc, pairs, encoder, decoder, encoder_optimizer, decoder_optimizer, embedding, encoder_n_layers,
decoder_n_layers, save_dir, n_iter, batch_size, print_every, save_every, clip, corpus_name, load_filename):
training_batches=[batch2train_data(voc,[random.choice(pairs) for _ in range(batch_size)]) for _ in range(n_iter)]
print("initializing")
start_iter=1
print_loss=0
if load_filename:
start_iter=checkpoint['iteration']+1
print('Training')
for iteration in range(start_iter,n_iter+1):
training_batch=training_batches[iteration-1]
input_variable,lengths,target_variable,mask,max_target_length=training_batch
loss=train(input_variable,lengths,target_variable,mask,max_target_length,encoder,decoder,embedding,encoder_optimizer,decoder_optimizer,batch_size,clip)
print_loss+=loss
if iteration % print_every == 0:
print_loss_avg=print_loss/print_every
print("iteration: {}; Percent complete: {:.1f}%; Average loss: {:.4f}".format(iteration,iteration/n_iteration*100,print_loss_avg))
print_loss=0
if iteration % save_every == 0:
directory=os.path.join(save_dir,model_name,corpus_name,'{}-{}_{}'.format(encoder_n_layers,decoder_n_layers,hidden_size))
if not os.path.exists(directory):
os.makedirs(directory)
torch.save({"iteration":iteration,
"en":encoder.state_dict(),
"de":decoder.state_dict(),
"en_opt":encoder_optimizer.state_dict(),
"de_opt":decoder_optimizer.state_dict(),
"loss":loss,
"voc_dict":voc.__dict__,
"embedding":embedding.state_dict()
},os.path.join(directory,"{}_{}.tar".format(iteration,"checkpoint")))
#eval part
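# greedy_search_decoder runs the encoder once, then repeatedly picks the highest-probability token until max_length tokens are produced.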
class greedy_search_decoder(nn.Module):
def __init__(self,encoder,decoder):
super(greedy_search_decoder,self).__init__()
self.encoder=encoder
self.decoder=decoder
def forward(self,input_seq,input_length,max_length):
encoder_outputs,encoder_hidden=self.encoder(input_seq,input_length)
decoder_hidden=encoder_hidden[:self.decoder.n_layers]
decoder_input=torch.ones(1,1,device=device,dtype=torch.long)* sos_token
all_tokens=torch.zeros([0],device=device,dtype=torch.long)
all_scores=torch.zeros([0],device=device)
for _ in range(max_length):
decoder_output,decoder_hidden=self.decoder(decoder_input,decoder_hidden,encoder_outputs)
decoder_scores,decoder_input=torch.max(decoder_output,dim=1)
all_tokens=torch.cat((all_tokens,decoder_input),dim=0)
all_scores=torch.cat((all_scores,decoder_scores),dim=0)
decoder_input=torch.unsqueeze(decoder_input,0)
return all_tokens,all_scores
def evaluate(encoder,decoder,searcher,voc,sentence,max_length=max_length):
indexes_batch=[indexes_from_sentence(voc,sentence)]
lengths=torch.tensor([len(indexes) for indexes in indexes_batch])
input_batch=torch.LongTensor(indexes_batch).transpose(0,1)
input_batch=input_batch.to(device)
lengths=lengths.to(device)
tokens,scores=searcher(input_batch,lengths,max_length)
decoded_words=[voc.idx2word[token.item()] for token in tokens]
return decoded_words
def eval_input(encoder,decoder,searcher,voc):
input_sentence=""
while(1):
try:
input_sentence=input("> ")
if input_sentence=='q' or input_sentence=="quit": break
input_sentence=normalize_string(input_sentence)
output_words=evaluate(encoder,decoder,searcher,voc,input_sentence)
output_words[:]=[x for x in output_words if not(x=='EOS' or x=='PAD')]
print("Robert:",' '.join(output_words))
except KeyError:
print("Error: Encountered unknown word.")
'''#training
model_name = 'cb_model'
attn_model = 'dot'
hidden_size = 500
encoder_n_layers = 2
decoder_n_layers = 2
dropout = 0.1
batch_size = 64
# Set checkpoint to load from; set to None if starting from scratch
attn_model="general"
load_filename=None
embedding = nn.Embedding(voc.num_words, hidden_size)
encoder = encoder_rnn(hidden_size, embedding, encoder_n_layers, dropout)
decoder = attn_decoder_rnn(attn_model, embedding, hidden_size, voc.num_words, decoder_n_layers, dropout)
encoder = encoder.to(device)
decoder = decoder.to(device)
clip=50.0
teacher_forcing_ratio=1.0
learning_rate=0.0001
decoder_learning_ratio=5.0
n_iteration=5000
print_every=1
save_every=500
encoder.train()
decoder.train()
encoder_optimizer=optim.Adam(encoder.parameters(),lr=learning_rate)
decoder_optimizer=optim.Adam(decoder.parameters(),lr=learning_rate*decoder_learning_ratio)
if load_filename:
encoder_optimizer.load_state_dict(encoder_optimizer_sd)
decoder_optimizer.load_state_dict(decoder_optimizer_sd)
print("starting training")
train_iters(model_name, voc, pairs, encoder, decoder, encoder_optimizer, decoder_optimizer,
embedding, encoder_n_layers, decoder_n_layers, save_dir, n_iteration, batch_size,
print_every, save_every, clip, corpus_name, load_filename)'''
#eval
model_name = 'cb_model'
attn_model = 'dot'
attn_model = 'general'
#attn_model = 'concat'
hidden_size = 500
encoder_n_layers = 2
decoder_n_layers = 2
dropout = 0.1
batch_size = 64
# Set checkpoint to load from; set to None if starting from scratch
loadFilename = r"checkpoint/5000_checkpoint.tar"
checkpoint_iter = 5000
if loadFilename:
# If loading on same machine the model was trained on
checkpoint = torch.load(loadFilename)
# If loading a model trained on GPU to CPU
#checkpoint = torch.load(loadFilename, map_location=torch.device('cpu'))
encoder_sd = checkpoint['en']
decoder_sd = checkpoint['de']
encoder_optimizer_sd = checkpoint['en_opt']
decoder_optimizer_sd = checkpoint['de_opt']
embedding_sd = checkpoint['embedding']
voc.__dict__ = checkpoint['voc_dict']
# Initialize word embeddings
embedding = nn.Embedding(voc.num_words, hidden_size)
if loadFilename:
embedding.load_state_dict(embedding_sd)
# Initialize encoder & decoder models
encoder = encoder_rnn(hidden_size, embedding, encoder_n_layers, dropout)
decoder = attn_decoder_rnn(attn_model, embedding, hidden_size, voc.num_words, decoder_n_layers, dropout)
if loadFilename:
encoder.load_state_dict(encoder_sd)
decoder.load_state_dict(decoder_sd)
# Use appropriate device
encoder = encoder.to(device)
decoder = decoder.to(device)
print('Robert: Hi, I am Robert')
encoder.eval()
decoder.eval()
# Initialize search module
searcher = greedy_search_decoder(encoder, decoder)
# Begin chatting (uncomment and run the following line to begin)
eval_input(encoder, decoder, searcher, voc)
``` |
{
"source": "AakashKhatu/iDontNeedThis",
"score": 3
} |
#### File: i_dont_need_this/qr/views.py
```python
from django.http import HttpResponse
import requests
from django.contrib.auth.decorators import login_required
def scanned(request):
rpi_ip = "192.168.0.10:5000"
box_id = request.GET["id"]
box_state = request.GET["open"]
# return HttpResponse(
# "invalid arguments in url")
print("qr code of box with id: {0} scanned".format(box_id))
url = "http://"+rpi_ip+"/?open="+box_state
response = requests.request("GET", url)
return HttpResponse(
"Box is currently {0}".format(
"opened" if box_state == "True" else "Closed"))
```
#### File: i_dont_need_this/web/otp.py
```python
import requests
import random
def send_otp(number):
url = "https://www.fast2sms.com/dev/bulk"
otp = random.randint(10000, 99999)
querystring = {"authorization": "<KEY>",
"sender_id": "FSTSMS", "language": "english", "route": "qt",
"numbers": number, "message": "8528",
"variables": "{AA}", "variables_values": otp}
headers = {
'cache-control': "no-cache"
}
response = requests.request(
"GET", url, headers=headers, params=querystring)
return (response.ok, otp)
```
#### File: i_dont_need_this/web/views.py
```python
from django.shortcuts import render, redirect
from django.views.generic import TemplateView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth import login
from django.contrib.auth.models import User
from .otp import send_otp
import json
from django.http import HttpResponse
from django.contrib.auth import logout
def logout_view(request):
logout(request)
return redirect('index')
def about_view(request):
return render(request, 'web/about.html', {})
def faq_view(request):
return render(request, 'web/faq.html', {})
def submit_view(request):
return render(request, 'web/submit.html', {})
def pay_view(request):
return render(request, 'web/paymentGateway.html', {})
def otp_view(request):
resp = send_otp(request.GET['phone'])
response_data = {}
response_data['result'] = resp[0]
response_data['otp'] = resp[1]
return HttpResponse(json.dumps(response_data), content_type="application/json")
class index(TemplateView):
def get(self, request):
if request.user.is_authenticated:
c = {"phnum": request.user.username}
else:
c = {"phnum": None}
return render(request, "web/index.html", c)
def post(self, request):
try:
user = User.objects.get(username=request.POST.get('phone'))
print("user exists", user)
except User.DoesNotExist:
user = User.objects.create_user(
request.POST.get('phone'), '<EMAIL>', 'password')
user.save()
print("created user", user)
if request.POST.get('otphidden') == request.POST.get('password'):
print("validated")
user = login(request, user)
return render(request, "web/index.html", {})
class dash(TemplateView):
def get(self, request):
return render(request, "web/FrontEnd.html", {})
def post(self, request):
otp = send_otp(request.POST.get('phone'))[1]  # send_otp returns (ok, otp); keep just the code
if 'login' in request.POST:
try:
user = User.objects.get(username=request.POST.get('phone'))
print("user exists", user)
except User.DoesNotExist:
user = User.objects.create_user(
request.POST.get('phone'), '<EMAIL>', 'password')
user.save()
print("created user", user)
if str(otp) == request.POST.get('password'):
print("validated")
else:
print("LOL")
user = login(request, user)
return render(request, "web/FrontEnd.html", {})
else:
print(request.POST)
return redirect("submit")
class profile(LoginRequiredMixin, TemplateView):
login_url = '/'
def get(self, request):
return render(request, "web/profile.html", {})
def post(self, request):
print(request.POST)
return render(request, "web/FrontEnd.html", {})
def handler404(request, exception, template_name="404.html"):
response = render(request, "web/404.html")
response.status_code = 404
return response
``` |
{
"source": "AakashKhatu/lbry-sdk",
"score": 2
} |
#### File: unit/stream/test_managed_stream.py
```python
import os
import shutil
import unittest
from unittest import mock
import asyncio
from lbry.blob.blob_file import MAX_BLOB_SIZE
from lbry.blob_exchange.serialization import BlobResponse
from lbry.blob_exchange.server import BlobServerProtocol
from lbry.dht.node import Node
from lbry.dht.peer import KademliaPeer
from lbry.extras.daemon.storage import StoredStreamClaim
from lbry.stream.managed_stream import ManagedStream
from lbry.stream.descriptor import StreamDescriptor
from tests.unit.blob_exchange.test_transfer_blob import BlobExchangeTestBase
def get_mock_node(loop):
mock_node = mock.Mock(spec=Node)
mock_node.joined = asyncio.Event(loop=loop)
mock_node.joined.set()
return mock_node
class TestManagedStream(BlobExchangeTestBase):
async def create_stream(self, blob_count: int = 10):
self.stream_bytes = b''
for _ in range(blob_count):
self.stream_bytes += os.urandom((MAX_BLOB_SIZE - 1))
# create the stream
file_path = os.path.join(self.server_dir, "test_file")
with open(file_path, 'wb') as f:
f.write(self.stream_bytes)
descriptor = await StreamDescriptor.create_stream(self.loop, self.server_blob_manager.blob_dir, file_path)
self.sd_hash = descriptor.calculate_sd_hash()
return descriptor
async def setup_stream(self, blob_count: int = 10):
await self.create_stream(blob_count)
self.stream = ManagedStream(
self.loop, self.client_config, self.client_blob_manager, self.sd_hash, self.client_dir
)
async def test_status_file_completed(self):
await self._test_transfer_stream(10)
self.assertTrue(self.stream.output_file_exists)
self.assertTrue(self.stream.completed)
with open(self.stream.full_path, 'w+b') as outfile:
outfile.truncate(1)
self.assertTrue(self.stream.output_file_exists)
self.assertFalse(self.stream.completed)
async def _test_transfer_stream(self, blob_count: int, mock_accumulate_peers=None, stop_when_done=True):
await self.setup_stream(blob_count)
mock_node = mock.Mock(spec=Node)
def _mock_accumulate_peers(q1, q2):
async def _task():
pass
q2.put_nowait([self.server_from_client])
return q2, self.loop.create_task(_task())
mock_node.accumulate_peers = mock_accumulate_peers or _mock_accumulate_peers
await self.stream.save_file(node=mock_node)
await self.stream.finished_write_attempt.wait()
self.assertTrue(os.path.isfile(self.stream.full_path))
if stop_when_done:
await self.stream.stop()
self.assertTrue(os.path.isfile(self.stream.full_path))
with open(self.stream.full_path, 'rb') as f:
self.assertEqual(f.read(), self.stream_bytes)
await asyncio.sleep(0.01)
async def test_transfer_stream(self):
await self._test_transfer_stream(10)
self.assertEqual(self.stream.status, "finished")
self.assertFalse(self.stream._running.is_set())
async def test_delayed_stop(self):
await self._test_transfer_stream(10, stop_when_done=False)
self.assertEqual(self.stream.status, "finished")
self.assertTrue(self.stream._running.is_set())
await asyncio.sleep(0.5, loop=self.loop)
self.assertTrue(self.stream._running.is_set())
await asyncio.sleep(2, loop=self.loop)
self.assertEqual(self.stream.status, "finished")
self.assertFalse(self.stream._running.is_set())
@unittest.SkipTest
async def test_transfer_hundred_blob_stream(self):
await self._test_transfer_stream(100)
async def test_transfer_stream_bad_first_peer_good_second(self):
await self.setup_stream(2)
mock_node = mock.Mock(spec=Node)
q = asyncio.Queue()
bad_peer = KademliaPeer(self.loop, "127.0.0.1", b'2' * 48, tcp_port=3334)
def _mock_accumulate_peers(q1, q2):
async def _task():
pass
q2.put_nowait([bad_peer])
self.loop.call_later(1, q2.put_nowait, [self.server_from_client])
return q2, self.loop.create_task(_task())
mock_node.accumulate_peers = _mock_accumulate_peers
await self.stream.save_file(node=mock_node)
await self.stream.finished_writing.wait()
self.assertTrue(os.path.isfile(self.stream.full_path))
with open(self.stream.full_path, 'rb') as f:
self.assertEqual(f.read(), self.stream_bytes)
await self.stream.stop()
# self.assertIs(self.server_from_client.tcp_last_down, None)
# self.assertIsNot(bad_peer.tcp_last_down, None)
async def test_client_chunked_response(self):
self.server.stop_server()
class ChunkedServerProtocol(BlobServerProtocol):
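# Serve the blob response one byte at a time to exercise the client's handling of chunked/partial reads.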
def send_response(self, responses):
to_send = []
while responses:
to_send.append(responses.pop())
for byte in BlobResponse(to_send).serialize():
self.transport.write(bytes([byte]))
self.server.server_protocol_class = ChunkedServerProtocol
self.server.start_server(33333, '127.0.0.1')
self.assertEqual(0, len(self.client_blob_manager.completed_blob_hashes))
await asyncio.wait_for(self._test_transfer_stream(10), timeout=2)
self.assertEqual(11, len(self.client_blob_manager.completed_blob_hashes))
async def test_create_and_decrypt_one_blob_stream(self, blobs=1, corrupt=False):
descriptor = await self.create_stream(blobs)
# copy blob files
shutil.copy(os.path.join(self.server_blob_manager.blob_dir, self.sd_hash),
os.path.join(self.client_blob_manager.blob_dir, self.sd_hash))
self.stream = ManagedStream(self.loop, self.client_config, self.client_blob_manager, self.sd_hash,
self.client_dir)
for blob_info in descriptor.blobs[:-1]:
shutil.copy(os.path.join(self.server_blob_manager.blob_dir, blob_info.blob_hash),
os.path.join(self.client_blob_manager.blob_dir, blob_info.blob_hash))
if corrupt and blob_info.length == MAX_BLOB_SIZE:
with open(os.path.join(self.client_blob_manager.blob_dir, blob_info.blob_hash), "rb+") as handle:
handle.truncate()
handle.flush()
await self.stream.save_file()
await self.stream.finished_writing.wait()
if corrupt:
return self.assertFalse(os.path.isfile(os.path.join(self.client_dir, "test_file")))
with open(os.path.join(self.client_dir, "test_file"), "rb") as f:
decrypted = f.read()
self.assertEqual(decrypted, self.stream_bytes)
self.assertEqual(True, self.client_blob_manager.get_blob(self.sd_hash).get_is_verified())
self.assertEqual(
True, self.client_blob_manager.get_blob(self.stream.descriptor.blobs[0].blob_hash).get_is_verified()
)
#
# # its all blobs + sd blob - last blob, which is the same size as descriptor.blobs
# self.assertEqual(len(descriptor.blobs), len(await downloader_storage.get_all_finished_blobs()))
# self.assertEqual(
# [descriptor.sd_hash, descriptor.blobs[0].blob_hash], await downloader_storage.get_blobs_to_announce()
# )
#
# await downloader_storage.close()
# await self.storage.close()
async def test_create_and_decrypt_multi_blob_stream(self):
await self.test_create_and_decrypt_one_blob_stream(10)
# async def test_create_truncate_and_handle_stream(self):
# # The purpose of this test is just to make sure it can finish even if a blob is corrupt/truncated
# await asyncio.wait_for(self.test_create_and_decrypt_one_blob_stream(corrupt=True), timeout=5)
```
#### File: torba/client/baseaccount.py
```python
import json
import time
import asyncio
import random
import typing
from typing import Dict, Tuple, Type, Optional, Any, List
from torba.client.mnemonic import Mnemonic
from torba.client.bip32 import PrivateKey, PubKey, from_extended_key_string
from torba.client.hash import aes_encrypt, aes_decrypt, sha256
from torba.client.constants import COIN
if typing.TYPE_CHECKING:
from torba.client import baseledger, wallet as basewallet
class AddressManager:
name: str
__slots__ = 'account', 'public_key', 'chain_number', 'address_generator_lock'
def __init__(self, account, public_key, chain_number):
self.account = account
self.public_key = public_key
self.chain_number = chain_number
self.address_generator_lock = asyncio.Lock()
@classmethod
def from_dict(cls, account: 'BaseAccount', d: dict) \
-> Tuple['AddressManager', 'AddressManager']:
raise NotImplementedError
@classmethod
def to_dict(cls, receiving: 'AddressManager', change: 'AddressManager') -> Dict:
d: Dict[str, Any] = {'name': cls.name}
receiving_dict = receiving.to_dict_instance()
if receiving_dict:
d['receiving'] = receiving_dict
change_dict = change.to_dict_instance()
if change_dict:
d['change'] = change_dict
return d
def apply(self, d: dict):
pass
def to_dict_instance(self) -> Optional[dict]:
raise NotImplementedError
def _query_addresses(self, **constraints):
return self.account.ledger.db.get_addresses(
accounts=[self.account],
chain=self.chain_number,
**constraints
)
def get_private_key(self, index: int) -> PrivateKey:
raise NotImplementedError
def get_public_key(self, index: int) -> PubKey:
raise NotImplementedError
async def get_max_gap(self):
raise NotImplementedError
async def ensure_address_gap(self):
raise NotImplementedError
def get_address_records(self, only_usable: bool = False, **constraints):
raise NotImplementedError
async def get_addresses(self, only_usable: bool = False, **constraints) -> List[str]:
records = await self.get_address_records(only_usable=only_usable, **constraints)
return [r['address'] for r in records]
async def get_or_create_usable_address(self) -> str:
addresses = await self.get_addresses(only_usable=True, limit=10)
if addresses:
return random.choice(addresses)
addresses = await self.ensure_address_gap()
return addresses[0]
class HierarchicalDeterministic(AddressManager):
""" Implements simple version of Bitcoin Hierarchical Deterministic key management. """
name: str = "deterministic-chain"
__slots__ = 'gap', 'maximum_uses_per_address'
def __init__(self, account: 'BaseAccount', chain: int, gap: int, maximum_uses_per_address: int) -> None:
super().__init__(account, account.public_key.child(chain), chain)
self.gap = gap
self.maximum_uses_per_address = maximum_uses_per_address
@classmethod
def from_dict(cls, account: 'BaseAccount', d: dict) -> Tuple[AddressManager, AddressManager]:
return (
cls(account, 0, **d.get('receiving', {'gap': 20, 'maximum_uses_per_address': 1})),
cls(account, 1, **d.get('change', {'gap': 6, 'maximum_uses_per_address': 1}))
)
def apply(self, d: dict):
self.gap = d.get('gap', self.gap)
self.maximum_uses_per_address = d.get('maximum_uses_per_address', self.maximum_uses_per_address)
def to_dict_instance(self):
return {'gap': self.gap, 'maximum_uses_per_address': self.maximum_uses_per_address}
def get_private_key(self, index: int) -> PrivateKey:
return self.account.private_key.child(self.chain_number).child(index)
def get_public_key(self, index: int) -> PubKey:
return self.account.public_key.child(self.chain_number).child(index)
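# get_max_gap: the longest run of unused addresses that is followed by a used one, scanned in derivation order.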
async def get_max_gap(self) -> int:
addresses = await self._query_addresses(order_by="n asc")
max_gap = 0
current_gap = 0
for address in addresses:
if address['used_times'] == 0:
current_gap += 1
else:
max_gap = max(max_gap, current_gap)
current_gap = 0
return max_gap
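# ensure_address_gap: generate just enough new keys so the newest addresses include a full gap of unused ones.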
async def ensure_address_gap(self) -> List[str]:
async with self.address_generator_lock:
addresses = await self._query_addresses(limit=self.gap, order_by="n desc")
existing_gap = 0
for address in addresses:
if address['used_times'] == 0:
existing_gap += 1
else:
break
if existing_gap == self.gap:
return []
start = addresses[0]['pubkey'].n+1 if addresses else 0
end = start + (self.gap - existing_gap)
new_keys = await self._generate_keys(start, end-1)
await self.account.ledger.announce_addresses(self, new_keys)
return new_keys
async def _generate_keys(self, start: int, end: int) -> List[str]:
if not self.address_generator_lock.locked():
raise RuntimeError('Should not be called outside of address_generator_lock.')
keys = [self.public_key.child(index) for index in range(start, end+1)]
await self.account.ledger.db.add_keys(self.account, self.chain_number, keys)
return [key.address for key in keys]
def get_address_records(self, only_usable: bool = False, **constraints):
if only_usable:
constraints['used_times__lt'] = self.maximum_uses_per_address
if 'order_by' not in constraints:
constraints['order_by'] = "used_times asc, n asc"
return self._query_addresses(**constraints)
class SingleKey(AddressManager):
""" Single Key address manager always returns the same address for all operations. """
name: str = "single-address"
__slots__ = ()
@classmethod
def from_dict(cls, account: 'BaseAccount', d: dict)\
-> Tuple[AddressManager, AddressManager]:
same_address_manager = cls(account, account.public_key, 0)
return same_address_manager, same_address_manager
def to_dict_instance(self):
return None
def get_private_key(self, index: int) -> PrivateKey:
return self.account.private_key
def get_public_key(self, index: int) -> PubKey:
return self.account.public_key
async def get_max_gap(self) -> int:
return 0
async def ensure_address_gap(self) -> List[str]:
async with self.address_generator_lock:
exists = await self.get_address_records()
if not exists:
await self.account.ledger.db.add_keys(self.account, self.chain_number, [self.public_key])
new_keys = [self.public_key.address]
await self.account.ledger.announce_addresses(self, new_keys)
return new_keys
return []
def get_address_records(self, only_usable: bool = False, **constraints):
return self._query_addresses(**constraints)
class BaseAccount:
mnemonic_class = Mnemonic
private_key_class = PrivateKey
public_key_class = PubKey
address_generators: Dict[str, Type[AddressManager]] = {
SingleKey.name: SingleKey,
HierarchicalDeterministic.name: HierarchicalDeterministic,
}
def __init__(self, ledger: 'baseledger.BaseLedger', wallet: 'basewallet.Wallet', name: str,
seed: str, private_key_string: str, encrypted: bool,
private_key: Optional[PrivateKey], public_key: PubKey,
address_generator: dict, modified_on: float) -> None:
self.ledger = ledger
self.wallet = wallet
self.id = public_key.address
self.name = name
self.seed = seed
self.modified_on = modified_on
self.private_key_string = private_key_string
self.password: Optional[str] = None
self.private_key_encryption_init_vector: Optional[bytes] = None
self.seed_encryption_init_vector: Optional[bytes] = None
self.encrypted = encrypted
self.serialize_encrypted = encrypted
self.private_key = private_key
self.public_key = public_key
generator_name = address_generator.get('name', HierarchicalDeterministic.name)
self.address_generator = self.address_generators[generator_name]
self.receiving, self.change = self.address_generator.from_dict(self, address_generator)
self.address_managers = {am.chain_number: am for am in {self.receiving, self.change}}
ledger.add_account(self)
wallet.add_account(self)
@classmethod
def generate(cls, ledger: 'baseledger.BaseLedger', wallet: 'basewallet.Wallet',
name: str = None, address_generator: dict = None):
return cls.from_dict(ledger, wallet, {
'name': name,
'seed': cls.mnemonic_class().make_seed(),
'address_generator': address_generator or {}
})
@classmethod
def get_private_key_from_seed(cls, ledger: 'baseledger.BaseLedger', seed: str, password: str):
return cls.private_key_class.from_seed(
ledger, cls.mnemonic_class.mnemonic_to_seed(seed, password)
)
@classmethod
def keys_from_dict(cls, ledger: 'baseledger.BaseLedger', d: dict) \
-> Tuple[str, Optional[PrivateKey], PubKey]:
seed = d.get('seed', '')
private_key_string = d.get('private_key', '')
private_key = None
public_key = None
encrypted = d.get('encrypted', False)
if not encrypted:
if seed:
private_key = cls.get_private_key_from_seed(ledger, seed, '')
public_key = private_key.public_key
elif private_key_string:
private_key = from_extended_key_string(ledger, private_key_string)
public_key = private_key.public_key
if public_key is None:
public_key = from_extended_key_string(ledger, d['public_key'])
return seed, private_key, public_key
@classmethod
def from_dict(cls, ledger: 'baseledger.BaseLedger', wallet: 'basewallet.Wallet', d: dict):
seed, private_key, public_key = cls.keys_from_dict(ledger, d)
name = d.get('name')
if not name:
name = 'Account #{}'.format(public_key.address)
return cls(
ledger=ledger,
wallet=wallet,
name=name,
seed=seed,
private_key_string=d.get('private_key', ''),
encrypted=d.get('encrypted', False),
private_key=private_key,
public_key=public_key,
address_generator=d.get('address_generator', {}),
modified_on=d.get('modified_on', time.time())
)
def to_dict(self):
private_key_string, seed = self.private_key_string, self.seed
if not self.encrypted and self.private_key:
private_key_string = self.private_key.extended_key_string()
if not self.encrypted and self.serialize_encrypted:
assert None not in [self.seed_encryption_init_vector, self.private_key_encryption_init_vector]
private_key_string = aes_encrypt(
self.password, private_key_string, self.private_key_encryption_init_vector
)
seed = aes_encrypt(self.password, self.seed, self.seed_encryption_init_vector)
return {
'ledger': self.ledger.get_id(),
'name': self.name,
'seed': seed,
'encrypted': self.serialize_encrypted,
'private_key': private_key_string,
'public_key': self.public_key.extended_key_string(),
'address_generator': self.address_generator.to_dict(self.receiving, self.change),
'modified_on': self.modified_on
}
def apply(self, d: dict):
if d.get('modified_on', 0) > self.modified_on:
self.name = d['name']
self.modified_on = d.get('modified_on', time.time())
assert self.address_generator.name == d['address_generator']['name']
for chain_name in ('change', 'receiving'):
if chain_name in d['address_generator']:
chain_object = getattr(self, chain_name)
chain_object.apply(d['address_generator'][chain_name])
@property
def hash(self) -> bytes:
return sha256(json.dumps(self.to_dict()).encode())
async def get_details(self, show_seed=False, **kwargs):
satoshis = await self.get_balance(**kwargs)
details = {
'id': self.id,
'name': self.name,
'coins': round(satoshis/COIN, 2),
'satoshis': satoshis,
'encrypted': self.encrypted,
'public_key': self.public_key.extended_key_string(),
'address_generator': self.address_generator.to_dict(self.receiving, self.change)
}
if show_seed:
details['seed'] = self.seed
return details
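# decrypt treats bad padding, an undecodable seed, or a bad key string as a wrong password and returns without changes.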
def decrypt(self, password: str) -> None:
assert self.encrypted, "Key is not encrypted."
try:
seed, seed_iv = aes_decrypt(password, self.seed)
pk_string, pk_iv = aes_decrypt(password, self.private_key_string)
except ValueError: # failed to remove padding, password is wrong
return
try:
Mnemonic().mnemonic_decode(seed)
except IndexError: # failed to decode the seed, this either means it decrypted and is invalid
# or that we hit an edge case where an incorrect password gave valid padding
return
try:
private_key = from_extended_key_string(
self.ledger, pk_string
)
except (TypeError, ValueError):
return
self.seed = seed
self.seed_encryption_init_vector = seed_iv
self.private_key = private_key
self.private_key_encryption_init_vector = pk_iv
self.password = password
self.encrypted = False
def encrypt(self, password: str) -> None:
assert not self.encrypted, "Key is already encrypted."
assert isinstance(self.private_key, PrivateKey)
self.seed = aes_encrypt(password, self.seed, self.seed_encryption_init_vector)
self.private_key_string = aes_encrypt(
password, self.private_key.extended_key_string(), self.private_key_encryption_init_vector
)
self.private_key = None
self.password = None
self.encrypted = True
async def ensure_address_gap(self):
addresses = []
for address_manager in self.address_managers.values():
new_addresses = await address_manager.ensure_address_gap()
addresses.extend(new_addresses)
return addresses
async def get_addresses(self, **constraints) -> List[str]:
rows = await self.ledger.db.select_addresses('address', accounts=[self], **constraints)
return [r[0] for r in rows]
def get_address_records(self, **constraints):
return self.ledger.db.get_addresses(accounts=[self], **constraints)
def get_address_count(self, **constraints):
return self.ledger.db.get_address_count(accounts=[self], **constraints)
def get_private_key(self, chain: int, index: int) -> PrivateKey:
assert not self.encrypted, "Cannot get private key on encrypted wallet account."
return self.address_managers[chain].get_private_key(index)
def get_public_key(self, chain: int, index: int) -> PubKey:
return self.address_managers[chain].get_public_key(index)
def get_balance(self, confirmations: int = 0, **constraints):
if confirmations > 0:
height = self.ledger.headers.height - (confirmations-1)
constraints.update({'height__lte': height, 'height__gt': 0})
return self.ledger.db.get_balance(accounts=[self], **constraints)
async def get_max_gap(self):
change_gap = await self.change.get_max_gap()
receiving_gap = await self.receiving.get_max_gap()
return {
'max_change_gap': change_gap,
'max_receiving_gap': receiving_gap,
}
def get_utxos(self, **constraints):
return self.ledger.get_utxos(wallet=self.wallet, accounts=[self], **constraints)
def get_utxo_count(self, **constraints):
return self.ledger.get_utxo_count(wallet=self.wallet, accounts=[self], **constraints)
def get_transactions(self, **constraints):
return self.ledger.get_transactions(wallet=self.wallet, accounts=[self], **constraints)
def get_transaction_count(self, **constraints):
return self.ledger.get_transaction_count(wallet=self.wallet, accounts=[self], **constraints)
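# fund moves coins between accounts on the same ledger: 'everything' sweeps all UTXOs, otherwise the amount is split across the requested number of outputs.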
async def fund(self, to_account, amount=None, everything=False,
outputs=1, broadcast=False, **constraints):
assert self.ledger == to_account.ledger, 'Can only transfer between accounts of the same ledger.'
tx_class = self.ledger.transaction_class
if everything:
utxos = await self.get_utxos(**constraints)
await self.ledger.reserve_outputs(utxos)
tx = await tx_class.create(
inputs=[tx_class.input_class.spend(txo) for txo in utxos],
outputs=[],
funding_accounts=[self],
change_account=to_account
)
elif amount > 0:
to_address = await to_account.change.get_or_create_usable_address()
to_hash160 = to_account.ledger.address_to_hash160(to_address)
tx = await tx_class.create(
inputs=[],
outputs=[
tx_class.output_class.pay_pubkey_hash(amount//outputs, to_hash160)
for _ in range(outputs)
],
funding_accounts=[self],
change_account=self
)
else:
raise ValueError('An amount is required.')
if broadcast:
await self.ledger.broadcast(tx)
else:
await self.ledger.release_tx(tx)
return tx
``` |
{
"source": "AakashKT/analytic_ss_cpu",
"score": 2
} |
#### File: AakashKT/analytic_ss_cpu/visualize.py
```python
import os, sys, math, random, time, csv, copy, argparse
import matplotlib
from matplotlib import cm
import matplotlib.pyplot as plt
import numpy as np
import cv2
fig = None
ax1 = None
ax2 = None
data = {}
showOccluders = False
showSpherical = True
prev_mousex = 0
prev_mousey = 0
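# Each pixel entry is a flat list: num_lights, then per light (edge count, phi/theta pairs, xyz triplets),
# followed by the occluder polygons and the clipped polygons in the same per-polygon layout.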
def plotXY(pixel):
global ax1, data, showOccluders
ax1.clear()
ax2.clear()
pix_str = str(pixel[0]) + '_' + str(pixel[1])
pix = copy.deepcopy(data[pix_str])
num_lights = int(pix[0])
pix = pix[1:]
for l in range(num_lights):
ne = int(pix[0])
pix = pix[1:]
phi_theta = pix[:ne*2]
pix = pix[ne*2:]
xy = pix[:ne*3]
pix = pix[ne*3:]
xy = xy + xy[0:3]
xy = np.array(xy).reshape(ne+1, 3)
phi_theta = phi_theta + phi_theta[0:2]
phi_theta = np.array(phi_theta).reshape(ne+1, 2)
if showSpherical:
ax1.set_xlim(0, 2*np.pi, auto=False)
ax1.set_ylim(0, np.pi, auto=False)
ax1.invert_yaxis()
ax1.set_aspect('equal', adjustable='box')
ax1.plot(phi_theta[:, 0], phi_theta[:, 1], color='b')
# ax1.plot([0, 2*np.pi], [np.pi/2.0, np.pi/2.0], color='k')
else:
ax1.plot(xy[:, 0], xy[:, 1], color='b')
# Occluders
num_occ = int(pix[0])
pix = pix[1:]
for o in range(num_occ):
ne = int(pix[0])
pix = pix[1:]
phi_theta = pix[:ne*2]
pix = pix[ne*2:]
xy = pix[:ne*3]
pix = pix[ne*3:]
xy = xy + xy[0:3]
xy = np.array(xy).reshape(ne+1, 3)
phi_theta = phi_theta + phi_theta[0:2]
phi_theta = np.array(phi_theta).reshape(ne+1, 2)
if showOccluders:
if showSpherical:
ax1.plot(phi_theta[:, 0], phi_theta[:, 1], color='r')
ax2.plot(phi_theta[:, 0], phi_theta[:, 1], color='r', alpha=0.3)
else:
ax1.plot(xy[:, 0], xy[:, 1], color='r')
ax2.plot(xy[:, 0], xy[:, 1], color='r', alpha=0.3)
# Clipped
# print(pix)
num_clip = int(pix[0])
pix = pix[1:]
for o in range(num_clip):
# print(pix[0])
ne = int(pix[0])
pix = pix[1:]
phi_theta = pix[:ne*2]
pix = pix[ne*2:]
xy = pix[:ne*3]
pix = pix[ne*3:]
xy = xy + xy[0:3]
xy = np.array(xy).reshape(ne+1, 3)
phi_theta = phi_theta + phi_theta[0:2]
phi_theta = np.array(phi_theta).reshape(ne+1, 2)
if showSpherical:
ax2.set_xlim(0, 2*np.pi, auto=False)
ax2.set_ylim(0, np.pi, auto=False)
ax2.invert_yaxis()
ax2.set_aspect('equal', adjustable='box')
ax2.plot(phi_theta[:, 0], phi_theta[:, 1], color='g')
# ax1.plot([0, 2*np.pi], [np.pi/2.0, np.pi/2.0], color='k')
else:
ax2.plot(xy[:, 0], xy[:, 1], color='g')
fig.canvas.draw()
ax1.figure.canvas.draw()
ax2.figure.canvas.draw()
def onclick(event):
global prev_mousex, prev_mousey
# print('%s click: button=%d, x=%d, y=%d, xdata=%f, ydata=%f' %
# ('double' if event.dblclick else 'single', event.button,
# event.x, event.y, event.xdata, event.ydata))
prev_mousex = int(event.xdata)
prev_mousey = int(event.ydata)
plotXY(( int(event.xdata), int(event.ydata) ))
def on_key(event):
global showOccluders, showSpherical
# print('you pressed', event.key, event.xdata, event.ydata)
if event.key == 'b' or event.key == 'B':
showOccluders = not showOccluders
plotXY((prev_mousex, prev_mousey))
elif event.key == 'h' or event.key == 'H':
showSpherical = not showSpherical
plotXY((prev_mousex, prev_mousey))
elif event.key == 'z' or event.key == 'Z':
extent = ax1.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
fig.savefig('sph_poly.png', bbox_inches=extent, transparent=True, dpi=500)
extent = ax2.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
fig.savefig('sph_pol_1.png', bbox_inches=extent, transparent=True, dpi=500)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--pixel_file', type=str, required=True, help='Pixel data from PBRT')
parser.add_argument('--image_file', type=str, required=True, help='Rendered image to display')
args = parser.parse_args()
f = csv.reader(open(args.pixel_file))
for l in f:
temp = [float(i) for i in l]
data[str( int(temp[0]) ) + '_' + str( int(temp[1])) ] = temp[2:]
nrows = 1
ncols = 3
fig = plt.figure(figsize=(20, 15))
button_press_id = fig.canvas.mpl_connect('button_press_event', onclick)
key_press_id = fig.canvas.mpl_connect('key_press_event', on_key)
ax1 = fig.add_subplot(nrows, ncols, 2)
ax1.set_xlim(0, 2*np.pi, auto=False)
ax1.set_ylim(0, np.pi, auto=False)
ax1.invert_yaxis()
ax1.set_aspect('equal', adjustable='box')
ax2 = fig.add_subplot(nrows, ncols, 3)
ax2.set_aspect('equal', adjustable='box')
# Plot everything
# plotXY((0, 0))
render = cv2.imread(args.image_file)
img_ax = fig.add_subplot(nrows, ncols, 1)
img_ax.imshow(cv2.cvtColor(render, cv2.COLOR_BGR2RGB))
plt.show()
``` |
{
"source": "AakashKT/interface-master-config",
"score": 2
} |
#### File: AakashKT/interface-master-config/peers.py
```python
import os
import json
import re
import sys
import subprocess
import time
import urllib.request as urllib2
import multiprocessing as mp
from charmhelpers.core import host
from charmhelpers.core.hookenv import (
open_port,
open_ports,
status_set,
config,
unit_public_ip,
unit_private_ip,
)
from charmhelpers.core.host import (
service_start,
service_stop,
log,
mkdir,
write_file,
)
from charmhelpers.fetch import (
apt_install,
apt_update,
apt_upgrade
)
from charms.reactive.helpers import (
mark_invoked,
was_invoked,
)
from charms.reactive import (
when,
when_not,
when_file_changed,
hook,
RelationBase,
scopes,
set_state,
remove_state
)
CONF_FILE = '/tmp';
#########################################################################
# Common functions
#########################################################################
def run_command(command=None):
if command is None:
return False;
log('Running Command "%s"' % command);
try:
return subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT).decode('utf-8').replace('\n', '');
except subprocess.CalledProcessError as e:
log('Error running "%s" : %s' % (command, e.output));
return False;
def get_config(key):
conf = config(key);
return conf;
def retrieve(key):
try:
conf = open('/tmp/ovn_conf', 'r');
except:
return '';
plain_text = conf.read();
conf.close();
if plain_text == '':
return '';
else:
data = json.loads(plain_text);
return data[key];
def store(key, value):
conf = open('/tmp/ovn_conf', 'r');
plain_text = conf.read();
conf.close();
conf = open('/tmp/ovn_conf', 'w+');
data = {};
if plain_text != '':
data = json.loads(plain_text);
data[key] = value;
conf.truncate(0);
conf.seek(0, 0);
conf.write(json.dumps(data));
conf.close();
#########################################################################
# Relation Class
#########################################################################
class MasterConfigPeer(RelationBase):
scope = scopes.UNIT;
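# UNIT scope keeps a separate conversation per remote unit, so certificates and worker data are exchanged unit by unit.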
@hook("{peers:master-config}-relation-{joined}")
def joined(self):
conv = self.conversation();
conv.set_state("{relation_name}.connected");
@hook("{peers:master-config}-relation-{changed}")
def changed(self):
conv = self.conversation();
worker_id = conv.get_local(key='worker_id');
if worker_id != None and conv.get_remote(worker_id):
conv.set_state("{relation_name}.master.data.available");
elif conv.get_remote('cert_to_sign'):
conv.set_state("{relation_name}.worker.cert.available");
@hook("{peers:master-config}-relation-{departed}")
def departed(self):
conv = self.conversation();
conv.remove_state("{relation_name}.connected");
conv.remove_state("{relation_name}.master.data.available");
conv.remove_state("{relation_name}.worker.cert.available");
def set_worker_id(self, worker_id):
convs = self.conversations();
for conv in convs:
conv.set_local(key='worker_id', value=worker_id);
def get_worker_data(self):
convs = self.conversations();
final_data = [];
for conv in convs:
worker_unit = {};
cert = conv.get_remote('cert_to_sign');
worker_hostname = conv.get_remote('worker_hostname');
worker_unit['cert_to_sign'] = cert;
worker_unit['worker_hostname'] = worker_hostname;
final_data.append(worker_unit);
return final_data;
def send_worker_data(self, data):
convs = self.conversations();
for conv in convs:
conv.set_remote(data=data);
def send_signed_certs(self, certs):
convs = self.conversations();
for conv in convs:
for key, value in certs.items():
data_str = json.dumps(value);
conv.set_remote(key=key, value=data_str);
def get_signed_cert(self, worker_hostname):
convs = self.conversations();
final = None;
for conv in convs:
data = conv.get_remote(worker_hostname);
if data != '' and data != None:
data = json.loads(data);
final = data;
break;
return final;
``` |
{
"source": "aakash-lambton/project",
"score": 2
} |
#### File: aakash-lambton/project/Program.py
```python
import pymongo
import random
def create_database(db):
user_collection = db['users']
user_collection.drop()
user_collection = db['users']
post_collection = db['posts']
post_collection.drop()
post_collection = db['posts']
comment_collection = db['comments']
comment_collection.drop()
comment_collection = db['comments']
like_collection = db['likes']
like_collection.drop()
like_collection = db['likes']
status_collection = db['status']
status_collection.drop()
status_collection = db['status']
followers_collection = db['followers']
followers_collection.drop()
followers_collection = db['followers']
user_data = [
{
"_id": 1,
"name": "Aakash",
"email": "<EMAIL>",
"phone":"647632134",
"friends": 456,
"pictures": 34,
"contacts": 90,
"isVerified": True
},
{
"_id": 2,
"name": "<NAME>",
"email": "<EMAIL>",
"phone": "6476546784",
"friends": 665,
"pictures": 76,
"contacts": 50,
"isVerified": True
},
{
"_id": 3,
"name": "<NAME>",
"email": "<EMAIL>",
"phone": "6478765545",
"friends": 987,
"pictures": 64,
"contacts": 75,
"isVerified": False
},
{
"_id": 4,
"name": "<NAME>",
"email": "<EMAIL>",
"phone": "6478672134",
"friends": 654,
"pictures": 68,
"contacts": 46,
"isVerified": True
},
{
"_id": 5,
"name": "Harkaran",
"email": "<EMAIL>",
"phone": "64768664335",
"friends": 786,
"pictures": 74,
"contacts": 87,
"isVerified": False
},
{
"_id": 6,
"name": "Dipanshu",
"email": "<EMAIL>",
"phone": "416082134",
"friends": 756,
"pictures": 86,
"contacts": 34,
"isVerified": True
},
{
"_id": 7,
"name": "Hrsimran",
"email": "<EMAIL>",
"phone": "643732939",
"friends": 234,
"pictures": 74,
"contacts": 70,
"isVerified": False
},
{
"_id": 8,
"name": "Harpreet ",
"email": "<EMAIL>",
"phone": "324776566",
"friends": 856,
"pictures": 94,
"contacts": 50,
"isVerified": True
},
]
user_status = [
{
"status": "Having Dinner at Bahamas",
"uploadTime": "20:44",
"location": "indonesia",
"likes": 46,
"comments": 34,
"user": "Anmol"
},
{
"status": "Playing cricket at Key Land Field",
"uploadTime": "10:44",
"location": "india",
"likes": 56,
"comments": 14,
"user": "Aakash"
},
{
"status": "Watching Movie at Cineplex Theatre",
"uploadTime": "16:44",
"location": "Canada",
"likes": 96,
"comments": 66,
"user": "Harjant"
},
{
"status": "Reading novel at pearson library",
"uploadTime": "19:34",
"location": "Toronto",
"likes": 51,
"comments": 34,
"user": "Prabhjot"
},
{
"status": "Playing Golf at Wee Golf Course",
"uploadTime": "11:22",
"location": "USA",
"likes": 12,
"comments": 3,
"user": "Harkaran"
},
]
followers = [
{
"name": "Ali",
"active": False,
"lastSeen": "20-8-2020",
"followers": 943,
"username": "ali_zumair"
},
{
"name": "Alex",
"active": True,
"lastSeen": "10-8-2020",
"followers": 443,
"username": "alex_scott"
},
{
"name": "Lee",
"active": False,
"lastSeen": "10-3-2020",
"followers": 333,
"username": "lee_you"
},
{
"name": "joe",
"active": True,
"lastSeen": "09-1-2020",
"followers": 567,
"username": "joe_Tribiani"
},
{
"name": "Ross",
"active": False,
"lastSeen": "05-7-2020",
"followers": 133,
"username": "ross_geller"
}
]
#ADD DATA INTO COLLECTION
user_ids = user_collection.insert_many(user_data)
status_collection.insert_many(user_status)
followers_collection.insert_many(followers)
user_id_list = user_ids.inserted_ids
like_id = 1
post_id = 1
comment_id = 1
#ADD DUMMY POSTS
for user_id in user_ids.inserted_ids:
post_data = [{"_id": post_id,
"user_id": user_id,
"content": 'Dummy post', "view_count": 10,
"likes": [{"like_id": like_id}],
"comments": [{"comment_id": comment_id}]}]
like_id += 1
comment_id += 1
post_id += 1
post_collection.insert_many(post_data)
comment_id = 1
comment_all = []
for p_id in range(1, post_id):
comment_data = [{"_id": comment_id, "post_id": p_id,
"user_id": random.choice(user_id_list),
"comment": "Looks good"}]
comment_collection.insert_many(comment_data)
comment_all.append(comment_id)
comment_id += 1
like_id = 1
for p_id in range(1, post_id):
like_data = [{"_id": like_id, "post_id": p_id,
"user_id": random.choice(user_id_list),
"comment_id": random.choice(comment_all)}]
like_collection.insert_many(like_data)
like_id += 1
#PRINT ALL USERS
def read_all_users(db):
user_collection = db['users']
for user in user_collection.find():
print(user)
#PRINT SINGLE USER
def read_single_users(db):
user_id = int(input("Enter user id: "))
user_collection = db['users']
for user in user_collection.find({"_id": user_id}):
print(user)
#READ ALL POSTS
def read_all_post(db):
post_collection = db['posts']
for post in post_collection.find():
print(post)
#PRINT SINGLE POST
def read_single_post(db):
user_id = int(input("Enter user id: "))
post_collection = db['posts']
for post in post_collection.find({"user_id": user_id}):
print(post)
#PRINT ALL COMMENTS
def read_all_comments(db):
comment_collection = db['comments']
for comment in comment_collection.find():
print(comment)
#PRINT SINGLE COMMENTS
def read_single_comment(db):
user_id = int(input("Enter user id: "))
comment_collection = db['comments']
for comment in comment_collection.find({"user_id": user_id}):
print(comment)
#READ POST DATA
def read_post_comment(db):
post_id = int(input("Enter post id: "))
comment_collection = db['comments']
for comment in comment_collection.find({"post_id": post_id}):
print(comment)
#INSERT NEW USER INTO COLLECTION
def insert_user(db):
users = db["users"]
name = input("User name: ")
email = input("User Email: ")
ids = users.insert_many([{"name": name, "email": email}])
print(ids.inserted_ids)
#DELETE COMMENT
def delete_comment(db):
comment_id = int(input("Enter comment Id: "))
comment_collection = db['comments']
comment = comment_collection.find_one({"_id": comment_id})
db.posts.update_one(
{"_id": comment["post_id"]},
{"$pull": {"comments": {"comment_id": comment["_id"]}}}
)
comment_collection.delete_one({"_id": comment_id})
#UPDATE POST CONTENT
def update_post_content(db):
post_id = int(input("Enter post Id: "))
post_content = input("Enter post content: ")
post_query = {"_id": post_id}
update_data = {"$set": {"content": post_content}}
db.posts.update_one(post_query, update_data)
if __name__ == '__main__': #CONNECT TO MONGO ATLAS
client = pymongo.MongoClient("mongodb+srv://akash:[email protected]/db?retryWrites=true&w=majority")
database = client["feeddb"]
create_database(database)
print("Reading all users")
read_all_users(database)
print("Reading single user")
read_single_users(database)
print("Reading all posts")
read_all_post(database)
print("Reading single post")
read_single_post(database)
print("Reading all comments")
read_all_comments(database)
print("Reading single comment")
read_single_comment(database)
print("Reading all comments of a post")
read_post_comment(database)
print("Inserting new user")
insert_user(database)
print("Deleting comment")
delete_comment(database)
print("Reading all comments")
read_all_comments(database)
print("Updating the post")
update_post_content(database)
print("Reading all posts")
read_all_post(database)
``` |
{
"source": "AakashMallik/cnn-firefly",
"score": 2
} |
#### File: common/datasets/factory.py
```python
from ..interfaces import FactoryInterface
from .cifar10 import Cifar10
class DatasetFactory(FactoryInterface):
@classmethod
def generate(cls, name=None, options={}):
return Cifar10()
```
#### File: src/utils/device.py
```python
import torch
def get_device():
if torch.cuda.is_available():
device = torch.device("cuda:0")
print("[ GPU: {} (1/{}) ]".format(
torch.cuda.get_device_name(0),
torch.cuda.device_count())
)
else:
device = torch.device("cpu")
print("[ Running on the CPU ]")
return device
``` |
{
"source": "AakashMallik/CVPR2020-FGVC7",
"score": 3
} |
#### File: CVPR2020-FGVC7/dataset/dataset_factory.py
```python
from os import path
# list of datasets
from dataset.fgvc7_dataset import FGVC7_Dataset
class DatasetFactory:
def __init__(self, org_data_dir="data",):
self.FOLDS = 5
self.org_data_dir = org_data_dir
def get_dataset(self, mode, dataset_name, transformer=None, fold_number=None):
if mode not in ["train", "test", "val"]:
print("[ Dataset Mode should either be train/test/val ]")
exit()
else:
dataset_dir = path.join(self.org_data_dir, dataset_name)
dataset = None
if dataset_name == "fgvc7":
print("[ Dataset : fgvc7 <", mode, "/",
"raw" if fold_number is None else fold_number, "> ]")
dataset = FGVC7_Dataset(
mode, dataset_dir, transformer, fold_number)
else:
print("[ Dataset not found ]")
exit()
print("[ Transformer : ", str(transformer), " ]")
return dataset
```
#### File: CVPR2020-FGVC7/dataset/fgvc7_dataset.py
```python
import torch
import pandas as pd
from skimage import io
from os import path
from torch.utils.data import Dataset
from dataset.utils import (fold_creator)
NUMBER_OF_FOLDS = 5
DATASET_NAME = 'fgvc7'
class FGVC7_Dataset(Dataset):
def __init__(self, mode, dataset_path, transformer=None, fold_number=None):
if transformer is None:
print("[ No Transformer passed in - ", DATASET_NAME, " ]")
exit()
if fold_number is not None and fold_number >= NUMBER_OF_FOLDS:
print("[ Fold limit exceeded in - ", DATASET_NAME, " ]")
exit()
self.mode = mode
self.transformer = transformer
self.dataset_path = dataset_path
if fold_number is None:
# If fold not selected
self.csv_path = path.join(dataset_path, mode + ".csv")
self.image_dir = path.join(dataset_path, "images")
else:
# if fold selected
self.create_folds()
self.csv_path = path.join(
"folds", DATASET_NAME, str(fold_number), mode + ".csv")
self.image_dir = path.join(
"folds", DATASET_NAME, str(fold_number), mode)
self.data_frame = pd.read_csv(self.csv_path)
def create_folds(self):
fold_creator(
self.dataset_path,
path.join("folds", DATASET_NAME),
NUMBER_OF_FOLDS
)
def get_csv_path(self):
return self.csv_path
def __len__(self):
return len(self.data_frame)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
image_name = str(self.data_frame.iloc[idx, 0]) + ".jpg"
image_path = path.join(self.image_dir, image_name)
image = io.imread(image_path)
if self.mode == "test":
return self.transformer(image)
else:
label = torch.tensor(
self.data_frame.iloc[idx, 1:].to_numpy(dtype=float)
)
return self.transformer(image), label
```
#### File: CVPR2020-FGVC7/loss/utils.py
```python
import torch
class ClassificationLossWrapper:
def __init__(self, loss_obj):
self.loss = loss_obj
def to(self, device):
self.loss.to(device)
def __call__(self, output, target):
return self.loss(output, torch.argmax(target, dim=1))
class RegressionLossWrapper:
def __init__(self, loss_obj):
self.loss = loss_obj
def to(self, device):
self.loss.to(device)
def __call__(self, output, target):
return self.loss(output, torch.argmax(target, dim=1).view(-1, 1).float())
class MixedLossWrapper:
def __init__(self, reg_loss_obj, class_loss_obj, classification_coefficient):
self.reg_loss = reg_loss_obj
self.class_loss = class_loss_obj
self.class_coeff = classification_coefficient
self.reg_coeff = 1 - classification_coefficient
def to(self, device):
self.reg_loss.to(device)
self.class_loss.to(device)
def __call__(self, output, target):
mixed_loss = self.reg_coeff * \
self.reg_loss(output[:, 0].view(-1, 1), torch.argmax(
target, dim=1).view(-1, 1).float())
mixed_loss += self.class_coeff * \
self.class_loss(output[:, 1:], torch.argmax(target, dim=1))
return mixed_loss
```
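A short usage sketch for the wrappers above, assuming one-hot targets of shape `(batch, num_classes)` as produced by the FGVC7 dataset; the concrete loss choices and the import path are illustrative, not taken from the repository:
```python
import torch
from torch import nn

from loss.utils import ClassificationLossWrapper, RegressionLossWrapper  # import path assumed

num_classes = 4
logits = torch.randn(8, num_classes)                  # classification head output
reg_out = torch.randn(8, 1)                           # single regression output per sample
target = torch.eye(num_classes)[torch.randint(num_classes, (8,))]  # one-hot targets

# Both wrappers argmax the one-hot target internally, so callers can pass the
# same target tensor to either head.
cls_loss = ClassificationLossWrapper(nn.CrossEntropyLoss())
reg_loss = RegressionLossWrapper(nn.MSELoss())

print(cls_loss(logits, target), reg_loss(reg_out, target))
```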
#### File: CVPR2020-FGVC7/model/model_factory.py
```python
import torch
from torch import nn
from os import path
import pretrainedmodels
import torchvision.models as models
# from model.efficientnet import EfficientNet
from efficientnet_pytorch import EfficientNet
from model.layer_utils import GeM
class ModelFactory():
def __init__(self):
pass
def get_model(self, model_name, num_classes, pred_type, hyper_params=None, tuning_type='feature-extraction', pre_trained_path=None, weight_type=None):
if pred_type == 'regression':
adjusted_num_classes = 1
elif pred_type == 'mixed':
adjusted_num_classes = num_classes + 1
else:
adjusted_num_classes = num_classes
model = None
if model_name == 'efficientnet-b4':
print("[ Model : Efficientnet B4 ]")
model = EfficientNet.from_pretrained(
'efficientnet-b4'
)
if tuning_type == 'feature-extraction':
for param in model.parameters():
param.requires_grad = False
# changing avg pooling to Generalized Mean Avg
model._avg_pooling = GeM()
num_ftrs = model._fc.in_features
model._fc = nn.Sequential(
nn.Linear(num_ftrs, 1000, bias=True),
nn.ReLU(),
nn.Dropout(p=hyper_params['fc_drop_out']),
nn.Linear(1000, adjusted_num_classes, bias=True)
)
# if hyper_params is not None:
# model._bn_mom = hyper_params['batch_norm_momentum']
if model_name == 'efficientnet-b5':
print("[ Model : Efficientnet B5 ]")
model = EfficientNet.from_pretrained(
'efficientnet-b5'
)
if tuning_type == 'feature-extraction':
for param in model.parameters():
param.requires_grad = False
# changing avg pooling to Generalized Mean Avg
# model._avg_pooling = GeM()
num_ftrs = model._fc.in_features
model._fc = nn.Sequential(
nn.Linear(num_ftrs, 1000, bias=True),
nn.ReLU(),
nn.Dropout(p=hyper_params['fc_drop_out']),
nn.Linear(1000, adjusted_num_classes, bias=True)
)
# if hyper_params is not None:
# model._bn_mom = hyper_params['batch_norm_momentum']
if model_name == 'efficientnet-b7':
print("[ Model : Efficientnet B7 ]")
model = EfficientNet.from_pretrained(
'efficientnet-b7'
)
if tuning_type == 'feature-extraction':
for param in model.parameters():
param.requires_grad = False
# changing avg pooling to Generalized Mean Avg
model._avg_pooling = GeM()
num_ftrs = model._fc.in_features
model._fc = nn.Sequential(
nn.Linear(num_ftrs, 1000, bias=True),
nn.ReLU(),
nn.Dropout(p=hyper_params['fc_drop_out']),
nn.Linear(1000, adjusted_num_classes, bias=True)
)
# if hyper_params is not None:
# model._bn_mom = hyper_params['batch_norm_momentum']
if model_name == 'densenet-161':
print("[ Model : Densenet 161 ]")
model = models.densenet161(pretrained=True)
if tuning_type == 'feature-extraction':
for param in model.parameters():
param.requires_grad = False
num_ftrs = model.classifier.in_features
model.classifier = nn.Sequential(
nn.Linear(num_ftrs, adjusted_num_classes)
)
if model_name == 'resnet-34':
print("[ Model : Resnet 34 ]")
model = pretrainedmodels.__dict__[
'resnet34'](pretrained='imagenet')
if tuning_type == 'feature-extraction':
for param in model.parameters():
param.requires_grad = False
model.avgpool = nn.AdaptiveAvgPool2d(1)
num_ftrs = model.last_linear.in_features
model.last_linear = nn.Sequential(
nn.Linear(num_ftrs, adjusted_num_classes)
)
if model_name == 'se-resnet-152':
print("[ Model : SeResnet 152 ]")
model = pretrainedmodels.__dict__[
'se_resnet152'](pretrained='imagenet')
if tuning_type == 'feature-extraction':
for param in model.parameters():
param.requires_grad = False
num_ftrs = model.last_linear.in_features
model.last_linear = nn.Sequential(
nn.Linear(num_ftrs, 1000, bias=True),
nn.ReLU(),
nn.Dropout(p=hyper_params['fc_drop_out']),
nn.Linear(1000, adjusted_num_classes, bias=True)
)
tuning_type and print("[ Tuning type : ", tuning_type, " ]")
print("[ Prediction type : ", pred_type, " ]")
# if model needs to resume from pretrained weights
if pre_trained_path:
weight_path = 'weights.pth'
if weight_type == 'best_val_kaggle_metric':
weight_path = 'weights_kaggle_metric.pth'
elif weight_type == 'best_val_loss':
weight_path = 'weights_loss.pth'
weight_path = path.join(
'results', pre_trained_path, weight_path)
if path.exists(weight_path):
print("[ Loading checkpoint : ",
pre_trained_path, " ]")
model.load_state_dict(torch.load(
weight_path
# ,map_location={'cuda:1': 'cuda:0'}
))
else:
print("[ Provided pretrained weight path is invalid ]")
exit()
print(
"[ Weight type : ", weight_type if weight_type else "Last Epoch", " ]")
return model
```
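A minimal sketch of calling the factory above; the import path and `pred_type="classification"` are assumptions (the factory only special-cases `'regression'` and `'mixed'`), and the ImageNet weights are downloaded on first use:
```python
from model.model_factory import ModelFactory  # import path assumed

factory = ModelFactory()
model = factory.get_model(
    model_name="resnet-34",
    num_classes=4,
    pred_type="classification",         # anything other than 'regression'/'mixed'
    hyper_params={"fc_drop_out": 0.4},  # only used by the efficientnet/se-resnet heads
    tuning_type="feature-extraction",   # freeze the backbone, train only the new head
)
print(sum(p.requires_grad for p in model.parameters()), "trainable parameter tensors")
```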
#### File: CVPR2020-FGVC7/scheduler/scheduler_factory.py
```python
from torch.optim.lr_scheduler import (StepLR, CosineAnnealingLR)
from transformers import get_cosine_schedule_with_warmup
class SchedulerFactory:
def __init__(self):
pass
def get_scheduler(self, optimiser, name, hyper_params, epochs, iter_per_epoch):
scheduler = None
if name == 'step-lr':
print("[ Scheduler : Step LR ]")
scheduler = StepLR(
optimiser,
hyper_params['step'],
hyper_params['lr_decay']
)
elif name == 'cosineAnnealing-lr':
print("[ Scheduler : Cosine Annealing LR ]")
scheduler = CosineAnnealingLR(
optimiser,
T_max=epochs*iter_per_epoch
)
elif name == 'cosineAnnealing-warmup-lr':
print("[ Scheduler : Cosine Annealing LR with Warmup ]")
scheduler = get_cosine_schedule_with_warmup(
optimiser,
num_warmup_steps=iter_per_epoch * 5,
num_training_steps=iter_per_epoch * epochs
)
return scheduler
```
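A small sketch of driving the factory above with a dummy optimiser; the import path and step counts are illustrative. The cosine schedules are stepped once per iteration, so they span `epochs * iter_per_epoch` updates:
```python
from torch import nn, optim

from scheduler.scheduler_factory import SchedulerFactory  # import path assumed

model = nn.Linear(10, 4)
optimiser = optim.SGD(model.parameters(), lr=0.1)

factory = SchedulerFactory()
scheduler = factory.get_scheduler(
    optimiser, name="cosineAnnealing-lr", hyper_params={}, epochs=10, iter_per_epoch=100
)

for _ in range(3):          # stand-in for three training iterations
    optimiser.step()
    scheduler.step()        # decay the learning rate once per batch
    print(optimiser.param_groups[0]["lr"])
```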
#### File: CVPR2020-FGVC7/utils/custom_bar.py
```python
from tqdm import (trange, tqdm)
from math import ceil
class CustomBar:
def __init__(self, epoch, len_training_data_set, batch_size):
self.bar = tqdm(
total=epoch * ceil(len_training_data_set / batch_size),
desc="Progress",
postfix=[
dict(batch_idx=0),
ceil(len_training_data_set / batch_size),
dict(epoch_idx=0),
epoch
],
bar_format='{desc}: {percentage:3.0f}%|{bar}| [ETA:{remaining}] [Batch:{postfix[0][batch_idx]}/{postfix[1]} Epoch:{postfix[2][epoch_idx]}/{postfix[3]}]'
)
def step(self):
self.bar.update()
def update_epoch_info(self,i):
self.bar.postfix[2]["epoch_idx"] = i + 1
def update_batch_info(self, batch_ndx):
self.bar.postfix[0]["batch_idx"] = batch_ndx + 1
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.bar.close()
```
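A usage sketch for the progress bar above; the inner loop stands in for a real training step, and the import path is assumed. Using the class as a context manager ensures the underlying tqdm bar is closed even if an exception is raised:
```python
from time import sleep

from utils.custom_bar import CustomBar  # import path assumed

n_samples, batch_size, epochs = 320, 32, 2

with CustomBar(epochs, n_samples, batch_size) as bar:
    for epoch in range(epochs):
        bar.update_epoch_info(epoch)
        for batch_idx in range(n_samples // batch_size):
            bar.update_batch_info(batch_idx)
            sleep(0.01)      # stand-in for a forward/backward pass
            bar.step()
```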
#### File: CVPR2020-FGVC7/utils/wandb_update.py
```python
import wandb
import torch
from utils.print_util import cprint
def wandb_init(config):
cprint("[ Setting up project on W&B ]", type="info3")
wandb.init(project="plantpatho-2020")
wandb.config.experiment_name = config['experiment_name']
wandb.config.seed = config['seed']
wandb.config.model = config['model']['name']
wandb.config.prediction_type = config['model']['pred_type']
wandb.config.optimiser = config['optimiser']['name']
wandb.config.learning_rate = config['optimiser']['hyper_params']['learning_rate']
wandb.config.loss_function = config['loss_function']['name']
wandb.config.resize_dims = config['train_dataset']['resize_dims']
wandb.config.epochs = config["epochs"]
wandb.config.batch_size = config["batch_size"]
# saving config files to W&B
wandb.save('./config/' + config['experiment_name'] + '.yml')
return True
def publish_intermediate(results, best_val_loss, best_kaggle_metric, output_list, target_list):
wandb.run.summary["best_val_loss"] = best_val_loss
wandb.run.summary["best_kaggle_metric"] = best_kaggle_metric
# saving confusion matrix (image)
# wandb.sklearn.plot_confusion_matrix(
# torch.argmax(target_list, dim=1).numpy(),
# torch.argmax(output_list, dim=1).numpy(),
# ['H', 'MD', 'R', 'S']
# )
return wandb.log(results)
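# Example of the config mapping wandb_init expects (keys inferred from the
# lookups above; the concrete values below are illustrative only):
#
#   config = {
#       "experiment_name": "exp001",
#       "seed": 42,
#       "model": {"name": "efficientnet-b4", "pred_type": "classification"},
#       "optimiser": {"name": "adam", "hyper_params": {"learning_rate": 3e-4}},
#       "loss_function": {"name": "cross_entropy"},
#       "train_dataset": {"resize_dims": 380},
#       "epochs": 30,
#       "batch_size": 16,
#   }
#   wandb_init(config)   # requires a configured W&B account / API key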
``` |
{
"source": "aakashnand/trino-python-client",
"score": 2
} |
#### File: tests/unit/test_client.py
```python
import httpretty
import pytest
import requests
import time
from unittest import mock
from urllib.parse import urlparse
from requests_kerberos.exceptions import KerberosExchangeError
from trino.client import TrinoQuery, TrinoRequest, TrinoResult
from trino.auth import KerberosAuthentication, _OAuth2TokenBearer
from trino import constants
import trino.exceptions
@mock.patch("trino.client.TrinoRequest.http")
def test_trino_initial_request(mock_requests, sample_post_response_data):
mock_requests.Response.return_value.json.return_value = sample_post_response_data
req = TrinoRequest(
host="coordinator",
port=8080,
user="test",
source="test",
catalog="test",
schema="test",
http_scheme="http",
session_properties={},
)
http_resp = TrinoRequest.http.Response()
http_resp.status_code = 200
status = req.process(http_resp)
assert status.next_uri == sample_post_response_data["nextUri"]
assert status.id == sample_post_response_data["id"]
def test_request_headers(mock_get_and_post):
get, post = mock_get_and_post
catalog = "test_catalog"
schema = "test_schema"
user = "test_user"
source = "test_source"
accept_encoding_header = "accept-encoding"
accept_encoding_value = "identity,deflate,gzip"
client_info_header = constants.HEADER_CLIENT_INFO
client_info_value = "some_client_info"
req = TrinoRequest(
host="coordinator",
port=8080,
user=user,
source=source,
catalog=catalog,
schema=schema,
http_scheme="http",
session_properties={},
http_headers={
accept_encoding_header: accept_encoding_value,
client_info_header: client_info_value,
},
redirect_handler=None,
)
def assert_headers(headers):
assert headers[constants.HEADER_CATALOG] == catalog
assert headers[constants.HEADER_SCHEMA] == schema
assert headers[constants.HEADER_SOURCE] == source
assert headers[constants.HEADER_USER] == user
assert headers[constants.HEADER_SESSION] == ""
assert headers[accept_encoding_header] == accept_encoding_value
assert headers[client_info_header] == client_info_value
assert len(headers.keys()) == 8
req.post("URL")
_, post_kwargs = post.call_args
assert_headers(post_kwargs["headers"])
req.get("URL")
_, get_kwargs = get.call_args
assert_headers(get_kwargs["headers"])
def test_request_session_properties_headers(mock_get_and_post):
get, post = mock_get_and_post
req = TrinoRequest(
host="coordinator",
port=8080,
user="test_user",
session_properties={
"a": "1",
"b": "2",
"c": "more=v1,v2"
}
)
def assert_headers(headers):
assert headers[constants.HEADER_SESSION] == "a=1,b=2,c=more%3Dv1%2Cv2"
req.post("URL")
_, post_kwargs = post.call_args
assert_headers(post_kwargs["headers"])
req.get("URL")
_, get_kwargs = get.call_args
assert_headers(get_kwargs["headers"])
def test_additional_request_post_headers(mock_get_and_post):
"""
Tests that the `TrinoRequest.post` function can take additional headers
and that it combines them with the existing ones to perform the request.
"""
_, post = mock_get_and_post
req = TrinoRequest(
host="coordinator",
port=8080,
user="test",
source="test",
catalog="test",
schema="test",
http_scheme="http",
session_properties={},
)
sql = 'select 1'
additional_headers = {
'X-Trino-Fake-1': 'one',
'X-Trino-Fake-2': 'two',
}
combined_headers = req.http_headers
combined_headers.update(additional_headers)
req.post(sql, additional_headers)
# Validate that the post call was performed including the additional headers
_, post_kwargs = post.call_args
assert post_kwargs['headers'] == combined_headers
def test_request_invalid_http_headers():
with pytest.raises(ValueError) as value_error:
TrinoRequest(
host="coordinator",
port=8080,
user="test",
http_headers={constants.HEADER_USER: "invalid_header"},
)
assert str(value_error.value).startswith("cannot override reserved HTTP header")
def test_enabling_https_automatically_when_using_port_443(mock_get_and_post):
_, post = mock_get_and_post
req = TrinoRequest(
host="coordinator",
port=constants.DEFAULT_TLS_PORT,
user="test",
)
req.post("SELECT 1")
post_args, _ = post.call_args
parsed_url = urlparse(post_args[0])
assert parsed_url.scheme == constants.HTTPS
def test_https_scheme(mock_get_and_post):
_, post = mock_get_and_post
req = TrinoRequest(
host="coordinator",
port=constants.DEFAULT_TLS_PORT,
user="test",
http_scheme=constants.HTTPS,
)
req.post("SELECT 1")
post_args, _ = post.call_args
parsed_url = urlparse(post_args[0])
assert parsed_url.scheme == constants.HTTPS
assert parsed_url.port == constants.DEFAULT_TLS_PORT
def test_http_scheme_with_port(mock_get_and_post):
_, post = mock_get_and_post
req = TrinoRequest(
host="coordinator",
port=constants.DEFAULT_TLS_PORT,
user="test",
http_scheme=constants.HTTP,
)
req.post("SELECT 1")
post_args, _ = post.call_args
parsed_url = urlparse(post_args[0])
assert parsed_url.scheme == constants.HTTP
assert parsed_url.port == constants.DEFAULT_TLS_PORT
def test_request_timeout():
timeout = 0.1
http_scheme = "http"
host = "coordinator"
port = 8080
url = http_scheme + "://" + host + ":" + str(port) + constants.URL_STATEMENT_PATH
def long_call(request, uri, headers):
time.sleep(timeout * 2)
return (200, headers, "delayed success")
httpretty.enable()
for method in [httpretty.POST, httpretty.GET]:
httpretty.register_uri(method, url, body=long_call)
# timeout without retry
for request_timeout in [timeout, (timeout, timeout)]:
req = TrinoRequest(
host=host,
port=port,
user="test",
http_scheme=http_scheme,
max_attempts=1,
request_timeout=request_timeout,
)
with pytest.raises(requests.exceptions.Timeout):
req.get(url)
with pytest.raises(requests.exceptions.Timeout):
req.post("select 1")
httpretty.disable()
httpretty.reset()
OAUTH_SERVER_URL_NO_HEADER = "http://coordinator/no_header"
OAUTH_SERVER_URL_FAIL_SERVER = "http://coordinator/fail_server"
OAUTH_SERVER_URL_SERVER_DENIED = "http://coordinator/server_denied_access"
OAUTH_SERVER_URL_SERVER_SUCCESS = "http://coordinator/statement_url_success"
OAUTH_REDIRECT_SERVER = "https://coordinator/as/authorization.oauth2"
OAUTH_SERVER_URL_LOOP = "https://coordinator/oauth2/token/loop"
OAUTH_SERVER_URL_1 = "https://coordinator/oauth2/token/<PASSWORD>"
OAUTH_SERVER_URL_2 = "https://coordinator/oauth2/token/<PASSWORD>"
OAUTH_SERVER_URL_FORCE_FAIL = "https://coordinator/oauth2/token/force_fail"
OAUTH_SERVER_URL_DENY_ACCESS = "https://coordinator/oauth2/token/deny_access"
OAUTH_DENY_ERROR_TEXT = '{"error": "OAuth server returned an error: error=access_denied, error_description=null, error_uri=null, state=EncodedState"}' # NOQA: E501
OAUTH_TEST_TOKEN = "<PASSWORD>"
def oauth2_test_url_handler(url):
print(url, end='')
class OAuthTestReq:
def __init__(self, method, url):
self.method = method
self.url = url
def __call__(self, str, callback_func):
if (self.method == 'post'):
callback_func(self.get_statement_post_response())
elif (self.method == 'get'):
callback_func(self.get_token_url_response())
def get_statement_request(self):
req = mock.Mock()
req.url = self.url
req.headers = requests.structures.CaseInsensitiveDict()
req.register_hook = mock.Mock(side_effect=self)
return req
def get_token_request(self):
req = mock.Mock()
req.url = self.url
req.headers = requests.structures.CaseInsensitiveDict()
req.register_hook = mock.Mock(side_effect=self)
return req
def get_statement_post_response(self):
statement_resp = mock.Mock()
statement_resp.status_code = 401
if (self.url == OAUTH_SERVER_URL_NO_HEADER):
statement_resp.headers = requests.structures.CaseInsensitiveDict()
elif (self.url == OAUTH_SERVER_URL_FAIL_SERVER):
statement_resp.headers = requests.structures.CaseInsensitiveDict([
('Www-Authenticate',
f'Bearer x_redirect_server=\"{OAUTH_REDIRECT_SERVER}\",'
f'x_token_server=\"{OAUTH_SERVER_URL_FORCE_FAIL}\",'
'Basic realm=\"Trino\"')])
elif (self.url == OAUTH_SERVER_URL_SERVER_DENIED):
statement_resp.headers = requests.structures.CaseInsensitiveDict([
('Www-Authenticate',
f'Bearer x_redirect_server=\"{OAUTH_REDIRECT_SERVER}\",'
f'x_token_server=\"{OAUTH_SERVER_URL_DENY_ACCESS}\",'
'Basic realm=\"Trino\"')])
elif (self.url == OAUTH_SERVER_URL_SERVER_SUCCESS):
statement_resp.status_code = 200
statement_resp.headers = requests.structures.CaseInsensitiveDict([
('Www-Authenticate',
f'Bearer x_redirect_server=\"{OAUTH_REDIRECT_SERVER}\",'
f'x_token_server=\"{OAUTH_SERVER_URL_1}\",'
'Basic realm=\"Trino\"')])
else:
statement_resp.headers = requests.structures.CaseInsensitiveDict([
('Www-Authenticate',
f'Bearer x_redirect_server=\"{OAUTH_REDIRECT_SERVER}\",'
f'x_token_server=\"{OAUTH_SERVER_URL_1}\",'
'Basic realm=\"Trino\"')])
statement_resp.register_hook = mock.Mock(side_effect=self)
statement_resp.url = self.url
return statement_resp
def get_token_url_response(self):
token_resp = mock.Mock()
token_resp.status_code = 200
# Success cases
if self.url == OAUTH_SERVER_URL_1:
token_resp.text = f'{{"nextUri":"{OAUTH_SERVER_URL_2}"}}'
elif self.url == OAUTH_SERVER_URL_2:
token_resp.text = f'{{"token":"{OAUTH_TEST_TOKEN}"}}'
# Failure cases
elif self.url == OAUTH_SERVER_URL_FORCE_FAIL:
token_resp.status_code = 500
elif self.url == OAUTH_SERVER_URL_DENY_ACCESS:
token_resp.text = OAUTH_DENY_ERROR_TEXT
elif self.url == OAUTH_SERVER_URL_LOOP:
token_resp.text = f'{{"nextUri":"{OAUTH_SERVER_URL_LOOP}"}}'
return token_resp
def call_response_hook(str, callback_func):
statement_resp = mock.Mock()
statement_resp.headers = requests.structures.CaseInsensitiveDict([
('Www-Authenticate',
f'Bearer x_redirect_server=\"{OAUTH_REDIRECT_SERVER}\",'
f'x_token_server=\"{OAUTH_SERVER_URL_1}\",'
'Basic realm=\"Trino\"')])
statement_resp.status_code = 401
callback_func(statement_resp)
@mock.patch("requests.Session.get")
@mock.patch("requests.Session.post")
def test_oauth2_authentication_flow(http_session_post, http_session_get, capsys):
http_session = requests.Session()
# set up the patched session, with the correct response
oauth_test = OAuthTestReq("post", "http://coordinator/statement_url")
http_session_post.return_value = oauth_test.get_statement_post_response()
http_session_get.side_effect = oauth_test.get_token_url_response()
oauth = _OAuth2TokenBearer(http_session, oauth2_test_url_handler)
statement_req = oauth_test.get_statement_request()
oauth(statement_req)
oauth_test = OAuthTestReq("get", OAUTH_SERVER_URL_1)
token_req = oauth_test.get_token_request()
oauth(token_req)
oauth_test = OAuthTestReq("get", OAUTH_SERVER_URL_2)
token_req = oauth_test.get_token_request()
oauth(token_req)
# Finally resend the original request, and respond back with status code 200
oauth_test = OAuthTestReq("post", "http://coordinator/statement_url_success")
# statement_req.register_hook = mock.Mock(side_effect=oauth_test)
statement_req = oauth_test.get_statement_request()
http_session_post.return_value = oauth_test.get_statement_post_response()
oauth(statement_req)
out, err = capsys.readouterr()
assert out == OAUTH_REDIRECT_SERVER
assert statement_req.headers['Authorization'] == "Bearer " + OAUTH_TEST_TOKEN
@mock.patch("requests.Session.get")
@mock.patch("requests.Session.post")
def test_oauth2_exceed_max_attempts(http_session_post, http_session_get):
http_session = requests.Session()
# set up the patched session, with the correct response
oauth_test = OAuthTestReq("post", "http://coordinator/statement_url")
http_session_post.return_value = oauth_test.get_statement_post_response()
http_session_get.side_effect = oauth_test.get_token_url_response()
oauth = _OAuth2TokenBearer(http_session, oauth2_test_url_handler)
statement_req = oauth_test.get_statement_request()
oauth(statement_req)
with pytest.raises(trino.exceptions.TrinoAuthError) as exp:
for i in range(0, 5):
oauth_test = OAuthTestReq("get", OAUTH_SERVER_URL_1)
token_req = oauth_test.get_token_request()
oauth(token_req)
assert str(exp.value) == "Exceeded max attempts while getting the token"
@mock.patch("requests.Session.post")
def test_oauth2_authentication_missing_headers(http_session_post):
http_session = requests.Session()
oauth_test = OAuthTestReq("post", OAUTH_SERVER_URL_NO_HEADER)
http_session_post.return_value = oauth_test.get_statement_post_response()
oauth = _OAuth2TokenBearer(http_session, oauth2_test_url_handler)
with pytest.raises(trino.exceptions.TrinoAuthError) as exp:
statement_req = oauth_test.get_statement_request()
oauth(statement_req)
assert str(exp.value) == "Error: header WWW-Authenticate not available in the response."
@mock.patch("requests.Session.get")
@mock.patch("requests.Session.post")
def test_oauth2_authentication_fail_token_server(http_session_post, http_session_get):
http_session = requests.Session()
oauth_test = OAuthTestReq("post", OAUTH_SERVER_URL_FAIL_SERVER)
http_session_post.return_value = oauth_test.get_statement_post_response()
oauth = _OAuth2TokenBearer(http_session, oauth2_test_url_handler)
http_session_get.side_effect = oauth_test.get_token_url_response()
statement_req = oauth_test.get_statement_request()
oauth(statement_req)
with pytest.raises(trino.exceptions.TrinoAuthError) as exp:
oauth_test = OAuthTestReq("get", OAUTH_SERVER_URL_FORCE_FAIL)
token_req = oauth_test.get_token_request()
oauth(token_req)
assert "Error while getting the token response status" in str(exp.value)
@mock.patch("requests.Session.get")
@mock.patch("requests.Session.post")
def test_oauth2_authentication_access_denied(http_session_post, http_session_get):
http_session = requests.Session()
oauth_test = OAuthTestReq("post", OAUTH_SERVER_URL_SERVER_DENIED)
http_session_post.return_value = oauth_test.get_statement_post_response()
oauth = _OAuth2TokenBearer(http_session, oauth2_test_url_handler)
http_session_get.side_effect = oauth_test.get_token_url_response()
statement_req = oauth_test.get_statement_request()
oauth(statement_req)
with pytest.raises(trino.exceptions.TrinoAuthError) as exp:
oauth_test = OAuthTestReq("get", OAUTH_SERVER_URL_FORCE_FAIL)
token_req = oauth_test.get_token_request()
oauth(token_req)
assert "Error while getting the token" in str(exp.value)
@mock.patch("trino.client.TrinoRequest.http")
def test_trino_fetch_request(mock_requests, sample_get_response_data):
mock_requests.Response.return_value.json.return_value = sample_get_response_data
req = TrinoRequest(
host="coordinator",
port=8080,
user="test",
source="test",
catalog="test",
schema="test",
http_scheme="http",
session_properties={},
)
http_resp = TrinoRequest.http.Response()
http_resp.status_code = 200
status = req.process(http_resp)
assert status.next_uri == sample_get_response_data["nextUri"]
assert status.id == sample_get_response_data["id"]
assert status.rows == sample_get_response_data["data"]
@mock.patch("trino.client.TrinoRequest.http")
def test_trino_fetch_error(mock_requests, sample_get_error_response_data):
mock_requests.Response.return_value.json.return_value = sample_get_error_response_data
req = TrinoRequest(
host="coordinator",
port=8080,
user="test",
source="test",
catalog="test",
schema="test",
http_scheme="http",
session_properties={},
)
http_resp = TrinoRequest.http.Response()
http_resp.status_code = 200
with pytest.raises(trino.exceptions.TrinoUserError) as exception_info:
req.process(http_resp)
error = exception_info.value
assert error.error_code == 1
assert error.error_name == "SYNTAX_ERROR"
assert error.error_type == "USER_ERROR"
assert error.error_exception == "io.trino.spi.TrinoException"
assert "stack" in error.failure_info
assert len(error.failure_info["stack"]) == 36
assert "suppressed" in error.failure_info
assert (
error.message
== "line 1:15: Schema must be specified when session schema is not set"
)
assert error.error_location == (1, 15)
assert error.query_id == "20210817_140827_00000_arvdv"
@pytest.mark.parametrize(
"error_code, error_type, error_message",
[
(503, trino.exceptions.Http503Error, "service unavailable"),
(504, trino.exceptions.Http504Error, "gateway timeout"),
(404, trino.exceptions.HttpError, "error 404"),
],
)
def test_trino_connection_error(monkeypatch, error_code, error_type, error_message):
monkeypatch.setattr(TrinoRequest.http.Response, "json", lambda x: {})
req = TrinoRequest(
host="coordinator",
port=8080,
user="test",
source="test",
catalog="test",
schema="test",
http_scheme="http",
session_properties={},
)
http_resp = TrinoRequest.http.Response()
http_resp.status_code = error_code
with pytest.raises(error_type) as error:
req.process(http_resp)
assert error_message in str(error)
class RetryRecorder(object):
def __init__(self, error=None, result=None):
self.__name__ = "RetryRecorder"
self._retry_count = 0
self._error = error
self._result = result
def __call__(self, *args, **kwargs):
self._retry_count += 1
if self._error is not None:
raise self._error
if self._result is not None:
return self._result
@property
def retry_count(self):
return self._retry_count
def test_authentication_fail_retry(monkeypatch):
post_retry = RetryRecorder(error=KerberosExchangeError())
monkeypatch.setattr(TrinoRequest.http.Session, "post", post_retry)
get_retry = RetryRecorder(error=KerberosExchangeError())
monkeypatch.setattr(TrinoRequest.http.Session, "get", get_retry)
attempts = 3
kerberos_auth = KerberosAuthentication()
req = TrinoRequest(
host="coordinator",
port=8080,
user="test",
http_scheme=constants.HTTPS,
auth=kerberos_auth,
max_attempts=attempts,
)
with pytest.raises(KerberosExchangeError):
req.post("URL")
assert post_retry.retry_count == attempts
with pytest.raises(KerberosExchangeError):
req.get("URL")
assert post_retry.retry_count == attempts
def test_503_error_retry(monkeypatch):
http_resp = TrinoRequest.http.Response()
http_resp.status_code = 503
post_retry = RetryRecorder(result=http_resp)
monkeypatch.setattr(TrinoRequest.http.Session, "post", post_retry)
get_retry = RetryRecorder(result=http_resp)
monkeypatch.setattr(TrinoRequest.http.Session, "get", get_retry)
attempts = 3
req = TrinoRequest(
host="coordinator", port=8080, user="test", max_attempts=attempts
)
req.post("URL")
assert post_retry.retry_count == attempts
req.get("URL")
assert post_retry.retry_count == attempts
def test_504_error_retry(monkeypatch):
http_resp = TrinoRequest.http.Response()
http_resp.status_code = 504
post_retry = RetryRecorder(result=http_resp)
monkeypatch.setattr(TrinoRequest.http.Session, "post", post_retry)
get_retry = RetryRecorder(result=http_resp)
monkeypatch.setattr(TrinoRequest.http.Session, "get", get_retry)
attempts = 3
req = TrinoRequest(
host="coordinator", port=8080, user="test", max_attempts=attempts
)
req.post("URL")
assert post_retry.retry_count == attempts
req.get("URL")
assert post_retry.retry_count == attempts
class FakeGatewayResponse(object):
def __init__(self, http_response, redirect_count=1):
self.__name__ = "FakeGatewayResponse"
self.http_response = http_response
self.redirect_count = redirect_count
self.count = 0
def __call__(self, *args, **kwargs):
self.count += 1
if self.count == self.redirect_count:
return self.http_response
http_response = TrinoRequest.http.Response()
http_response.status_code = 301
http_response.headers["Location"] = "http://1.2.3.4:8080/new-path/"
assert http_response.is_redirect
return http_response
def test_trino_result_response_headers():
"""
Validates that the `TrinoResult.response_headers` property returns the
headers associated to the TrinoQuery instance provided to the `TrinoResult`
class.
"""
mock_trino_query = mock.Mock(response_headers={
'X-Trino-Fake-1': 'one',
'X-Trino-Fake-2': 'two',
})
result = TrinoResult(
query=mock_trino_query,
)
assert result.response_headers == mock_trino_query.response_headers
def test_trino_query_response_headers(sample_get_response_data):
"""
Validates that the `TrinoQuery.execute` function can take additional headers
that are passed to the provided request instance's post function call, and
that it returns a `TrinoResult` instance.
"""
class MockResponse(mock.Mock):
# Fake response class
@property
def headers(self):
return {
'X-Trino-Fake-1': 'one',
'X-Trino-Fake-2': 'two',
}
def json(self):
return sample_get_response_data
req = TrinoRequest(
host="coordinator",
port=8080,
user="test",
source="test",
catalog="test",
schema="test",
http_scheme="http",
session_properties={},
)
sql = 'execute my_statement using 1, 2, 3'
additional_headers = {
constants.HEADER_PREPARED_STATEMENT: 'my_statement=added_prepare_statement_header'
}
# Patch the post function to avoid making the requests, as well as to
# validate that the function was called with the right arguments.
with mock.patch.object(req, 'post', return_value=MockResponse()) as mock_post:
query = TrinoQuery(
request=req,
sql=sql
)
result = query.execute(additional_http_headers=additional_headers)
# Validate that the post function was called with the right arguments
mock_post.assert_called_once_with(sql, additional_headers)
# Validate the result is an instance of TrinoResult
assert isinstance(result, TrinoResult)
``` |
{
"source": "aakashns/cloudman",
"score": 3
} |
#### File: cloudman/gcp/network.py
```python
from cloudman.utils.logger import log
from cloudman.gcp.utils import run
def list_networks():
"""Get list of networks in current project"""
res = run('compute networks list')
return [str(net['name']) for net in res]
def has_network(name):
"""Check if a network with given name exists"""
nets = list_networks()
return name in nets
def create_network(name):
"""Create a networks with the given name"""
log("Creating network '" + name + "'. This may take a while...", prefix=True)
return run('compute networks create ' + name)
def delete_network(name):
"""Delete a networks with the given name"""
log("Deleting network '" + name + "'. This may take a while...", prefix=True)
return run('compute networks delete ' + name + ' -q')
```
#### File: cloudman/utils/logger.py
```python
from __future__ import print_function
from sys import stderr
from termcolor import colored
def log(msg, error=False, prefix=False):
"""Print a message to stdout"""
if error:
print(colored('ERROR:', color='red'), '[cloudman]', msg, "\n", file=stderr)
else:
print(('[cloudman] ' if prefix else '') + msg, "\n")
``` |
{
"source": "aakashns/swiftai",
"score": 2
} |
#### File: swiftai/swiftai/object_detection.py
```python
from fastai.imports import *
from matplotlib import patches, patheffects
from fastai.dataset import *
def wh_bb(a): return np.array([a[1], a[0], a[3]+a[1]-1, a[2]+a[0]-1])
def bb_wh(a): return np.array([a[1],a[0],a[3]-a[1]+1,a[2]-a[0]+1])
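# Worked example (illustrative): wh_bb([50, 20, 100, 40]) -> [20, 50, 59, 149],
# i.e. a VOC-style [x, y, w, h] box becomes inclusive corner form [y1, x1, y2, x2];
# bb_wh inverts it: bb_wh([20, 50, 59, 149]) -> [50, 20, 100, 40].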
def show_img(im, figsize=None, ax=None, grid=False):
if not ax: fig, ax = plt.subplots(figsize=figsize)
ax.imshow(im)
if grid:
ax.set_xticks(np.linspace(0, 224, 9))
ax.set_yticks(np.linspace(0, 224, 9))
ax.grid()
ax.set_yticklabels([])
ax.set_xticklabels([])
else:
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
return ax
def draw_outline(o, lw):
o.set_path_effects([patheffects.Stroke(linewidth=lw, foreground='black'),
patheffects.Normal()])
def draw_rect(ax, b, color='white'):
patch = ax.add_patch(patches.Rectangle(
b[:2], *b[-2:], fill=False, edgecolor=color, lw=2))
draw_outline(patch, 4)
def draw_text(ax, xy, txt, sz=14, color='white'):
text = ax.text(*xy, txt, verticalalignment='top',
color=color, fontsize=sz, weight='bold')
draw_outline(text, 1)
class ConcatLblDataset(Dataset):
def __init__(self, ds, y2):
self.ds, self.y2 = ds, y2
self.sz = ds.sz
def __len__(self): return len(self.ds)
def __getitem__(self, i):
x, y = self.ds[i]
return (x, (y, self.y2[i]))
def append_y(md, md2):
md.trn_dl.dataset = ConcatLblDataset(md.trn_ds, md2.trn_y)
md.val_dl.dataset = ConcatLblDataset(md.val_ds, md2.val_y)
class VOCData:
def __init__(self, fname, img_path):
j = json.load(Path(fname).open())
self.img_path = img_path
self.cats = {o['id']: o['name'] for o in j['categories']}
self.ids = [o['id'] for o in j['images']]
self.fnames = {o['id']: o['file_name'] for o in j['images']}
self.anns = collections.defaultdict(lambda:[])
for o in j['annotations']:
if not o['ignore']:
b, c = wh_bb(o['bbox']), o['category_id']
self.anns[o['image_id']].append((b, c))
self._lrg_anns = None
@property
def lrg_anns(self):
if self._lrg_anns is None:
largest = lambda ann: max(ann, key=lambda x: np.product(x[0][-2:]-x[0][:2]))
self._lrg_anns = {id: largest(ann) for id,ann in self.anns.items()}
return self._lrg_anns
def open_img(self, id):
return open_image(self.img_path/self.fnames[id])
def show(self, id, figsize=(16,8), anns=None, largest=False):
if anns is None:
anns = [self.lrg_anns[id]] if largest else self.anns[id]
im = self.open_img(id)
print(im.shape)
ax = show_img(im, figsize=figsize)
for b,c in anns:
b = bb_wh(b)
draw_rect(ax, b)
draw_text(ax, b[:2], self.cats[c], sz=16)
def show_batch(self, x, y):
fig, axes = plt.subplots(3, 4, figsize=(16, 12))
for i, ax in enumerate(axes.flat):
bb = [bb_wh(o) for o in y[0][i].reshape(-1,4)]
ax = show_img(x[i], ax=ax)
for b, c in zip(bb, y[1][i]):
if (b[2] > 1):
draw_rect(ax, b)
draw_text(ax, b[:2], self.cats[int(c)])
plt.tight_layout()
def get_cls_df(self, largest=False, dedupe=False, named=False):
fns = [self.fnames[i] for i in self.ids]
if largest:
cts = [self.cats[self.lrg_anns[i][1]] for i in self.ids]
else:
wrap = set if dedupe else list
ct_ids = [wrap([a[1] for a in self.anns[i]]) for i in self.ids]
cts = [' '.join(self.cats[ci] if named else str(ci) for ci in ids) for ids in ct_ids]
return pd.DataFrame({'fn': fns, 'cat': cts}, columns=['fn', 'cat'])
def get_bb_df(self, largest=False):
fns = [self.fnames[i] for i in self.ids]
if largest:
bbs = np.array([' '.join(str(p) for p in self.lrg_anns[i][0]) for i in self.ids])
else:
_bbs = [np.concatenate([a[0] for a in self.anns[i]]) for i in self.ids]
bbs = [' '.join(str(p) for p in a) for a in _bbs]
return pd.DataFrame({ 'fn': fns, 'bbox': bbs}, columns=['fn', 'bbox'])
``` |
{
"source": "aakashparsi/HTS",
"score": 3
} |
#### File: HTS/hts/convenience.py
```python
from typing import Dict, Optional, Union
import numpy
import pandas
from hts._t import ArrayLike, MethodT, NAryTreeT, TransformT
from hts.functions import to_sum_mat
from hts.revision import RevisionMethod
def _to_numpy(v: ArrayLike, kind: str = "forecasts") -> numpy.ndarray:
if isinstance(v, numpy.ndarray):
if v.ndim > 1:
raise ValueError(f"`{kind}` values must be of a dimension 1")
return v
elif isinstance(v, pandas.DataFrame):
if len(v.columns) > 1:
raise ValueError(
f"If providing `{kind}` as a DataFrame, it must have one column only"
)
col_name = v.columns[0]
return v[col_name].values
elif isinstance(v, pandas.Series):
return v.values
else:
raise ValueError(
f"`{kind}` must be a dict mapping string to array, series or DataFrame"
)
def _sanitize_errors_dict(errors: Dict[str, float]) -> Dict[str, float]:
for k, v in errors.items():
if not isinstance(v, float):
raise ValueError("`errors` dict must be a mapping from string to float")
return errors
def _sanitize_residuals_dict(
residuals: Dict[str, ArrayLike]
) -> Dict[str, numpy.ndarray]:
for k, v in residuals.items():
residuals[k] = _to_numpy(v, kind="residuals")
return residuals
def _sanitize_forecasts_dict(
forecasts: Dict[str, ArrayLike]
) -> Dict[str, pandas.DataFrame]:
for k, v in forecasts.items():
as_array = _to_numpy(v, kind="forecasts")
forecasts[k] = pandas.DataFrame({"yhat": as_array})
return forecasts
def _calculate_errors(
method: str,
errors: Optional[Dict[str, float]] = None,
residuals: Optional[Dict[str, numpy.ndarray]] = None,
):
errors_or_residuals = (
True if (errors is not None or residuals is not None) else False
)
if not errors_or_residuals:
raise ValueError(
f"Method {method} requires either errors or residuals to be provided"
)
if residuals is not None:
residuals = _sanitize_residuals_dict(residuals)
if errors is None:
errors = {}
for k, v in residuals.items():
errors[k] = numpy.mean(numpy.array(v) ** 2)
return _sanitize_errors_dict(errors)
def revise_forecasts(
method: str,
forecasts: Dict[str, ArrayLike],
errors: Optional[Dict[str, float]] = None,
residuals: Optional[Dict[str, ArrayLike]] = None,
summing_matrix: numpy.ndarray = None,
nodes: NAryTreeT = None,
transformer: TransformT = None,
):
"""
Convenience function to get revised forecast for pre-computed base forecasts
Parameters
----------
method : str
The reconciliation method to use
forecasts : Dict[str, ArrayLike]
A dict mapping key name to its forecasts (including in-sample forecasts). Required, can be
of type ``numpy.ndarray`` of ``ndim == 1``, ``pandas.Series``, or single columned ``pandas.DataFrame``
errors : Dict[str, float]
A dict mapping key name to the in-sample errors. Required for methods: ``OLS``, ``WLSS``, ``WLSV`` if
``residuals`` is not passed
residuals : Dict[str, ArrayLike]
A dict mapping key name to the residuals of in-sample forecasts. Required for methods: ``OLS``, ``WLSS``,
``WLSV``, can be of type ``numpy.ndarray`` of ndim == 1, ``pandas.Series``, or single columned
``pandas.DataFrame``. If passing residuals, ``errors`` dict is not required and will instead be calculated
using MSE metric: ``numpy.mean(numpy.array(residual) ** 2)``
summing_matrix : numpy.ndarray
Not required if ``nodes`` argument is passed, or if using ``BU`` approach
nodes : NAryTreeT
The tree of nodes as specified in :py:class:`HierarchyTree <hts.hierarchy.HierarchyTree>`. Required if not
if using ``AHP``, ``PHA``, ``FP`` methods, or if using passing the ``OLS``, ``WLSS``, ``WLSV`` methods
and not passing the ``summing_matrix`` parameter
transformer : TransformT
A transform with the method: ``inv_func`` that will be applied to the forecasts
Returns
-------
revised forecasts : ``pandas.DataFrame``
The revised forecasts
"""
if nodes:
summing_matrix, sum_mat_labels = to_sum_mat(nodes)
if method in [MethodT.AHP.name, MethodT.PHA.name, MethodT.FP.name] and not nodes:
raise ValueError(f"Method {method} requires an NAryTree to be passed")
if method in [MethodT.OLS.name, MethodT.WLSS.name, MethodT.WLSV.name]:
errors = _calculate_errors(method=method, errors=errors, residuals=residuals)
if not (all([forecasts, errors]) or (not summing_matrix)):
raise ValueError(
f"Method {method} requires forecasts, errors, and residuals to be passed, as "
f"well as an NAryTree or a summing matrix"
)
revision = RevisionMethod(
name=method, sum_mat=summing_matrix, transformer=transformer
)
sanitized_forecasts = _sanitize_forecasts_dict(forecasts)
revised = revision.revise(forecasts=sanitized_forecasts, mse=errors, nodes=nodes)
return pandas.DataFrame(revised, columns=list(sanitized_forecasts.keys()))
```
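A rough usage sketch for `revise_forecasts`, assuming a toy two-leaf hierarchy where `total = a + b`; the array values are made up and the summing-matrix row order is assumed to follow the key order of the `forecasts` dict:
```python
import numpy as np

from hts.convenience import revise_forecasts

# total = a + b: one row per series (total first), one column per bottom-level series.
sum_mat = np.array([[1.0, 1.0],
                    [1.0, 0.0],
                    [0.0, 1.0]])

forecasts = {
    "total": np.array([110.0, 120.0]),   # slightly incoherent base forecasts
    "a": np.array([60.0, 70.0]),
    "b": np.array([45.0, 55.0]),
}
residuals = {                            # in-sample residuals, used to derive MSE weights
    "total": np.array([2.0, -1.0, 3.0]),
    "a": np.array([1.0, -0.5, 1.5]),
    "b": np.array([0.5, 0.5, -1.0]),
}

revised = revise_forecasts("OLS", forecasts, residuals=residuals, summing_matrix=sum_mat)
print(revised)   # DataFrame with one (now coherent) column per key
```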
#### File: hts/core/utils.py
```python
import os
import pickle
from typing import Dict, List, Optional, Tuple
import numpy
import pandas
from hts._t import (
HTSFitResultT,
LowMemoryFitResultT,
ModelFitResultT,
NAryTreeT,
TimeSeriesModelT,
)
from hts.hierarchy.utils import make_iterable
from hts.utilities.distribution import (
DistributorBaseClass,
MapDistributor,
MultiprocessingDistributor,
)
def _do_fit(
nodes: NAryTreeT,
function_kwargs,
n_jobs: int,
disable_progressbar: bool,
show_warnings: bool,
distributor: Optional[DistributorBaseClass],
) -> HTSFitResultT:
distributor = _get_distributor(
n_jobs=n_jobs,
disable_progressbar=disable_progressbar,
show_warnings=show_warnings,
distributor=distributor,
)
result = distributor.map_reduce(
_do_actual_fit, data=nodes, function_kwargs=function_kwargs
)
distributor.close()
return result
def _do_actual_fit(node: NAryTreeT, function_kwargs: Dict) -> ModelFitResultT:
instantiated_model = function_kwargs["model_instance"](
node=node,
transform=function_kwargs["transform"],
**function_kwargs["model_args"]
)
if not function_kwargs["low_memory"]:
model_instance = instantiated_model.fit(**function_kwargs["fit_kwargs"])
return model_instance
else:
return _fit_serialize_model(instantiated_model, function_kwargs)
def _fit_serialize_model(
model: TimeSeriesModelT, function_kwargs: Dict
) -> LowMemoryFitResultT:
tmp = function_kwargs["tmp_dir"]
path = os.path.join(tmp, model.node.key + ".pkl")
model_instance = model.fit(**function_kwargs["fit_kwargs"])
with open(path, "wb") as p:
pickle.dump(model_instance, p)
return model.node.key, path
def _do_predict(
models: List[Tuple[str, ModelFitResultT, NAryTreeT]],
function_kwargs: Dict,
n_jobs: int,
disable_progressbar: bool,
show_warnings: bool,
distributor: DistributorBaseClass,
) -> HTSFitResultT:
distributor = _get_distributor(
n_jobs=n_jobs,
disable_progressbar=disable_progressbar,
show_warnings=show_warnings,
distributor=distributor,
)
result = distributor.map_reduce(
_do_actual_predict, data=models, function_kwargs=function_kwargs
)
distributor.close()
return result
def _model_mapping_to_iterable(
model_mapping: Dict[str, ModelFitResultT], nodes: NAryTreeT
) -> List[Tuple[str, ModelFitResultT, NAryTreeT]]:
prediction_triplet = []
for node in make_iterable(nodes, prop=None):
if isinstance(model_mapping[node.key], tuple):
model = model_mapping[node.key][1]
else:
model = model_mapping[node.key]
prediction_triplet.append((node.key, model, node))
return prediction_triplet
def _do_actual_predict(
model: Tuple[str, ModelFitResultT, NAryTreeT], function_kwargs: Dict
) -> Tuple[str, pandas.DataFrame, numpy.ndarray, numpy.ndarray]:
key, file_or_model, node = model
if function_kwargs["low_memory"]:
model_instance = _load_serialized_model(
tmp_dir=function_kwargs["tmp_dir"], file_name=file_or_model
)
else:
model_instance = file_or_model
model_instance = model_instance.predict(
node=node,
steps_ahead=function_kwargs["steps_ahead"],
**function_kwargs["predict_kwargs"]
)
return key, model_instance.forecast, model_instance.mse, model_instance.residual
def _load_serialized_model(tmp_dir, file_name):
path = os.path.join(tmp_dir, file_name)
with open(path, "rb") as p:
return pickle.load(p)
def _get_distributor(
n_jobs: int,
disable_progressbar: bool,
show_warnings: bool,
distributor: Optional[DistributorBaseClass],
):
if distributor is None:
if n_jobs == 0:
distributor = MapDistributor(
disable_progressbar=disable_progressbar,
progressbar_title="Fitting models: ",
)
else:
distributor = MultiprocessingDistributor(
n_workers=n_jobs,
disable_progressbar=disable_progressbar,
progressbar_title="Fitting models",
show_warnings=show_warnings,
)
if not isinstance(distributor, DistributorBaseClass):
raise ValueError("the passed distributor is not an DistributorBaseClass object")
return distributor
```
#### File: hts/viz/geo.py
```python
import logging
import string
from itertools import chain
import numpy
from hts._t import HierarchyVisualizerT, NAryTreeT
logger = logging.getLogger(__name__)
def get_min_max_ll(geos):
fl = list(chain.from_iterable([g[0] for g in geos]))
mx_lat = max([x[0] for x in fl])
mx_lon = max([x[1] for x in fl])
mn_lat = min([x[0] for x in fl])
mn_lon = min([x[1] for x in fl])
return mx_lat, mx_lon, mn_lat, mn_lon
class HierarchyVisualizer(HierarchyVisualizerT):
def __init__(self, tree: NAryTreeT):
self.tree = tree
@property
def as_df(self):
return self.tree.to_pandas()
def get_geos(self):
try:
from h3 import h3
except ImportError: # pragma: no cover
logger.error(
"h3-py must be installed for geo hashing capabilities. Exiting."
"Install it with: pip install scikit-hts[geo]"
)
return
h3s = [
col for col in self.as_df.columns if all(c in string.hexdigits for c in col)
]
return [
(h3.h3_to_geo_boundary(g), self.as_df[g].fillna(0).sum(), g) for g in h3s
]
def h3_to_lat_long(self):
return
def create_map(self):
try:
import branca.colormap as cm
from folium import Map
from folium.vector_layers import Polygon
except ImportError: # pragma: no cover
logger.error(
"Mapping requires folium==0.10.0 to be installed, geo mapping will not work."
"Install it with: pip install scikit-hts[geo]"
)
return
_map = Map(tiles="cartodbpositron")
geos = self.get_geos()
max_lat, max_lon, min_lat, min_lon = get_min_max_ll(geos)
geos = [
(i, numpy.log(j + 1) / (self.tree.get_node_height(k) + 1), k)
for i, j, k in geos
]
mx, mn = max([j for i, j, k in geos]), min([j for i, j, k in geos])
geos = [(i, (j - mn) / (mx - mn), k) for i, j, k in geos]
for points, count, h in sorted(geos, key=lambda x: x[1]):
tooltip = f"hex: {h}"
polygon = Polygon(
locations=points,
tooltip=tooltip,
fill=True,
color=cm.linear.OrRd_03.rgb_hex_str(count),
fill_color=cm.linear.OrRd_03.rgb_hex_str(count),
fill_opacity=0.3,
weight=3,
opacity=0.4,
)
polygon.add_to(_map)
_map.fit_bounds([[min_lat, min_lon], [max_lat, max_lon]])
return _map
```
#### File: tests/integ/test_fit_model.py
```python
from collections import namedtuple
import numpy
import pandas
import pytest
# from fbprophet import Prophet
from pmdarima import AutoARIMA
from hts.model import AutoArimaModel, FBProphetModel, HoltWintersModel, SarimaxModel
from hts.model.base import TimeSeriesModel
# def test_instantiate_fb_model_uv(uv_tree):
# fb = FBProphetModel(node=uv_tree)
# assert isinstance(fb, TimeSeriesModel)
# fb = FBProphetModel(node=uv_tree, capacity_max=1)
# assert isinstance(fb, TimeSeriesModel)
# fb = FBProphetModel(node=uv_tree, capacity_min=1)
# assert isinstance(fb, TimeSeriesModel)
# def test_fit_predict_fb_model_mv(mv_tree):
# exog = pandas.DataFrame({"precipitation": [1], "temp": [20]})
# fb = FBProphetModel(node=mv_tree)
# assert isinstance(fb, TimeSeriesModel)
# fb.fit()
# fb.predict(mv_tree, exogenous_df=exog)
# assert isinstance(fb.forecast, pandas.DataFrame)
# assert isinstance(fb.residual, numpy.ndarray)
# assert isinstance(fb.mse, float)
# def test_fit_predict_fb_model_mv(mv_tree):
# exog = pandas.DataFrame({"precipitation": [1, 2], "temp": [20, 30]})
# fb = FBProphetModel(node=mv_tree)
# assert isinstance(fb, TimeSeriesModel)
# fb.fit()
# fb.predict(mv_tree, exogenous_df=exog)
# assert isinstance(fb.forecast, pandas.DataFrame)
# assert isinstance(fb.residual, numpy.ndarray)
# assert isinstance(fb.mse, float)
# def test_fit_predict_fb_model_uv(uv_tree):
# fb = FBProphetModel(node=uv_tree)
# fb.fit()
# assert isinstance(fb.model, Prophet)
# fb.predict(uv_tree)
# assert isinstance(fb.forecast, pandas.DataFrame)
# assert isinstance(fb.residual, numpy.ndarray)
# assert isinstance(fb.mse, float)
def test_fit_predict_ar_model_mv(mv_tree):
ar = AutoArimaModel(node=mv_tree)
ar.fit(max_iter=1)
assert isinstance(ar.model, AutoARIMA)
exog = pandas.DataFrame({"precipitation": [1], "temp": [20]})
ar.predict(mv_tree, steps_ahead=1, exogenous_df=exog)
assert isinstance(ar.forecast, pandas.DataFrame)
assert isinstance(ar.residual, numpy.ndarray)
assert isinstance(ar.mse, float)
def test_fit_predict_ar_model_uv(uv_tree):
ar = AutoArimaModel(
node=uv_tree,
)
ar.fit(max_iter=1)
assert isinstance(ar.model, AutoARIMA)
ar.predict(uv_tree)
assert isinstance(ar.forecast, pandas.DataFrame)
assert isinstance(ar.residual, numpy.ndarray)
assert isinstance(ar.mse, float)
def test_fit_predict_sarimax_model_uv(uv_tree):
sar = SarimaxModel(
node=uv_tree,
max_iter=1,
)
fitted_sar = sar.fit()
assert isinstance(fitted_sar, SarimaxModel)
sar.predict(uv_tree)
assert isinstance(sar.forecast, pandas.DataFrame)
assert isinstance(sar.residual, numpy.ndarray)
assert isinstance(sar.mse, float)
def test_fit_predict_hw_model_uv(uv_tree):
hw = HoltWintersModel(
node=uv_tree,
)
fitted_hw = hw.fit()
assert isinstance(fitted_hw, HoltWintersModel)
hw.predict(uv_tree)
assert isinstance(hw.forecast, pandas.DataFrame)
assert isinstance(hw.residual, numpy.ndarray)
assert isinstance(hw.mse, float)
def test_fit_predict_hw_model_uv_with_transform(uv_tree):
Transform = namedtuple("Transform", ["func", "inv_func"])
transform_pos_neg = Transform(func=numpy.exp, inv_func=lambda x: -x)
hw = HoltWintersModel(node=uv_tree, transform=transform_pos_neg)
fitted_hw = hw.fit()
assert isinstance(fitted_hw, HoltWintersModel)
preds = hw.predict(uv_tree)
assert not (preds.forecast.values > 0).any()
assert isinstance(hw.forecast, pandas.DataFrame)
assert isinstance(hw.residual, numpy.ndarray)
assert isinstance(hw.mse, float)
def test_fit_predict_model_invalid_transform(uv_tree):
Transform = namedtuple("Transform", ["func_invalid_arg", "inv_func"])
transform_pos_neg = Transform(func_invalid_arg=numpy.exp, inv_func=lambda x: -x)
with pytest.raises(ValueError):
HoltWintersModel(node=uv_tree, transform=transform_pos_neg)
from hts.utilities.load_data import load_mobility_data
from hts.hierarchy import HierarchyTree
from hts.core import regressor
import pandas as pd
def test_ar_model():
dummydf = pd.DataFrame(
[
[10, 1, 4, 5, 100, 10, 10, 110],
[20, 2, 8, 10, 25, 5, 5, 45],
[30, 3, 12, 15, 400, 20, 20, 430],
[40, 4, 16, 20, 225, 15, 15, 265],
],
columns = ['target1', 'exog11', 'exog12', 'exog13', 'target2', 'exog21', 'exog22', 'total'],
index = ['2021-01', '2021-02', '2021-03', '2021-04']
)
dummydf.index = pd.to_datetime(dummydf.index)
dummydf.index = dummydf.index.to_period('M')
exogdf = pd.DataFrame(
[
[1, 16, 25, 2, 2],
# [4, 64, 100, 1, 1]
],
columns = ['exog11', 'exog12', 'exog13', 'exog21', 'exog22'],
index = ['2021-05']
)
exogdf.index = pd.to_datetime(exogdf.index)
exogdf.index = exogdf.index.to_period('M')
hier = {
'total': ['target1', 'target2']
}
exogenous = {
'target1': ['exog11', 'exog12'],
'target2': ['exog21', 'exog22']
}
print(exogdf)
ht = HierarchyTree.from_nodes(hier, dummydf, exogenous=exogenous)
from collections import namedtuple
Transform = namedtuple('Transform', ['func', 'inv_func'])
sqrtTransformation = Transform(func=numpy.sqrt, inv_func=numpy.square)
htsmodel = regressor.HTSRegressor(model = 'auto_arima', revision_method = 'PHA', n_jobs = 0, transform = sqrtTransformation)
htsfit = htsmodel.fit(dummydf, hier, exogenous = exogenous)
print("fitting completed\n")
pred = htsfit.predict(steps_ahead = 5, exogenous_df=exogdf)
print(pred)
return "DONE"
``` |
{
"source": "aakashparsi/tsmoothie",
"score": 4
} |
#### File: tsmoothie/tsmoothie/regression_basis.py
```python
import numpy as np
def polynomial(degree, basis_len):
"""Create basis for polynomial regression.
Returns
-------
X_base : array
Basis for polynomial regression.
"""
X = np.arange(basis_len, dtype=np.float64)
X_base = np.repeat([X], degree, axis=0).T
X_base = np.power(X_base, np.arange(1, degree + 1))
return X_base
def linear_spline(knots, basis_len):
"""Create basis for linear spline regression.
Returns
-------
X_base : array
Basis for linear spline regression.
"""
n_knots = len(knots)
X = np.arange(basis_len)
X_base = np.zeros((basis_len, n_knots + 1))
X_base[:, 0] = X
X_base[:, 1:] = X[:, None] - knots[None, :]
X_base[X_base < 0] = 0
return X_base
def cubic_spline(knots, basis_len):
"""Create basis for cubic spline regression.
Returns
-------
X_base : array
Basis for cubic spline regression.
"""
n_knots = len(knots)
X = np.arange(basis_len)
X_base = np.zeros((basis_len, n_knots + 3))
X_base[:, 0] = X
X_base[:, 1] = X_base[:, 0] * X_base[:, 0]
X_base[:, 2] = X_base[:, 1] * X_base[:, 0]
X_base[:, 3:] = np.power(X[:, None] - knots[None, :], 3)
X_base[X_base < 0] = 0
return X_base
def natural_cubic_spline(knots, basis_len):
"""Create basis for natural cubic spline regression.
Returns
-------
X_base : array
Basis for natural cubic spline regression.
"""
n_knots = len(knots)
X = np.arange(basis_len)
X_base = np.zeros((basis_len, n_knots - 1))
X_base[:, 0] = X
numerator1 = X[:, None] - knots[None, :n_knots - 2]
numerator1[numerator1 < 0] = 0
numerator2 = X[:, None] - knots[None, n_knots - 1]
numerator2[numerator2 < 0] = 0
numerator = np.power(numerator1, 3) - np.power(numerator2, 3)
denominator = knots[n_knots - 1] - knots[:n_knots - 2]
numerator1_dd = X[:, None] - knots[None, n_knots - 2]
numerator1_dd[numerator1_dd < 0] = 0
numerator2_dd = X[:, None] - knots[None, n_knots - 1]
numerator2_dd[numerator2_dd < 0] = 0
numerator_dd = np.power(numerator1_dd, 3) - np.power(numerator2_dd, 3)
denominator_dd = knots[n_knots - 1] - knots[n_knots - 2]
dd = numerator_dd / denominator_dd
X_base[:, 1:] = numerator / denominator - dd
return X_base
def gaussian_kernel(knots, sigma, basis_len):
"""Create basis for gaussian kernel regression.
Returns
-------
X_base : array
Basis for gaussian kernel regression.
"""
n_knots = len(knots)
X = np.arange(basis_len) / basis_len
X_base = - np.square(X[:, None] - knots) / (2 * sigma)
X_base = np.exp(X_base)
return X_base
def binner(knots, basis_len):
"""Create basis for binner regression.
Returns
-------
X_base : array
Basis for binner regression.
"""
n_knots = len(knots)
X = np.arange(basis_len)
X_base = np.zeros((basis_len, n_knots + 1))
X_base[:, 0] = X <= knots[0]
X_base[:, 1:-1] = np.logical_and(
X[:, None] <= knots[1:][None, :],
X[:, None] > knots[:(n_knots - 1)][None, :])
X_base[:, n_knots] = knots[-1] < X
return X_base
def lowess(smooth_fraction, basis_len):
"""Create basis for LOWESS.
Returns
-------
X_base : array
Basis for LOWESS.
"""
X = np.arange(basis_len)
r = int(np.ceil(smooth_fraction * basis_len))
r = min(r, basis_len - 1)
X = X[:, None] - X[None, :]
h = np.sort(np.abs(X), axis=1)[:, r]
X_base = np.abs(X / h).clip(0.0, 1.0)
X_base = np.power(1 - np.power(X_base, 3), 3)
return X_base
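# --- Usage sketch (added for illustration; not part of the original tsmoothie source). ---
# It shows how one of the bases above could be combined with a plain least-squares fit to
# smooth a noisy series; the series and the degree below are arbitrary placeholders.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    y = np.sin(np.linspace(0, 3, 100)) + rng.normal(scale=0.1, size=100)
    X = polynomial(degree=3, basis_len=len(y))     # columns: x, x**2, x**3
    X = np.column_stack([np.ones(len(y)), X])      # prepend an intercept column
    coef, *_ = np.linalg.lstsq(X, y, rcond=None)   # ordinary least squares
    smooth = X @ coef                              # smoothed curve, same length as y
    print(smooth.shape)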
``` |
{
"source": "aakashpatel379/radviz",
"score": 3
} |
#### File: aakashpatel379/radviz/Server.py
```python
from flask import Flask, render_template, request
from sklearn.cluster import KMeans
import ast
import json
import pandas as pd
from flask_cors import CORS
import plotly
import plotly.graph_objs as go
import plotly.express as px
app = Flask(__name__, template_folder="templates")
cors = CORS(app)
df = pd.read_csv('./winequality-red.csv')
@app.route('/')
def home():
json_response = json.loads(df.to_json(orient='records'))
print(json_response)
return render_template('home.html', data=json.dumps(json_response))
@app.route('/displaycsv', methods=['POST', 'GET'])
def displaycsv():
global df
df = pd.read_csv('./winequality-white.csv')
json_response = json.loads(df.to_json(orient='records'))
return json.dumps(json_response)
@app.route('/labelUpdate', methods=['POST', 'GET'])
def updateLabel():
arguments = request.args
df_a1 = pd.read_csv('./dataset1_processed_updated.csv')
numcols =['age', 'fnlwgt', 'education-num', 'capital-gain', 'capital-loss', 'hours-per-week']
labelcol = arguments['label']
numcols.append(labelcol)
df_a1_filtered = df_a1[numcols]
json_response = json.loads(df_a1_filtered.to_json(orient='records'))
return json.dumps(json_response)
@app.route('/applyAlgoParameters', methods=['POST', 'GET'])
def applyAlgoParamsKMeans():
arguments = request.args
rstate = arguments['randomstate']
init = str(arguments['init'])
if('means' in init):
init =init.strip()
init+="++"
csvname = arguments['name']
colormode = arguments['mode']
print(csvname+":"+colormode)
global df
df = pd.read_csv('./' + csvname + '.csv')
labelcol = df.columns
labelcol = labelcol[-1]
classes = df[labelcol].unique()
if colormode != "Class-Based":
del df[labelcol]
kmeans = KMeans(n_clusters=len(classes),init=init,random_state=int(rstate))
kmeans.fit(df)
y_kmeans = kmeans.predict(df)
labels = list(y_kmeans)
labels = kmeans.labels_
df['labels'] = labels
json_response = json.loads(df.to_json(orient='records'))
return json.dumps(json_response)
@app.route('/load', methods=['POST', 'GET'])
def readcsv():
arguments = request.args
csvname = arguments['name']
print(csvname)
global df
df = pd.read_csv('./' + csvname + '.csv')
json_response = json.loads(df.to_json(orient='records'))
return json.dumps(json_response)
@app.route('/correlationMatrix', methods=['POST', 'GET'])
def calculateCorrelationMatrix():
arguments = request.args
try:
cid = int(arguments['clusterid'])
except ValueError:
cid = arguments['clusterid']
global df
labelcol = df.columns
labelcol = labelcol[-1]
df_filtered = df[df[labelcol] == cid]
x = list(df_filtered.columns)
x.pop()
fig = go.Figure(data=go.Heatmap(
z=df_filtered.corr(),
x=x,
y=x,
colorscale='Reds'
))
fig['layout'].update(title="Correlation Matrix")
json_response=json.loads(fig.to_json())
return json.dumps(json_response)
@app.route('/visualizeA1Assignment', methods=['POST', 'GET'])
def getAssignment1Data():
df_a1 = pd.read_csv('./dataset1_processed_updated.csv')
df_a1_filtered = df_a1[['age', 'fnlwgt', 'education-num', 'capital-gain', 'capital-loss', 'hours-per-week', 'salary']]
json_response = json.loads(df_a1_filtered.to_json(orient='records'))
return render_template('a1_dataset.html', data=json.dumps(json_response))
@app.route('/clusterizeA1', methods=['POST', 'GET'])
def clusterizeA1Data():
df_a1 = pd.read_csv('./dataset1_processed_updated.csv')
arguments = request.args
lbl = arguments['label']
numcols = ['age', 'fnlwgt', 'education-num', 'capital-gain', 'capital-loss', 'hours-per-week', lbl]
colormode = arguments['mode']
df_a1_filtered = df_a1[numcols]
classes = df_a1_filtered[lbl].unique()
if colormode != "Class-Based":
del df_a1_filtered[lbl]
kmeans = KMeans(n_clusters=len(classes))
kmeans.fit(df_a1_filtered)
y_kmeans = kmeans.predict(df_a1_filtered)
labels =list(y_kmeans)
labels = kmeans.labels_
df_a1_filtered['labels'] = labels
json_response = json.loads(df_a1_filtered.to_json(orient='records'))
return json.dumps(json_response)
@app.route('/clusterize', methods=['POST', 'GET'])
def clusterizeData():
arguments = request.args
csvname = arguments['name']
colormode = arguments['mode']
print(csvname+":"+colormode)
global df
df = pd.read_csv('./' + csvname + '.csv')
labelcol = df.columns
labelcol = labelcol[-1]
classes = df[labelcol].unique()
if colormode != "Class-Based":
del df[labelcol]
kmeans = KMeans(n_clusters=len(classes))
kmeans.fit(df)
y_kmeans = kmeans.predict(df)
labels =list(y_kmeans)
labels = kmeans.labels_
df['labels'] = labels
json_response = json.loads(df.to_json(orient='records'))
return json.dumps(json_response)
if __name__ == '__main__':
app.run(debug=True)
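    # Example requests against the running dev server (illustrative only; assumes Flask's
    # default port 5000 and that the CSV files referenced above sit next to Server.py):
    #   curl "http://127.0.0.1:5000/"
    #   curl "http://127.0.0.1:5000/load?name=winequality-red"
    #   curl "http://127.0.0.1:5000/clusterize?name=winequality-red&mode=Class-Based"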
``` |
{
"source": "aakashrana1995/svnit-tnp",
"score": 2
} |
#### File: company/migrations/0017_auto_20170530_1724.py
```python
from __future__ import unicode_literals
from django.db import migrations
from django.contrib.auth.models import Group, User, Permission
def load_coordinator_group(apps, schema_editor):
group, created = Group.objects.get_or_create(name='Coordinator')
company_permissions = Permission.objects.filter(
content_type__app_label='company'
).exclude(
codename__startswith='add',
content_type__model__in=['company', 'job'],
).exclude(
codename__startswith='delete',
content_type__model__in=['company', 'branch', 'job'],
)
consent_permissions = Permission.objects.filter(codename='change_consentdeadline')
for p in company_permissions:
group.permissions.add(p)
for p in consent_permissions:
group.permissions.add(p)
class Migration(migrations.Migration):
dependencies = [
('company', '0016_auto_20170529_2226'),
]
operations = [
migrations.RunPython(load_coordinator_group),
]
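# Applying this data migration uses Django's standard management command, e.g.:
#   python manage.py migrate company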
```
#### File: consent/templatetags/consent_extras.py
```python
from django import template
from django.contrib.auth.models import Group
register = template.Library()
@register.filter
def is_coordinator(user):
return user.groups.filter(name='Coordinator').exists()
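# Template usage sketch (illustrative): load this tag library in a template and apply the
# filter to a user object, e.g.
#   {% load consent_extras %}
#   {% if request.user|is_coordinator %} ... {% endif %}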
``` |
{
"source": "Aakash-Roy/allenact",
"score": 2
} |
#### File: allenact/scripts/dmain.py
```python
import sys
import os
import time
import random
import string
from pathlib import Path
from typing import Optional
import subprocess
# Add to PYTHONPATH the path of the parent directory of the current file's directory
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(Path(__file__)))))
from allenact.main import get_argument_parser as get_main_arg_parser
from allenact.utils.system import init_logging, get_logger
from constants import ABS_PATH_OF_TOP_LEVEL_DIR
def get_argument_parser():
"""Creates the argument parser."""
parser = get_main_arg_parser()
parser.description = f"distributed {parser.description}"
parser.add_argument(
"--runs_on",
required=True,
type=str,
help="Comma-separated IP addresses of machines",
)
parser.add_argument(
"--ssh_cmd",
required=False,
type=str,
default="ssh -f {addr}",
help="SSH command. Useful to utilize a pre-shared key with 'ssh -i mykey.pem -f ubuntu@{addr}'. "
"The option `-f` should be used for non-interactive session",
)
parser.add_argument(
"--env_activate_path",
required=True,
type=str,
help="Path to the virtual environment's `activate` script. It must be the same across all machines",
)
parser.add_argument(
"--allenact_path",
required=False,
type=str,
default="allenact",
help="Path to allenact top directory. It must be the same across all machines",
)
# Required distributed_ip_and_port
idx = [a.dest for a in parser._actions].index("distributed_ip_and_port")
parser._actions[idx].required = True
return parser
def get_args():
"""Creates the argument parser and parses any input arguments."""
parser = get_argument_parser()
args = parser.parse_args()
return args
def get_raw_args():
raw_args = sys.argv[1:]
filtered_args = []
remove: Optional[str] = None
enclose_in_quotes: Optional[str] = None
for arg in raw_args:
if remove is not None:
remove = None
elif enclose_in_quotes is not None:
# Within backslash expansion: close former single, open double, create single, close double, reopen single
inner_quote = r"\'\"\'\"\'"
# Convert double quotes into backslash double for later expansion
filtered_args.append(
inner_quote + arg.replace('"', r"\"").replace("'", r"\"") + inner_quote
)
enclose_in_quotes = None
elif arg in [
"--runs_on",
"--ssh_cmd",
"--env_activate_path",
"--allenact_path",
"--extra_tag",
"--machine_id",
]:
remove = arg
elif arg == "--config_kwargs":
enclose_in_quotes = arg
filtered_args.append(arg)
else:
filtered_args.append(arg)
return filtered_args
def wrap_single(text):
return f"'{text}'"
def wrap_single_nested(text):
# Close former single, start backslash expansion (via $), create new single quote for expansion:
quote_enter = r"'$'\'"
# New closing single quote for expansion, close backslash expansion, reopen former single:
quote_leave = r"\'''"
return f"{quote_enter}{text}{quote_leave}"
def wrap_double(text):
return f'"{text}"'
def id_generator(size=4, chars=string.ascii_uppercase + string.digits):
return "".join(random.choice(chars) for _ in range(size))
# Assume we can ssh into each of the `runs_on` machines through port 22
if __name__ == "__main__":
# Tool must be called from AllenAct project's root directory
cwd = os.path.abspath(os.getcwd())
assert cwd == ABS_PATH_OF_TOP_LEVEL_DIR, (
f"`dmain.py` called from {cwd}."
f"\nIt should be called from AllenAct's top level directory {ABS_PATH_OF_TOP_LEVEL_DIR}."
)
args = get_args()
init_logging(args.log_level)
raw_args = get_raw_args()
if args.seed is None:
seed = random.randint(0, 2 ** 31 - 1)
raw_args.extend(["-s", f"{seed}"])
get_logger().info(f"Using random seed {seed} in all workers (none was given)")
all_addresses = args.runs_on.split(",")
get_logger().info(f"Running on IP addresses {all_addresses}")
assert args.distributed_ip_and_port.split(":")[0] in all_addresses, (
f"Missing listener IP address {args.distributed_ip_and_port.split(':')[0]}"
f" in list of worker addresses {all_addresses}"
)
time_str = time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime(time.time()))
global_job_id = id_generator()
killfilename = os.path.join(
os.path.expanduser("~"), ".allenact", f"{time_str}_{global_job_id}.killfile"
)
os.makedirs(os.path.dirname(killfilename), exist_ok=True)
code_src = "."
with open(killfilename, "w") as killfile:
for it, addr in enumerate(all_addresses):
code_tget = f"{addr}:{args.allenact_path}/"
get_logger().info(f"rsync {code_src} to {code_tget}")
os.system(f"rsync -rz {code_src} {code_tget}")
job_id = id_generator()
command = " ".join(
["python", "main.py"]
+ raw_args
+ [
"--extra_tag",
f"{args.extra_tag}{'__' if len(args.extra_tag) > 0 else ''}machine{it}",
]
+ ["--machine_id", f"{it}"]
)
logfile = (
f"{args.output_dir}/log_{time_str}_{global_job_id}_{job_id}_machine{it}"
)
env_and_command = wrap_single_nested(
f"for NCCL_SOCKET_IFNAME in $(route | grep default) ; do : ; done && export NCCL_SOCKET_IFNAME"
f" && cd {args.allenact_path}"
f" && mkdir -p {args.output_dir}"
f" && source {args.env_activate_path} &>> {logfile}"
f" && echo pwd=$(pwd) &>> {logfile}"
f" && echo output_dir={args.output_dir} &>> {logfile}"
f" && echo python_version=$(python --version) &>> {logfile}"
f" && echo python_path=$(which python) &>> {logfile}"
f" && set | grep NCCL_SOCKET_IFNAME &>> {logfile}"
f" && echo &>> {logfile}"
f" && {command} &>> {logfile}"
)
screen_name = f"allenact_{time_str}_{global_job_id}_{job_id}_machine{it}"
screen_command = wrap_single(
f"screen -S {screen_name} -dm bash -c {env_and_command}"
)
ssh_command = f"{args.ssh_cmd.format(addr=addr)} {screen_command}"
get_logger().debug(f"SSH command {ssh_command}")
subprocess.run(ssh_command, shell=True, executable="/bin/bash")
get_logger().info(f"{addr} {screen_name}")
killfile.write(f"{addr} {screen_name}\n")
get_logger().info("")
get_logger().info(f"Running screen ids saved to {killfilename}")
get_logger().info("")
get_logger().info("DONE")
``` |
{
"source": "Aakash-Roy/cal-auto-google-calendar",
"score": 3
} |
#### File: Aakash-Roy/cal-auto-google-calendar/cal_setup.py
```python
print(r"""
//\\ || // ___ || //''''''// \\ //
// \\ __ ___ || // __ ___ / || || // ___ \\ //
// \\ / _` ||// / _` '___ ||-----, ||___ // / _ \ \\//
//------\\| (_| |-||\\ | (_| | / || || || \\| (_) | ||
// \\\__,_| || \\ \__,_| ___/ || || || \\\___/ ||
""")
print("\n****************************************************************")
'''
Import the following libraries
'''
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
# You can change the scope of the application, make sure to delete token.pickle file first
SCOPES = ['https://www.googleapis.com/auth/calendar']
CREDENTIALS_FILE = 'credentials.json' # Give path to your credentials.json file
def get_calendar_service():
cred = None
'''
The file token.pickle stores the user's access and refresh tokens, and is created automatically when
the authorization flow completes for the first time. In other words when the user give access to this
channel
'''
if os.path.exists('token.pickle'):
with open('token.pickle','rb') as token:
cred = pickle.load(token)
if not cred or not cred.valid:
if cred and cred.expired and cred.refresh_token:
cred.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(CREDENTIALS_FILE, SCOPES)
cred = flow.run_local_server(port=0)
with open('token.pickle', 'wb') as token:
pickle.dump(cred, token)
service = build('calendar','v3',credentials=cred)
return service
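    # Usage sketch (illustrative; not part of the original script). The returned object is a
    # standard Google Calendar v3 client, so upcoming events could be listed roughly like:
    #   service = get_calendar_service()
    #   events = service.events().list(calendarId='primary', maxResults=10,
    #                                  singleEvents=True, orderBy='startTime').execute()
    #   for event in events.get('items', []):
    #       print(event.get('summary'))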
``` |
{
"source": "Aakash-Roy/sportsanalysis",
"score": 4
} |
#### File: Aakash-Roy/sportsanalysis/sportsanalysis.py
```python
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("darkgrid")
plt.rcParams['figure.figsize'] = (14, 8)
matches = pd.read_csv('matches.csv')
def display_menu():
print("Which of the following analysis you want to see?")
print("------------------------------------------------")
print("1. Toss Winner")
print("2. Toss Decision")
print("3. Top players")
print("4. Win by runs")
print("5. Win by wickets")
print("------------------------------------------------")
def toss_winner():
sns.countplot(x='toss_winner', data=matches)
plt.show()
def toss_decision():
    sns.countplot(x='toss_decision', data=matches)
plt.show()
def top_players():
top_players = matches.player_of_match.value_counts()[:10]
fig, ax = plt.subplots()
ax.set_ylim([0,20])
ax.set_ylabel("Count")
ax.set_title("Top player of the match Winners")
#top_players.plot.bar()
sns.barplot(x = top_players.index, y = top_players, orient='v');
plt.show()
def win_by_runs():
fig, ax = plt.subplots()
ax.set_title("Winning by Runs - Team Performance")
#top_players.plot.bar()
sns.boxplot(y = 'winner', x = 'win_by_runs', data=matches[matches['win_by_runs']>0], orient = 'h'); #palette="Blues");
plt.show()
def win_by_wickets():
fig, ax = plt.subplots()
ax.set_title("Winning by Wickets - Team Performance")
#top_players.plot.bar()
sns.boxplot(y = 'winner', x = 'win_by_wickets', data=matches[matches['win_by_wickets']>0], orient = 'h'); #palette="Blues");
plt.show()
while True:
display_menu()
choice = input("Enter your choice: ")
if choice == '1':
toss_winner()
elif choice == '2':
toss_decision()
elif choice == '3':
top_players()
elif choice == '4':
win_by_runs()
elif choice == '5':
win_by_wickets()
else:
break
``` |
{
"source": "Aakash-Roy/webscraping",
"score": 3
} |
#### File: Aakash-Roy/webscraping/webscraping.py
```python
from bs4 import BeautifulSoup
import requests
def display_menu():
print("Which of the following task you want to perform?")
print("------------------------------------------------")
print("1. Webpage")
print("2. Wikipedia Page")
print("------------------------------------------------")
def webpages():
url = input("Paste a webpage url: ")
source = requests.get(url)
s = BeautifulSoup(source.text,'html')
title = s.find('title')
print("This is with HTML tags: ",title)
notags = s.find('h1')
    print("This is not with HTML tags: ", notags.text)
links = s.find('a')
print(links)
print(links['href'])
print(links['class'])
link1 = s.find_all('a')
link2 = len(link1)
print("Total links in the webpage: ",link2)
for i in link1[:6]:
print(i)
links2 = link1[1]
print(links2)
print()
print('href is: ',links2['href'])
div = links2.find('div')
print(div)
print()
h = (div['class'])
print(h)
print(type(h))
print()
print("Class name of Div is: ","".join(div['class']))
def wikipage():
url1 = input("Paste a wikipedia page url: ")
wiki = requests.get(url1)
s2 = BeautifulSoup(wiki.text,'html')
print(s2.find(('title')))
content = s2.find_all('div',class_='toc')
for i in content:
print(i.text)
'''
To perform this program, we need to have a conditional loop
'''
while True:
display_menu()
choice = input("Enter your choice: ")
if choice == '1':
webpages()
elif choice == '2':
wikipage()
else:
break
``` |
{
"source": "aakash-saboo/semi_contrast_seg_fixed",
"score": 3
} |
#### File: datasets/prepare_dataset/create_splits.py
```python
import pickle
from batchgenerators.utilities.file_and_folder_operations import subfiles
import os
import numpy as np
def create_splits(output_dir, image_dir):
npy_files = subfiles(image_dir, suffix=".npy", join=False)
trainset_size = len(npy_files)*50//100
valset_size = len(npy_files)*25//100
testset_size = len(npy_files)*25//100
splits = []
for split in range(0, 5):
image_list = npy_files.copy()
trainset = []
valset = []
testset = []
for i in range(0, trainset_size):
patient = np.random.choice(image_list)
image_list.remove(patient)
trainset.append(patient)
for i in range(0, valset_size):
patient = np.random.choice(image_list)
image_list.remove(patient)
valset.append(patient)
for i in range(0, testset_size):
patient = np.random.choice(image_list)
image_list.remove(patient)
testset.append(patient)
split_dict = dict()
split_dict['train'] = trainset
split_dict['val'] = valset
split_dict['test'] = testset
splits.append(split_dict)
with open(os.path.join(output_dir, 'splits.pkl'), 'wb') as f:
pickle.dump(splits, f)
# some dataset may include an independent test set
def create_splits_1(output_dir, image_dir, test_dir):
npy_files = subfiles(image_dir, suffix=".npy", join=False)
test_files = subfiles(test_dir, suffix=".npy", join=False)
trainset_size = len(npy_files) * 3 // 4
valset_size = len(npy_files) - trainset_size
splits = []
for split in range(0, 5):
image_list = npy_files.copy()
trainset = []
valset = []
for i in range(0, trainset_size):
patient = np.random.choice(image_list)
image_list.remove(patient)
trainset.append(patient)
for i in range(0, valset_size):
patient = np.random.choice(image_list)
image_list.remove(patient)
valset.append(patient)
split_dict = dict()
split_dict['train'] = trainset
split_dict['val'] = valset
split_dict['test'] = test_files
splits.append(split_dict)
with open(os.path.join(output_dir, 'splits.pkl'), 'wb') as f:
pickle.dump(splits, f)
if __name__ == "__main__":
root_dir = "../../data/Hippocampus"
image_dir = "../../data/Hippocampus/preprocessed"
create_splits(root_dir, image_dir)
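    # The resulting splits.pkl holds one dict per fold (5 in total); loading it back looks like:
    #   with open(os.path.join(root_dir, 'splits.pkl'), 'rb') as f:
    #       splits = pickle.load(f)
    #   splits[0]['train']  # list of .npy file names for fold 0; 'val' and 'test' likewise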
```
#### File: datasets/prepare_dataset/preprocessing.py
```python
from collections import defaultdict
from batchgenerators.augmentations.utils import resize_image_by_padding
from medpy.io import load
import os
import numpy as np
import shutil
import torch
import torch.nn.functional as F
def preprocess_data(root_dir):
image_dir = os.path.join(root_dir, 'imgs')
label_dir = os.path.join(root_dir, 'labels')
output_dir = os.path.join(root_dir, 'orig')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
        print('Created ' + output_dir + ' ...')
class_stats = defaultdict(int)
total = 0
nii_files = subfiles(image_dir, suffix=".nii.gz", join=False)
for f in nii_files:
if f.startswith("."):
os.remove(os.path.join(image_dir, f))
continue
file_dir = os.path.join(output_dir, f.split('.')[0]+'.npy')
if not os.path.exists(file_dir):
image, _ = load(os.path.join(image_dir, f))
label, _ = load(os.path.join(label_dir, f.replace('image', 'label')))
# normalize images
image = (image - image.min()) / (image.max() - image.min())
print(label.max())
print(label.min())
image = image.transpose((0, 2, 1))
label = label.transpose((0, 2, 1))
total += image.shape[2]
# image = image[:, :, 0].transpose((0, 2, 1))
"""
# modify the label for MMWHS dataset
label[label == 500] = 1
label[label == 600] = 2
label[label == 420] = 3
label[label == 550] = 4
label[label == 205] = 5
label[label == 820] = 6
label[label == 850] = 7
"""
print(image.shape, label.shape)
result = np.stack((image, label)).transpose((3, 0, 1, 2))
print(result.shape)
np.save(os.path.join(output_dir, f.split('.')[0] + '.npy'), result)
print(f)
print(total)
def reshape_2d_data(input_dir, output_dir, target_size=(64, 64)):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
        print('Created ' + output_dir + ' ...')
files_list = os.listdir(input_dir)
for f in files_list:
target_dir = os.path.join(output_dir, f)
if not os.path.exists(target_dir):
data = np.load(os.path.join(input_dir, f))
image = data[:, 0]
label = data[:, 1]
image_tensor = torch.from_numpy(image)
label_tensor = torch.from_numpy(label)
new_image = F.interpolate(image_tensor[None], size=target_size, mode="bilinear")
new_image = new_image.squeeze().cpu().numpy()
new_label = F.interpolate(label_tensor[None], size=target_size, mode="bilinear")
new_label = new_label.squeeze().cpu().numpy()
new_data = np.concatenate((new_image[:, None], new_label[:, None]), axis=1)
print(new_data.shape)
np.save(target_dir, new_data)
def reshape_three_dim_data(input_dir, output_dir):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
        print('Created ' + output_dir + ' ...')
files_list = os.listdir(input_dir)
for f in files_list:
target_dir = os.path.join(output_dir, f)
if not os.path.exists(target_dir):
data = np.load(os.path.join(input_dir, f))
image = data[:, 0]
label = data[:, 1]
image_tensor = torch.from_numpy(image)
label_tensor = torch.from_numpy(label)
new_image = F.interpolate(image_tensor[None, None], size=(160, 160), mode="bilinear")
new_image = new_image.squeeze().cpu().numpy()
new_label = F.interpolate(label_tensor[None, None], size=(160, 160), mode="bilinear")
new_label = new_label.squeeze().cpu().numpy()
new_data = np.concatenate((new_image[None], new_label[None]))
print(new_data.shape)
np.save(target_dir, new_data)
def subfiles(folder, join=True, prefix=None, suffix=None, sort=True):
if join:
l = os.path.join
else:
l = lambda x, y: y # lambda is another simplified way of defining a function
res = [l(folder, i) for i in os.listdir(folder) if os.path.isfile(os.path.join(folder, i))
and (prefix is None or i.startswith(prefix))
and (suffix is None or i.endswith(suffix))]
if sort:
res.sort()
return res
if __name__ == "__main__":
root_dir = "../../data/Hippocampus"
input_dir = "../../data/Hippocampus/orig"
target_dir = "../../data/Hippocampus/preprocessed"
preprocess_data(root_dir)
reshape_2d_data(input_dir, target_dir)
```
#### File: datasets/two_dim/data_augmentation.py
```python
import numpy as np
from batchgenerators.transforms import Compose, MirrorTransform
from batchgenerators.transforms.crop_and_pad_transforms import CenterCropTransform, RandomCropTransform
from batchgenerators.transforms.spatial_transforms import ResizeTransform, SpatialTransform
from batchgenerators.transforms.utility_transforms import NumpyToTensor
from batchgenerators.transforms.color_transforms import BrightnessTransform, GammaTransform
from batchgenerators.transforms.noise_transforms import GaussianNoiseTransform, GaussianBlurTransform
from torchvision import transforms
def get_transforms(mode="train", target_size=128):
tranform_list = []
if mode == "train":
tranform_list = [# CenterCropTransform(crop_size=target_size),
ResizeTransform(target_size=(target_size,target_size), order=1), # resize
MirrorTransform(axes=(1,)),
SpatialTransform(patch_size=(target_size, target_size), random_crop=False,
patch_center_dist_from_border=target_size // 2,
do_elastic_deform=True, alpha=(0., 1000.), sigma=(40., 60.),
do_rotation=True, p_rot_per_sample=0.5,
angle_x=(-0.1, 0.1), angle_y=(0, 1e-8), angle_z=(0, 1e-8),
scale=(0.5, 1.9), p_scale_per_sample=0.5,
border_mode_data="nearest", border_mode_seg="nearest"),
]
elif mode == "val":
tranform_list = [# CenterCropTransform(crop_size=target_size),
ResizeTransform(target_size=target_size, order=1),
]
elif mode == "test":
tranform_list = [# CenterCropTransform(crop_size=target_size),
ResizeTransform(target_size=target_size, order=1),
]
elif mode == "supcon":
tranform_list = [
BrightnessTransform(mu=1, sigma=1, p_per_sample=0.5),
GammaTransform(p_per_sample=0.5),
GaussianNoiseTransform(p_per_sample=0.5),
# SpatialTransform(patch_size=(target_size, target_size)
# do_elastic_deform=True, alpha=(0., 1000.), sigma=(40., 60.),
# do_rotation=True, p_rot_per_sample=0.5,
# angle_z=(0, 2 * np.pi),
# scale=(0.7, 1.25), p_scale_per_sample=0.5,
# border_mode_data="nearest", border_mode_seg="nearest"),
]
tranform_list.append(NumpyToTensor())
return TwoCropTransform(Compose(tranform_list))
elif mode == "simclr":
tranform_list = [
BrightnessTransform(mu=1, sigma=1, p_per_sample=0.5),
GammaTransform(p_per_sample=0.5),
GaussianNoiseTransform(p_per_sample=0.5),
SpatialTransform(patch_size=(target_size, target_size), random_crop=True,
do_elastic_deform=True, alpha=(0., 1000.), sigma=(40., 60.),
do_rotation=True, p_rot_per_sample=0.5,
angle_z=(0, 2 * np.pi),
scale=(0.7, 1.25), p_scale_per_sample=0.5,
border_mode_data="nearest", border_mode_seg="nearest"),
NumpyToTensor(),
]
return TwoCropTransform(Compose(tranform_list))
tranform_list.append(NumpyToTensor())
return Compose(tranform_list)
class TwoCropTransform:
"""Create two crops of the same image"""
def __init__(self, transform):
self.transform = transform
def __call__(self, **x):
return [self.transform(**x), self.transform(**x)]
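# Usage sketch (illustrative; follows the batchgenerators convention of passing named arrays
# such as `data` and `seg` with shape [B, C, H, W] as keyword arguments):
#   train_transform = get_transforms(mode="train", target_size=128)
#   out = train_transform(data=images, seg=segs)          # dict with the transformed arrays
#   two_views = get_transforms(mode="supcon")(data=images, seg=segs)
#   # -> list with two independently augmented versions of the same batch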
```
#### File: aakash-saboo/semi_contrast_seg_fixed/inference.py
```python
import os
import pickle
from datetime import datetime
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# from configs.Config import get_config
from configs.Config_mmwhs import get_config
from datasets.two_dim.NumpyDataLoader import NumpyDataSet
from networks.unet_con import SupConUnetInfer
from loss_functions.supcon_loss import SupConSegLoss, LocalConLoss, BlockConLoss
from loss_functions.metrics import SegmentationMetric
from util import AverageMeter
class InferenceExperiment(object):
def __init__(self, config):
self.config = config
pkl_dir = self.config.split_dir
with open(os.path.join(pkl_dir, "splits.pkl"), 'rb') as f:
splits = pickle.load(f)
self.train_keys = splits[self.config.fold]['train'][0:2]
self.val_keys = splits[self.config.fold]['val'][0:2]
self.test_data_loader = NumpyDataSet(self.config.data_dir, target_size=self.config.img_size,
batch_size=2, keys=self.train_keys, do_reshuffle=False, mode="test")
self.model = SupConUnetInfer(num_classes=self.config.num_classes)
self.criterion = SupConSegLoss(temperature=0.7)
self.criterion1 = LocalConLoss(temperature=0.7)
self.criterion2 = BlockConLoss(temperature=0.7)
if torch.cuda.device_count() > 1:
print("Let's use", torch.cuda.device_count(), "GPUs!")
# dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
self.model = nn.DataParallel(self.model)
self.device = torch.device(self.config.device if torch.cuda.is_available() else 'cpu')
self.model.to(self.device)
self.criterion.to(self.device)
self.criterion1.to(self.device)
self.criterion2.to(self.device)
# self.load_checkpoint()
self.save_folder = os.path.join(self.config.base_dir, "infer_" + self.config.name + str(datetime.now())[0:16])
if not os.path.exists(self.save_folder):
os.mkdir(self.save_folder)
def load_checkpoint(self):
if self.config.saved_model_path is None:
            print('saved_model_path is empty, please provide a checkpoint path to load.')
exit(0)
else:
state_dict = torch.load(self.config.saved_model_path)['model']
self.model.load_state_dict(state_dict, strict=False)
# self.model.load_state_dict(state_dict)
def binfer(self):
self.model.eval()
co_losses = AverageMeter()
local_co_losses = AverageMeter()
block_co_losses = AverageMeter()
metric_val = SegmentationMetric(self.config.num_classes)
metric_val.reset()
bsz = 2
with torch.no_grad():
for (i, data_batch) in enumerate(self.test_data_loader):
"""
data = data_batch['data'][0].float().to(self.device)
labels = data_batch['seg'][0].long().to(self.device)
fnames = data_batch['fnames']
slice_idx = data_batch['slice_idxs']
"""
data1 = data_batch[0]['data'][0].float()
target1 = data_batch[0]['seg'][0].long()
data2 = data_batch[1]['data'][0].float()
target2 = data_batch[1]['seg'][0].long()
data = torch.cat([data1, data2], dim=0)
labels = torch.cat([target1, target2], dim=0).squeeze(dim=1) # of shape [2B, 512, 512]
features, output = self.model(data)
output_softmax = F.softmax(output, dim=1)
pred = torch.argmax(output_softmax, dim=1)
metric_val.update(labels, output_softmax)
# self.save_data(pred, fnames, slice_idx, 'seg')
features = F.normalize(features, p=2, dim=1)
# print(features.shape, labels.shape)
f1, f2 = torch.split(features, [bsz, bsz], dim=0)
features = torch.cat([f1.unsqueeze(1), f2.unsqueeze(1)], dim=1) # [bsz, n_view, c, img_size, img_size]
l1, l2 = torch.split(labels, [bsz, bsz], dim=0)
labels = torch.cat([l1.unsqueeze(1), l2.unsqueeze(1)], dim=1)
labels = labels.cuda()
# print(features.device, labels.device)
co_loss = self.criterion(features, labels)
local_co_loss = self.criterion1(features, labels)
block_co_loss = self.criterion2(features, labels)
if co_loss == 0:
continue
co_losses.update(co_loss, bsz)
if local_co_loss == 0:
continue
local_co_losses.update(local_co_loss, bsz)
if block_co_loss == 0:
continue
block_co_losses.update(block_co_loss, bsz)
# self.save_data(features, fnames, slice_idx, 'features')
if i % 10 == 0:
_, _, Dice = metric_val.get()
print("Index:%d, mean Dice:%.4f" % (i, Dice))
print("Index:%d, mean contrastive loss:%.4f" % (i, co_losses.avg))
print("=====Inference Finished=====")
_, _, Dice = metric_val.get()
print("mean Dice:", Dice)
print("mean contrastive loss:", co_losses.avg.item())
print("mean local contrastive loss:", local_co_losses.avg.item())
print("mean block contrastive loss:", block_co_losses.avg.item())
def inference(self):
self.model.eval()
co_losses = AverageMeter()
metric_val = SegmentationMetric(self.config.num_classes)
metric_val.reset()
bsz = 4
with torch.no_grad():
for k in range(2):
key = self.val_keys[k:k+1]
data_loader = NumpyDataSet(self.config.data_dir, target_size=self.config.img_size,
batch_size=bsz, keys=key, do_reshuffle=False, mode="test")
feature_map = []
prediction = []
for (i, data_batch) in enumerate(data_loader):
data = data_batch['data'][0].float().to(self.device)
labels = data_batch['seg'][0].long().to(self.device)
slice_idx = data_batch['slice_idxs']
features, output = self.model(data)
# print(output.shape, labels.shape)
output_softmax = F.softmax(output, dim=1)
pred = torch.argmax(output_softmax, dim=1)
metric_val.update(labels.squeeze(), output_softmax)
# self.save_data(pred, fnames, slice_idx, 'seg')
features = F.normalize(features, p=2, dim=1)
for j in range(features.shape[0]):
# feature_map.append(features[j].cpu().numpy())
prediction.append(pred[j].cpu().numpy())
# print(features.shape, labels.shape)
"""
if i == 30:
print(slice_idx)
self.save_data(features.cpu().numpy(), key[0], 'features')
self.save_data(labels.cpu().numpy(), key[0], "labels")
"""
if i % 10 == 0:
_, _, Dice = metric_val.get()
print("Index:%d, mean Dice:%.4f" % (i, Dice))
# feature_map = np.stack(feature_map)
prediction = np.stack(prediction)
# self.save_data(feature_map, key, 'features')
self.save_data(prediction, key[0], 'prediction')
print("=====Inference Finished=====")
_, _, Dice = metric_val.get()
print("mean Dice:", Dice)
def save_data(self, data, key, mode):
if not os.path.exists(os.path.join(self.save_folder, mode)):
os.mkdir(os.path.join(self.save_folder, mode))
save_path = os.path.join(self.save_folder, mode + '_' + key)
np.save(save_path, data)
"""
for k in range(bsz):
slice = slice_idx[k][0].numpy()
file_name = fnames[k][0].split("preprocessed/")[1]
save_path = os.path.join(self.save_folder, mode, str(slice) + '_' + file_name)
np.save(save_path, data[k])
"""
if __name__ == "__main__":
c = get_config()
c.saved_model_path = os.path.abspath("output_experiment") + "/20210227-065712_Unet_mmwhs/" \
+ "checkpoint/" + "checkpoint_last.pth.tar"
# c.saved_model_path = os.path.abspath('save') + '/SupCon/mmwhs_models/' \
# + 'SupCon_mmwhs_adam_fold_0_lr_0.0001_decay_0.0001_bsz_4_temp_0.1_train_0.4_mlp_block_pretrained/' \
# + 'ckpt.pth'
c.fold = 0
print(c)
exp = InferenceExperiment(config=c)
exp.load_checkpoint()
exp.inference()
```
#### File: semi_contrast_seg_fixed/loss_functions/metrics.py
```python
import threading
import torch
import numpy as np
# PyTorch version
SMOOTH = 1e-5
def dice_pytorch(outputs: torch.Tensor, labels: torch.Tensor, N_class):
# You can comment out this line if you are passing tensors of equal shape
# But if you are passing output from UNet or something it will most probably
# be with the BATCH x 1 x H x W shape
outputs = outputs.squeeze().float()
labels = labels.squeeze().float()
dice = torch.ones(N_class-1).float()
# dice = torch.ones(N_class).float()
## for test
#outputs = torch.tensor([[1,1],[3,3]]).float()
#labels = torch.tensor([[0, 1], [2, 3]]).float()
for iter in range(1, N_class): ## ignore the background
# for iter in range(0, N_class):
predict_temp = torch.eq(outputs, iter)
label_temp = torch.eq(labels, iter)
intersection = predict_temp & label_temp
intersection = intersection.float().sum()
union = (predict_temp.float().sum() + label_temp.float().sum())
if intersection>0 and union>0:
dice_temp = (2*intersection)/(union)
else:
dice_temp = 0
#print(dice_temp)
dice[iter-1] = dice_temp #(intersection + SMOOTH) / (union + SMOOTH)
# dice[iter] = dice_temp
#print(dice)
return dice # Or thresholded.mean()
def iou_pytorch(outputs: torch.Tensor, labels: torch.Tensor):
# You can comment out this line if you are passing tensors of equal shape
# But if you are passing output from UNet or something it will most probably
# be with the BATCH x 1 x H x W shape
outputs = outputs.squeeze(1) # BATCH x 1 x H x W => BATCH x H x W
intersection = (outputs & labels).float().sum((1, 2)) # Will be zero if Truth=0 or Prediction=0
    union = (outputs | labels).float().sum((1, 2))         # Will be zero if both are 0
    iou = (intersection + SMOOTH) / (union + SMOOTH)        # We smooth our division to avoid 0/0
    thresholded = torch.clamp(20 * (iou - 0.5), 0, 10).ceil() / 10  # This is equal to comparing with thresholds
return thresholded # Or thresholded.mean() if you are interested in average across the batch
################ Numpy version ################
# Well, it's the same function, so I'm going to omit the comments
def iou_numpy(outputs: np.array, labels: np.array):
outputs = outputs.squeeze()
intersection = (outputs & labels).sum((1, 2))
union = (outputs | labels).sum((1, 2))
iou = (intersection + SMOOTH) / (union + SMOOTH)
thresholded = np.ceil(np.clip(20 * (iou - 0.5), 0, 10)) / 10
return thresholded # Or thresholded.mean()
# Numpy version
# Well, it's the same function, so I'm going to omit the comments
def dice_numpy(outputs: np.array, labels: np.array):
outputs = outputs.squeeze()
intersection = (outputs & labels).sum((1, 2))
union = (outputs | labels).sum((1, 2))
dice = (intersection + SMOOTH) / (union + SMOOTH)
return dice # Or thresholded.mean()
class SegmentationMetric(object):
"""Computes pixAcc and mIoU metric scroes"""
def __init__(self, nclass):
self.nclass = nclass
self.lock = threading.Lock()
self.reset()
def update(self, labels, preds):
def evaluate_worker(self, label, pred):
correct, labeled = batch_pix_accuracy(
pred, label)
inter, union = batch_intersection_union(
pred, label, self.nclass)
with self.lock:
self.total_correct += correct
self.total_label += labeled
self.total_inter += inter
self.total_union += union
return
if isinstance(preds, torch.Tensor):
evaluate_worker(self, labels, preds)
elif isinstance(preds, (list, tuple)):
threads = [threading.Thread(target=evaluate_worker,
args=(self, label, pred),
)
for (label, pred) in zip(labels, preds)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
else:
            raise NotImplementedError
def get(self, mode='mean'):
pixAcc = 1.0 * self.total_correct / (np.spacing(1) + self.total_label)
IoU = 1.0 * self.total_inter / (np.spacing(1) + self.total_union)
Dice = 2.0 * self.total_inter / (np.spacing(1) + self.total_union + self.total_inter)
if mode == 'mean':
mIoU = IoU.mean()
Dice = Dice.mean()
return pixAcc, mIoU, Dice
else:
return pixAcc, IoU, Dice
def reset(self):
self.total_inter = 0
self.total_union = 0
self.total_correct = 0
self.total_label = 0
return
def batch_pix_accuracy(output, target):
"""Batch Pixel Accuracy
Args:
predict: input 4D tensor
target: label 3D tensor
"""
# predict = torch.max(output, 1)[1]
predict = torch.argmax(output, dim=1)
# predict = output
# label: 0, 1, ..., nclass - 1
# Note: 0 is background
predict = predict.cpu().numpy().astype('int64') + 1
target = target.cpu().numpy().astype('int64') + 1
pixel_labeled = np.sum(target > 0)
pixel_correct = np.sum((predict == target)*(target > 0))
assert pixel_correct <= pixel_labeled, \
"Correct area should be smaller than Labeled"
return pixel_correct, pixel_labeled
def batch_intersection_union(output, target, nclass):  # e.g. nclass = 2 when only background and organ are distinguished
    """Batch Intersection of Union
    Args:
        predict: input 4D tensor  # the model output
        target: label 3D Tensor  # the ground-truth label
        nclass: number of categories (int)  # e.g. 2 when only background and organ are distinguished
"""
    predict = torch.max(output, dim=1)[1]  # get the predicted class map
# predict = output
mini = 1
maxi = nclass-1 #nclass = 2, maxi=1
nbins = nclass-1 #nclass = 2, nbins=1
# label is: 0, 1, 2, ..., nclass-1
# Note: 0 is background
predict = predict.cpu().numpy().astype('int64')
target = target.cpu().numpy().astype('int64')
predict = predict * (target >= 0).astype(predict.dtype)
    intersection = predict * (predict == target)  # keep only the correctly predicted pixels (per-class true positives)
# areas of intersection and union
    area_inter, _ = np.histogram(intersection, bins=nbins, range=(mini, maxi))  # per-class count of correctly predicted pixels
    area_pred, _ = np.histogram(predict, bins=nbins, range=(mini, maxi))        # per-class count of predicted pixels
    area_lab, _ = np.histogram(target, bins=nbins, range=(mini, maxi))          # per-class count of labelled pixels
    area_union = area_pred + area_lab - area_inter                              # union = predicted + labelled - intersection
assert (area_inter <= area_union).all(), \
"Intersection area should be smaller than Union area"
return area_inter, area_union
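# --- Minimal self-check (added for illustration; not part of the original module). ---
# It exercises SegmentationMetric the same way inference.py does: labels of shape [B, H, W]
# and softmax probabilities of shape [B, C, H, W]; the random tensors below are placeholders.
if __name__ == "__main__":
    metric = SegmentationMetric(nclass=4)
    probs = torch.softmax(torch.randn(2, 4, 32, 32), dim=1)  # fake per-class probabilities
    labels = torch.randint(0, 4, (2, 32, 32))                # fake integer label map
    metric.update(labels, probs)
    pixAcc, mIoU, Dice = metric.get()
    print(pixAcc, mIoU, Dice)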
```
#### File: semi_contrast_seg_fixed/networks/unet_con.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
class SupConUnet(nn.Module):
def __init__(self, num_classes, in_channels=1, initial_filter_size=64,
kernel_size=3, do_instancenorm=True, mode="cls"):
super(SupConUnet, self).__init__()
self.encoder = UNet(num_classes, in_channels, initial_filter_size, kernel_size, do_instancenorm)
if mode == 'mlp':
self.head = nn.Sequential(nn.Conv2d(initial_filter_size, 256, kernel_size=1),
nn.Conv2d(256, num_classes, kernel_size=1))
elif mode == "cls":
self.head = nn.Conv2d(initial_filter_size, num_classes, kernel_size=1)
else:
            raise NotImplementedError("This mode is not supported yet")
def forward(self, x):
y = self.encoder(x)
output = self.head(y)
# output = F.normalize(self.head(y), dim=1)
return output
class SupConUnetInfer(nn.Module):
def __init__(self, num_classes, in_channels=1, initial_filter_size=64,
kernel_size=3, do_instancenorm=True):
super(SupConUnetInfer, self).__init__()
self.encoder = UNet(num_classes, in_channels, initial_filter_size, kernel_size, do_instancenorm)
self.head = nn.Conv2d(initial_filter_size, num_classes, kernel_size=1)
def forward(self, x):
y = self.encoder(x)
output = self.head(y)
# output = F.normalize(self.head(y), dim=1)
return y, output
class UNet(nn.Module):
def __init__(self, num_classes, in_channels=1, initial_filter_size=64, kernel_size=3, do_instancenorm=True):
super().__init__()
self.contr_1_1 = self.contract(in_channels, initial_filter_size, kernel_size, instancenorm=do_instancenorm)
self.contr_1_2 = self.contract(initial_filter_size, initial_filter_size, kernel_size, instancenorm=do_instancenorm)
self.pool = nn.MaxPool2d(2, stride=2)
self.contr_2_1 = self.contract(initial_filter_size, initial_filter_size*2, kernel_size, instancenorm=do_instancenorm)
self.contr_2_2 = self.contract(initial_filter_size*2, initial_filter_size*2, kernel_size, instancenorm=do_instancenorm)
# self.pool2 = nn.MaxPool2d(2, stride=2)
self.contr_3_1 = self.contract(initial_filter_size*2, initial_filter_size*2**2, kernel_size, instancenorm=do_instancenorm)
self.contr_3_2 = self.contract(initial_filter_size*2**2, initial_filter_size*2**2, kernel_size, instancenorm=do_instancenorm)
# self.pool3 = nn.MaxPool2d(2, stride=2)
self.contr_4_1 = self.contract(initial_filter_size*2**2, initial_filter_size*2**3, kernel_size, instancenorm=do_instancenorm)
self.contr_4_2 = self.contract(initial_filter_size*2**3, initial_filter_size*2**3, kernel_size, instancenorm=do_instancenorm)
# self.pool4 = nn.MaxPool2d(2, stride=2)
self.center = nn.Sequential(
nn.Conv2d(initial_filter_size*2**3, initial_filter_size*2**4, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(initial_filter_size*2**4, initial_filter_size*2**4, 3, padding=1),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(initial_filter_size*2**4, initial_filter_size*2**3, 2, stride=2),
nn.ReLU(inplace=True),
)
self.expand_4_1 = self.expand(initial_filter_size*2**4, initial_filter_size*2**3)
self.expand_4_2 = self.expand(initial_filter_size*2**3, initial_filter_size*2**3)
self.upscale4 = nn.ConvTranspose2d(initial_filter_size*2**3, initial_filter_size*2**2, kernel_size=2, stride=2)
self.expand_3_1 = self.expand(initial_filter_size*2**3, initial_filter_size*2**2)
self.expand_3_2 = self.expand(initial_filter_size*2**2, initial_filter_size*2**2)
self.upscale3 = nn.ConvTranspose2d(initial_filter_size*2**2, initial_filter_size*2, 2, stride=2)
self.expand_2_1 = self.expand(initial_filter_size*2**2, initial_filter_size*2)
self.expand_2_2 = self.expand(initial_filter_size*2, initial_filter_size*2)
self.upscale2 = nn.ConvTranspose2d(initial_filter_size*2, initial_filter_size, 2, stride=2)
self.expand_1_1 = self.expand(initial_filter_size*2, initial_filter_size)
self.expand_1_2 = self.expand(initial_filter_size, initial_filter_size)
# Output layer for segmentation
# self.final = nn.Conv2d(initial_filter_size, num_classes, kernel_size=1) # kernel size for final layer = 1, see paper
self.softmax = torch.nn.Softmax2d()
# Output layer for "autoencoder-mode"
self.output_reconstruction_map = nn.Conv2d(initial_filter_size, out_channels=1, kernel_size=1)
@staticmethod
def contract(in_channels, out_channels, kernel_size=3, instancenorm=True):
if instancenorm:
layer = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, padding=1),
nn.InstanceNorm2d(out_channels),
nn.LeakyReLU(inplace=True))
else:
layer = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, padding=1),
nn.LeakyReLU(inplace=True))
return layer
@staticmethod
def expand(in_channels, out_channels, kernel_size=3):
layer = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, padding=1),
nn.LeakyReLU(inplace=True),
)
return layer
@staticmethod
def center_crop(layer, target_width, target_height):
batch_size, n_channels, layer_width, layer_height = layer.size()
xy1 = (layer_width - target_width) // 2
xy2 = (layer_height - target_height) // 2
return layer[:, :, xy1:(xy1 + target_width), xy2:(xy2 + target_height)]
def forward(self, x, enable_concat=True):
concat_weight = 1
if not enable_concat:
concat_weight = 0
contr_1 = self.contr_1_2(self.contr_1_1(x))
pool = self.pool(contr_1)
contr_2 = self.contr_2_2(self.contr_2_1(pool))
pool = self.pool(contr_2)
contr_3 = self.contr_3_2(self.contr_3_1(pool))
pool = self.pool(contr_3)
contr_4 = self.contr_4_2(self.contr_4_1(pool))
pool = self.pool(contr_4)
center = self.center(pool)
crop = self.center_crop(contr_4, center.size()[2], center.size()[3])
concat = torch.cat([center, crop*concat_weight], 1)
expand = self.expand_4_2(self.expand_4_1(concat))
upscale = self.upscale4(expand)
crop = self.center_crop(contr_3, upscale.size()[2], upscale.size()[3])
concat = torch.cat([upscale, crop*concat_weight], 1)
expand = self.expand_3_2(self.expand_3_1(concat))
upscale = self.upscale3(expand)
crop = self.center_crop(contr_2, upscale.size()[2], upscale.size()[3])
concat = torch.cat([upscale, crop*concat_weight], 1)
expand = self.expand_2_2(self.expand_2_1(concat))
upscale = self.upscale2(expand)
crop = self.center_crop(contr_1, upscale.size()[2], upscale.size()[3])
concat = torch.cat([upscale, crop*concat_weight], 1)
expand = self.expand_1_2(self.expand_1_1(concat))
return expand
class DownsampleUnet(nn.Module):
def __init__(self, in_channels=1, initial_filter_size=64, kernel_size=3, do_instancenorm=True):
super().__init__()
self.contr_1_1 = self.contract(in_channels, initial_filter_size, kernel_size, instancenorm=do_instancenorm)
self.contr_1_2 = self.contract(initial_filter_size, initial_filter_size, kernel_size, instancenorm=do_instancenorm)
self.pool = nn.MaxPool2d(2, stride=2)
self.contr_2_1 = self.contract(initial_filter_size, initial_filter_size*2, kernel_size, instancenorm=do_instancenorm)
self.contr_2_2 = self.contract(initial_filter_size*2, initial_filter_size*2, kernel_size, instancenorm=do_instancenorm)
# self.pool2 = nn.MaxPool2d(2, stride=2)
self.contr_3_1 = self.contract(initial_filter_size*2, initial_filter_size*2**2, kernel_size, instancenorm=do_instancenorm)
self.contr_3_2 = self.contract(initial_filter_size*2**2, initial_filter_size*2**2, kernel_size, instancenorm=do_instancenorm)
# self.pool3 = nn.MaxPool2d(2, stride=2)
self.contr_4_1 = self.contract(initial_filter_size*2**2, initial_filter_size*2**3, kernel_size, instancenorm=do_instancenorm)
self.contr_4_2 = self.contract(initial_filter_size*2**3, initial_filter_size*2**3, kernel_size, instancenorm=do_instancenorm)
# self.pool4 = nn.MaxPool2d(2, stride=2)
self.center = nn.Sequential(
nn.Conv2d(initial_filter_size*2**3, initial_filter_size*2**4, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(initial_filter_size*2**4, initial_filter_size*2**4, 3, padding=1),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(initial_filter_size*2**4, initial_filter_size*2**3, 2, stride=2),
nn.ReLU(inplace=True),
)
self.softmax = torch.nn.Softmax2d()
@staticmethod
def contract(in_channels, out_channels, kernel_size=3, instancenorm=True):
if instancenorm:
layer = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, padding=1),
nn.InstanceNorm2d(out_channels),
nn.LeakyReLU(inplace=True))
else:
layer = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, padding=1),
nn.LeakyReLU(inplace=True))
return layer
@staticmethod
def center_crop(layer, target_width, target_height):
batch_size, n_channels, layer_width, layer_height = layer.size()
xy1 = (layer_width - target_width) // 2
xy2 = (layer_height - target_height) // 2
return layer[:, :, xy1:(xy1 + target_width), xy2:(xy2 + target_height)]
def forward(self, x, enable_concat=True):
concat_weight = 1
if not enable_concat:
concat_weight = 0
contr_1 = self.contr_1_2(self.contr_1_1(x))
pool = self.pool(contr_1)
contr_2 = self.contr_2_2(self.contr_2_1(pool))
pool = self.pool(contr_2)
contr_3 = self.contr_3_2(self.contr_3_1(pool))
pool = self.pool(contr_3)
contr_4 = self.contr_4_2(self.contr_4_1(pool))
pool = self.pool(contr_4)
center = self.center(pool)
return center
class GlobalConUnet(nn.Module):
def __init__(self, in_channels=1, initial_filter_size=64):
super().__init__()
self.encoder = DownsampleUnet(in_channels, initial_filter_size)
def forward(self, x):
y = self.encoder(x)
return y
class MLP(nn.Module):
def __init__(self, input_channels=512, num_class=128):
super().__init__()
self.gap = nn.AdaptiveAvgPool2d(1)
self.f1 = nn.Linear(input_channels, input_channels)
self.f2 = nn.Linear(input_channels, num_class)
def forward(self, x):
x = self.gap(x)
y = self.f1(x.squeeze())
y = self.f2(y)
return y
class UpsampleUnet2(nn.Module):
def __init__(self, in_channels=1, initial_filter_size=64, kernel_size=3, do_instancenorm=True):
super().__init__()
self.contr_1_1 = self.contract(in_channels, initial_filter_size, kernel_size, instancenorm=do_instancenorm)
self.contr_1_2 = self.contract(initial_filter_size, initial_filter_size, kernel_size, instancenorm=do_instancenorm)
self.pool = nn.MaxPool2d(2, stride=2)
self.contr_2_1 = self.contract(initial_filter_size, initial_filter_size*2, kernel_size, instancenorm=do_instancenorm)
self.contr_2_2 = self.contract(initial_filter_size*2, initial_filter_size*2, kernel_size, instancenorm=do_instancenorm)
# self.pool2 = nn.MaxPool2d(2, stride=2)
self.contr_3_1 = self.contract(initial_filter_size*2, initial_filter_size*2**2, kernel_size, instancenorm=do_instancenorm)
self.contr_3_2 = self.contract(initial_filter_size*2**2, initial_filter_size*2**2, kernel_size, instancenorm=do_instancenorm)
# self.pool3 = nn.MaxPool2d(2, stride=2)
self.contr_4_1 = self.contract(initial_filter_size*2**2, initial_filter_size*2**3, kernel_size, instancenorm=do_instancenorm)
self.contr_4_2 = self.contract(initial_filter_size*2**3, initial_filter_size*2**3, kernel_size, instancenorm=do_instancenorm)
# self.pool4 = nn.MaxPool2d(2, stride=2)
self.center = nn.Sequential(
nn.Conv2d(initial_filter_size*2**3, initial_filter_size*2**4, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(initial_filter_size*2**4, initial_filter_size*2**4, 3, padding=1),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(initial_filter_size*2**4, initial_filter_size*2**3, 2, stride=2),
nn.ReLU(inplace=True),
)
self.expand_4_1 = self.expand(initial_filter_size * 2 ** 4, initial_filter_size * 2 ** 3)
self.expand_4_2 = self.expand(initial_filter_size * 2 ** 3, initial_filter_size * 2 ** 3)
self.upscale4 = nn.ConvTranspose2d(initial_filter_size * 2 ** 3, initial_filter_size * 2 ** 2, kernel_size=2,
stride=2)
self.expand_3_1 = self.expand(initial_filter_size * 2 ** 3, initial_filter_size * 2 ** 2)
self.expand_3_2 = self.expand(initial_filter_size * 2 ** 2, initial_filter_size * 2 ** 2)
self.upscale3 = nn.ConvTranspose2d(initial_filter_size * 2 ** 2, initial_filter_size * 2, 2, stride=2)
self.expand_2_1 = self.expand(initial_filter_size * 2 ** 2, initial_filter_size * 2)
self.expand_2_2 = self.expand(initial_filter_size * 2, initial_filter_size * 2)
self.softmax = torch.nn.Softmax2d()
@staticmethod
def contract(in_channels, out_channels, kernel_size=3, instancenorm=True):
if instancenorm:
layer = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, padding=1),
nn.InstanceNorm2d(out_channels),
nn.LeakyReLU(inplace=True))
else:
layer = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, padding=1),
nn.LeakyReLU(inplace=True))
return layer
@staticmethod
def expand(in_channels, out_channels, kernel_size=3):
layer = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, padding=1),
nn.LeakyReLU(inplace=True),
)
return layer
@staticmethod
def center_crop(layer, target_width, target_height):
batch_size, n_channels, layer_width, layer_height = layer.size()
xy1 = (layer_width - target_width) // 2
xy2 = (layer_height - target_height) // 2
return layer[:, :, xy1:(xy1 + target_width), xy2:(xy2 + target_height)]
def forward(self, x, enable_concat=True):
concat_weight = 1
if not enable_concat:
concat_weight = 0
contr_1 = self.contr_1_2(self.contr_1_1(x))
pool = self.pool(contr_1)
contr_2 = self.contr_2_2(self.contr_2_1(pool))
pool = self.pool(contr_2)
contr_3 = self.contr_3_2(self.contr_3_1(pool))
pool = self.pool(contr_3)
contr_4 = self.contr_4_2(self.contr_4_1(pool))
pool = self.pool(contr_4)
center = self.center(pool)
crop = self.center_crop(contr_4, center.size()[2], center.size()[3])
concat = torch.cat([center, crop * concat_weight], 1)
expand = self.expand_4_2(self.expand_4_1(concat))
upscale = self.upscale4(expand)
crop = self.center_crop(contr_3, upscale.size()[2], upscale.size()[3])
concat = torch.cat([upscale, crop * concat_weight], 1)
expand = self.expand_3_2(self.expand_3_1(concat))
upscale = self.upscale3(expand)
crop = self.center_crop(contr_2, upscale.size()[2], upscale.size()[3])
concat = torch.cat([upscale, crop * concat_weight], 1)
expand = self.expand_2_2(self.expand_2_1(concat))
return expand
class LocalConUnet2(nn.Module):
def __init__(self, num_classes, in_channels=1, initial_filter_size=64):
super().__init__()
self.encoder = UpsampleUnet2(in_channels, initial_filter_size)
self.head = MLP(input_channels=initial_filter_size*2, num_class=num_classes)
def forward(self, x):
y = self.encoder(x)
return y
class UpsampleUnet3(nn.Module):
def __init__(self, in_channels=1, initial_filter_size=64, kernel_size=3, do_instancenorm=True):
super().__init__()
self.contr_1_1 = self.contract(in_channels, initial_filter_size, kernel_size, instancenorm=do_instancenorm)
self.contr_1_2 = self.contract(initial_filter_size, initial_filter_size, kernel_size, instancenorm=do_instancenorm)
self.pool = nn.MaxPool2d(2, stride=2)
self.contr_2_1 = self.contract(initial_filter_size, initial_filter_size*2, kernel_size, instancenorm=do_instancenorm)
self.contr_2_2 = self.contract(initial_filter_size*2, initial_filter_size*2, kernel_size, instancenorm=do_instancenorm)
# self.pool2 = nn.MaxPool2d(2, stride=2)
self.contr_3_1 = self.contract(initial_filter_size*2, initial_filter_size*2**2, kernel_size, instancenorm=do_instancenorm)
self.contr_3_2 = self.contract(initial_filter_size*2**2, initial_filter_size*2**2, kernel_size, instancenorm=do_instancenorm)
# self.pool3 = nn.MaxPool2d(2, stride=2)
self.contr_4_1 = self.contract(initial_filter_size*2**2, initial_filter_size*2**3, kernel_size, instancenorm=do_instancenorm)
self.contr_4_2 = self.contract(initial_filter_size*2**3, initial_filter_size*2**3, kernel_size, instancenorm=do_instancenorm)
# self.pool4 = nn.MaxPool2d(2, stride=2)
self.center = nn.Sequential(
nn.Conv2d(initial_filter_size*2**3, initial_filter_size*2**4, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(initial_filter_size*2**4, initial_filter_size*2**4, 3, padding=1),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(initial_filter_size*2**4, initial_filter_size*2**3, 2, stride=2),
nn.ReLU(inplace=True),
)
self.expand_4_1 = self.expand(initial_filter_size * 2 ** 4, initial_filter_size * 2 ** 3)
self.expand_4_2 = self.expand(initial_filter_size * 2 ** 3, initial_filter_size * 2 ** 3)
self.upscale4 = nn.ConvTranspose2d(initial_filter_size * 2 ** 3, initial_filter_size * 2 ** 2, kernel_size=2,
stride=2)
self.expand_3_1 = self.expand(initial_filter_size * 2 ** 3, initial_filter_size * 2 ** 2)
self.expand_3_2 = self.expand(initial_filter_size * 2 ** 2, initial_filter_size * 2 ** 2)
self.softmax = torch.nn.Softmax2d()
@staticmethod
def contract(in_channels, out_channels, kernel_size=3, instancenorm=True):
if instancenorm:
layer = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, padding=1),
nn.InstanceNorm2d(out_channels),
nn.LeakyReLU(inplace=True))
else:
layer = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, padding=1),
nn.LeakyReLU(inplace=True))
return layer
@staticmethod
def expand(in_channels, out_channels, kernel_size=3):
layer = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, padding=1),
nn.LeakyReLU(inplace=True),
)
return layer
@staticmethod
def center_crop(layer, target_width, target_height):
batch_size, n_channels, layer_width, layer_height = layer.size()
xy1 = (layer_width - target_width) // 2
xy2 = (layer_height - target_height) // 2
return layer[:, :, xy1:(xy1 + target_width), xy2:(xy2 + target_height)]
def forward(self, x, enable_concat=True):
concat_weight = 1
if not enable_concat:
concat_weight = 0
contr_1 = self.contr_1_2(self.contr_1_1(x))
pool = self.pool(contr_1)
contr_2 = self.contr_2_2(self.contr_2_1(pool))
pool = self.pool(contr_2)
contr_3 = self.contr_3_2(self.contr_3_1(pool))
pool = self.pool(contr_3)
contr_4 = self.contr_4_2(self.contr_4_1(pool))
pool = self.pool(contr_4)
center = self.center(pool)
crop = self.center_crop(contr_4, center.size()[2], center.size()[3])
concat = torch.cat([center, crop * concat_weight], 1)
expand = self.expand_4_2(self.expand_4_1(concat))
upscale = self.upscale4(expand)
crop = self.center_crop(contr_3, upscale.size()[2], upscale.size()[3])
concat = torch.cat([upscale, crop * concat_weight], 1)
expand = self.expand_3_2(self.expand_3_1(concat))
return expand
class LocalConUnet3(nn.Module):
def __init__(self, num_classes, in_channels=1, initial_filter_size=64):
super().__init__()
self.encoder = UpsampleUnet3(in_channels, initial_filter_size)
self.head = MLP(input_channels=initial_filter_size*2*2, num_class=num_classes)
def forward(self, x):
y = self.encoder(x)
return y
```
#### File: semi_contrast_seg_fixed/networks/UNET.py
```python
import torch
import torch.nn as nn
class UNet(nn.Module):
def __init__(self, num_classes, in_channels=1, initial_filter_size=64, kernel_size=3, do_instancenorm=True):
super().__init__()
self.contr_1_1 = self.contract(in_channels, initial_filter_size, kernel_size, instancenorm=do_instancenorm)
self.contr_1_2 = self.contract(initial_filter_size, initial_filter_size, kernel_size, instancenorm=do_instancenorm)
self.pool = nn.MaxPool2d(2, stride=2)
self.contr_2_1 = self.contract(initial_filter_size, initial_filter_size*2, kernel_size, instancenorm=do_instancenorm)
self.contr_2_2 = self.contract(initial_filter_size*2, initial_filter_size*2, kernel_size, instancenorm=do_instancenorm)
# self.pool2 = nn.MaxPool2d(2, stride=2)
self.contr_3_1 = self.contract(initial_filter_size*2, initial_filter_size*2**2, kernel_size, instancenorm=do_instancenorm)
self.contr_3_2 = self.contract(initial_filter_size*2**2, initial_filter_size*2**2, kernel_size, instancenorm=do_instancenorm)
# self.pool3 = nn.MaxPool2d(2, stride=2)
self.contr_4_1 = self.contract(initial_filter_size*2**2, initial_filter_size*2**3, kernel_size, instancenorm=do_instancenorm)
self.contr_4_2 = self.contract(initial_filter_size*2**3, initial_filter_size*2**3, kernel_size, instancenorm=do_instancenorm)
# self.pool4 = nn.MaxPool2d(2, stride=2)
self.center = nn.Sequential(
nn.Conv2d(initial_filter_size*2**3, initial_filter_size*2**4, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(initial_filter_size*2**4, initial_filter_size*2**4, 3, padding=1),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(initial_filter_size*2**4, initial_filter_size*2**3, 2, stride=2),
nn.ReLU(inplace=True),
)
self.expand_4_1 = self.expand(initial_filter_size*2**4, initial_filter_size*2**3)
self.expand_4_2 = self.expand(initial_filter_size*2**3, initial_filter_size*2**3)
self.upscale4 = nn.ConvTranspose2d(initial_filter_size*2**3, initial_filter_size*2**2, kernel_size=2, stride=2)
self.expand_3_1 = self.expand(initial_filter_size*2**3, initial_filter_size*2**2)
self.expand_3_2 = self.expand(initial_filter_size*2**2, initial_filter_size*2**2)
self.upscale3 = nn.ConvTranspose2d(initial_filter_size*2**2, initial_filter_size*2, 2, stride=2)
self.expand_2_1 = self.expand(initial_filter_size*2**2, initial_filter_size*2)
self.expand_2_2 = self.expand(initial_filter_size*2, initial_filter_size*2)
self.upscale2 = nn.ConvTranspose2d(initial_filter_size*2, initial_filter_size, 2, stride=2)
self.expand_1_1 = self.expand(initial_filter_size*2, initial_filter_size)
self.expand_1_2 = self.expand(initial_filter_size, initial_filter_size)
# Output layer for segmentation
self.final = nn.Conv2d(initial_filter_size, num_classes, kernel_size=1) # kernel size for final layer = 1, see paper
self.softmax = torch.nn.Softmax2d()
# Output layer for "autoencoder-mode"
self.output_reconstruction_map = nn.Conv2d(initial_filter_size, out_channels=1, kernel_size=1)
@staticmethod
def contract(in_channels, out_channels, kernel_size=3, instancenorm=True):
if instancenorm:
layer = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, padding=1),
nn.InstanceNorm2d(out_channels),
nn.LeakyReLU(inplace=True))
else:
layer = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, padding=1),
nn.LeakyReLU(inplace=True))
return layer
@staticmethod
def expand(in_channels, out_channels, kernel_size=3):
layer = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, padding=1),
nn.LeakyReLU(inplace=True),
)
return layer
@staticmethod
def center_crop(layer, target_width, target_height):
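        # Crop the larger encoder feature map to the decoder's spatial size so the
        # skip connection can be concatenated channel-wise (standard U-Net cropping).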
batch_size, n_channels, layer_width, layer_height = layer.size()
xy1 = (layer_width - target_width) // 2
xy2 = (layer_height - target_height) // 2
return layer[:, :, xy1:(xy1 + target_width), xy2:(xy2 + target_height)]
def forward(self, x, enable_concat=True, print_layer_shapes=False):
concat_weight = 1
if not enable_concat:
concat_weight = 0
contr_1 = self.contr_1_2(self.contr_1_1(x))
pool = self.pool(contr_1)
contr_2 = self.contr_2_2(self.contr_2_1(pool))
pool = self.pool(contr_2)
contr_3 = self.contr_3_2(self.contr_3_1(pool))
pool = self.pool(contr_3)
contr_4 = self.contr_4_2(self.contr_4_1(pool))
pool = self.pool(contr_4)
center = self.center(pool)
crop = self.center_crop(contr_4, center.size()[2], center.size()[3])
concat = torch.cat([center, crop*concat_weight], 1)
expand = self.expand_4_2(self.expand_4_1(concat))
upscale = self.upscale4(expand)
crop = self.center_crop(contr_3, upscale.size()[2], upscale.size()[3])
concat = torch.cat([upscale, crop*concat_weight], 1)
expand = self.expand_3_2(self.expand_3_1(concat))
upscale = self.upscale3(expand)
crop = self.center_crop(contr_2, upscale.size()[2], upscale.size()[3])
concat = torch.cat([upscale, crop*concat_weight], 1)
expand = self.expand_2_2(self.expand_2_1(concat))
upscale = self.upscale2(expand)
crop = self.center_crop(contr_1, upscale.size()[2], upscale.size()[3])
concat = torch.cat([upscale, crop*concat_weight], 1)
expand = self.expand_1_2(self.expand_1_1(concat))
        if enable_concat:
            output = self.final(expand)
        else:
            output = self.output_reconstruction_map(expand)
        return output
```
#### File: aakash-saboo/semi_contrast_seg_fixed/run_seg_pipeline.py
```python
import os
import argparse
import torch
from os.path import exists
from trixi.util import Config
from configs.Config import get_config
import configs.Config_mmwhs as config_mmwhs
from datasets.prepare_dataset.preprocessing import preprocess_data
from datasets.prepare_dataset.create_splits import create_splits
from experiments.SegExperiment import SegExperiment
from datasets.downsanpling_data import downsampling_image
import datetime
import time
import matplotlib
import matplotlib.pyplot as plt
from datasets.prepare_dataset.rearrange_dir import rearrange_dir
def parse_option():
parser = argparse.ArgumentParser("argument for run segmentation pipeline")
parser.add_argument("--dataset", type=str, default="hippo")
parser.add_argument("--train_sample", type=float, default=1)
parser.add_argument("--batch_size", type=int, default=8)
parser.add_argument("-f", "--fold", type=int, default=1)
parser.add_argument("--saved_model_path", type=str, default=None)
parser.add_argument("--freeze_model", action='store_true',
help="whether load saved model from saved_model_path")
parser.add_argument("--load_saved_model", action='store_true',
help='whether freeze encoder of the segmenter')
args = parser.parse_args()
return args
def training(config):
if not os.path.exists(os.path.join(config.split_dir, "splits.pkl")):
create_splits(output_dir=config.split_dir, image_dir=config.data_dir)
if config.saved_model_path is not None:
config.load_model = True
# config.saved_model_path = os.path.abspath('save') + '/SupCon/Hippocampus_models/' \
# + 'SupCon_Hippocampus_resnet50_lr_0.0001_decay_0.0001_bsz_1_temp_0.7_trial_0_cosine/' \
# + 'last.pth'
exp = SegExperiment(config=config, name=config.name, n_epochs=config.n_epochs,
seed=42, append_rnd_to_name=config.append_rnd_string) # visdomlogger_kwargs={"auto_start": c.start_visdom}
exp.run()
exp.run_test(setup=False)
def testing(config):
    config.do_load_checkpoint = True
    config.checkpoint_dir = config.base_dir + '/20210202-064334_Unet_mmwhs' + '/checkpoint/checkpoint_current'
exp = SegExperiment(config=config, name='unet_test', n_epochs=config.n_epochs,
seed=42, globs=globals())
exp.run_test(setup=True)
if __name__ == "__main__":
args = parse_option()
if args.dataset == "mmwhs":
c = config_mmwhs.get_config()
elif args.dataset == "hippo" or args.dataset == "Hippocampus":
c = get_config()
else:
        exit('the dataset is not supported currently')
c.fold = args.fold
c.batch_size = args.batch_size
c.train_sample = args.train_sample
    if args.load_saved_model:
        c.saved_model_path = os.path.abspath('save') + '/SupCon/mmwhs_models/' \
                             + 'SupCon_mmwhs_adam_fold_1_lr_0.0001_decay_0.0001_bsz_4_temp_0.7_train_0.4_block/' \
                             + 'ckpt.pth'
        # An explicitly passed --saved_model_path overrides the default checkpoint above
        if args.saved_model_path is not None:
            c.saved_model_path = args.saved_model_path
c.freeze = args.freeze_model
print(c)
training(config=c)
``` |
{
"source": "aakash-sharma/DS-Analyzer",
"score": 3
} |
#### File: DS-Analyzer/tool/analyze.py
```python
import sys
import os
import json
import matplotlib.pyplot as plt
import numpy as np
from collections import defaultdict
stats = defaultdict(lambda: defaultdict(dict))
stats2 = defaultdict(lambda: defaultdict(dict))
gpu_map = {
"p2.xlarge" : "gpus-1",
"p2.8xlarge" : "gpus-8",
"p2.16xlarge-io1" : "gpus-16",
"p2.16xlarge" : "gpus-16"}
instances = []
def process_json(model, gpu, json_path):
with open(json_path) as fd:
dagJson = json.load(fd)
stats[model][gpu]["TRAIN_SPEED_INGESTION"] = dagJson["SPEED_INGESTION"]
stats[model][gpu]["TRAIN_SPEED_DISK"] = dagJson["SPEED_DISK"]
stats[model][gpu]["TRAIN_SPEED_CACHED"] = dagJson["SPEED_CACHED"]
stats[model][gpu]["DISK_THR"] = dagJson["DISK_THR"]
#stats[model][gpu]["MEM_THR"] = dagJson["MEM_THR"]
stats[model][gpu]["TRAIN_TIME_DISK"] = dagJson["RUN2"]["TRAIN"]
stats[model][gpu]["TRAIN_TIME_CACHED"] = dagJson["RUN3"]["TRAIN"]
stats[model][gpu]["CPU_UTIL_DISK_PCT"] = dagJson["RUN2"]["CPU"]
stats[model][gpu]["CPU_UTIL_CACHED_PCT"] = dagJson["RUN3"]["CPU"]
stats[model][gpu]["GPU_UTIL_DISK_PCT"] = dagJson["RUN2"]["GPU_UTIL"]
stats[model][gpu]["GPU_UTIL_CACHED_PCT"] = dagJson["RUN3"]["GPU_UTIL"]
stats[model][gpu]["GPU_MEM_UTIL_DISK_PCT"] = dagJson["RUN2"]["GPU_MEM_UTIL"]
stats[model][gpu]["GPU_MEM_UTIL_CACHED_PCT"] = dagJson["RUN3"]["GPU_MEM_UTIL"]
stats[model][gpu]["PREP_STALL_TIME"] = dagJson["RUN3"]["TRAIN"] - dagJson["RUN1"]["TRAIN"]
stats[model][gpu]["FETCH_STALL_TIME"] = dagJson["RUN2"]["TRAIN"] - stats[model][gpu]["PREP_STALL_TIME"]
stats[model][gpu]["PREP_STALL_PCT"] = stats[model][gpu]["PREP_STALL_TIME"] / stats[model][gpu]["TRAIN_TIME_DISK"] * 100
stats[model][gpu]["FETCH_STALL_PCT"] = stats[model][gpu]["FETCH_STALL_TIME"] / stats[model][gpu]["TRAIN_TIME_DISK"] * 100
def process_json2(model, gpu, json_path):
with open(json_path) as fd:
dagJson = json.load(fd)
stats[model][gpu]["TRAIN_SPEED_INGESTION"] = dagJson["SPEED_INGESTION"]
stats[model][gpu]["TRAIN_SPEED_DISK"] = dagJson["SPEED_DISK"]
stats[model][gpu]["TRAIN_SPEED_CACHED"] = dagJson["SPEED_CACHED"]
stats[model][gpu]["DISK_THR"] = dagJson["DISK_THR"]
stats[model][gpu]["TRAIN_TIME_DISK"] = dagJson["RUN2"]["TRAIN"]
stats[model][gpu]["TRAIN_TIME_CACHED"] = dagJson["RUN3"]["TRAIN"]
stats[model][gpu]["TRAIN_TIME_CACHED"] = dagJson["RUN3"]["TRAIN"]
stats[model][gpu]["MEM_DISK"] = dagJson["RUN2"]["MEM"]
stats[model][gpu]["PCACHE_DISK"] = dagJson["RUN2"]["PCACHE"]
stats[model][gpu]["MEM_CACHED"] = dagJson["RUN3"]["MEM"]
stats[model][gpu]["PCACHE_CACHED"] = dagJson["RUN3"]["PCACHE"]
stats[model][gpu]["READ_WRITE_DISK"] = dagJson["RUN2"]["READ"] + dagJson["RUN2"]["WRITE"]
stats[model][gpu]["IO_WAIT_DISK"] = dagJson["RUN2"]["IO_WAIT"]
stats[model][gpu]["READ_WRITE_CACHED"] = dagJson["RUN3"]["READ"] + dagJson["RUN3"]["WRITE"]
stats[model][gpu]["IO_WAIT_CACHED"] = dagJson["RUN3"]["IO_WAIT"]
stats[model][gpu]["CPU_UTIL_DISK_PCT"] = dagJson["RUN2"]["CPU"]
stats[model][gpu]["CPU_UTIL_CACHED_PCT"] = dagJson["RUN3"]["CPU"]
stats[model][gpu]["GPU_UTIL_DISK_PCT"] = dagJson["RUN2"]["GPU_UTIL"]
stats[model][gpu]["GPU_UTIL_CACHED_PCT"] = dagJson["RUN3"]["GPU_UTIL"]
stats[model][gpu]["GPU_MEM_UTIL_DISK_PCT"] = dagJson["RUN2"]["GPU_MEM_UTIL"]
stats[model][gpu]["GPU_MEM_UTIL_CACHED_PCT"] = dagJson["RUN3"]["GPU_MEM_UTIL"]
stats[model][gpu]["CPU_UTIL_DISK_LIST"] = dagJson["RUN2"]["CPU_LIST"]
stats[model][gpu]["CPU_UTIL_CACHED_LIST"] = dagJson["RUN3"]["CPU_LIST"]
stats[model][gpu]["GPU_UTIL_DISK_LIST"] = dagJson["RUN2"]["GPU_UTIL_LIST"]
stats[model][gpu]["GPU_UTIL_CACHED_LIST"] = dagJson["RUN3"]["GPU_UTIL_LIST"]
stats[model][gpu]["GPU_MEM_UTIL_DISK_LIST"] = dagJson["RUN2"]["GPU_MEM_UTIL_LIST"]
stats[model][gpu]["GPU_MEM_UTIL_CACHED_LIST"] = dagJson["RUN3"]["GPU_MEM_UTIL_LIST"]
stats[model][gpu]["READ_WRITE_LIST_DISK"] = dagJson["RUN2"]["READ_LIST"] + dagJson["RUN2"]["WRITE_LIST"]
stats[model][gpu]["READ_WRITE_LIST_CACHED"] = dagJson["RUN3"]["READ_LIST"] + dagJson["RUN3"]["WRITE_LIST"]
stats[model][gpu]["IO_WAIT_LIST_DISK"] = dagJson["RUN2"]["IO_WAIT_LIST"]
stats[model][gpu]["IO_WAIT_LIST_CACHED"] = dagJson["RUN3"]["IO_WAIT_LIST"]
stats[model][gpu]["PREP_STALL_TIME"] = dagJson["RUN3"]["TRAIN"] - dagJson["RUN1"]["TRAIN"]
stats[model][gpu]["FETCH_STALL_TIME"] = dagJson["RUN2"]["TRAIN"] - stats[model][gpu]["PREP_STALL_TIME"]
stats[model][gpu]["PREP_STALL_PCT"] = stats[model][gpu]["PREP_STALL_TIME"] / stats[model][gpu]["TRAIN_TIME_DISK"] * 100
stats[model][gpu]["FETCH_STALL_PCT"] = stats[model][gpu]["FETCH_STALL_TIME"] / stats[model][gpu]["TRAIN_TIME_DISK"] * 100
def plotModels(instance):
fig1, axs1 = plt.subplots(2, 1)
gpu = gpu_map[instance]
X = [model for model in stats.keys()]
X_axis = np.arange(len(X))
Y_PREP_STALL_TIME = [stats[model][gpu]["PREP_STALL_TIME"] for model in X]
Y_FETCH_STALL_TIME = [stats[model][gpu]["FETCH_STALL_TIME"] for model in X]
    Y_TRAIN_TIME = [stats[model][gpu]["TRAIN_TIME_DISK"] for model in X]  # "TRAIN_TIME" is never recorded; use the disk-run training time
Y_PREP_STALL_PCT = [stats[model][gpu]["PREP_STALL_PCT"] for model in X]
Y_FETCH_STALL_PCT = [stats[model][gpu]["FETCH_STALL_PCT"] for model in X]
axs1[0].bar(X_axis-0.2, Y_TRAIN_TIME, 0.2, label = 'Train time')
axs1[0].bar(X_axis, Y_PREP_STALL_TIME, 0.2, label = 'Prep stall time')
axs1[0].bar(X_axis+0.2, Y_FETCH_STALL_TIME, 0.2, label = 'Fetch stall time')
axs1[1].bar(X_axis-0.2, Y_PREP_STALL_PCT, 0.2, label = 'Prep stall %')
axs1[1].bar(X_axis, Y_FETCH_STALL_PCT, 0.2, label = 'Fetch stall %')
axs1[0].set_xticks(X_axis)
axs1[0].set_xticklabels(X)
axs1[0].set_xlabel("Models")
axs1[0].set_ylabel("Time")
axs1[0].legend()
axs1[1].set_xticks(X_axis)
axs1[1].set_xticklabels(X)
axs1[1].set_xlabel("Models")
axs1[1].set_ylabel("Percentage")
axs1[1].legend()
fig1.suptitle("Stall analysis " + instance)
plt.show()
def compare():
models = list(stats.keys())
for instance in instances:
gpu = gpu_map[instance]
for model in models:
if gpu not in stats[model]:
del stats[model]
fig1, axs1 = plt.subplots(2, 1)
fig2, axs2 = plt.subplots(3, 1)
fig3, axs3 = plt.subplots(3, 1)
fig4, axs4 = plt.subplots(3, 1)
fig5, axs5 = plt.subplots(3, 1)
X = [model for model in stats.keys()]
X_axis = np.arange(len(X))
diff = 0
for instance in instances:
gpu = gpu_map[instance]
Y_PREP_STALL_PCT = [stats[model][gpu]["PREP_STALL_PCT"] for model in X]
Y_FETCH_STALL_PCT = [stats[model][gpu]["FETCH_STALL_PCT"] for model in X]
Y_TRAIN_TIME_DISK = [stats[model][gpu]["TRAIN_TIME_DISK"] for model in X]
Y_TRAIN_TIME_CACHED = [stats[model][gpu]["TRAIN_TIME_CACHED"] for model in X]
Y_DISK_THR = [stats[model][gpu]["DISK_THR"] for model in X]
Y_TRAIN_SPEED_INGESTION = [stats[model][gpu]["TRAIN_SPEED_INGESTION"] for model in X]
Y_TRAIN_SPEED_DISK = [stats[model][gpu]["TRAIN_SPEED_DISK"] for model in X]
Y_TRAIN_SPEED_CACHED = [stats[model][gpu]["TRAIN_SPEED_CACHED"] for model in X]
Y_CPU_UTIL_DISK_PCT = [stats[model][gpu]["CPU_UTIL_DISK_PCT"] for model in X]
Y_CPU_UTIL_CACHED_PCT = [stats[model][gpu]["CPU_UTIL_CACHED_PCT"] for model in X]
Y_GPU_UTIL_DISK_PCT = [stats[model][gpu]["GPU_UTIL_DISK_PCT"] for model in X]
Y_GPU_UTIL_CACHED_PCT = [stats[model][gpu]["GPU_UTIL_CACHED_PCT"] for model in X]
Y_GPU_MEM_UTIL_DISK_PCT = [stats[model][gpu]["GPU_MEM_UTIL_DISK_PCT"] for model in X]
Y_GPU_MEM_UTIL_CACHED_PCT = [stats[model][gpu]["GPU_MEM_UTIL_CACHED_PCT"] for model in X]
axs1[0].bar(X_axis-0.2 + diff , Y_PREP_STALL_PCT, 0.2, label = instance)
axs1[1].bar(X_axis-0.2 + diff, Y_FETCH_STALL_PCT, 0.2, label = instance)
axs2[0].bar(X_axis-0.2 + diff , Y_TRAIN_TIME_DISK, 0.2, label = instance)
axs2[1].bar(X_axis-0.2 + diff, Y_TRAIN_TIME_CACHED, 0.2, label = instance)
axs2[2].bar(X_axis-0.2 + diff, Y_DISK_THR, 0.2, label = instance)
axs3[0].bar(X_axis-0.2 + diff, Y_TRAIN_SPEED_INGESTION, 0.2, label = instance)
axs3[1].bar(X_axis-0.2 + diff , Y_TRAIN_SPEED_DISK, 0.2, label = instance)
axs3[2].bar(X_axis-0.2 + diff, Y_TRAIN_SPEED_CACHED, 0.2, label = instance)
axs4[0].bar(X_axis-0.2 + diff , Y_CPU_UTIL_DISK_PCT, 0.2, label = instance)
axs4[1].bar(X_axis-0.2 + diff , Y_GPU_UTIL_DISK_PCT, 0.2, label = instance)
axs4[2].bar(X_axis-0.2 + diff , Y_GPU_MEM_UTIL_DISK_PCT, 0.2, label = instance)
axs5[0].bar(X_axis-0.2 + diff , Y_CPU_UTIL_CACHED_PCT, 0.2, label = instance)
axs5[1].bar(X_axis-0.2 + diff , Y_GPU_UTIL_CACHED_PCT, 0.2, label = instance)
axs5[2].bar(X_axis-0.2 + diff , Y_GPU_MEM_UTIL_CACHED_PCT, 0.2, label = instance)
diff += 0.2
axs1[0].set_xticks(X_axis)
axs1[0].set_xticklabels(X)
axs1[0].set_xlabel("Models")
axs1[0].set_ylabel("Percentage")
axs1[0].set_title("Prep stall comparison")
axs1[0].legend()
axs1[1].set_xticks(X_axis)
axs1[1].set_xticklabels(X)
axs1[1].set_xlabel("Models")
axs1[1].set_ylabel("Percentage")
axs1[1].set_title("Fetch stall comparison")
axs1[1].legend()
fig1.suptitle("Stall comparison" , fontsize=20, fontweight ="bold")
fig1.savefig("stall_comparison.png")
axs2[0].set_xticks(X_axis)
axs2[0].set_xticklabels(X)
#axs2[0].set_xlabel("Models")
axs2[0].set_ylabel("Time")
axs2[0].set_title("Training time disk comparison")
axs2[0].legend()
axs2[1].set_xticks(X_axis)
axs2[1].set_xticklabels(X)
#axs2[1].set_xlabel("Models")
axs2[1].set_ylabel("Time")
axs2[1].set_title("Training time cached comparison")
axs2[1].legend()
axs2[2].set_xticks(X_axis)
axs2[2].set_xticklabels(X)
#axs2[1].set_xlabel("Models")
axs2[2].set_ylabel("Throughput")
axs2[2].set_title("Disk throughput comparison")
axs2[2].legend()
fig2.suptitle("Training time comparison" , fontsize=20, fontweight ="bold")
axs3[0].set_xticks(X_axis)
axs3[0].set_xticklabels(X)
#axs3[0].set_xlabel("Models")
axs3[0].set_ylabel("Samples/sec")
axs3[0].set_title("Training speed ingestion comparison")
axs3[0].legend()
axs3[1].set_xticks(X_axis)
axs3[1].set_xticklabels(X)
#axs3[1].set_xlabel("Models")
axs3[1].set_ylabel("Samples/sec")
axs3[1].set_title("Training speed disk comparison")
axs3[1].legend()
axs3[2].set_xticks(X_axis)
axs3[2].set_xticklabels(X)
#axs3[2].set_xlabel("Models")
axs3[2].set_ylabel("Samples/sec")
axs3[2].set_title("Training speed cached comparison")
axs3[2].legend()
fig3.suptitle("Training speed comparison", fontsize=20, fontweight ="bold")
axs4[0].set_xticks(X_axis)
axs4[0].set_xticklabels(X)
#axs4[0].set_xlabel("Models")
axs4[0].set_ylabel("Average CPU utilization")
axs4[0].set_title("CPU utilization comparison")
axs4[0].legend()
axs4[1].set_xticks(X_axis)
axs4[1].set_xticklabels(X)
#axs4[1].set_xlabel("Models")
axs4[1].set_ylabel("Average GPU utilization")
axs4[1].set_title("GPU utilization comparison")
axs4[1].legend()
axs4[2].set_xticks(X_axis)
axs4[2].set_xticklabels(X)
#axs4[2].set_xlabel("Models")
axs4[2].set_ylabel("Average GPU memory utilization")
axs4[2].set_title("GPU memory utilization comparison")
axs4[2].legend()
fig4.suptitle("CPU and GPU utilization DISK comparison", fontsize=20, fontweight ="bold")
axs5[0].set_xticks(X_axis)
axs5[0].set_xticklabels(X)
#axs5[0].set_xlabel("Models")
axs5[0].set_ylabel("Average CPU utilization")
axs5[0].set_title("CPU utilization comparison")
axs5[0].legend()
axs5[1].set_xticks(X_axis)
axs5[1].set_xticklabels(X)
#axs5[1].set_xlabel("Models")
axs5[1].set_ylabel("Average GPU utilization")
axs5[1].set_title("GPU utilization comparison")
axs5[1].legend()
axs5[2].set_xticks(X_axis)
axs5[2].set_xticklabels(X)
#axs5[2].set_xlabel("Models")
axs5[2].set_ylabel("Average GPU memory utilization")
axs5[2].set_title("GPU memory utilization comparison")
axs5[2].legend()
fig5.suptitle("CPU and GPU utilization CACHED comparison", fontsize=20, fontweight ="bold")
plt.show()
def compare_models():
models = list(stats.keys())
max_dstat_len = 0
max_nvidia_len = 0
X = ["Disk Throughput", "Train speed", "Memory", "Page cache"]
X_IO = ["Read Write", "IOWait"]
# models = ["alexnet"]
for model in models:
for instance in instances:
gpu = gpu_map[instance]
if gpu not in stats[model]:
del stats[model]
continue
max_dstat_len = max(max_dstat_len, len(stats[model][gpu]["CPU_UTIL_DISK_LIST"]))
max_dstat_len = max(max_dstat_len, len(stats[model][gpu]["CPU_UTIL_CACHED_LIST"]))
max_nvidia_len = max(max_nvidia_len, len(stats[model][gpu]["GPU_UTIL_DISK_LIST"]))
max_nvidia_len = max(max_nvidia_len, len(stats[model][gpu]["GPU_UTIL_CACHED_LIST"]))
fig1, axs1 = plt.subplots(3, 2, figsize=(30,20))
fig2, axs2 = plt.subplots(3, 2, figsize=(30,20))
X_dstat_axis = np.arange(max_dstat_len)
X_nvidia_axis = np.arange(max_nvidia_len)
X_metrics_axis = np.arange(len(X))
X_metrics_io_axis = np.arange(len(X_IO))
diff = 0
for instance in instances:
gpu = gpu_map[instance]
style = None
if instance == "p2.8xlarge":
style = 'r--'
elif instance == "p2.16xlarge":
style = 'b--'
overlapping = 0.50
Y_METRICS_DISK = []
Y_METRICS_CACHED = []
Y_METRICS_IO_DISK = []
Y_METRICS_IO_CACHED = []
print(model)
Y_METRICS_DISK.append(stats[model][gpu]["DISK_THR"])
Y_METRICS_DISK.append(stats[model][gpu]["TRAIN_SPEED_DISK"])
Y_METRICS_DISK.append(stats[model][gpu]["MEM_DISK"])
Y_METRICS_DISK.append(stats[model][gpu]["PCACHE_DISK"])
Y_METRICS_IO_DISK.append(stats[model][gpu]["READ_WRITE_DISK"])
Y_METRICS_IO_DISK.append(stats[model][gpu]["IO_WAIT_DISK"])
Y_METRICS_CACHED.append(stats[model][gpu]["DISK_THR"])
Y_METRICS_CACHED.append(stats[model][gpu]["TRAIN_SPEED_CACHED"])
Y_METRICS_CACHED.append(stats[model][gpu]["MEM_CACHED"])
Y_METRICS_CACHED.append(stats[model][gpu]["PCACHE_CACHED"])
Y_METRICS_IO_CACHED.append(stats[model][gpu]["READ_WRITE_CACHED"])
Y_METRICS_IO_CACHED.append(stats[model][gpu]["IO_WAIT_CACHED"])
Y_CPU_UTIL_DISK = stats[model][gpu]["CPU_UTIL_DISK_LIST"]
Y_CPU_UTIL_CACHED = stats[model][gpu]["CPU_UTIL_CACHED_LIST"]
Y_GPU_UTIL_DISK = stats[model][gpu]["GPU_UTIL_DISK_LIST"]
Y_GPU_UTIL_CACHED = stats[model][gpu]["GPU_UTIL_CACHED_LIST"]
Y_GPU_MEM_UTIL_DISK = stats[model][gpu]["GPU_MEM_UTIL_DISK_LIST"]
Y_GPU_MEM_UTIL_CACHED = stats[model][gpu]["GPU_MEM_UTIL_CACHED_LIST"]
Y_IO_WAIT_LIST_DISK = stats[model][gpu]["IO_WAIT_LIST_DISK"]
Y_IO_WAIT_LIST_CACHED = stats[model][gpu]["IO_WAIT_LIST_CACHED"]
if len(Y_CPU_UTIL_DISK) < max_dstat_len:
Y_CPU_UTIL_DISK.extend([0] * (max_dstat_len - len(Y_CPU_UTIL_DISK)))
if len(Y_CPU_UTIL_CACHED) < max_dstat_len:
Y_CPU_UTIL_CACHED.extend([0] * (max_dstat_len - len(Y_CPU_UTIL_CACHED)))
if len(Y_GPU_UTIL_DISK) < max_nvidia_len:
Y_GPU_UTIL_DISK.extend([0] * (max_nvidia_len - len(Y_GPU_UTIL_DISK)))
if len(Y_GPU_UTIL_CACHED) < max_nvidia_len:
Y_GPU_UTIL_CACHED.extend([0] * (max_nvidia_len - len(Y_GPU_UTIL_CACHED)))
if len(Y_GPU_MEM_UTIL_DISK) < max_nvidia_len:
Y_GPU_MEM_UTIL_DISK.extend([0] * (max_nvidia_len - len(Y_GPU_MEM_UTIL_DISK)))
if len(Y_GPU_MEM_UTIL_CACHED) < max_nvidia_len:
Y_GPU_MEM_UTIL_CACHED.extend([0] * (max_nvidia_len - len(Y_GPU_MEM_UTIL_CACHED)))
if len(Y_IO_WAIT_LIST_DISK) < max_dstat_len:
Y_IO_WAIT_LIST_DISK.extend([0] * (max_dstat_len - len(Y_IO_WAIT_LIST_DISK)))
if len(Y_IO_WAIT_LIST_CACHED) < max_dstat_len:
Y_IO_WAIT_LIST_CACHED.extend([0] * (max_dstat_len - len(Y_IO_WAIT_LIST_CACHED)))
axs1[0,0].bar(X_metrics_axis -0.2 + diff, Y_METRICS_CACHED, 0.2, label = instance)
axs1[0,1].plot(X_dstat_axis, Y_CPU_UTIL_CACHED, style, alpha=overlapping, label = instance)
axs1[1,0].plot(X_nvidia_axis, Y_GPU_UTIL_CACHED, style, alpha=overlapping, label = instance)
axs1[1,1].plot(X_nvidia_axis, Y_GPU_MEM_UTIL_CACHED, style, alpha=overlapping, label = instance)
axs1[2,0].bar(X_metrics_io_axis -0.2 + diff, Y_METRICS_IO_CACHED, 0.2, label = instance)
axs1[2,1].plot(X_dstat_axis, Y_IO_WAIT_LIST_CACHED, style, alpha=overlapping, label = instance)
axs2[0,0].bar(X_metrics_axis - 0.2 + diff, Y_METRICS_DISK, 0.2, label = instance)
axs2[0,1].plot(X_dstat_axis, Y_CPU_UTIL_DISK, style, alpha=overlapping, label = instance)
axs2[1,0].plot(X_nvidia_axis, Y_GPU_UTIL_DISK, style, alpha=overlapping, label = instance)
axs2[1,1].plot(X_nvidia_axis, Y_GPU_MEM_UTIL_DISK, style, alpha=overlapping, label = instance)
axs2[2,0].bar(X_metrics_io_axis -0.2 + diff, Y_METRICS_IO_DISK, 0.2, label = instance)
axs2[2,1].plot(X_dstat_axis, Y_IO_WAIT_LIST_DISK, style, alpha=overlapping, label = instance)
diff += 0.2
axs1[0,0].set_xticks(X_metrics_axis)
axs1[0,0].set_xticklabels(X)
axs1[0,0].set_xlabel("Metrics")
axs1[0,0].set_ylabel("Values")
axs1[0,0].set_title("Metric comparison cached")
axs1[0,0].legend()
axs1[0,1].set_xlabel("Time")
axs1[0,1].set_ylabel("Percentage")
axs1[0,1].set_title("CPU utilization comparison cached")
axs1[0,1].legend()
axs1[1,0].set_xlabel("Time")
axs1[1,0].set_ylabel("Percentage")
axs1[1,0].set_title("GPU utilization comparison cached")
axs1[1,0].legend()
axs1[1,1].set_xlabel("Time")
axs1[1,1].set_ylabel("Percentage")
axs2[1,1].set_title("GPU memory utilization comparison cached")
axs1[1,1].legend()
axs1[2,0].set_xticks(X_metrics_io_axis)
axs1[2,0].set_xticklabels(X_IO)
axs1[2,0].set_xlabel("Metrics")
axs1[2,0].set_ylabel("Values")
axs1[2,0].set_title("IO Metric comparison cached")
axs1[2,0].legend()
axs1[2,1].set_xlabel("Time")
axs1[2,1].set_ylabel("Percentage")
axs1[2,1].set_title("IO wait percentage cached")
axs1[2,1].legend()
fig1.suptitle("Cached comparison - " + model , fontsize=20, fontweight ="bold")
fig1.savefig("figures/cached_comparison - " + model)
axs2[0,0].set_xticks(X_metrics_axis)
axs2[0,0].set_xticklabels(X)
axs2[0,0].set_xlabel("Metrics")
axs2[0,0].set_ylabel("Values")
axs2[0,0].set_title("Metric comparison cached")
axs2[0,0].legend()
axs2[0,1].set_xlabel("Time")
axs2[0,1].set_ylabel("Percentage")
axs2[0,1].set_title("CPU utilization comparison cached")
axs2[0,1].legend()
axs2[1,0].set_xlabel("Time")
axs2[1,0].set_ylabel("Percentage")
axs2[1,0].set_title("GPU utilization comparison cached")
axs2[1,0].legend()
axs2[1,1].set_xlabel("Time")
axs2[1,1].set_ylabel("Percentage")
axs2[1,1].set_title("GPU memeory utilization comparison cached")
axs2[1,1].legend()
axs2[2,0].set_xticks(X_metrics_io_axis)
axs2[2,0].set_xticklabels(X_IO)
axs2[2,0].set_xlabel("Metrics")
axs2[2,0].set_ylabel("Values")
axs2[2,0].set_title("IO Metric comparison disk")
axs2[2,0].legend()
axs2[2,1].set_xlabel("Time")
axs2[2,1].set_ylabel("Percentage")
axs2[2,1].set_title("io wait percentage disk")
axs2[2,1].legend()
fig2.suptitle("Disk comparison - " + model , fontsize=20, fontweight ="bold")
fig2.savefig("figures/disk_comparison - " + model)
def main():
if len(sys.argv) <= 1:
return
result_dir = sys.argv[1]
for instance in sys.argv[2:]:
instances.append(instance)
result_path1 = result_dir + "/" + instance + "/" + "dali-gpu"
result_path2 = result_dir + "/" + instance + "/" + "dali-cpu"
for result_path in [result_path1, result_path2]:
try:
model_paths = [os.path.join(result_path, o) for o in os.listdir(result_path) if os.path.isdir(os.path.join(result_path,o))]
except:
continue
for model_path in model_paths:
model = model_path.split('/')[-1]
model_path_ = model_path + "/jobs-1"
gpu_paths = [os.path.join(model_path_, o) for o in os.listdir(model_path_) if os.path.isdir(os.path.join(model_path_,o))]
for gpu_path in gpu_paths:
gpu = gpu_path.split('/')[-1]
cpu_paths = [os.path.join(gpu_path, o) for o in os.listdir(gpu_path) if os.path.isdir(os.path.join(gpu_path,o))]
for cpu_path in cpu_paths:
json_path = cpu_path + "/MODEL.json"
json_path2 = cpu_path + "/MODEL2.json"
if not os.path.isfile(json_path):
continue
process_json(model, gpu, json_path)
process_json2(model, gpu, json_path2)
# compare()
compare_models()
if __name__ == "__main__":
main()
```
#### File: DS-Analyzer/tool/parseall.py
```python
import os
import json
import sys
def main():
if len(sys.argv) < 2:
print("Input file path")
sys.exit(1)
path = sys.argv[1]
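    # Expects a layout of <path>/<run>/<subdir>/MODEL.json and prints the recorded
    # ingestion, disk and cached training speeds for each run.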
for files in os.listdir(path):
for subdir in os.listdir(os.path.join(path, files)):
json_file = os.path.join(path, files,subdir, "MODEL.json")
data = json.load(open(json_file, 'r'))
print(files, data['SPEED_INGESTION'], data['SPEED_DISK'], data['SPEED_CACHED'])
if __name__ == "__main__":
main()
```
#### File: DS-Analyzer/tool/what_if_tool.py
```python
import sys
import subprocess
import os
import utils
from argparse import ArgumentParser, REMAINDER
from synthetic_data import get_shared_image_classification_tensors
from utils import aggregate_run1_maps, print_as_table, print_header
import multiprocessing
import json
def parse_args():
"""
Helper function parsing the command line options
@retval ArgumentParser
"""
parser = ArgumentParser(description="PyTorch data stall profiler")
# Optional arguments for the launch helper
parser.add_argument("--nnodes", type=int, default=1,
help="The number of nodes to use for distributed "
"training")
parser.add_argument("--nproc_per_node", type=int, default=1,
help="The number of processes to launch on each node, "
"for GPU training, this is recommended to be set "
"to the number of GPUs in your system so that "
"each process can be bound to a single GPU.")
# profiling
parser.add_argument('-j', '--workers', default=3, type=int, metavar='N',
help='number of data loading workers')
parser.add_argument('--epochs', default=1, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet18')
parser.add_argument('--synthetic', action='store_true',
help='Use synthetic dataset')
parser.add_argument('--data-profile', action='store_true',
help='Set profiler on')
parser.add_argument('--precreate', action='store_true')
parser.add_argument('--use_precreate', action='store_true')
parser.add_argument("--classes", default=1000, type=int)
parser.add_argument("--tensor_path", default="./train", type=str)
parser.add_argument("--num_minibatches", default=50, type=int)
parser.add_argument("--path", default="./", type=str)
parser.add_argument("--model_path", default=None, type=str)
parser.add_argument('-q', '--question', default="cache", type=str)
# rest from the training program
parser.add_argument('training_script_args', nargs=REMAINDER)
return parser.parse_args()
args = parse_args()
def analyze_cache():
print("In analyze cache")
if args.model_path is not None:
_analyze_cache(args.model_path)
else:
for job_path in args.jobs:
print("Analysis for jon {}".format(job_path))
#Must analyze for each GPU and CPU combination seperately
gpu_paths = [os.path.join(job_path, o) for o in os.listdir(job_path) if os.path.isdir(os.path.join(job_path,o))]
for gpu_path in gpu_paths:
cpu_paths = [os.path.join(gpu_path, o) for o in os.listdir(gpu_path) if os.path.isdir(os.path.join(gpu_path,o))]
for cpu_path in cpu_paths:
print(cpu_path)
model_path = cpu_path + "/MODEL.json"
_analyze_cache(model_path)
def _analyze_cache(model_path):
model = {}
with open(model_path, 'r') as mf:
model = json.load(mf)
# Have the model now. Calculate speeds for different cache sizes
max_speed = model["SPEED_INGESTION"]
cached_speed = model["SPEED_CACHED"]
disk_thr = model["DISK_THR"]
disk_bw = 525
mem_thr = model["MEM_THR"]
dataset_size = model["AVG_SAMPLE_SIZE"]
total_samples = model["TOTAL_SAMPLES"]
speed_map = {}
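    # Rough what-if model (sketch of the assumptions baked into the loop below):
    # the cached fraction of the dataset is served at the measured memory
    # throughput, the remainder is read from disk at a fixed bandwidth (disk_bw),
    # and the resulting storage throughput is converted back to samples/s via the
    # average sample size.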
for cache_percent in range(0, 100, 5):
cache_size = cache_percent/100*dataset_size
disk_fetch_size = dataset_size - cache_size
time_to_cache = cache_size/mem_thr
time_to_disk = disk_fetch_size*1024/disk_bw
total_time = time_to_cache + time_to_disk
avg_sample_size = dataset_size*1024*1024 / total_samples
#effective_store_thr = dataset_size*1024*1024/total_time
effective_store_thr = dataset_size*1024*1024/total_time/avg_sample_size
speed_map[cache_percent] = effective_store_thr
keys = speed_map.keys()
values = speed_map.values()
print("Max achievable speed = {} samples/s".format(int(cached_speed)))
for key in keys:
print("{:<5}".format(int(key)), end=" ")
print("\n")
for val in values:
print("{:<5}".format(int(val)), end = " ")
print("\n")
print("-"*100)
def question(qname):
switch = {
"cache" : analyze_cache
# Add more analysis questions
}
func = switch.get(qname, lambda: "Invalid analysis option")
func()
def main():
"""
args.path is the path to the model being analyzed.
    The expected hierarchy is:
<model>
|--jobs-<count_per_node>
|--gpus-<count_per_job>
|--cpus-<count_per_gpu>
|--MODEL.json
"""
args.jobs = [os.path.join(args.path, o) for o in os.listdir(args.path) if os.path.isdir(os.path.join(args.path,o))]
print(args.jobs)
question(args.question)
if __name__ == "__main__":
main()
``` |
{
"source": "aakash-sharma/DSS",
"score": 3
} |
#### File: DSS/events/event.py
```python
import copy
import logging
from abc import ABCMeta, abstractmethod
from enum import Enum
event_formatter = logging.Formatter('%(asctime)-15s %(levelname)-8s %(name)s CLK: %(clock)-20s %(message)s')
console = logging.StreamHandler()
console.setFormatter(event_formatter)
LOG = logging.getLogger("event")
LOG.addHandler(console)
LOG.propagate = False
EventResult = Enum("EventResult", "CONTINUE FINISHED PAUSE")
class EventLoggerAdapter(logging.LoggerAdapter):
def __init__(self, logger, event):
logging.LoggerAdapter.__init__(self, logger, extra={'event': event})
def process(self, msg, kwargs):
# Need to do it like this because the clock needs refreshing each time
self.extra['clock'] = self.extra['event'].state.simulator.clock_millis
return logging.LoggerAdapter.process(self, msg, kwargs)
class Event(object):
__metaclass__ = ABCMeta
def __init__(self, state):
self.time_millis = 0
self.state = state
self.log = EventLoggerAdapter(LOG, self)
# noinspection PyArgumentList
def __deepcopy__(self, memo):
new_event = self.__class__(copy.deepcopy(self.state, memo))
new_event.time_millis = self.time_millis
return new_event
@abstractmethod
def handle(self):
raise NotImplementedError()
```
#### File: yarn/elastic/elastic.py
```python
import copy
import logging
from abc import ABCMeta
from contextlib import contextmanager
from events.event import EventResult, LOG, event_formatter, Event
class YarnOracleSimulationEvent(Event):
__metaclass__ = ABCMeta
def __init__(self, state):
super(YarnOracleSimulationEvent, self).__init__(state)
def __deepcopy__(self, memo):
new_event = copy.copy(self)
return new_event
@staticmethod
@contextmanager
def nested_logging(name, debug_flag):
# Change logging output to include oracle simulation name for the rest of DSS
current_level = LOG.getEffectiveLevel()
LOG.setLevel(logging.DEBUG if debug_flag else current_level)
event_log = logging.getLogger("event")
for h in event_log.handlers:
h.setFormatter(logging.Formatter('%(asctime)-15s %(levelname)-8s ORACLE ' + name +
' %(name)s CLK: %(clock)-20s %(message)s'))
yarn_schedulers_log = logging.getLogger("yarn_schedulers")
yarn_schedulers_log.setLevel(logging.DEBUG if debug_flag else current_level)
console_handler = logging.StreamHandler()
console_handler.setFormatter(logging.Formatter('%(asctime)-15s %(levelname)-8s ORACLE ' + name +
' %(name)s %(message)s'))
yarn_schedulers_log.addHandler(console_handler)
yarn_schedulers_log.propagate = False
yield
# Revert logging back to normal
for h in event_log.handlers:
h.setFormatter(event_formatter)
yarn_schedulers_log.removeHandler(console_handler)
yarn_schedulers_log.propagate = True
console_handler.close()
yarn_schedulers_log.setLevel(current_level)
LOG.setLevel(current_level)
class YarnResumeSchedulingEvent(Event):
def __init__(self, state):
super(YarnResumeSchedulingEvent, self).__init__(state)
self.winner = None
self.job = None
self.node = None
def __deepcopy__(self, memo):
new_event = copy.copy(self)
return new_event
def handle(self):
self.log.info("YARN_RESUME_SCHEDULING_EVENT")
if not self.state.user_config.race_lockstep_regular:
self.state.scheduler.allow_scheduling = True
elif self.winner is not None:
self.log.info(str(self.winner))
if self.job is None and self.node is None:
# RACE_LOCKSTEP resume scheduling
self.state.scheduler.behavior = self.winner
elif self.node is None:
# RACE_JOB resume scheduling
self.state.scheduler.set_behavior(self.job, self.winner)
elif self.job is None:
# RACE_NODEG resume scheduling
self.state.scheduler.set_behavior(self.node, self.winner)
# Generate NodeHeartbeat events for all the nodes.
for node in self.state.nodes.values():
if node.next_heartbeat.handled:
node.next_heartbeat.generate_next_heartbeat()
return EventResult.CONTINUE,
```
#### File: DSS/schedulers/yarn.py
```python
import copy
import logging
import sys
from abc import ABCMeta, abstractmethod
from random import Random
from events.event import EventResult
from models.yarn.objects import YarnRunningContainer, YarnContainerType, YarnResource, YarnErrorType, YarnErrorMode
from stats.decisions import YarnSchedulerStats
from utils import PEnum, round_up
YarnSchedulerType = PEnum("YarnSchedulerType", "REGULAR GREEDY SMARTG SYMBEX RACE_LOCKSTEP RACE_CONTINUOUS " +
"RACE_JOB RACE_NODEG SRTF PEEK")
LOG = logging.getLogger("yarn_schedulers")
ERROR_RANDOM_SEED = 24381092348021
YARN_MIN_ALLOCATION_MB = 100
MEM_INCREMENT = 100
class YarnScheduler(YarnSchedulerStats):
__metaclass__ = ABCMeta
def __init__(self, state):
super(YarnScheduler, self).__init__()
self.state = state
self.running_jobs = set()
self.completed_jobs = set()
self.allocated_containers = {}
self.next_job_id = 1
self.allow_scheduling = True
# Per-job error factors for ideal-memory
self.job_random = Random(ERROR_RANDOM_SEED)
self.mem_errors = {}
# noinspection PyArgumentList
def __deepcopy__(self, memo):
new_scheduler = copy.copy(self)
memo[id(self)] = new_scheduler
new_scheduler.state = copy.deepcopy(self.state, memo)
new_scheduler.running_jobs = copy.deepcopy(self.running_jobs, memo)
new_scheduler.completed_jobs = self.completed_jobs.copy()
new_scheduler.allocated_containers = copy.deepcopy(self.allocated_containers, memo)
return new_scheduler
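    # Error injection: CONSTANT mode shifts the ideal value by the full +/- error,
    # otherwise the adjustment is drawn uniformly between the bounds implied by the
    # error type (POSITIVE -> [0, error], NEGATIVE -> [-error, 0], else symmetric).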
def compute_error_adjustment(self, ideal_value, error_perc, error_mode, error_type):
error = ideal_value * error_perc / 100
if error_mode is YarnErrorMode.CONSTANT:
error_adjustment = error if error_type is YarnErrorType.POSITIVE else -error
else:
lower_limit = 0 if error_type is YarnErrorType.POSITIVE else -error
upper_limit = 0 if error_type is YarnErrorType.NEGATIVE else error
error_adjustment = self.job_random.randint(lower_limit, upper_limit) if not isinstance(ideal_value, float) \
else self.job_random.uniform(lower_limit, upper_limit)
return error_adjustment
def compute_memory_error_adjustment(self, task):
memory_adjustment = self.compute_error_adjustment(task.ideal_resource.memory_mb,
self.state.user_config.mem_error,
self.state.user_config.mem_error_mode,
self.state.user_config.mem_error_type)
if task.ideal_resource.memory_mb + memory_adjustment < YARN_MIN_ALLOCATION_MB:
memory_adjustment = YARN_MIN_ALLOCATION_MB - task.ideal_resource.memory_mb
if task.ideal_resource.memory_mb + memory_adjustment > self.state.user_config.node_mem_mb:
memory_adjustment = self.state.user_config.node_mem_mb - task.ideal_resource.memory_mb
return memory_adjustment
def all_jobs_are_done(self):
return all(job.finished for job in self.state.jobs)
# NOTE: This method should return a tuple:
# (AllocationSuccessful, EventResultTuple)
# where AllocationResult is a bool marking if any
# containers were allocated on this node,
# and EventResultTuple is of the form:
# (EventResult, [ newly_forked_symbex_states ]
# A non-symbex call to schedule() will always return
# (EventResult.CONTINUE,) as the EventResultTuple
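    # Illustrative example (hypothetical subclass): a non-symbex scheduler that
    # allocates nothing on the node would simply
    #     return False, (EventResult.CONTINUE,)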
@abstractmethod
def schedule(self, node):
raise NotImplementedError()
def handle_job_arrived(self, job):
# Set a new application id for the job
job.yarn_id = self.next_job_id
self.next_job_id += 1
# Check that job isn't already running
if job in self.running_jobs:
LOG.error("Attempt to launch existing job: " + str(job))
raise Exception("Attempt to launch existing job")
# Adjust job start time
job.start_ms = self.state.simulator.clock_millis
self.stats_job_start_update(job.start_ms)
# Add job to set of running jobs
self.running_jobs.add(job)
# Compute memory error if needed.
if self.state.is_inside_oracle_simulation:
return
if self.state.user_config.mem_error is not None:
self.mem_errors[job.job_id] = self.compute_memory_error_adjustment(
next(task for task in job.tasks if task.type != YarnContainerType.MRAM)
)
def handle_job_completed(self, job):
# Update finishing time
job.end_ms = self.state.simulator.clock_millis
self.stats_job_end_update(job.end_ms)
# Move job to completed list.
if job not in self.running_jobs:
LOG.error("Completed job " + str(job) + " not found in running list: " + str(self.running_jobs))
raise Exception("Completed job not in running list.")
self.running_jobs.remove(job)
self.completed_jobs.add(job)
@abstractmethod
def has_pending_jobs(self):
raise NotImplementedError()
def create_container_from_task(self, node, allocated, job, task):
err_adjusted_duration = duration = task.duration
if task.penalty is not None:
mem_error_adjustment = self.mem_errors[job.job_id] if job.job_id in self.mem_errors else 0
if LOG.isEnabledFor(logging.DEBUG):
LOG.debug("YARN_REGULAR_SCHEDULER: MEM_ERR_ADJUST: {}".format(mem_error_adjustment))
resource = task.ideal_resource
if mem_error_adjustment != 0:
resource = YarnResource(task.ideal_resource.memory_mb, task.ideal_resource.vcores)
resource.memory_mb += mem_error_adjustment
resource.memory_mb = round_up(resource.memory_mb, MEM_INCREMENT)
if allocated < resource:
err_adjusted_duration = task.penalty.get_penalized_runtime(resource, allocated, task.duration)
duration = task.penalty.get_penalized_runtime(task.ideal_resource, allocated, task.duration)
# Create YarnRunningContainer from YarnPrototypeContainer
yarn_container = YarnRunningContainer(
container_id=job.get_new_container_id(),
duration=err_adjusted_duration,
resource=task.resource,
priority=task.priority,
container_type=task.type,
job=task.job,
node=node,
task=task)
yarn_container.duration_error = err_adjusted_duration - duration
return yarn_container
def handle_container_allocation(self, node, allocated, job, task, time_millis):
# Create a container from the task
yarn_container = self.create_container_from_task(node, allocated, job, task)
# Adjust job to reflect launched container
task.num_containers -= 1
if task.num_containers == 0:
# All of this job's type of tasks were processed: remove it from the task list
job.pending_tasks.remove(task)
job.running_tasks.add(yarn_container)
job.consumption += yarn_container.resource
# Mark container as scheduled
yarn_container.schedule_container(time_millis)
# Adjust node to reflect scheduled container
node.book_container(yarn_container)
# Add container to list of containers to launch
try:
self.allocated_containers[job.job_id].add(yarn_container)
except KeyError:
self.allocated_containers[job.job_id] = {yarn_container}
# Generate next AM heartbeat, if needed.
if job.am_next_heartbeat.handled:
job.am_next_heartbeat.generate_next_heartbeat()
def handle_container_finished(self, node, finished_container):
job = finished_container.job
job.consumption -= finished_container.resource
self.stats_container_end_update(finished_container)
class YarnFairScheduler(YarnScheduler):
__metaclass__ = ABCMeta
def __init__(self, state):
super(YarnFairScheduler, self).__init__(state)
self.job_queue = []
# noinspection PyArgumentList
def __deepcopy__(self, memo):
new_scheduler = YarnScheduler.__deepcopy__(self, memo)
memo[id(self)] = new_scheduler
new_scheduler.job_queue = copy.deepcopy(self.job_queue, memo)
return new_scheduler
def handle_job_arrived(self, job):
YarnScheduler.handle_job_arrived(self, job)
self.job_queue.insert(0, job)
# Sort the job queue
self.queue_down_job(0)
def handle_container_finished(self, node, finished_container):
YarnScheduler.handle_container_finished(self, node, finished_container)
# Sort the job queue
if finished_container.job.pending_tasks:
self.queue_up_job(self.job_queue.index(finished_container.job))
def has_pending_jobs(self):
return len(self.job_queue) != 0
@abstractmethod
def allocate_on_node(self, node, task):
# Returns a tuple (memory, duration)
raise NotImplementedError()
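    # The queue is kept sorted by (memory consumption, start time, job id): a job is
    # sifted toward the tail after its consumption grows (queue_down_job) and toward
    # the head after a container finishes and frees memory (queue_up_job).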
def queue_down_job(self, job_idx):
queue_len = len(self.job_queue)
while job_idx < queue_len - 1:
job = self.job_queue[job_idx]
next_job = self.job_queue[job_idx + 1]
if [job.consumption.memory_mb, job.start_ms, job.job_id] > [next_job.consumption.memory_mb,
next_job.start_ms, next_job.job_id]:
self.job_queue[job_idx], self.job_queue[job_idx + 1] = \
self.job_queue[job_idx + 1], self.job_queue[job_idx]
job_idx += 1
else:
return
def queue_up_job(self, job_idx):
while job_idx > 0:
job = self.job_queue[job_idx]
next_job = self.job_queue[job_idx - 1]
if [job.consumption.memory_mb, job.start_ms, job.job_id] < [next_job.consumption.memory_mb,
next_job.start_ms, next_job.job_id]:
self.job_queue[job_idx], self.job_queue[job_idx - 1] = \
self.job_queue[job_idx - 1], self.job_queue[job_idx]
job_idx -= 1
else:
return
# noinspection PyUnusedLocal
def adjust_for_allocation(self, node, task, queue_idx, alloc_result):
allocated_resource = alloc_result[0]
allocated_duration = alloc_result[1]
job = self.job_queue[queue_idx]
if LOG.isEnabledFor(logging.DEBUG):
LOG.debug("CLUSTER: Capacity: " + str(reduce(lambda x, y: x + y,
map(lambda x: x.available,
self.state.nodes.values())).memory_mb)
+ "MB Maximum: " + str(max(self.state.nodes.values(), key=lambda x:
x.available.memory_mb).available.memory_mb) + "MB")
LOG.debug("QUEUE: " + ", ".join(map(lambda x: "<" + x.get_name() + " " + str(x.am_launched) + " " +
str(x.consumption) + ">", self.job_queue)))
if not all(self.job_queue[i].consumption.memory_mb <= self.job_queue[i + 1].consumption.memory_mb
for i in xrange(len(self.job_queue) - 1)):
LOG.error("QUEUE SORTING ERROR")
# Adjust task, job and node properties to reflect allocation
self.handle_container_allocation(node, allocated_resource, job, task,
self.state.simulator.clock_millis)
if not job.pending_tasks:
# All of the job's containers were processed: remove it from the queue
self.job_queue.remove(job)
else:
# Sort the queue again since this job's consumption has changed
self.queue_down_job(queue_idx)
def get_job_finish_scheduling_time(self, job):
# Returns the expected time when all containers of this job are scheduled.
return sys.maxint
def get_job_finish_time(self, job):
# Returns the expected time when the job finishes.
return sys.maxint
def get_container_finish_time(self, container, memo=None):
# memo is an optional dictionary used to cache durations (e.g., for AM containers)
if memo is not None and container in memo:
return memo[container]
if container.type is not YarnContainerType.MRAM:
# NOTE: This is an approximation, since the container launch time is
# given by the AM heartbeat that triggers it.
container_finish_time = container.launched_time_millis if container.launched_time_millis != -1 else \
(container.scheduled_time_millis + container.node.hb_interval_ms)
container_finish_time += container.duration
else:
# AM containers can only be determined if the job duration is somehow known.
container_finish_time = self.get_job_finish_time(container.job)
if memo is not None:
memo[container] = container_finish_time
return container_finish_time
def schedule(self, node, queue_start=0):
# Check if scheduling is postponed
if not self.allow_scheduling:
# Return True so that node heartbeats keep getting created.
return True, (EventResult.CONTINUE,)
gap_allocation_mode = False
release_time = -1
# Check first if this node is reserved.
if node.reserved_application is not None:
# Get next task needed for the job
required_task = next(iter(node.reserved_application.pending_tasks or []), None)
# Check if the reserved application still needs this reservation.
if required_task is None or \
node.reserved_task_type != required_task.type:
if LOG.isEnabledFor(logging.DEBUG):
LOG.debug("Released node " + str(node) + " from app " + str(node.reserved_application))
self.stats_release_node(node.reserved_application.job_id, node.node_id,
self.state.simulator.clock_millis)
node.reserved_application = None
node.reserved_task_type = None
reserved_duration = self.state.simulator.clock_millis - node.reserved_start_ms
reserved_memory_mb = node.capacity.memory_mb - node.available.memory_mb
self.reserved_memory_time += reserved_duration * reserved_memory_mb
if node.available.vcores > 0:
self.reserved_usable_memory_time += reserved_duration * reserved_memory_mb
node.reserved_start_ms = 0
else:
self.stats_decisions_inc(node.reserved_application.job_id)
# Try to allocate the reserved application.
alloc_result = self.allocate_on_node(node, required_task)
if alloc_result is not None:
reserved_duration = self.state.simulator.clock_millis - node.reserved_start_ms
reserved_memory_mb = node.capacity.memory_mb - node.available.memory_mb
self.reserved_memory_time += reserved_duration * reserved_memory_mb
if node.available.vcores > 0:
self.reserved_usable_memory_time += reserved_duration * reserved_memory_mb
self.stats_accept_decisions_inc(node.reserved_application.job_id)
self.adjust_for_allocation(node, required_task, self.job_queue.index(node.reserved_application),
alloc_result)
self.stats_release_node(node.reserved_application.job_id, node.node_id,
self.state.simulator.clock_millis)
node.reserved_application = None
node.reserved_task_type = None
node.reserved_start_ms = 0
if not self.state.user_config.assign_multiple:
return True, (EventResult.CONTINUE,)
else:
if self.state.use_gaps:
# Try to see if another app in the queue can make use of the unused slot.
# Compute time required for the reserved application to be scheduled.
required_resource = required_task.resource
released_resource = YarnResource(node.available.memory_mb, node.available.vcores)
available_resource = node.available
node.available = released_resource
release_time = -1
# Look at all the scheduled and running containers on this node.
duration_memo = {}
for container in sorted(node.running_containers | node.allocated_containers,
key=lambda x: self.get_container_finish_time(x, duration_memo)):
container_finish_time = self.get_container_finish_time(container, duration_memo)
released_resource += container.resource
if container_finish_time > release_time:
release_time = container_finish_time
if release_time == sys.maxint:
break
# Check if the reserved application can now make use of this.
if self.allocate_on_node(node, required_task) is not None or \
released_resource >= required_resource:
break
node.available = available_resource
# Check if the job that has the reservation is due to finish scheduling soon.
job_release_time = self.get_job_finish_scheduling_time(node.reserved_application)
if job_release_time is not None and \
job_release_time < release_time:
release_time = job_release_time
if release_time != -1:
if LOG.isEnabledFor(logging.DEBUG):
LOG.debug("Gap for node " + node.name + " reserved by " +
node.reserved_application.get_name()
+ " of " + str(required_resource) + " available at " + str(release_time))
gap_allocation_mode = True
if not gap_allocation_mode:
self.stats_reject_decisions_inc(node.reserved_application.job_id)
return False, (EventResult.CONTINUE,)
queue_idx = queue_start
while queue_idx < len(self.job_queue):
if self.job_queue[queue_idx] == node.reserved_application:
# This can only happen if gap_allocation_mode is True.
queue_idx += 1
continue
# Iterate over jobs in order of consumption
job = self.job_queue[queue_idx]
self.stats_decisions_inc(job.job_id)
# Get next task needed for the job
task = job.pending_tasks[0]
# Check if the AM container was launched. In YARN this can't happen, since
# the AM would not send ResourceRequests for any other containers before
# the AM itself wouldn't be up and running.
# In gap-allocation mode, check that AMs can be allocated in gaps.
if not job.am_launched and \
(task.type is not YarnContainerType.MRAM or
(gap_allocation_mode and not self.state.user_config.gaps_allow_ams)):
self.stats_reject_decisions_inc(job.job_id)
queue_idx += 1
continue
# Check if the task fits on the current node
# In gap-allocation mode, also check that the task would finish in time.
alloc_result = self.allocate_on_node(node, task)
if alloc_result is not None and \
(gap_allocation_mode is False or
self.state.simulator.clock_millis +
alloc_result[1] + node.hb_interval_ms <= release_time):
self.stats_accept_decisions_inc(job.job_id)
self.adjust_for_allocation(node, task, queue_idx, alloc_result)
if not self.state.user_config.assign_multiple:
return True, (EventResult.CONTINUE,)
else:
# Reset the queue_idx to take first job in order of consumption
queue_idx = 0
elif self.state.user_config.use_reservations and not gap_allocation_mode:
# Reserve this node and finish.
self.stats_reserve_decisions_inc(job.job_id)
if LOG.isEnabledFor(logging.DEBUG):
LOG.debug("Reserving node " + str(node) + " by app " + str(job))
node.reserved_application = job
node.reserved_task_type = task.type
node.reserved_start_ms = self.state.simulator.clock_millis
self.stats_reserve_node(job.job_id, node.node_id, self.state.simulator.clock_millis)
break
else:
self.stats_reject_decisions_inc(job.job_id)
queue_idx += 1
return bool(node.allocated_containers), (EventResult.CONTINUE,)
class YarnRegularScheduler(YarnFairScheduler):
def __init__(self, state):
super(YarnRegularScheduler, self).__init__(state)
def create_container_from_task(self, node, allocated, job, task):
yarn_container = super(YarnRegularScheduler, self).create_container_from_task(node, allocated, job, task)
if LOG.isEnabledFor(logging.DEBUG):
LOG.debug("YARN_REGULAR_SCHEDULER: " + str(self.state.simulator.clock_millis) + " Allocated " +
str(task.resource.memory_mb) + " regularly for " +
task.job.get_name() + ":" +
str(task.job.next_container_id) + " with a duration of: " +
str(task.duration))
return yarn_container
def allocate_on_node(self, node, task):
if task.resource <= node.available:
if LOG.isEnabledFor(logging.DEBUG):
LOG.debug("YARN_REGULAR_SCHEDULER: REGULAR possible with " +
str(task.resource.memory_mb) + " for " +
task.job.get_name() + ":" +
str(task.job.next_container_id) + " with a duration of: " +
str(task.duration))
return task.resource, task.duration
return None
```
#### File: aakash-sharma/DSS/simulator.py
```python
import logging
import time
from events.event import EventResult
from utils import PQueue
LOG = logging.getLogger("simulator")
class Simulator(object):
def __init__(self):
self.queue = PQueue()
self.clock_millis = 0
self.start_real_clock_seconds = 0
self.finish_real_clock_seconds = 0
def __deepcopy__(self, memo):
new_simulator = Simulator()
memo[id(self)] = new_simulator
new_simulator.queue = self.queue.__deepcopy__(memo)
new_simulator.clock_millis = self.clock_millis
return new_simulator
@property
def duration_seconds(self):
if self.start_real_clock_seconds == 0:
LOG.error("Simulation has not started yet, cannot compute duration!")
return 0
if self.finish_real_clock_seconds == 0:
LOG.error("Simulation has not finished yet, cannot compute duration!")
return 0
return self.finish_real_clock_seconds - self.start_real_clock_seconds
def add_event(self, event):
self.queue.push(event, event.time_millis)
def run(self):
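        # Discrete-event loop: pop events in timestamp order, advance the simulated
        # clock, dispatch each handler and stop on a FINISHED or PAUSE result.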
self.start_real_clock_seconds = time.clock()
try:
while not self.queue.empty():
queue_el = self.queue.pop()
new_clock = queue_el[0]
event = queue_el[1]
if new_clock > self.clock_millis:
# Update the internal clock
self.clock_millis = new_clock
elif new_clock < self.clock_millis and new_clock != 0:
LOG.warn("Parsing event in the past: " + str(new_clock))
if event is None:
continue
# Run event callback
# NOTE: All event handlers should return a tuple (EVENT_RESULT, optional_info) with the first element
# being the handling result and whether the simulation should continue, and additional information.
#
# This is used, for example, to pass information from the simulation to the simulation runner.
event_return = event.handle()
if event_return[0] is EventResult.FINISHED or event_return[0] is EventResult.PAUSE:
return event_return
LOG.warn("Reached queue end without receiving a FINISHED event.")
return EventResult.FINISHED,
finally:
self.finish_real_clock_seconds = time.perf_counter()
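# --- Illustrative usage sketch; not part of the original module. ---
# The fake event below exists only to show the push/pop/handle cycle of run();
# real events come from the events package and carry their own payloads.
if __name__ == "__main__":
    class _DemoFinishEvent(object):
        def __init__(self, time_millis):
            self.time_millis = time_millis
        def handle(self):
            # Returning FINISHED makes Simulator.run() stop immediately.
            return (EventResult.FINISHED,)

    sim = Simulator()
    sim.add_event(_DemoFinishEvent(1000))
    print(sim.run())             # (EventResult.FINISHED,)
    print(sim.clock_millis)      # 1000
    print(sim.duration_seconds)  # wall-clock seconds spent inside run()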
``` |
{
"source": "aakash-sharma/training_on_a_dime",
"score": 3
} |
#### File: scripts/aws/analyze.py
```python
from datetime import datetime
import json
import matplotlib.pyplot as plt
from availability import instance_types
TL = []
all_y_axis = {}
lifespan_buckets = {}
def get_timeline(logs2):
for ts in sorted(logs2.keys()):
TL.append(ts)
def instances_over_time(logs, logs2):
for instance in instance_types:
all_y_axis[instance] = [0] * len(TL)
i = 1
for ts in sorted(logs2.keys()):
instance_id = logs2[ts][0]
if instance_id is None:
continue
curr_instance = (logs[instance_id][0][0], logs[instance_id][0][1])
for instance in all_y_axis:
y_axis = all_y_axis[instance]
if instance == curr_instance:
y_axis[i] = y_axis[i-1] + logs2[ts][1]
else:
y_axis[i] = y_axis[i-1]
i += 1
for instance in all_y_axis:
y_axis = all_y_axis[instance]
for i in range(len(y_axis)):
y_axis[i] = max(0, y_axis[i])
print(all_y_axis)
def avg_lifespan(logs):
for instance in instance_types:
lifespan_buckets[instance] = []
end_time = datetime.strptime(TL[-1], '%Y-%m-%dT%H:%M:%S.000Z')
print(end_time)
for instance_id in logs:
instance = (logs[instance_id][0][0], logs[instance_id][0][1])
st_time = datetime.strptime(logs[instance_id][1], '%Y-%m-%dT%H:%M:%S.000Z')
lifespan = 0
if logs[instance_id][2] != -1:
lifespan = datetime.strptime(logs[instance_id][2], '%Y-%m-%dT%H:%M:%S.000Z') - st_time
else:
lifespan = end_time - st_time
if lifespan.total_seconds() < 0:
print(logs[instance_id])
lifespan_buckets[instance].append(lifespan.total_seconds())
print(lifespan_buckets)
def plot():
#fig1, axs1 = plt.plot()
instance = ("k80", 1)
y_axis = all_y_axis[instance]
plt.plot([i for i in range(len(TL))], y_axis)
plt.title("Spot instance (p2.xlarge) availability")
plt.xlabel('Normalized time interval')
plt.ylabel('Availability')
plt.show()
y_axis = lifespan_buckets[instance]
y_axis = [lifespan for lifespan in y_axis if lifespan > 0]
x_axis = [i for i in range(len(y_axis))]
plt.plot(x_axis, y_axis)
plt.title("Instance lifespan")
plt.ylabel('Seconds')
plt.xlabel('Normalized instance Id')
plt.show()
def main():
with open('dict_snapshot', 'r') as dict_snapshot:
logs = json.load(dict_snapshot)
with open('dict_snapshot2', 'r') as dict_snapshot2:
logs2 = json.load(dict_snapshot2)
get_timeline(logs2)
instances_over_time(logs, logs2)
avg_lifespan(logs)
plot()
if __name__ == '__main__':
main()
```
#### File: scripts/data_transfer/data_transfer_costs.py
```python
def aws(data_size, intra_region, src_continent, dst_internet):
# https://aws.amazon.com/ec2/pricing/on-demand/
if data_size <= 0.0:
return 0.0
data_transfer_cost = 0.0
if intra_region:
data_transfer_cost = 0.01 * data_size
elif dst_internet:
if src_continent == 'north_america' or src_continent == 'europe':
if data_size > 1:
data_transfer_cost += .09 * min(data_size - 1,
10000 - 1)
if data_size > 10000:
data_transfer_cost += .085 * min(data_size - 10000,
50000 - 10000)
if data_size > 50000:
data_transfer_cost += .07 * min(data_size - 50000,
150000 - 50000)
if data_size > 150000:
data_transfer_cost += .05 * min(data_size - 150000,
500000 - 150000)
elif src_continent == 'asia':
if data_size > 1:
data_transfer_cost += .114 * min(data_size - 1,
10000 - 1)
if data_size > 10000:
data_transfer_cost += .089 * min(data_size - 10000,
50000 - 10000)
if data_size > 50000:
data_transfer_cost += .086 * min(data_size - 50000,
150000 - 50000)
if data_size > 150000:
data_transfer_cost += .084 * min(data_size - 150000,
500000 - 150000)
elif src_continent == 'south_america':
if data_size > 1:
data_transfer_cost += .25 * min(data_size - 1,
10000 - 1)
if data_size > 10000:
data_transfer_cost += .23 * min(data_size - 10000,
50000 - 10000)
if data_size > 50000:
data_transfer_cost += .21 * min(data_size - 50000,
150000 - 50000)
if data_size > 150000:
data_transfer_cost += .19 * min(data_size - 150000,
500000 - 150000)
else:
if src_continent == 'north_america' or src_continent == 'europe':
data_transfer_cost = 0.02 * data_size
elif src_continent == 'asia':
data_transfer_cost = 0.08 * data_size
elif src_continent == 'south_america':
data_transfer_cost = 0.16 * data_size
return data_transfer_cost
def azure(data_size, intra_region, src_continent, dst_internet):
# https://azure.microsoft.com/en-us/pricing/details/bandwidth/
if data_size <= 0.0:
return 0.0
data_transfer_cost = 0.0
if not intra_region:
if src_continent == 'north_america' or src_continent == 'europe':
if data_size > 5:
data_transfer_cost += .087 * min(data_size - 5,
10000 - 5)
if data_size > 10000:
data_transfer_cost += .083 * min(data_size - 10000,
50000 - 10000)
if data_size > 50000:
data_transfer_cost += .07 * min(data_size - 50000,
150000 - 50000)
if data_size > 150000:
data_transfer_cost += .05 * min(data_size - 150000,
500000 - 150000)
elif src_continent == 'asia':
if data_size > 5:
data_transfer_cost += .12 * min(data_size - 5,
10000 - 5)
if data_size > 10000:
data_transfer_cost += .085 * min(data_size - 10000,
50000 - 10000)
if data_size > 50000:
data_transfer_cost += .082 * min(data_size - 50000,
150000 - 50000)
if data_size > 150000:
data_transfer_cost += .08 * min(data_size - 150000,
500000 - 150000)
elif src_continent == 'south_america':
if data_size > 5:
data_transfer_cost += .181 * min(data_size - 5,
10000 - 5)
if data_size > 10000:
data_transfer_cost += .175 * min(data_size - 10000,
50000 - 10000)
if data_size > 50000:
data_transfer_cost += .17 * min(data_size - 50000,
150000 - 50000)
if data_size > 150000:
data_transfer_cost += .16 * min(data_size - 150000,
500000 - 150000)
return data_transfer_cost
def gcp(data_size, intra_region, src_continent, dst_internet):
# https://cloud.google.com/compute/network-pricing
if data_size <= 0.0:
return 0.0
data_transfer_cost = 0.0
if not intra_region:
if dst_internet:
if src_continent == 'asia':
data_transfer_cost += min(data_size * 0.147, 1000 * 0.147)
if data_size > 1000:
data_transfer_cost += .147 * min(data_size - 1000,
10000 - 1000)
if data_size > 10000:
data_transfer_cost += .134 * (data_size - 10000)
else:
data_transfer_cost += min(data_size * 0.12, 1000 * 0.12)
if data_size > 1000:
data_transfer_cost += .11 * min(data_size - 1000,
10000 - 1000)
if data_size > 10000:
data_transfer_cost += .08 * (data_size - 10000)
elif src_continent == 'north_america':
data_transfer_cost = 0.01 * data_size
elif src_continent == 'europe':
data_transfer_cost = 0.02 * data_size
elif src_continent == 'asia':
data_transfer_cost = 0.05 * data_size
elif src_continent == 'south_america':
data_transfer_cost = 0.08 * data_size
return data_transfer_cost
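# --- Illustrative usage sketch; not part of the original script. ---
# Compare the estimated cost of moving 500 GB from a North American region to
# the public internet across the three providers. data_size is in GB and the
# tier tables above are interpreted as USD per GB.
if __name__ == "__main__":
    size_gb = 500
    for name, pricer in (("aws", aws), ("azure", azure), ("gcp", gcp)):
        cost = pricer(size_gb, intra_region=False, src_continent="north_america", dst_internet=True)
        print("%s: $%.2f for %d GB of egress" % (name, cost, size_gb))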
``` |
{
"source": "aakash-sheth/django-rest-api",
"score": 3
} |
#### File: app/core/models.py
```python
from django.db import models
from django.contrib.auth.models import AbstractBaseUser,\
BaseUserManager, PermissionsMixin
from django.conf import settings
# Create your models here
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
"""Create and save a new user"""
if not email:
raise ValueError('Users must have an email address')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
"""Create and save a new superuser"""
user = self.create_user(email, password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
"""Custom user model that support using email instead of username"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
class GrowthRateByAgeEducation(models.Model):
"""Table for Growth Rate by Age and Education"""
updated_date = models.DateField(auto_now=True)
age = models.IntegerField(unique=True)
dropout = models.DecimalField(max_digits=6, decimal_places=3)
diploma = models.DecimalField(max_digits=6, decimal_places=3)
some_college = models.DecimalField(max_digits=6, decimal_places=3)
associates = models.DecimalField(max_digits=6, decimal_places=3)
license = models.DecimalField(max_digits=6, decimal_places=3)
bachelors = models.DecimalField(max_digits=6, decimal_places=3)
masters = models.DecimalField(max_digits=6, decimal_places=3)
mba = models.DecimalField(max_digits=6, decimal_places=3)
attorney = models.DecimalField(max_digits=6, decimal_places=3)
doctorate = models.DecimalField(max_digits=6, decimal_places=3)
professional = models.DecimalField(max_digits=6, decimal_places=3)
def __str__(self):
return str(self.age)
class Pricing(models.Model):
"""Table for ISA pricing"""
updated_date = models.DateField(auto_now=True)
term = models.IntegerField(unique=True)
interest_rate = models.DecimalField(max_digits=6, decimal_places=4)
min_cagr = models.DecimalField(max_digits=5, decimal_places=3)
targeted_cagr = models.DecimalField(max_digits=5, decimal_places=3)
max_cagr = models.DecimalField(max_digits=5, decimal_places=3)
payment_cap_factor = models.DecimalField(max_digits=5, decimal_places=3)
prepayment_fv = models.DecimalField(max_digits=5, decimal_places=3)
prepayment_growth = models.DecimalField(max_digits=5, decimal_places=3)
def __str__(self):
return str(self.term)
class UnemploymentByIndustry(models.Model):
"""Table of Mean and Median Unemployment Duration in Weeks by Industry (NAICS) """
updated_date = models.DateField(auto_now=True)
industry_id = models.IntegerField()
industry = models.CharField(max_length=255)
mean_duration = models.DecimalField(max_digits=4, decimal_places=1)
median_duration = models.DecimalField(max_digits=4, decimal_places=1)
def __str__(self):
return self.industry
class UnemploymentByOccupation(models.Model):
"""Table of Mean and Median Unemployment Duration in Weeks by Occupation (NAICS) """
updated_date = models.DateField(auto_now=True)
occupation_id = models.IntegerField()
occupation = models.CharField(max_length=255)
occupation_type = models.CharField(max_length=255)
mean_duration = models.DecimalField(max_digits=4, decimal_places=1)
median_duration = models.DecimalField(max_digits=4, decimal_places=1)
def __str__(self):
return self.occupation
class UnemploymentByAgeGroup(models.Model):
"""Table of Mean and Median Unemployment Duration in Weeks by Age Group """
updated_date = models.DateField(auto_now=True)
age_group = models.CharField(max_length=255)
age_min = models.IntegerField()
age_max = models.IntegerField()
mean_duration = models.DecimalField(max_digits=4, decimal_places=1)
median_duration = models.DecimalField(max_digits=4, decimal_places=1)
def __str__(self):
return self.age_group
class EmploymentDurationByAgeGroup(models.Model):
"""Table of Mean and Median Employment Duration in Months by Age Group """
updated_date = models.DateField(auto_now=True)
age_group = models.CharField(max_length=255)
age_min = models.IntegerField()
age_max = models.IntegerField()
duration = models.DecimalField(max_digits=4, decimal_places=1)
def __str__(self):
return self.age_group
class HikesByEducation(models.Model):
"""Table of average % hike by Education based on what farrukh thinks"""
degree = models.CharField(max_length=255)
updated_date = models.DateField(auto_now=True)
hike = models.DecimalField(max_digits=6, decimal_places=3)
def __str__(self):
return str(self.degree)
class PraisParameterCap(models.Model):
"""Table of Prais limit paramters for upper and lower protection"""
updated_date = models.DateField(auto_now=True)
isa_processing_fee = models.DecimalField(max_digits=6, decimal_places=3)
isa_servicing_fee = models.DecimalField(max_digits=6, decimal_places=3)
isa_sales_charge = models.DecimalField(max_digits=6, decimal_places=3)
minimum_self_equity_perc = models.DecimalField(max_digits=6, decimal_places=3)
max_minimum_self_equity = models.DecimalField(max_digits=7, decimal_places=2)
annual_lower_income = models.DecimalField(max_digits=8, decimal_places=2)
isa_processing_fee_cap = models.DecimalField(max_digits=8, decimal_places=2)
buyout_servicing_fee = models.DecimalField(max_digits=6, decimal_places=2)
isp_age_factor = models.DecimalField(max_digits=4, decimal_places=2)
isa_maximum_value = models.DecimalField(max_digits=4, decimal_places=2)
max_age_for_quote = models.IntegerField()
min_age_for_quote = models.IntegerField()
def __str__(self):
return str(self.updated_date)
#
# class Quote(models.Model):
# """Quotes Calculated"""
# user = models.ForeignKey(
# settings.AUTH_USER_MODEL
# )
# customer_id = models.CharField(ma)
```
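A hypothetical snippet showing how the custom user manager and one of the rate tables above are typically exercised from a Django shell or test; it assumes the `core` app is installed, `AUTH_USER_MODEL` points at `core.User`, and migrations have been applied.
```python
# Hypothetical Django shell/test usage; all values are examples only.
from core.models import User, Pricing

user = User.objects.create_user(email="demo@example.com", password="change-me", name="Demo")
admin = User.objects.create_superuser(email="admin@example.com", password="change-me")

Pricing.objects.create(
    term=5,
    interest_rate="0.0725",
    min_cagr="0.020",
    targeted_cagr="0.045",
    max_cagr="0.080",
    payment_cap_factor="1.500",
    prepayment_fv="1.100",
    prepayment_growth="0.050",
)
print(User.objects.count(), Pricing.objects.filter(term=5).exists())
```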
#### File: app/quote/views.py
```python
from rest_framework.views import APIView
from rest_framework import viewsets, mixins,generics
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated, AllowAny
from rest_framework.response import Response
from rest_framework import status
from core.models import GrowthRateByAgeEducation, UnemploymentByAgeGroup,\
UnemploymentByIndustry,UnemploymentByOccupation,\
Pricing,EmploymentDurationByAgeGroup
from quote import serializers
from quote.quote import Prais
import json
class QuoteViewSet(APIView):
""" Process quotes"""
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated, )
# serializer_class = serializers.QuoteSerializer
def get_queryset(self):
pass
def get(self, request, *args, **kwargs):
"""
Quotes request should have following parameters
funding_amount,current_income,age,degree,industry(optional),
profession(optional),method(optional),term_list(optional in years)
"""
term_flag = 0
paras = request.query_params
funding_amount = float(paras['funding_amount'])
current_income = float(paras['current_income'])
age = int(paras['age'])
degree = paras['degree']
if 'industry' in paras.keys():
industry = request.query_params['industry']
if 'profession' in paras.keys():
profession = request.query_params['profession']
if 'method' in paras.keys():
method = request.query_params['method']
if 'term_list' in paras.keys():
term_list = json.loads(request.query_params['term_list'])  # parse as JSON instead of eval() on user input
term_flag = 1
print('here')
prais = Prais()
if term_flag == 1:
quotes_result = prais.Quotes(funding_amount,current_income,age,degree,term_list=term_list)
else:
quotes_result = prais.Quotes(funding_amount,current_income,age,degree)
quotes_json = quotes_result#json.dumps(quotes_result)
# except:
# return Response(status.HTTP_406_NOT_ACCEPTABLE)
return Response(quotes_json)
``` |
{
"source": "AakashSrinivasan03/GlossBert-GraphEmbeddings",
"score": 2
} |
#### File: GlossBert-GraphEmbeddings/GCN/train_classifier_gcn.py
```python
from __future__ import absolute_import, division, print_function
import argparse
from collections import OrderedDict
import csv
import logging
import os
import random
import sys
import pandas as pd
import numpy as np
import torch.nn as nn
import torch
import torch.nn.functional as F
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from nltk.corpus import wordnet as wn
from torch.nn import CrossEntropyLoss, MSELoss
from graph_embeddings import Graph
from evaluation_script import evaluate_results
import networkx as nx
import torch.sparse as ts
import scipy.sparse as ss
def get_graph():
G=nx.Graph()
vertices = []
edges = []
dictionary_name_to_id = {}
dictionary_id_to_name = {}
noun_synsets = list(wn.all_synsets())
vertices = [synset for synset in noun_synsets]
G.add_nodes_from(range(0, len(vertices)))
for node in G.nodes():
G.nodes[node]['name'] = vertices[node].name()
dictionary_name_to_id[vertices[node].name()] = node
dictionary_id_to_name[node] = vertices[node]
for node in G.nodes():
current_vertex_id = node
current_vertex_name = G.nodes[node]['name']
word = vertices[node].lemmas()[0].name()
current_synset = wn.synsets(word)
for syn in current_synset:
# if ".n." in synset.name()
# if synset.name() >= current_vertice_name:
synset_id = dictionary_name_to_id[syn.name()]
G.add_edge(current_vertex_id, synset_id)
# G.add_edges_from(edges)
nV = len(G.nodes())
G.add_node(nV) # dummy for unknown synsets
G.add_edge(nV, nV)
# nx.write_adjlist(G, "wordnet.adjlist")
return G
import torch.nn as nn
import torch.nn.functional as F
# from pygcn.layers import GraphConvolution
class GCN_FCNet(nn.Module):
def __init__(self, input_dim, G, embedding_dim=300, num_labels=2, emb_scale=1.0, w_scale=1e-2, dropout=0.1, activation=F.sigmoid):
super(GCN_FCNet, self).__init__()
self.input_dim = input_dim+embedding_dim
# self.linear_final = nn.Linear(self.input_dim + embedding_dim, num_labels)
self.linear_final = nn.Sequential(
nn.Linear(self.input_dim, 500),
nn.Sigmoid(),
# nn.Linear(500, 500),
# nn.Sigmoid(),
nn.Linear(500, 500),
nn.Sigmoid(),
nn.Linear(500, num_labels)
)
# self.linear_final = nn.Linear(self.input_dim, num_labels)
self.G = G
self.nV = len(G.nodes())
edgelist = np.array([[u, v] for u,v,c in nx.to_edgelist(G)]).T
self.nE = edgelist.shape[1]
A = ss.csr_matrix((np.ones(self.nE), edgelist), shape=(self.nV, self.nV))
# A = ss.coo_matrix(A)
# A_ix = torch.LongTensor(np.vstack((A.row, A.col)))
# A_val = torch.FloatTensor(A.data)
# A = ts.FloatTensor(A_ix, A_val, torch.Size(A.shape))
# A.requires_grad = False
D_half_val = np.array(A.sum(axis=1)).flatten()**-0.5
D_half_ix = np.arange(self.nV).reshape(1,-1).repeat(2, axis=0)
D_half = ss.csr_matrix((D_half_val, D_half_ix), shape=(self.nV, self.nV))
LM = ss.coo_matrix(D_half.dot(A).dot(D_half))
LM_ix = torch.LongTensor(np.vstack((LM.row, LM.col)))
LM_val = torch.FloatTensor(LM.data)
self.LM = ts.FloatTensor(LM_ix, LM_val, torch.Size(LM.shape))
self.LM.requires_grad = False
self.H0 = torch.randn((self.nV, embedding_dim))*emb_scale
self.W1 = torch.randn((embedding_dim, embedding_dim))*w_scale
self.W2 = torch.randn((embedding_dim, embedding_dim))*w_scale
self.H0 = nn.Parameter(self.H0, requires_grad=True)
self.W1 = nn.Parameter(self.W1, requires_grad=True)
self.W2 = nn.Parameter(self.W2, requires_grad=True)
self.cached = False
# def backward_hook(m, grad_input, grad_output):
# m.cached = False
# self.register_backward_hook(backward_hook)
self.dropout = dropout
self.activation = activation
# self.gcn = GCN(embedding_dim, embedding_dim, embedding_dim, 0.1, A)
def forward(self, input_tensor, node):
if not self.cached:
# H1 = F.dropout(self.activation(torch.spmm(self.LM, torch.mm(self.H0, self.W1)) ), self.dropout, training=self.training)
# self.H2 = F.dropout(self.activation(torch.spmm(self.LM, torch.mm(H1, self.W2)) ), self.dropout, training=self.training)
H1 = self.activation(torch.spmm(self.LM, torch.mm(self.H0, self.W1)) )
self.H2 = self.activation(torch.spmm(self.LM, torch.mm(H1, self.W2)) )
# self.H2 = ts.spmm(self.LM, torch.mm(H1, self.W1))
self.cached = True
# print(gcn_out.shape)
# print(input_tensor.shape)
gcn_out = self.H2[node]
fc_input = torch.cat((gcn_out, input_tensor), 1)
scores = self.linear_final(fc_input)
# scores = self.linear_final(input_tensor)
return scores
def load_dataset(path,train):
train_data = np.load(path, allow_pickle=True)
if not train:
train_data = train_data[()]
embeddings = train_data['embeddings']
labels = train_data['labels']
sense_keys = train_data['synsets']
synsets = [sc2ss(sensekey) for sensekey in sense_keys]
print('loaded BERT embeddings')
return embeddings, labels, synsets
# def attach_graph(graph_dict, sense_keys, embeddings):
# counter = 0
# concatenated_representation = []
# #print('report.n.04' in ix_G_lookup)
# #print('back.n.03' in ix_G_lookup)
# for i in range(len(sense_keys)):
# sensekey = sense_keys[i]
# synset = sc2ss(sensekey)
# if(synset in ix_G_lookup):
# index = ix_G_lookup[synset]
# vector = graph_dict['embeddings'][index]
# else:
# #sensekey not in graph list
# counter += 1
# vector = np.zeros_like(graph_dict['embeddings'][0])
# if(i%1e5==0):
# print(i,"done")
# #attach graph vector
# concatenated_representation.append(np.concatenate([embeddings[i],vector],axis=0))
# print("shape",np.array(concatenated_representation).shape,counter)
# return np.array(concatenated_representation)
# def get_graph_embeddings(graph_dict,synset_ids):
# embeddings = []
# for synset_id in synset_ids:
# embeddings.append(graph_dict['embeddings'][synset_id])
# return np.array(embeddings)
def write_results(path,dataset,probs):
pred = np.argmax(probs,axis=1)
with open(os.path.join(path,dataset+'_results.txt'),'w') as f:
for i,j,k in zip(pred,probs[:,0],probs[:,1]):
f.write(str(i)+' '+str(j)+' '+str(k)+'\n')
return os.path.join(path,dataset+'_results.txt')
def sc2ss(sensekey):
'''Look up a synset given the information from SemCor'''
### Assuming it is the same WN version (e.g. 3.0)
# TO DO: Need a better way of extracting string
synset = str(wn.lemma_from_key(sensekey).synset())[8:-2]
#print(synset)
return synset
# def main():
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--embeddings_data_dir",default=None,type=str,help="The input embedding file")
parser.add_argument("--dataset",default=None,type=str,help="Dataset name")
parser.add_argument("--do_train",action='store_true',help="Whether to run training.")
parser.add_argument("--do_eval",action='store_true',help="Whether to run evaluation")
parser.add_argument("--out_results_dir",default=None,type=str,help="Output result path")
parser.add_argument("--load_model_path",default=None,type=str,help="Eval - model to be loaded")
parser.add_argument("--batch_size",default=32,type=int,help="Total batch size for training.")
parser.add_argument("--num_epochs",default=5,type=int,help="Number of epochs.")
# parser.add_argument("--graph_embeddings_loc",default=None,type=str,help="The graph embedding file")
args = parser.parse_args()
os.makedirs(args.out_results_dir, exist_ok=True)
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device = "cpu"
n_gpu = torch.cuda.device_count()
print("device",device,"n_gpu",n_gpu)
# graph_location = args.graph_embeddings_loc
# graph = Graph('load')
# g_vector = graph.get_embeddings(location =graph_location)
# graph_dict = graph.embedding_to_tuple(g_vector)
G = get_graph()
# model = GCN_FCNet(300, G)
# model.to(device)
# model(1, 2)
nV = len(G.nodes())
ix_G_lookup = {G.nodes[n]["name"]: n for n in G.nodes() if "name" in G.nodes[n]}  # synset name -> node index (the dummy node has no name)
embeddings, labels,synsets = load_dataset(args.embeddings_data_dir,args.do_train)
# assert list(G.nodes()) == ix_G_lookup.keys()
all_labels = labels
synset_mapping = torch.tensor([ix_G_lookup[synset] if synset in ix_G_lookup else -1 for synset in synsets]).long()
# graph_embeddings = torch.tensor(np.concatenate([graph_dict['embeddings'],np.mean(graph_dict['embeddings'],axis=0).reshape(1,-1)],axis=0))
###embeddings = attach_graph(graph_dict,sense_keys,embeddings)
embeddings = torch.tensor(embeddings)
labels = torch.tensor(labels).long()
data= TensorDataset(embeddings, labels, synset_mapping)
shuffle_bool = not args.do_eval
dataloader = DataLoader(data, batch_size=args.batch_size, shuffle=shuffle_bool)
num_labels = 2
####Semeval07 dev set
dev_embeddings, dev_labels,dev_synsets = load_dataset("embeddings/semeval2007.npy",False)
dev_embeddings = torch.tensor(dev_embeddings, device=device)
all_dev_labels = dev_labels
dev_synset_mapping = torch.tensor([ix_G_lookup.get(synset, nV-1) for synset in dev_synsets]).long()
# dev_graph_embeddings = torch.tensor(np.concatenate([graph_dict['embeddings'],np.mean(graph_dict['embeddings'],axis=0).reshape(1,-1)],axis=0))
# dev_embeddings = torch.tensor(dev_embeddings)
# dev_concatenated_embeddings = torch.cat((dev_embeddings,dev_graph_embeddings[dev_synset_mapping]),axis=1)
##########
best_accuracy = 0
if args.do_train:
output_model_file = os.path.join(args.out_results_dir,"model_save")
model = GCN_FCNet(embeddings.shape[1], G)
model.to(device)
model.train()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
gamma = 0.995
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma, last_epoch=-1)
epoch = 0
loss_fct = CrossEntropyLoss()
for epoch_no in trange(int(args.num_epochs), desc="Epoch"):
epoch += 1
tr_loss = 0
for step, batch in enumerate(tqdm(dataloader, desc="Iteration")):
batch = tuple(t.to(device) for t in batch)
bert_embeddings, labels, synsets = batch
# graph_embedding_lookup = graph_embeddings[synsets.to('cpu')]
# inputs = torch.cat((bert_embeddings,graph_embedding_lookup.to(device)),1)
inputs = bert_embeddings
logits = model(inputs.float(), synsets)
loss = loss_fct(logits.view(-1, num_labels), labels.view(-1))
tr_loss += loss
loss.backward(retain_graph=True)
optimizer.step()
optimizer.zero_grad()
print("Epoch",epoch_no,"Loss",tr_loss)
dev_logits = model(dev_embeddings.float(), dev_synset_mapping)
dev_prob_values = nn.Softmax(dim=-1)(dev_logits).cpu().detach().numpy()
result_path = write_results(".",'semeval2007',dev_prob_values)
accuracy = evaluate_results('semeval2007',result_path)
best_accuracy = max(best_accuracy,accuracy)
# if(best_accuracy==accuracy):
# print("saving model..")
# torch.save(model.state_dict(), output_model_file)
if(args.do_eval):
model = GCN_FCNet(embeddings.shape[1], G)
model.to(device)
model.load_state_dict(torch.load(args.load_model_path))
model.eval()
probs = np.zeros((embeddings.shape[0],num_labels))
l = 0
h = 0
eval_dataloader = DataLoader(data, batch_size=args.batch_size, shuffle=False)
for step, batch in enumerate(tqdm(eval_dataloader, desc="Iteration")):
batch = tuple(t.to(device) for t in batch)
bert_embeddings, labels, synsets = batch
# graph_embedding_lookup = graph_embeddings[synsets.to('cpu')]
# inputs = torch.cat((bert_embeddings,graph_embedding_lookup.to(device)),1)
inputs = bert_embeddings
logits = model(inputs.float(), synsets)
prob_values = nn.Softmax(dim=-1)(logits).cpu().detach().numpy()
h = l + prob_values.shape[0]
probs[l:h] = prob_values
l = h
pred = (probs[:,1]>=0.5).astype(int)
truth = all_labels.astype(int)
print(truth.shape)
print("accuracy",np.sum(pred==truth)*1.0/pred.shape[0])
result_path = write_results(args.out_results_dir,args.dataset,probs)
evaluate_results(args.dataset,result_path)
# if __name__ == "__main__":
# main()
```
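The constructor of `GCN_FCNet` builds its propagation matrix by symmetrically normalising the WordNet adjacency as D^{-1/2} A D^{-1/2} and storing it as a sparse torch tensor. The snippet below repeats that computation on a three-node toy graph so the shapes and values are easy to inspect; the variable names are illustrative and not taken from the training script.
```python
# Toy-sized sketch of the D^{-1/2} A D^{-1/2} normalisation used in GCN_FCNet.__init__.
import numpy as np
import scipy.sparse as ss
import torch

edges = np.array([[0, 0, 1, 1, 2, 2], [1, 2, 0, 2, 0, 1]])  # undirected triangle
n = 3
A = ss.csr_matrix((np.ones(edges.shape[1]), (edges[0], edges[1])), shape=(n, n))
d_half = np.asarray(A.sum(axis=1)).flatten() ** -0.5
D_half = ss.csr_matrix((d_half, (np.arange(n), np.arange(n))), shape=(n, n))
LM = ss.coo_matrix(D_half.dot(A).dot(D_half))
LM_torch = torch.sparse_coo_tensor(np.vstack((LM.row, LM.col)), LM.data, LM.shape)
print(LM_torch.to_dense())  # each nonzero entry equals 1 / sqrt(deg_i * deg_j)
```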
#### File: AakashSrinivasan03/GlossBert-GraphEmbeddings/graph_embeddings.py
```python
import numpy as np
import os
from tqdm import tqdm
class Graph(object):
"""docstring for Graph"""
def __init__(self, source):
super(Graph, self).__init__()
self.source = source
def build(self):
'''builds adjacency matrix'''
pass
def load(self, location):
embeddings_index = {}
f = open(location, 'r', encoding='utf-8')
for line in f:
values = line.rstrip().rsplit(' ')
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
print('found %s word vectors' % len(embeddings_index))
return embeddings_index
def get_embeddings(self, location = None):
if self.source == 'load':
embeddings = self.load(location)
else:
embeddings = self.build()
return embeddings
def embedding_to_tuple(self, dic):
'''converts embedding dictionary to matrix and returns matrix and indexing'''
name_to_index = {}
index_to_name = {}
mat = np.zeros((len(dic),300))
i=0
for key, value in dic.items():
mat[i] = value
name_to_index[key] = i
index_to_name[i] = key
i+=1
output = {}
output['embeddings'] = mat
output['node_2_idx'] = name_to_index
output['idx_2_node'] = index_to_name
return output
```
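A hypothetical way to drive the `Graph` wrapper above end to end; the embeddings path and its word2vec-style text format (one `<synset> <300 floats>` line) are assumptions inferred from the `load()` parser rather than a documented file.
```python
from graph_embeddings import Graph

graph = Graph("load")
embeddings_index = graph.get_embeddings(location="embeddings/wordnet_graph_vectors.txt")
graph_dict = graph.embedding_to_tuple(embeddings_index)

print(graph_dict["embeddings"].shape)            # (num_synsets, 300)
print(graph_dict["node_2_idx"].get("dog.n.01"))  # row index for a synset, if present
```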
#### File: AakashSrinivasan03/GlossBert-GraphEmbeddings/sampling.py
```python
import pickle
import pandas as pd
import nltk
import re
from nltk.corpus import wordnet as ewn
import numpy as np
def load_dataset(path,train):
train_data = np.load(path, allow_pickle=True)
########if(not train):
#train_data = train_data[()]
embeddings = train_data['embeddings']
labels = train_data['labels']
sense_keys = train_data['synsets']
synsets = [sc2ss(sensekey) for sensekey in sense_keys]
print('loaded BERT embeddings')
return embeddings, labels, synsets
def sc2ss(sensekey):
'''Look up a synset given the information from SemCor'''
### Assuming it is the same WN version (e.g. 3.0)
# TO DO: Need a better way of extracting string
synset = str(ewn.lemma_from_key(sensekey).synset())[8:-2]
#print(synset)
return synset
count = 0
def get_neg_sampling(data_loc,loc,save_loc):
print(data_loc)
print(loc)
embeddings, labels, synsets = load_dataset(data_loc,True)
df = pd.read_csv(loc,sep='\t')
def get_key(sent):
return sent.split()[0]
df['key'] = df['gloss'].apply(get_key)
print('keys done')
def sc2ss(sensekey):
'''Look up a synset given the information from SemCor'''
### Assuming it is the same WN version (e.g. 3.0)
# TO DO: Need a better way of extracting string
synset = str(ewn.lemma_from_key(sensekey).synset())[8:-2]
#print(synset)
return synset
def get_wordnet_pos(treebank_tag):
if treebank_tag.startswith('J'):
return 's'
elif treebank_tag.startswith('V'):
return 'v'
elif treebank_tag.startswith('N'):
return 'n'
elif treebank_tag.startswith('R'):
return 'r'
else:
return None
def sensekey_2_syn(x):
syn = sc2ss(x).split('.')[1]
return syn
df['syn'] = df['sense_key'].apply(sensekey_2_syn)
print('got syn')
def get_tag(x):
sent = x['sentence']
#key = x['gloss'].split()[0]
key = x['key']
#sense = x['sense_key']
global count
count+=1
if(count%2000==0):
print('We are at line ',count)
#syn = sc2ss(sense).split('.')[1]
syn = x['syn']
#sent is a single sentence
tokens = nltk.word_tokenize(sent)
tokens = [t for t in tokens if not re.search(r'[^\w\d\s]',t)]
tags = nltk.pos_tag(tokens)
for i in range(len(tokens)):
if tokens[i]==key:
val = get_wordnet_pos(tags[i][1])
if val==syn:
return 1
else:
return 0
return 0
print('done')
df['pos'] = df.apply(get_tag,axis=1)
out = df['pos'].to_numpy()
#print(df['pos'].head())
#print(df['pos'].sum())
#np.save('mask_train_pos.npy',out)
embeddings = embeddings[out==1]
labels = labels[out==1]
synsets = np.array(synsets)[out==1]
dataset = {}
dataset['embeddings'] = embeddings
dataset['labels'] = labels
dataset['synsets'] = synsets
with open(save_loc, 'wb') as handle:
pickle.dump(dataset, handle, protocol=pickle.HIGHEST_PROTOCOL)  # save the filtered dataset, not just the POS mask
return dataset
import argparse
if __name__ =='__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--embeddings_loc",default=None,type=str,help="Location to embeddings of numpy")
parser.add_argument("--csv_loc",default=None,type=str,help="Location to the csv")
parser.add_argument("--save_location",default=None,type=str,help="Location for the final dataset")
args = parser.parse_args()
d = get_neg_sampling(data_loc=args.embeddings_loc,loc=args.csv_loc,save_loc = args.save_location)
# d = get_neg_sampling(data_loc='combined.npy',loc= '/home/pratyushgarg11/data/bert-n-graph-embeddings/GlossBert-GraphEmbeddings/Training_Corpora/SemCor/semcor_train_sent_cls_ws.csv')
'''
count= 0
def count_zeros(word):
global count
if not word:
count+=1
return 0
_ = words.apply(count_zeros)
print(count)
print(words.head())
'''
``` |
{
"source": "AakashSudhakar/6nomads-interview-project",
"score": 3
} |
#### File: 6nomads-interview-project/structures/dataset_processor.py
```python
import numpy as np
import pandas as pd
class Dataset_Processor(object):
""" Class object instance for processing and transforming 6Nomads data for predictive analytics. """
def __init__(self):
""" Initializer method for object instance creation. """
self.REL_PATH_TO_INT_DATA_TRAIN = "../data/interim/train_i.csv"
self.REL_PATH_TO_INT_DATA_TEST = "../data/interim/test_i.csv"
def load_data(self, which="both"):
"""
Instance method to load in dataset(s) into conditionally separated/joined Pandas DataFrame(s).
INPUTS:
{which}:
- str(both): Reads in training and testing data files as tuple of individual DataFrames. (DEFAULT)
- str(all): Reads in training and testing data files as single conjoined DataFrame.
- str(train): Reads in training data file as single DataFrame.
- str(test): Reads in testing data file as single DataFrame.
OUTPUTS:
pandas.DataFrame: Single or multiple Pandas DataFrame object(s) containing relevant data.
"""
# Validate conditional data loading arguments
if which not in ["all", "both", "train", "test"]:
raise ValueError("ERROR: Inappropriate value passed to argument `which`.\n\nExpected value in range:\n - all\n - both\n - train\n - test\n\nActual:\n - {}".format(which))
# Independently load training data
if which == "train":
return pd.read_csv(self.REL_PATH_TO_INT_DATA_TRAIN, index_col=0)
# Independently load testing data
if which == "test":
return pd.read_csv(self.REL_PATH_TO_INT_DATA_TEST, index_col=0)
else:
df_train = pd.read_csv(self.REL_PATH_TO_INT_DATA_TRAIN, index_col=0)
df_test = pd.read_csv(self.REL_PATH_TO_INT_DATA_TEST, index_col=0)
# Load merged training and testing data
if which == "all":
return pd.concat([df_train, df_test], keys=["train", "test"], sort=True)
# Load separated training and testing data (DEFAULT)
if which == "both":
return df_train, df_test
def feature_encoder(self, datasets, target, lookup_table, dtype="discrete", drop_og=False):
"""
Instance method to iteratively encode labels in dataset as numerically categorical data.
INPUTS:
{datasets}:
- pd.DataFrame: Single dataset; cast to list for iterative feature mapping.
- list: List of datasets; used for iterative feature mapping.
{target}:
- str: Name of target feature in dataset containing labels on which to encode.
{lookup_table}:
- dict: Encoding table with unencoded data ranges as values and encoded numerical categories as keys.
{dtype}:
- str(discrete): Data type parameter; indicates presence of discretized values across dataset. (DEFAULT)
- str(continuous): Data type parameter; indicates presence of continuous values across dataset.
{drop_og}:
- bool(True): Dataset drops original feature after encoding.
- bool(False): Dataset does not drop original feature after encoding. (DEFAULT)
OUTPUTS:
NoneType: Dataset insertion is performed inplace and does not return new object(s).
"""
if type(datasets) is not list:
datasets = [datasets]
def _encoder_helper(label, lookup_table, dtype):
"""
Custom helper function to replace unencoded label with encoded value from custom lookup table.
INPUTS:
{label}:
- int: Unencoded value within Pandas Series to alter to categorically encoded label.
{lookup_table}:
- dict: Encoding table with unencoded data ranges as values and encoded numerical categories as keys.
{dtype}:
- str(discrete): Data type parameter; indicates presence of discretized labels.
- str(continuous): Data type parameter; indicates presence of continuous labels.
OUTPUTS:
int: Encoded numerical category as new label. (DEFAULT)
str: Encoded string-based category as new label.
"""
for key, value in lookup_table.items():
if dtype == "discrete":
if label in value:
return key
if dtype == "continuous":
if value[0] <= label < value[1]:
return key
encoded_feature = "{}_encoded".format(target)
for dataset in datasets:
if encoded_feature in dataset:
dataset.drop(columns=[encoded_feature], inplace=True)
features = dataset.columns.tolist()
dataset.insert(loc=features.index(target) + 1,
column="{}_encoded".format(target),
value=dataset[target].apply(_encoder_helper,
lookup_table=lookup_table,
dtype=dtype))
if drop_og:
dataset.drop(columns=[target], inplace=True)
return
def save_dataset(self, dataset, savepath, filetype="csv"):
"""
Instance method to save current state of dataset to data file accessible by navigating the parent directory.
INPUTS:
{dataset}:
- pd.DataFrame: Single parent dataset; used for data formatting and allocation (save to memory).
{savepath}:
- str: Relative path location within parent directory to which dataset is saved.
{filetype}:
- str(csv): Dataset is formatted as comma-separated values file architecture. (DEFAULT)
- str(excel): Dataset is formatted as Excel spreadsheet file architecture.
OUTPUTS:
NoneType: Saving data to memory is performed outside context of object and does not return new object(s).
"""
# Sanitization for method instantiation if unknown value is passed to `filetype` keyword argument
if filetype not in ["csv", "excel"]:
raise ValueError("Value passed to keyword argument `filetype` is uninterpretable. EXPECTED: ['csv', 'excel']. ACTUAL: ['{}']".format(filetype))
# Explicit relative pathway declaration and saving process performed on dataset
savepath += ".{}".format(filetype)
if filetype == "csv":
dataset.to_csv(savepath)
elif filetype == "excel":
dataset.to_excel(savepath)
return
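# --- Illustrative usage sketch; not part of the original module. ---
# Encode a hypothetical categorical target on both interim datasets and save
# the training split; the column name, lookup table, and save path are examples.
if __name__ == "__main__":
    proc = Dataset_Processor()
    df_train, df_test = proc.load_data(which="both")
    label_table = {0: range(0, 4), 1: range(4, 8)}  # encoded label -> raw value range
    proc.feature_encoder([df_train, df_test], target="target",
                         lookup_table=label_table, dtype="discrete")
    proc.save_dataset(df_train, "../data/processed/train_p", filetype="csv")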
``` |
{
"source": "AakashSudhakar/helical",
"score": 2
} |
#### File: AakashSudhakar/helical/app.py
```python
from flask import Flask, request, render_template
# Imports Custom Algorithm Structures for Data Processing
from .structures import processor01, processor02, processor03, processor04, processor05
# Initializes Flask App
app = Flask(__name__)
def _validate_data_general(dataset):
""" Helper function to validate user input as genomic data. """
PERMITTED_CHARS = ["A", "C", "G", "T"]
for char in dataset:
if char not in PERMITTED_CHARS:
return -1
def _validate_data_codons(dataset):
""" Helper function to validate user input as codon sequence. """
if len(dataset) % 3 != 0:
return -1
# Creates Home Page with User Data Input Option
@app.route("/")
def user_form():
return render_template("input-data.html")
# Creates POST Route at Home Page with User Data Processing
@app.route("/", methods=["POST"])
def user_form_proc():
# NOTE: PROCESSORS values reflect the _value_ tags in the HTML button elements in ./templates/input-data.html.
PROCESSORS = {
1: "Get Nucleotide Count", # P01: DNA
2: "Convert to RNA", # P02: RNA
3: "Generate Reverse Complement", # P03: REVC
4: "Determine GC Content", # P05: GC
5: "Translate to Protein Chain" # P08: PROT
}
if request.form["proc"] == PROCESSORS[1]:
if request.form["proc01"]:
text01 = request.form["proc01"].upper()
if _validate_data_general(text01) != -1:
proc01 = processor01.Processor01_NucleotideCounter(text01)
return proc01.render_response(proc01.nucleotide_counter())
else:
return "ERROR: Inputted data is invalid. Please try again."
else:
return "ERROR: Nucleotide Counts Processor did not receive any data."
elif request.form["proc"] == PROCESSORS[2]:
if request.form["proc02"]:
text02 = request.form["proc02"].upper()
if _validate_data_general(text02) != -1:
proc02 = processor02.Processor02_DNATransriber(text02)
return proc02.render_response(proc02.transcriber())
else:
return "ERROR: Inputted data is invalid. Please try again."
else:
return "ERROR: RNA Transcriber Processor did not receive any data."
elif request.form["proc"] == PROCESSORS[3]:
if request.form["proc03"]:
text03 = request.form["proc03"].upper()
if _validate_data_general(text03) != -1:
proc03 = processor03.Processor03_ReverseComplement(text03)
return proc03.render_response(proc03.reverse_complement_generator())
else:
return "ERROR: Inputted data is invalid. Please try again."
else:
return "ERROR: Reverse Complement Processor did not receive any data."
elif request.form["proc"] == PROCESSORS[4]:
if request.form["proc04"]:
text04 = request.form["proc04"].upper()
if _validate_data_general(text04) != -1:
proc04 = processor04.Processor04_GCContent(text04)
return proc04.render_response(proc04.gc_content_calculator())
else:
return "ERROR: Inputted data is invalid. Please try again."
else:
return "ERROR: GC-Content Calculation Processor did not receive any data."
elif request.form["proc"] == PROCESSORS[5]:
if request.form["proc05"]:
text05 = request.form["proc05"].upper()
if _validate_data_general(text05) != -1 and _validate_data_codons(text05) != -1:
preproc05 = processor02.Processor02_DNATransriber(text05)
proc05 = processor05.Processor05_ProteinTranslator(preproc05.transcriber())
return proc05.render_response(proc05.rna_to_protein_translator())
else:
return "ERROR: Inputted data is invalid or is not evenly divisible by three (3). Please try again."
else:
return "ERROR: Protein Chain Translator did not receive any data."
```
#### File: helical/structures/processor02.py
```python
class Processor02_DNATransriber(object):
""" Object structure containing logic for DNA-to-RNA Transcription algorithm. """
def __init__(self, data):
self.dataset = data
def transcriber(self):
""" Method to transcribe relevant DNA base pairs (Thymine) into RNA base pairs (Uracil). """
return "".join([[base, "U"][base == "T"] for base in self.dataset])
def render_response(self, response):
""" Method to render stylized response text to user. """
return "\nOriginal genomic sequence: {}\n\nTranscribed genomic sequence: {}".format(str(self.dataset), response)
```
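A quick check of the transcriber above; the input string is the Rosalind RNA sample and the import path assumes the `helical` package root is on `PYTHONPATH`.
```python
from structures.processor02 import Processor02_DNATransriber

proc = Processor02_DNATransriber("GATGGAACTTGACTACGTAAATT")
print(proc.transcriber())  # GAUGGAACUUGACUACGUAAAUU
```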
#### File: helical/structures/processor03.py
```python
class Processor03_ReverseComplement(object):
""" Object structure containing logic for DNA reverse complement generation. """
def __init__(self, data):
self.dataset = data
def reverse_complement_generator(self):
""" Method to generate reversed complement strand (swapped base pair, reverse order) from DNA. """
BASE_PAIRS = {
"A": "T",
"T": "A",
"G": "C",
"C": "G"
}
return [str(value) for value in [BASE_PAIRS.get(key) for key in self.dataset.strip()]]
def render_response(self, response):
""" Method to render stylized response text to user. """
return "\nOriginal genomic sequence: {}\n\nReverse complement sequence: {}".format(str(self.dataset), "".join(response)[::-1])
```
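The matching check for the reverse-complement processor, again with a Rosalind sample input and the same assumed import path.
```python
from structures.processor03 import Processor03_ReverseComplement

proc = Processor03_ReverseComplement("AAAACCCGGT")
print(proc.render_response(proc.reverse_complement_generator()))
# ... Reverse complement sequence: ACCGGGTTTT
```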
#### File: helical/structures/processor05.py
```python
class Processor05_ProteinTranslator(object):
""" Object structure containing logic for DNA-to-RNA-to-protein genomic translation. """
def __init__(self, data):
self.dataset = data
self.TRANSLATION_TABLE = {
"UUU": "F", "CUU": "L", "AUU": "I", "GUU": "V",
"UUC": "F", "CUC": "L", "AUC": "I", "GUC": "V",
"UUA": "L", "CUA": "L", "AUA": "I", "GUA": "V",
"UUG": "L", "CUG": "L", "AUG": "M", "GUG": "V",
"UCU": "S", "CCU": "P", "ACU": "T", "GCU": "A",
"UCC": "S", "CCC": "P", "ACC": "T", "GCC": "A",
"UCA": "S", "CCA": "P", "ACA": "T", "GCA": "A",
"UCG": "S", "CCG": "P", "ACG": "T", "GCG": "A",
"UAU": "Y", "CAU": "H", "AAU": "N", "GAU": "D",
"UAC": "Y", "CAC": "H", "AAC": "N", "GAC": "D",
"UAA": "Stop", "CAA": "Q", "AAA": "K", "GAA": "E",
"UAG": "Stop", "CAG": "Q", "AAG": "K", "GAG": "E",
"UGU": "C", "CGU": "R", "AGU": "S", "GGU": "G",
"UGC": "C", "CGC": "R", "AGC": "S", "GGC": "G",
"UGA": "Stop", "CGA": "R", "AGA": "R", "GGA": "G",
"UGG": "W", "CGG": "R", "AGG": "R", "GGG": "G"
}
def rna_to_protein_translator(self):
""" Method to convert transcribed DNA into corresponding protein sequence. """
protein_seq = list()
for iterator in range(0, len(self.dataset), 3):
codon = self.dataset[iterator:iterator+3]
if self.TRANSLATION_TABLE[codon] == "Stop":
return protein_seq
protein_seq.append(self.TRANSLATION_TABLE[codon])
return protein_seq
def render_response(self, response):
""" Method to render stylized response text to user. """
return "Transcribed RNA sequence: {}\n\nTranslated protein chain: {}".format(self.dataset, "-".join(response))
``` |
{
"source": "aakashsur/docker-hirise",
"score": 3
} |
#### File: docker-hirise/scripts/add_short_contigs.py
```python
import argparse
def main(broken=None, scaffolds=None, length=1000, output=None):
"""Takes as input a broken.fa file and a HiRise scaffold file and
outputs the scaffolds with the short contigs appended to the end."""
# Open output and write HiRise scaffolds
with open(output, 'w') as out_handle:
with open(scaffolds) as scaff_handle:
for line in scaff_handle:
out_handle.write(line)
# Writes small contigs to output
for header, seq, string_seq in get_seqs(broken):
if len(seq) < length:
out_handle.write(header)
out_handle.write(string_seq)
out_handle.write("\n")
def get_seqs(fasta):
"""Takes as input a fasta file and yields the header line,
the sequence stripped on newlines, and the sequence with new
lines as they were in the fasta file."""
with open(fasta) as fasta_handle:
header = None
for line in fasta_handle:
if line.startswith(">"):
if header:
yield header, ''.join(seq), '\n'.join(seq)
seq = []
header = line
else:
seq.append(line.rstrip())
# Yield final sequence
yield header, ''.join(seq), '\n'.join(seq)
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("-b", "--broken", help="The broken.fa file.")
parser.add_argument("-s", "--scaffolds", help="The HiRise scaffold fasta file.", default="/dev/stdin")
parser.add_argument("-l", "--length", help="The maximum length of contigs to include", default=1000)
parser.add_argument("-o", "--output", help="Output file", default="/dev/stdout")
parser.add_argument("-d", "--debug", help="Run pdb", default=False, action="store_true")
return parser.parse_args()
if __name__ == "__main__":
args = get_args()
if args.debug:
import pdb
pdb.set_trace()
main(broken=args.broken, scaffolds=args.scaffolds,
length=args.length, output=args.output)
```
#### File: docker-hirise/scripts/chicago_edge_scores.py
```python
from __future__ import division
from __future__ import print_function
from builtins import str
from builtins import range
from builtins import object
from past.utils import old_div
import math
import sys
"""Chicago likelihood model calculations as described in ... """
debug=False
def fragment_size(l1,l2,o1,o2,coords,g=0):
"""o1 and o2 indicate the orientations of the two contigs being used 0 means 5' to 3'; 1 means flipped"""
x,y = coords[0],coords[1]
if (o1,o2) == (0,0): # -------1-----> ----2------>
return (y+l1-x+g)
if (o1,o2) == (0,1): # -------1-----> <---2-------
return (l2-y+l1-x+g)
if (o1,o2) == (1,0): # <-------1----- ----2------>
return (x+y+g)
if (o1,o2) == (1,1): # <-------1-----> <----2------
return (l2-y+x+g)
class ChicagoModel(object):
def __init__(self,params):
self.alpha = params.get('alpha',[1.0])
self.beta = params.get('beta',[1.0])
self.pn = params.get('pn',0.5)
self.G = params.get('G',3000000000)
self.N = params.get('N',100000000)
self.nexps = len(self.alpha)
def set_params(self,params):
self.alpha = params.get('alpha')
self.beta = params.get('beta' )
self.pn = params.get('pn' )
self.G = params.get('G' )
self.N = params.get('N' )
self.nexps = len(self.alpha)
self.lnFinf = math.log(self.pn)-math.log(self.G)
self.logN = math.log(self.N)
self.logG = math.log(self.G)
def __repr__(self):
return(str({ 'pn': self.pn, 'G':self.G, 'N':self.N, 'a':self.alpha, 'b':self.beta, 'n':self.nexps }))
### These are the things that depend on the model:
#
def f0(self,x):
"""P_{raw}(x)= \sum \alpha_i \beta_i e^{-x \beta_i} """
return sum( [ self.alpha[i]*self.beta[i]*math.exp(-x*self.beta[i]) for i in range(self.nexps) ] )
def f0_prime(self,x):
alpha=self.alpha
beta=self.beta
"""P_{raw}(x)= \sum \alpha_i \beta_i e^{-x \beta_i} """
return ( sum([ -alpha[i]*beta[i]*beta[i]*math.exp(-beta[i]*x) for i in range(self.nexps) ]) )
def f0_double_prime(self,x):
alpha=self.alpha
beta=self.beta
"""P_{raw}(x)= \sum \alpha_i \beta_i e^{-x \beta_i} """
return ( sum([ alpha[i]*(beta[i]**3.0)*math.exp(-beta[i]*x) for i in range(self.nexps) ]) )
def F0(self,d):
# \int f(x) dx
alpha=self.alpha
beta =self.beta
if d<0:
print("WTF? d<0:",d,file=sys.stderr)
raise Exception
return self.pn*d/self.G + (1.0-self.pn) * sum( [ - alpha[i]*(math.exp(-beta[i]*d)) for i in range(self.nexps)] )
def H0(self,d):
# \int b x f(x) dx = \int b x e^{- b x} dx = -e^{- b x} \frac{bx+1}{b}
alpha=self.alpha
beta =self.beta
return 0.5*d*d*self.pn/self.G + (1.0-self.pn)* sum([ -(old_div(alpha[i],beta[i]))*(math.exp(-beta[i]*d)*(beta[i]*d + 1.0)) for i in range(self.nexps) ])
def H0_good(self,d):
# \int b x f(x) dx = \int b x e^{- b x} dx = -e^{- b x} \frac{bx+1}{b}
alpha=self.alpha
beta =self.beta
return (1.0-self.pn)* sum([ -(old_div(alpha[i],beta[i]))*(math.exp(-beta[i]*d)*(beta[i]*d + 1.0)) for i in range(self.nexps) ])
#
###
def F(self,d):
return self.F0(d)-self.F0(0)
def H(self,d):
return self.H0(d)-self.H0(0)
def f(self,x,cache={}): #cache this
if x in cache: return cache[x]
r= old_div(self.pn,self.G) + (1.0-self.pn) * self.f0(x)
cache[x]=r
return r
def f_prime(self,x):
return (1-self.pn)*self.f0_prime(x)
def f_double_prime(self,x):
return (1-self.pn)*self.f0_double_prime(x)
### These are for backwards compatability with the old implementation
#
def p_insert_raw(self,x):
return self.f0(x)
def p_insert_effective(self,x):
""" p_n/G + (1-p_n) P_{raw}(x) """
return self.f(x)
def ddg_p(self,x):
return self.f0_prime(x)
def d2dg2_p(self,x):
return self.f0_double_prime(x)
def ll(self,l1,l2,o1,o2,links,g,p0=-1):
return self.lnL(l1,l2,o1,o2,g,links)
def p0(self,l1,l2,g):
return 1.0-(old_div(self.n_bar(l1,l2,g),self.N))
#
###
def omega(self,l1a,l2a,d):
l1=min(l1a,l2a)
l2=max(l1a,l2a)
if d<0: return 1
elif d<l1: return d+1
elif d<l2: return l1+1
elif d<=l1+l2: return l1+l2-d+1
else: return 0
def osigma(self,l1a,l2a,d):
l1=min(l1a,l2a)
l2=max(l1a,l2a)
if d<0: return 0
elif d<l1: return 1.0
elif d<l2: return 0
elif d<l1+l2: return -1.0
else: return 0
def lnF(self,x,cache={}): # Cache this
if x in cache: return cache[x]
r=math.log( self.f(x) )
cache[x]=r
return r #math.log( self.f(x) )
def T(self,d,cache={}): # Cache this
if d in cache: return cache[d]
#\sum_{i=0}^{d} (d-i) f(i) \approx
x= d*self.F(d) - self.H(d)
cache[d]=x
return x
def p(self,l1,l2,g):
if l1<0: raise Exception
if l2<0: raise Exception
if (l1+l2+g)<0: sys.stderr.write("wtf: l1+l2+g < 0 ; {}+{}+{} < 0\n".format(l1,l2,g))
if (l1+g)<0: sys.stderr.write("wtf: l1+g < 0 ; {}+{} < 0\n".format(l1,g))
if (l2+g)<0: sys.stderr.write("wtf: l2+g < 0 ; {}+{} < 0\n".format(l2,g))
if (g)<0: sys.stderr.write("wtf: g < 0 ; {} < 0\n".format(g))
p = old_div((self.T(l1+l2+g)+self.T(g)-self.T(l1+g)-self.T(l2+g)),self.G)
return p
def p_prime(self,l1,l2,g):
if l1<0: raise Exception
if l2<0: raise Exception
p = old_div((self.F(l1+l2+g)+self.F(g)-self.F(l1+g)-self.F(l2+g)),self.G)
# p = ((l1+l2+g)*self.f(l1+l2+g)+g*self.f(g)-(l1+g)*self.f(l1+g)-(l2+g)*self.f(l2+g))/self.G
return p
def p_double_prime(self,l1,l2,g):
if l1<0: raise Exception
if l2<0: raise Exception
ss = self.f(l1+l2+g)+self.f(g)-self.f(l1+g)-self.f(l2+g)
# ss +=(l1+l2+g)*self.f_prime(l1+l2+g)+g*self.f_prime(g)-(l1+g)*self.f_prime(l1+g)-(l2+g)*self.f_prime(l2+g)
return old_div(ss,self.G)
# def Q(self,l1,l2,g,x):
# return (self.omega(l1,l2,x-g)*self.f_prime(x) + self.osigma(l1,l2,x-g)*self.f(x) )/( self.omega(l1,l2,x-g)*self.f(x) )
def R(self,l1,l2,g,x):
# P''/P - (P'/P)^2
return ( old_div(self.f_double_prime(x),self.f(x)) - (self.Q(x))**2.0 )
def n_bar0(self,l1l2):
#return self.N*self.pn*l1l2*2/(self.G*self.G)
# return self.N*self.pn*l1l2*2.0/(self.G*self.G)
return self.N*self.pn*l1l2/(self.G*self.G)
def n_bar(self,l1,l2,g):
p = self.p(l1,l2,g)
return self.N*p
def cutScore(self,l1,x,n,sumLogP,rangeCutoff=False,minSep=0):
la=x-minSep/2
lb=l1-x-minSep/2
if rangeCutoff and la>rangeCutoff: la=rangeCutoff
if rangeCutoff and lb>rangeCutoff: lb=rangeCutoff
if la <=0: return 0.0
if lb <=0: return 0.0
try:
n_bar = self.n_bar(la,lb,minSep)
except Exception as e:
print("Caught an exception when trying to compute n_bar, where la={}, lb={} and gap={}, based on l1={}, x={}, minSep={}".format(la,lb,minSep,l1,x,minSep))
raise e
n_bar0= self.n_bar0(la*lb)
sumLogP0 = n*(math.log(self.pn) - self.logG)
return -n_bar + sumLogP - ( -n_bar0 + sumLogP0)
def tileScore(self,binwidth,tile,n,sumLogP,rangeCutoff=False,minSep=0):
la=binwidth
lb=binwidth
# if rangeCutoff and la>rangeCutoff: la=rangeCutoff
# if rangeCutoff and lb>rangeCutoff: lb=rangeCutoff
if tile[0]==tile[1]:
n_bar = self.N*(binwidth*(self.F(binwidth)-self.F(minSep))-(self.H(binwidth)-self.H(minSep)))/self.G
n_bar0= self.N*self.pn*(((binwidth-minSep)/self.G)**2.0)
# sumLogP0 = n*(math.log(self.pn) - self.logG)
sumLogP0 = n*self.lnFinf
return -n_bar + sumLogP - ( -n_bar0 + sumLogP0)
else:
n_bar = self.n_bar(la,lb,(tile[1]-tile[0]-1)*binwidth)
n_bar0= self.N*self.pn*la*lb*2/(self.G*self.G)
# sumLogP0 = n*(math.log(self.pn) - self.logG)
# self.lnFinf = math.log(self.pn)-math.log(self.G)
sumLogP0 = n*self.lnFinf
return -n_bar + sumLogP - ( -n_bar0 + sumLogP0)
def lnL(self,l1,l2,o1,o2,g,links):
n=len(links)
n_bar = self.n_bar(l1,l2,g)
try:
r= n*self.logN - n_bar - n*self.logG + sum( [ math.log(self.omega(l1,l2,fragment_size(l1,l2,o1,o2,links[i],0))) + self.lnF( fragment_size(l1,l2,o1,o2,links[i],g) ) for i in range(n) ] )
except Exception as e:
print(e)
print("l1,l2",l1,l2)
print("o1,o2",o1,o2)
print("links:",links)
print("fragment sizes g=0:", [fragment_size(l1,l2,o1,o2,links[i],0) for i in range(n)])
print("fragment sizes g=g:", [fragment_size(l1,l2,o1,o2,links[i],g) for i in range(n)])
print("omega=",[self.omega(l1,l2,fragment_size(l1,l2,o1,o2,links[i],0)) for i in range(n)])
print("lnF=",[ self.lnF( fragment_size(l1,l2,o1,o2,links[i],g)) for i in range(n)])
raise e
return r
def lnL0(self,l1,l2,o1,o2,links):
n=len(links)
n_bar = self.n_bar0(l1*l2)
# n_bar = self.N*self.pn*l2*l1/(self.G**2)
# return n*math.log(n_bar) - n_bar - math.log(math.factorial(n))
ll1= n*self.logN - n_bar - n*self.logG + sum( [ math.log(self.omega(l1,l2,fragment_size(l1,l2,o1,o2,links[i],0))) + self.lnFinf for i in range(n) ] )
return ll1
# ll2=ll1
# o2+=1
# o2=o2%2
# ll2= n*self.logN - n_bar - n*self.logG + sum( [ math.log(self.omega(l1,l2,fragment_size(l1,l2,o1,o2,links[i],0))) + self.lnFinf for i in range(n) ] )
# return max(ll1,ll2)
def score(self,l1,l2,o1,o2,links,g,p0=0):
n=len(links)
#return self.lnL(l1,l2,o1,o2,g,links) - self.lnL0(l1,l2,o1,o2,links)
# alternatively:
return( - self.n_bar(l1,l2,g) + self.n_bar0(l1*l2) - n * self.lnFinf + sum( [ ( self.lnF( fragment_size(l1,l2,o1,o2,links[i],g) ) ) for i in range(n) ] ) )
def Q(self,x):
# return (self.omega(l1,l2,x-g)*self.f_prime(x) + self.osigma(l1,l2,x-g)*self.f(x) )/( self.omega(l1,l2,x-g)*self.f(x) )
return (old_div(self.f_prime(x),self.f(x)))
def ddg_lnL( self,l1,l2,o1,o2,links,g):
n=len(links)
try:
r = -self.N * self.p_prime(l1,l2,g) + sum([ self.Q(fragment_size(l1,l2,o1,o2,links[i],g) ) for i in range(n)])
except Exception as e:
print(e)
print("l1,l2,g",l1,l2,g)
print("n_bar:",self.n_bar(l1,l2,g))
print("p:",self.p(l1,l2,g))
print("p_prime:",self.p_prime(l1,l2,g))
raise e
return r
# return ( -self.n_bar(l1,l2,g) * self.p_prime(l1,l2,g) / self.p(l1,l2,g) + sum([ self.Q(l1,l2,g,fragment_size(l1,l2,o1,o2,links[i],g) ) for i in range(n)]))
def d2dg2_lnL(self,l1,l2,o1,o2,links,g):
n=len(links)
return ( -self.N * self.p_double_prime(l1,l2,g) + sum([ self.R(l1,l2,g,fragment_size(l1,l2,o1,o2,links[i],g) ) for i in range(n) ]) )
def d2dg2_llr(self, l1,l2,o1,o2,links,g,seen={}):
if not "warned" in seen:
sys.stderr.write("using deprecated interface to likelihood model code.\n")
seen["warned"]=1
return self.d2dg2_lnL(l1,l2,o1,o2,links,g)
def ddg_llr(self, l1,l2,o1,o2,links,g,seen={}):
if not "warned" in seen:
sys.stderr.write("using deprecated interface to likelihood model code.\n")
seen["warned"]=1
return self.ddg_lnL(l1,l2,o1,o2,links,g)
def ml_gap(self,l1,l2,o1,o2,links,g0):
gap=g0
G=self.G
N=self.N
pn=self.pn
last_gap=g0
for i in range(100):
#p0 = self.p0( l1,l2,gap)
x1= self.ddg_lnL( l1,l2,o1,o2,links,gap) #1st derivative of the likelihood wrt. gap size
x2= self.d2dg2_lnL(l1,l2,o1,o2,links,gap) #2nd derivative of the likelihood wrt. gap size
score=self.score(l1,l2,o1,o2,links,gap)
# if debug: print "\t".join(map(str,[ "#it",i,o1,o2,gap,score,gap-x1/2,x1,x2,x1/x2]))
if x2==0:
break
print("hit x2==0 after",i)
gap = int( gap - old_div(x1,x2) )
if gap<0.0:
gap=10.0
if gap>200000.0:
gap=200000
if abs(old_div(x1,x2))<0.1: break
if abs(gap-last_gap)<1.0: break
last_gap=gap
score=self.score(l1,l2,o1,o2,links,gap) #l1,l2,o1,o2,G,pn,links,N,gap,p0)
if gap<0.0:
print("wtf? negative gap")
raise Exception
return gap,score
model=ChicagoModel({})
def insert_size_dist(x):
return model.p_insert_raw(x)
def set_exp_insert_size_dist_fit_params(fit_params):
model.set_params(fit_params)
#print("#",model)
def p_not_a_hit(l1,l2,GenomeSize,gaplen,pn):
return model.p0(l1,l2,gaplen)
def llr_v0(l1,l2,o1,o2,GenomeSize,pn,links,N,gaplen,p0 ):
return model.score(l1,l2,o1,o2,links,gaplen,p0)
def ll(l1,l2,o1,o2,GenomeSize,pn,links,N,gaplen,p0=-1):
return model.ll(l1,l2,o1,o2,links,gaplen,p0)
def ml_gap(l1,l2,o1,o2,G,pn,links,N,g0):
#print "#",links
return model.ml_gap(l1,l2,o1,o2,links,g0)
# def ml_gap(self, l1,l2,o1,o2,links,g0):
if __name__=="__main__":
import sys
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-M','--set_insert_size_dist_fit_params',default=False )
    parser.add_argument('-d','--debug',default=False,action="store_true")
args = parser.parse_args()
debug=args.debug
fmodel=open( args.set_insert_size_dist_fit_params )
contents = fmodel.read()
try:
fit_params=eval(contents)
    except:
        print("couldn't parse model parameters from", args.set_insert_size_dist_fit_params)
    fmodel.close()
set_exp_insert_size_dist_fit_params(fit_params)
for x in range(1,200000,100):
print(x,model.f(x),model.f_prime(x),model.S(x),model.F(x),model.H(x))
```
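The `ml_gap` routine above refines a gap-size estimate with a clamped Newton step on the log-likelihood, g ← g − L′(g)/L″(g), stopping when the step or the change in gap becomes small. A minimal, self-contained sketch of the same update rule on a stand-in concave function (the quadratic below is an illustration only, not the Chicago likelihood):

```python
# Toy Newton-Raphson gap search mirroring ml_gap's update and clamping.
# The quadratic "log-likelihood" is a placeholder, not the Chicago model.
def toy_lnL_prime(g, g_true=5000.0, scale=1e6):
    return -2.0 * (g - g_true) / scale

def toy_lnL_double_prime(g, scale=1e6):
    return -2.0 / scale

def toy_ml_gap(g0):
    gap = last_gap = g0
    for _ in range(100):
        x1 = toy_lnL_prime(gap)          # first derivative wrt gap size
        x2 = toy_lnL_double_prime(gap)   # second derivative wrt gap size
        if x2 == 0:
            break
        gap = gap - x1 / x2              # Newton step
        gap = min(max(gap, 10.0), 200000.0)   # clamp, as ml_gap does
        if abs(x1 / x2) < 0.1 or abs(gap - last_gap) < 1.0:
            break
        last_gap = gap
    return gap

print(toy_ml_gap(100000.0))  # converges to ~5000
```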
#### File: docker-hirise/scripts/chicago_support_bootstrap.py
```python
from __future__ import division
from __future__ import print_function
from builtins import range
from past.utils import old_div
import sys
import argparse
from mapperlite import MapperLite
import struct
import hashlib
import pysam
import chicago_edge_scores as ces
#import BamTags
from bamtags import BamTags
from chicago_edge_links2 import read_mask_ranges, mask_test
from time import gmtime, strftime
debug=False
def pairs_overlap(x,y):
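    # True if the closed intervals spanned by the (unordered) endpoint pairs x and y overlap.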
a=min(x[0],x[1])
b=max(x[0],x[1])
c=min(y[0],y[1])
d=max(y[0],y[1])
if a<=c and c<=b: return True
if a<=d and d<=b: return True
if c<=a and a<=d: return True
if c<=b and b<=d: return True
return False
def segments_intersect(x,y):
if not pairs_overlap(x,y):
raise Exception
a=min(x[0],x[1])
b=max(x[0],x[1])
c=min(y[0],y[1])
d=max(y[0],y[1])
if a<=c and c<=b:
p,q = (c,min(b,d))
if p>q:
print("wtf1",x,y,(p,q))
if a<=d and d<=b:
p,q = (max(a,c),d)
if p>q:
print("wtf2",x,y,(p,q))
if c<=a and a<=d:
p,q = (a,min(b,d))
#wtf3 (2608, 2741) (1500, 3000) (2608, 1500)
if p>q:
print("wtf3",x,y,(p,q))
if c<=b and b<=d:
p,q = (max(a,c),b)
if p>q:
print("wtf4",x,y,(p,q))
#wtf (1694, 3362) (1500, 3000) (1694, 1500)
if p>q:
print("wtf",x,y,(p,q))
raise Exception
return (p,q)
tmsd_debug=False
def tile_mask_score_delta(i,j,binwidth,masked_segments,model,mask_iter_i,mask_iter_j,debug):
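    # Correction added to the clipped-LLR score of tile pair (i,j): for each masked
    # segment overlapping tile i or tile j, add its expected linked-vs-random link
    # count difference (n_bar - n_bar0) against the opposite tile, then subtract the
    # same term for masked-with-masked segment pairs so it is not counted twice.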
if i==j: return(0)
gap=(j-i)*binwidth
left_tile_masked_segments = []
right_tile_masked_segments = []
mii0,mij0=mask_iter_i,mask_iter_j
while mask_iter_i<len(masked_segments) and masked_segments[mask_iter_i][0]<(i+1)*binwidth:
if tmsd_debug: print("tmsd:\tI",mask_iter_i,masked_segments[mask_iter_i],(i*binwidth,(i+1)*binwidth),file=sys.stderr,sep="\t")
if pairs_overlap( masked_segments[mask_iter_i],(i*binwidth,(i+1)*binwidth)) :
a,b = segments_intersect(masked_segments[mask_iter_i],(i*binwidth,(i+1)*binwidth))
if b-a < 0:
print("tmsd:\tI",a,b,mask_iter_i,masked_segments[mask_iter_i],(i*binwidth,(i+1)*binwidth),file=sys.stderr,sep="\t")
raise Exception
left_tile_masked_segments.append( (a,b) )
mask_iter_i+=1
while mask_iter_j<len(masked_segments) and masked_segments[mask_iter_j][0]<(j+1)*binwidth:
if tmsd_debug: print("tmsd:\tJ",mask_iter_j,masked_segments[mask_iter_j],(j*binwidth,(j+1)*binwidth),file=sys.stderr,sep="\t")
if pairs_overlap( masked_segments[mask_iter_j],(j*binwidth,(j+1)*binwidth)) :
a,b = segments_intersect(masked_segments[mask_iter_j],(j*binwidth,(j+1)*binwidth))
if b-a < 0:
print("tmsd:\tJ",a,b,mask_iter_j,masked_segments[mask_iter_j],(j*binwidth,(j+1)*binwidth),file=sys.stderr,sep="\t")
raise Exception
right_tile_masked_segments.append( (a,b) )
mask_iter_j+=1
score_delta = 0.0
for a,b in right_tile_masked_segments:
score_delta += model.n_bar( binwidth, b-a, gap+a-j*binwidth ) - model.n_bar0( binwidth*(b-a) )
for a,b in left_tile_masked_segments:
score_delta += model.n_bar( binwidth, b-a, gap+(i+1)*binwidth-b ) - model.n_bar0( binwidth*(b-a) )
for a,b in left_tile_masked_segments:
for c,d in right_tile_masked_segments:
if tmsd_debug: print("mask pair:",a,b,c,d,b-a,d-c,c-b,file=sys.stderr,sep="\t")
score_delta -= model.n_bar( b-a, d-c, c-b ) - model.n_bar0( (b-a)*(d-c) )
if tmsd_debug:
print("tmsd:",(i*binwidth,(i+1)*binwidth),(j*binwidth,(j+1)*binwidth),score_delta,left_tile_masked_segments,right_tile_masked_segments,(mii0,mij0),masked_segments,file=sys.stderr,sep="\t")
return(score_delta)
def pair2bin2D(pair,binwidth):
a,b=pair
return( int(a/binwidth),int(b/binwidth) )
def make_scaffold_mask(scaffold,mapper,mask):
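    # Project the per-original-contig mask ranges onto scaffold coordinates for
    # this scaffold (clipped to [0, scaffold length]) and return them as a sorted
    # list of (start, end) segments.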
s_mask = {}
segments={}
slen = mapper.scaffold_length[scaffold]
for seg in mapper.scaffold_contigs[scaffold]:
# print("x:",seg)
ocontig=mapper.contig_ocontig[seg]
for m in mask.get(ocontig,[]):
oscaffold ,z1a,z2a,strand,c1 = mapper.mapCoord(ocontig,m[0],m[1])
if not scaffold==oscaffold: continue
if not z1a: continue
if strand==1:
# segments.append( (z1a,min(slen,z2a)) )
segments[ (z1a,min(slen,z2a)) ] =1
else:
segments[ (max(0,z2a),z1a) ] = 1
# segments.append( (max(0,z2a),z1a) )
# print(ocontig,seg,"\t\t",m,mapper.mapCoord(ocontig,m[0],m[1]))
# for a,b in segments:
# print("\t",a,b,b-a,sep="\t")
segments=list(segments.keys())
segments.sort()
return(segments)
def chicago_pairs(sca,mapper,bamlist,minq=20,mask={}):
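    # Yield high-quality read pairs whose two ends both map within scaffold `sca`,
    # with coordinates lifted into scaffold space; duplicates, low-mapq reads and
    # reads falling in masked regions are skipped.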
for seg in mapper.scaffold_contigs[sca]:
ref="_".join(seg.split("_")[:-1])
for b in bamlist:
for aln in b.fetch(until_eof=True,reference=ref):
if not aln.is_read1: continue
if aln.is_duplicate: continue
if aln.mapq < minq : continue
if BamTags.mate_mapq(aln) < minq : continue
# print("#x",ref,mask.get(ref,[]))
if mask_test(ref,aln.pos,mask) or mask_test(b.getrname(aln.rnext),aln.pnext,mask) : continue
contig = b.getrname(aln.tid) # snam[aln.tid]
ncontig= b.getrname(aln.rnext) if aln.rnext>=0 else -1
scaffold ,z1a,z2a,z3a,c1 = mapper.mapCoord( contig, aln.pos, aln.pos+1 )
nscaffold,z2p,x2p,z3p,c2 = mapper.mapCoord(ncontig, aln.pnext,aln.pnext+1 )
if debug: print(("#x",contig,ncontig,aln.pos,aln.pnext,scaffold,nscaffold,sca,z1a,z2p,ref,mapper.ocontig_contigs.get(ref,[])))
if scaffold==nscaffold and sca==scaffold:
#yield( sc,seg,contig,ncontig,scaffold,z1a,z2a,z3a,nscaffold,z2p,x2p,z3p )
yield( sca,z1a,z2p,c1,c2,seg,contig,ncontig,scaffold,z1a,z2a,z3a,nscaffold,z2p,x2p,z3p,aln.query_name )
hist_sample_interval = 1500
hist_bisize = 1.0
scaffold_end_filter=10000
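# hist_sample_interval: spacing (bp) between samples taken for the support histogram.
# hist_bisize: bin size used when histogramming support scores.
# scaffold_end_filter: ignore positions within this many bp of a scaffold end.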
# "Fine grained" here means the support score is re-evaluated at every read-pair edge position.
def fine_grained_support(edges,nb,scaffold,pairs,model,buff,debug,t1,t2,gpf,minx,maxx,joins,logfile,binwidth,slen,mask=[],masked_segment_pairs_n_minus_n0=[],raw_hist=False,support_curve=False):
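    # Sweep left to right over the per-read-pair "edges" (position, +/- lnF term),
    # maintaining a running LLR sum plus corrections for masked regions, and
    # evaluate the support score at every edge position.  Stretches where the
    # score dips below t2 after having exceeded t1 are recorded as candidate
    # breaks; anything past the last t1 crossing is discarded as a trailing end.
    # Returns (break_segments, support_curve).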
# print("zz:",scaffold,sum([b-a for a,b in mask]),[ (a,b,b-a) for a,b in mask])
curve=[]
if gpf: gpf.write("\n\n\n")
# if True: print("fine",len(edges),debug)
if len(edges)==0: return([],[])
if edges[-1][0]<0 or edges[-1][0] > slen+1:
print("unexpected coordinate {} not in range 0-{}".format(edges[-1][0],slen))
raise Exception
edges.sort()
nb.sort()
rs=0 # running sum for individual reads
n=0
    tripped=False # has the score exceeded t1 yet? (support is low near scaffold ends by construction, so only break once it has)
last=0
state=0
stretch_start=0
low_point=0.0
ji=0
gap_scores={}
gap_coverages={}
# slen = mapper.scaffold_length[scaffold]
break_buffer = []
last_trip=0
a=0
b=0
c=0
f=0 # index in masked segment pairs
mask_correction_rs=0.0
next_sample = hist_sample_interval /2
for i in range(len(edges)):
rs+=edges[i][1]
n+=nb[i][1] # number of pairs spanning
x=edges[i][0] # position
while f<len(masked_segment_pairs_n_minus_n0) and masked_segment_pairs_n_minus_n0[f][0]<x:
mask_correction_rs -= masked_segment_pairs_n_minus_n0[f][1]
f +=1
        # update iterators tracking which masked regions are "in range": mask[a:b] are in range and trail x, mask[b:c] are in range and upcoming.
while a<len(mask) and mask[a][1]<x-maxx: a+=1
while b<len(mask) and mask[b][0]<x: b+=1
while c<len(mask) and mask[c][0]<x+maxx: c+=1
# -----------------------------------------
# a bx c
mask_correction = 0.0
# rmi = right mask index
for rmi in range(b,min(c+1,len(mask))):
ma,mb = mask[rmi]
if x>ma: continue
left_limit = max(0,ma-maxx)
if (x-left_limit)<0: continue
mask_correction_delta = model.n_bar( mb-ma, x-left_limit, ma-x ) - model.n_bar0( (mb-ma)*(x-left_limit ) )
mask_correction += mask_correction_delta
if debug: print(x,ma-x,a,b,c,left_limit,"Right",mask_correction_delta,sep="\t")
# left mask index
for lmi in range(a,b):
ma,mb = mask[lmi]
if x<mb:
if ma<x and x<mb:
right_limit = min(slen,mb+maxx)
#left_limit = min(slen,mb-maxx)
left_limit = max(0,ma-maxx)
mask_correction_delta1 = model.n_bar( mb-x, x-left_limit, 0 ) - model.n_bar0( (mb-x)*(x-left_limit) )
try:
mask_correction_delta2 = model.n_bar( x-ma, right_limit-x, mb-x ) - model.n_bar0( (x-ma)*(right_limit-x) )
except Exception as e:
#wtf: scaffold: 7396, x: 1006292, ma: 686903, mb: 1006300, rl: 886903, ll: 486903
print("wtf: scaffold: {scaffold}, x: {x}, ma: {ma}, mb: {mb}, rl: {right_limit}, ll: {left_limit}".format(scaffold=scaffold,x=x,ma=ma,right_limit=right_limit,mb=mb,left_limit=left_limit))
raise e
mask_correction += mask_correction_delta1 + mask_correction_delta2
if debug: print(x,x-mb,a,b,c,left_limit,right_limit,"Spanned",mask_correction_delta1,mask_correction_delta2,sep="\t")
continue
right_limit = min(slen,mb+maxx)
if right_limit - x < 0: continue
mask_correction_delta = model.n_bar( mb-ma, right_limit-x, x-mb ) - model.n_bar0( (mb-ma)*(right_limit-x ) )
mask_correction += mask_correction_delta
if debug: print(x,x-mb,a,b,c,right_limit,"Left",mask_correction_delta,sep="\t")
# take our running sum score and see if we should cut here, fine grained not clipping
try:
            # apply the mask corrections once on top of the uncorrected cutScore LLR
            score=model.cutScore(slen,x,n,rs,rangeCutoff=maxx,minSep=minx)
            score+=mask_correction+mask_correction_rs
            if debug: print("finescore:",scaffold,x,a,b,c,len(mask),score,mask_correction,mask_correction_rs,sep="\t")
except Exception as e:
print("Exception computing cutScore for scaffold {} at {}. i={}".format(scaffold,x,i),edges[:10])
#Exception computing cutScore for scaffold 8351 at 12290. i=2 [(11306, -8.997670875606678, 'a', 'a'), (11686, -8.578118407318865, 'a', 'a'), (12290, 8.578118407318865, 'a', 'b'), (12297, 8.997670875606678, 'a', 'b')]
# print(i,edges[i],nb[i],rs,n,x,scaffold,slen,len(edges))
raise e
# print("#xxxxx",x,next_sample,raw_hist)
# print(support_curve)
if support_curve:
curve.append((x,score))
# print("##yield")
# yield (x,score)
if (not raw_hist==False) and x>next_sample:
if min(x,slen-x)>scaffold_end_filter:
next_sample += hist_sample_interval
hist_bin=int(score/hist_bisize)*hist_bisize
raw_hist[ hist_bin ] = raw_hist.get(hist_bin,0)+1
# print("#hist bin",hist_bin,score,raw_hist[ hist_bin ])
# sys.stdout.flush()
        # record the score and spanning-pair count at each gap between contigs (possibly obsolete)
while ji<len(joins) and x>joins[ji][0]:
gap_scores[ji] =score
gap_coverages[ji] =n
ji+=1
if score>t1:
tripped=True
last_trip=x
if tripped and score<t2 and state==0:
stretch_start=x
state=1
low_point =score
low_x = x
        # Score recovered above t2: close out the candidate break region (state goes back to 0)
if state==1 and score>t2:
if logfile:
break_buffer.append( (scaffold,stretch_start,x,low_x,low_point,slen) )
# print(scaffold,stretch_start,x,slen,low_point,"break")
state=0
if state==1:
if score<low_point:
low_point=score
low_x=x
if debug: print("dd:",scaffold,edges[i][0],score,rs,state,x,stretch_start,score,n,edges[i][2])
if gpf: gpf.write("{}\t{}\t{}\t{}\n".format(edges[i][0],score,rs,n))
last=edges[i][0]
segments = []
for scaffold,stretch_start,x,low_x,low_point,slen in break_buffer:
if x < last_trip:
logfile.write("{} {} {} {} {} {} rawLLR\n".format(scaffold,stretch_start,x,low_x,low_point,slen))
segments.append((scaffold,stretch_start,x,low_x,low_point))
return segments,curve
# for ji in range(len(joins)):
# print("\t".join(map(str,["gapscore:",ji]+list(joins[ji])+[gap_scores.get(ji,0),gap_coverages.get(ji,0)])))
def pairs2support(scaffold,
pairs, # internal to scaffold
model,
slen=0,
masked_segments=[],
mapper=None,
buff=0,
debug=False,
t1=20.0,
t2=5.0,
gpf=False, # gnu plot file handle?
minx=1000,
maxx=1e7,
joins=[],
logfile=False,
binwidth=1000,
                  nreject=2, # how many rows or columns to toss out
raw_hist=False,
clipped_hist=False,
support_curve=False):
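    # Two scoring passes per scaffold: (1) fine_grained_support evaluates the
    # exact LLR at every read-pair edge; (2) a "clipped" LLR is computed on a 2-D
    # grid of binwidth-sized tiles, where the nreject highest-scoring rows/columns
    # can be dropped so a few promiscuous regions cannot prop up the support on
    # their own.  Clipped-LLR breaks are written out annotated with the lowest
    # score of any overlapping fine-grained segment.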
# slen=mapper.scaffold_length[scaffold]
# if debug: print("#masked:",scaffold,len(masked_segments),masked_segments[:10])
# print("#",scaffold,masked_segments,file=logfile,sep="\t")
logfile.flush()
edges=[] # pairs
nb=[] # buffer to keep track of how many pairs cover
tile_scores={}
tile_counts={}
tile_reads={}
maxtile=0
# masked_segments=[]
for p in pairs:
if len(p)>=16: # old style, mostly ignore the additional info now
if p[1]<p[2]:
a,b,c1,c2,w,z=p[1],p[2],p[3],p[4],p[6],p[7]
else:
a,b,c1,c2,w,z=p[2],p[1],p[4],p[3],p[7],p[6]
# masked_segments = p[17]
tid=p[16]
else:
# a,b = p[0],p[1]
# Order the coordinates
a=min( p[0],p[1])
b=max( p[0],p[1])
c1,c2="a","a"
tid="x"
if a>slen or b> slen:
print("how could a read be at x > slen?",a,b,slen)
raise Exception
# Throw away really far and short innies
if abs(b-a) > maxx: continue
if abs(b-a) < minx: continue
        # insert-size log likelihood
ll=model.lnF(b-a)
if debug: print("pt:",scaffold,a,b,b-a,tid,ll-model.lnFinf)
#For the old-style exact LLR score, size of "buff" is taken off
# to be conservative about support
edges.append( tuple([a+buff , ll,c1,"a"]) )
edges.append( tuple([b-buff +1 , -ll,c2,"b"]) )
nb.append( tuple([a+buff , 1]) )
nb.append( tuple([b-buff +1 ,-1]) )
#For the new-style clipped LLR score add to appropriate tile
        tile = pair2bin2D((a,b),binwidth) # tile is a (row, column) tuple
maxtile = max(maxtile,tile[0],tile[1]) # furthest tiles seen for size?
tile_scores[tile] = tile_scores.get(tile,0.0)+ll
tile_counts[tile] = tile_counts.get(tile,0)+1
        # kept for debugging; only used by the debug output below
tile_reads[tile] = tile_reads.get(tile,[]) + [tid]
masked_segment_pairs_n_minus_n0 = []
for i in range(len(masked_segments)):
a,b = masked_segments[i]
for j in range(i+1,len(masked_segments)):
c,d = masked_segments[j] # pair of masked segments: a---b c-----d
if c-b > maxx: continue # gap = c-b
if b-a<0:
print("#wtf?",scaffold,(a,b),(c,d),i,j,file=logfile,sep="\t")
logfile.flush()
raise Exception
if d-c<0:
print("#wtf?",scaffold,(a,b),(c,d),i,j,file=logfile,sep="\t")
logfile.flush()
raise Exception
if c-b<0:
print("#wtf?",scaffold,(a,b),(c,d),i,j,file=logfile,sep="\t")
logfile.flush()
raise Exception
n_bar = ces.model.n_bar(b-a,d-c,c-b)
n_bar0= ces.model.n_bar0((b-a)*(d-c))
if debug: print("X",i,j,(a,b),(c,d),b-a,d-c,c-b,n_bar,n_bar0,sep="\t")
masked_segment_pairs_n_minus_n0.append( (b, (n_bar-n_bar0)) )
masked_segment_pairs_n_minus_n0.append( (c,-(n_bar-n_bar0)) )
masked_segment_pairs_n_minus_n0.sort()
fine_grain_segments,fine_grain_support_curve=fine_grained_support(edges,nb,scaffold,pairs,model,buff,debug,t1,t2,gpf,minx,maxx,joins,logfile,binwidth,slen,masked_segments,masked_segment_pairs_n_minus_n0=masked_segment_pairs_n_minus_n0,raw_hist=raw_hist,support_curve=support_curve)
if debug:
print("w:",scaffold,slen,len(fine_grain_segments),len(edges),sep="\t")
for a,b in masked_segments:
print(a,b)
if a>slen:
print("a>slen",a,slen)
raise Exception
if b>slen:
print("b>slen",b,slen)
raise Exception
# clipped LLR score:
tile_bumps=[]
# print("#maxtile=",maxtile,strftime("%Y-%m-%d %H:%M:%S"),file=logfile,sep="\t")
# logfile.flush()
mask_iter_i=0
for i in range(maxtile+1):
# print("#i=",i,strftime("%Y-%m-%d %H:%M:%S"),file=logfile,sep="\t")
# logfile.flush()
while mask_iter_i<len(masked_segments) and masked_segments[mask_iter_i][1]<i*binwidth: mask_iter_i+=1
j=i
mask_iter_j=mask_iter_i
while ((j-i)*binwidth<maxx) and j<=maxtile:
tile=(i,j)
# print("#tile=",(i,j),strftime("%Y-%m-%d %H:%M:%S"),file=logfile,sep="\t")
# logfile.flush()
tscore = tile_scores.get(tile,0.0)
score = model.tileScore(binwidth,tile,tile_counts.get(tile,0),tscore,rangeCutoff=maxx,minSep=minx)
# print("#score=",score,masked_segments,i,j,mask_iter_i,mask_iter_j,strftime("%Y-%m-%d %H:%M:%S"),file=logfile,sep="\t")
# logfile.flush()
mask_score_delta = tile_mask_score_delta(i,j,binwidth,masked_segments,model,mask_iter_i,mask_iter_j,debug)
score+= mask_score_delta
# print("#mask_score_delta=",mask_score_delta,strftime("%Y-%m-%d %H:%M:%S"),file=logfile,sep="\t")
# logfile.flush()
if debug:
print("tile:",scaffold,tile[0],tile[1],tile[0]*binwidth,tile[1]*binwidth,tscore,tile_counts.get(tile,0.0),score,mask_score_delta,sep="\t")
for read in tile_reads.get(tile,[]):
print("tileread:",tile,read)
if not i==j:
tile_bumps.append( (i*binwidth,i,j, score, 1) )
tile_bumps.append( (j*binwidth,i,j,-score,-1) )
j+=1
while mask_iter_j<len(masked_segments) and masked_segments[mask_iter_j][1]<j*binwidth: mask_iter_j+=1
print("#done making tile bumps. len(tile_bumps)=",len(tile_bumps),strftime("%Y-%m-%d %H:%M:%S"),file=logfile,sep="\t")
logfile.flush()
if debug:
for tile in tile_scores.keys():
print("tile:",scaffold,tile[0],tile[1],tile[0]*binwidth,tile[1]*binwidth,tile_scores[tile],tile_counts[tile],model.tileScore(binwidth,tile,tile_counts[tile],tile_scores[tile],rangeCutoff=maxx,minSep=minx),sep="\t")
for read in tile_reads[tile]:
print("tileread:",tile,read)
tile_bumps.sort()
print("#done sorting tile bumps",strftime("%Y-%m-%d %H:%M:%S"),file=logfile,sep="\t")
logfile.flush()
tripped=False
row_sums={}
col_sums={}
row_counts={}
col_counts={}
break_buffer = []
stretch_start=0
minx=0
state=0
low_point=0
ii=0
last_trip=0
while ii<len(tile_bumps):
x,i,j,scoreD,dn = tile_bumps[ii]
row_sums[i] = row_sums.get(i,0.0) + scoreD
col_sums[j] = col_sums.get(j,0.0) + scoreD
row_counts[i] = row_counts.get(i,0) + dn
col_counts[j] = col_counts.get(j,0) + dn
if dn==-1 and row_counts[i]==0: del row_sums[i]
if dn==-1 and col_counts[j]==0: del col_sums[j]
if ii%100000==0:
print("#progress: ii= {} / {}".format(ii,len(tile_bumps)),x,i,j,scoreD,strftime("%Y-%m-%d %H:%M:%S"),file=logfile,sep="\t")
logfile.flush()
while ii<len(tile_bumps)-1 and tile_bumps[ii+1][0]==x:
ii+=1
x,i,j,scoreD,dn = tile_bumps[ii]
row_sums[i] = row_sums.get(i,0.0) + scoreD
col_sums[j] = col_sums.get(j,0.0) + scoreD
row_counts[i] = row_counts.get(i,0) + dn
col_counts[j] = col_counts.get(j,0) + dn
if dn==-1 and row_counts[i]==0: del row_sums[i]
if dn==-1 and col_counts[j]==0: del col_sums[j]
total = sum(row_sums.values())
row_vals = list(row_sums.values())
col_vals = list(col_sums.values())
row_vals.sort()
col_vals.sort()
without_best_row = sum(row_vals[:-nreject]) #total - max(row_sums.values())
without_best_col = sum(col_vals[:-nreject]) #total - max(col_sums.values())
trimmed_total = min(without_best_row,without_best_col,total)
score=trimmed_total
if (not clipped_hist==False):
if min(x,slen-x)>scaffold_end_filter:
hist_bin=int(score/hist_bisize)*hist_bisize
clipped_hist[ hist_bin ] = clipped_hist.get(hist_bin,0)+1
if score>t1:
tripped=True
last_trip=x
if tripped and score<t2 and state==0:
stretch_start=x
state=1
low_point =score
minx = x
if debug: print("trimmed_support",x,trimmed_total,total,without_best_row,without_best_col,tripped,maxtile*binwidth,scaffold,tripped,score<t2,state,low_point,minx,stretch_start,last_trip)
if state==1 and score>t2:
if logfile:
break_buffer.append( (scaffold,stretch_start,x,minx,low_point,slen) )
# print(scaffold,stretch_start,x,slen,low_point,"break")
state=0
if state==1:
if score<low_point:
low_point=score
minx=x
ii+=1
print("#done building breaks buffer",strftime("%Y-%m-%d %H:%M:%S"),file=logfile,sep="\t")
logfile.flush()
if state==1:
if logfile:
break_buffer.append( (scaffold,stretch_start,x,minx,low_point,slen) )
for scaffold,stretch_start,x,minx,low_point,slen in break_buffer:
if (x < last_trip) or (minx < last_trip):
min_fine_graph_segment_overlap_score=False
for fg_scaffold,fg_stretch_start,fg_x,fg_minx,fg_low_point in fine_grain_segments:
if pairs_overlap((fg_stretch_start,fg_x),(stretch_start,x)):
if (not min_fine_graph_segment_overlap_score) or (min_fine_graph_segment_overlap_score > fg_low_point):
min_fine_graph_segment_overlap_score = fg_low_point
logfile.write("{} {} {} {} {} {} {} clippedLLR\n".format(scaffold,stretch_start,x,minx,low_point,slen,min_fine_graph_segment_overlap_score))
return fine_grain_support_curve
if __name__=="__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-d','--debug',default=False,action="store_true",help="Turn on debugging output")
parser.add_argument('-p','--progress',default=False,action="store_true",help="Print progress info")
parser.add_argument('-l','--layout',default=False,help="File containing scaffolding layout.")
parser.add_argument('-s','--segments',default=False,help="File containing scaffolding segments.")
# parser.add_argument('-L','--length',default=False,help="File containing lenghts.")
parser.add_argument('-m','--mask',default=False,help="File containing segments to mask.")
parser.add_argument('-g','--plotfiles',default=False,help="Plot file name prefix.")
parser.add_argument('-f','--logfile',default=False,help="Output file for storing score segments.")
parser.add_argument('-q','--mapq',type=int,default=55)
parser.add_argument('-t','--t1',type=float,default=50.0,help="Don't break in the trailing regions at the end of scaffolds where support never exceeds this number.")
parser.add_argument('-T','--t2',type=float,default=25.0,help="Report all segments where support dips below this threshold, if they are not in the trailing ends.")
parser.add_argument('-w','--binwidth',type=float,default=3000.0)
parser.add_argument('-n','--nreject',type=int,default=2)
parser.add_argument('-c','--my_chunk',type=int,default=1)
parser.add_argument('-C','--nchunks',type=int,default=32)
parser.add_argument('--minx',type=int,default=1000, help=" ")
parser.add_argument('--maxx',type=int,default=200000,help=" ")
parser.add_argument('-S','--scaffold',default=False)
# parser.add_argument('-b','--bamfiles',required=True)
parser.add_argument('-b','--bamfile',action="append")
parser.add_argument('-M','--model')
args = parser.parse_args()
if args.progress: print("#",args)
fmodel=open( args.model )
contents = fmodel.read()
try:
fit_params=eval(contents)
except:
print("couldn't set model parameters", args.model)
    fmodel.close()
ces.set_exp_insert_size_dist_fit_params(fit_params)
if args.mask:
mask_ranges = read_mask_ranges( open(args.mask) )
else:
mask_ranges={}
#
# Read in a hirise layout (from "^p:" lines)
#
mapper = MapperLite()
mapper.load_layout(open(args.layout))
if args.scaffold:
my_scaffolds={args.scaffold:1}
else:
my_scaffolds={}
scaffold_hashes={}
for s in list(mapper.scaffolds.keys()):
scaffold_hashes[s]=struct.unpack("<L", hashlib.md5(s.encode("utf-8")).digest()[:4])[0]%args.nchunks
if scaffold_hashes[s]==args.my_chunk:
my_scaffolds[s]=1
if args.debug: print("my scaffold:",s)
bamlist = [ pysam.Samfile(bamfile,"rb") for bamfile in args.bamfile ]
segment_logfile=False
if args.logfile: segment_logfile = open(args.logfile,"wt")
for sc in sorted(my_scaffolds.keys()):
ncontigs=len(mapper.scaffold_contigs[sc])
slen=mapper.scaffold_length[sc]
if not ncontigs>1: continue
print("sc:",sc,ncontigs,slen)
# print(sc,mapper.scaffold_contigs[sc])
fp=False
contigs = sorted(mapper.scaffold_contigs[sc],key=lambda x: mapper.contigx[x])
ii=0
gap_locations=[]
for i in range(len(contigs)-1):
con = contigs[i]
x= max(mapper.contigx[con]+mapper.contig_strand[con]*mapper.contig_length[con],mapper.contigx[con])
con2 = contigs[i+1]
y= min(mapper.contigx[con2]+mapper.contig_strand[con2]*mapper.contig_length[con2],mapper.contigx[con2])
gap_locations.append((int((x+y)/2),con,con2))
if args.plotfiles:
fn="{}{}".format(args.plotfiles,sc)
fp=open(fn,"wt")
print("#plot \"{}\" i 2 u 1:2 w steps, \"\" i 1 u 1:($2/20) lt 3 pt 5 ps 0.7, \"\" i 0 u 1:(($2-5)*100) w steps lt 3, -500 lt 3".format(fn))
ii=0
fp.write("0\t0\n")
for con in contigs:
ii+=1
ii=ii%2
if args.debug: print(con,mapper.contigx[con],mapper.contig_strand[con],mapper.contig_length[con],slen)
if mapper.contig_strand[con]==1:
fp.write("{}\t{}\n".format( mapper.contigx[con],2*ii-1 ))
fp.write("{}\t{}\n".format( mapper.contigx[con]+mapper.contig_length[con],0 ))
else:
fp.write("{}\t{}\n".format( mapper.contigx[con]-mapper.contig_length[con],2*ii-1 ))
fp.write("{}\t{}\n".format( mapper.contigx[con],0 ))
fp.write("\n\n\n")
masked_segments = make_scaffold_mask(sc,mapper,mask_ranges)
pairs2support(sc,chicago_pairs(sc,mapper,bamlist,minq=args.mapq,mask=mask_ranges),ces.model,masked_segments=masked_segments,slen=mapper.scaffold_length[sc],buff=1,debug=args.debug,gpf=fp,joins=gap_locations,minx=args.minx,maxx=args.maxx,logfile=segment_logfile,t1=args.t1,t2=args.t2,binwidth=args.binwidth,nreject=args.nreject)
if fp: fp.close()
```
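The break calling in `fine_grained_support` (and in the clipped-LLR pass of `pairs2support`) is a hysteresis on the support curve: once the score has exceeded `t1` ("tripped"), any stretch where it falls below `t2` and later recovers is reported together with its lowest point, and stretches in the trailing region after the last `t1` crossing are discarded. A self-contained sketch of that state machine (toy scores, not real Chicago support values):

```python
# Hysteresis break caller: report (start, end, low_x, low_score) for every dip
# below t2 that occurs after the score has exceeded t1, skipping trailing dips.
def call_breaks(curve, t1=50.0, t2=25.0):
    tripped, state, last_trip = False, 0, 0
    stretch_start = low_x = 0
    low_point = 0.0
    breaks = []
    for x, score in curve:
        if score > t1:
            tripped, last_trip = True, x
        if tripped and score < t2 and state == 0:
            stretch_start, state, low_point, low_x = x, 1, score, x
        elif state == 1 and score > t2:
            breaks.append((stretch_start, x, low_x, low_point))
            state = 0
        elif state == 1 and score < low_point:
            low_point, low_x = score, x
    return [b for b in breaks if b[1] < last_trip]

print(call_breaks([(0, 5), (10, 60), (20, 10), (30, 3), (40, 70), (50, 80)]))
# -> [(20, 40, 30, 3)]
```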
#### File: docker-hirise/scripts/edgelist2oo.py
```python
from __future__ import print_function
from builtins import range
import networkx as nx
import sys
import greedy_chicagoan2
def edges2oo(f):
g=nx.Graph()
ll={}
scaffolds={}
scaffold={}
while True:
l = f.readline()
if not l: break
if l[:6]=="#edge:": #continue
c = l.strip().split()
v=eval(" ".join(c[3:]))
if v['contig']: ll[c[1][:-2]]=v['length']
g.add_edge(c[1],c[2],**v)
if l[:3]=="cc:": #continue
c = l.strip().split()
scn=int(c[1])
scl=eval(" ".join(c[3:]))
scaffolds[scn]=scl
for s in scl:
scaffold[s]=scn
contigs=[]
strand={}
scaffold={}
coords={}
facing={}
ccn=1
for c in nx.connected_components(g):
# print "#",len(c)
ends=[]
for cc in c:
scaffold[cc]=ccn
if g.degree(cc)==1:
ends.append(cc)
ccn+=1
order = list(nx.dfs_preorder_nodes(g,ends[0]))
# def traverse_and_layout(n,coords,facing,x,s,og,max_disp=False):
# """Traverse the nodes in og, from node n. Give node n position x. s==1 for increasing coords, -1 for decreasing. store in coords and facing the position and 'side' of each end respectively.
# Stop traversing if you hit max_disp (optional)."""
greedy_chicagoan2.traverse_and_layout(ends[0],coords,facing,0,1,g)
order1=[]
for i in range(0,len(order),2):
print(order)
if not order[i][:-2]==order[i+1][:-2]:
print("wtf", i,order)
exit(0)
if order[i][-1]=="5":
strand[order[i][:-2]]="+"
else:
strand[order[i][:-2]]="-"
order1.append(order[i][:-2])
contigs.append(order1)
return(contigs,strand,ll,scaffolds,scaffold,coords)
if __name__=="__main__":
import argparse
parser = argparse.ArgumentParser()
#parser.add_argument('-i','--input',default="gj4.out")
parser.add_argument('-l','--links')
parser.add_argument('-w','--window',type=int,default=3)
args = parser.parse_args()
print("#",args)
contigs,strand,ll,scaffolds,scaffold,coords = edges2oo(sys.stdin)
for cl in contigs:
sl=max( [ coords[c+e] for c in cl for e in (".5",".3")] )
cl.sort(key=lambda x: coords[x+".5"])
for c in cl:
if coords[c+".5"] < coords[c+".3"]:
ends = (".5",".3")
else:
ends = (".3",".5")
for e in ends:
print("p:",scaffold[c+e],c+e,coords[c+e],"-",-1,sl,ll[c],1==2)
#p: 1 Scaffold76818_1.3 790422 - -1 5832906 32362 False
# print [strand[cc] for cc in c]
```
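`edges2oo` walks each connected component of the contig-end graph from a degree-1 end using a depth-first preorder; consecutive pairs of visited nodes name the contigs, and whichever end (`.5` or `.3`) of a contig is visited first fixes its strand. A small, self-contained illustration of that idea (toy contig names, not HiRise output):

```python
# Recover contig order and strand from a path over contig ends, as edges2oo does.
import networkx as nx

g = nx.Graph()
g.add_edge("A.5", "A.3")   # internal edge of contig A
g.add_edge("A.3", "B.3")   # join between A and B
g.add_edge("B.3", "B.5")   # internal edge of contig B

start = [n for n in g.nodes() if g.degree(n) == 1][0]   # a scaffold end
order = list(nx.dfs_preorder_nodes(g, start))
contigs, strand = [], {}
for i in range(0, len(order), 2):
    name = order[i][:-2]
    strand[name] = "+" if order[i].endswith(".5") else "-"
    contigs.append(name)
print(contigs, strand)   # e.g. ['A', 'B'] {'A': '+', 'B': '-'}
```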
#### File: docker-hirise/scripts/hirise_assembly.py
```python
import os
import re
import pysam
from bamtags import BamTags
import gc
import random
import bisect
import numpy as np
def parse_pline(l,scaffold_lengths):
c=l.strip().split()
m=re.match("(.*)_(\d+)\.([35])",c[2])
scaffold_lengths[c[1]]=int(c[6])
return( c[1],m.group(1),int(m.group(2)),m.group(3),int(c[3]),int(c[7]) )
def parse_pline2(l,scaffold_lengths):
c=l.strip().split()
base=int(c[3])
x=int(c[5])
scaffold_lengths[c[1]]=max(scaffold_lengths.get(c[1],0),x)
return( c[1],c[2],base,c[4],x,int(c[6]) )
def aln2bins(aln,binsize):
return( ( aln.tid,int(aln.pos/binsize) ),( aln.rnext,int(aln.pnext/binsize)))
class GrowingNPArray:
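    """Append-only buffer over a NumPy structured array that doubles its capacity
    when full; finalize() returns a copy trimmed to the elements actually added."""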
def __init__(self,dtype):
self.data = np.zeros((10000,),dtype=dtype)
self.N=10000
self.n=0
self.dtype = dtype
def append(self,d):
if self.n==self.N:
self.N *=2
dd = np.zeros((self.N,),dtype=self.dtype)
dd[:self.n] = self.data
self.data = dd
self.data[self.n]=d
self.n+=1
def finalize(self):
dd = np.zeros((self.n,),dtype=self.dtype)
dd[:self.n] = self.data[:self.n]
return dd
def pairs_overlap(x,y):
a=min(x[0],x[1])
b=max(x[0],x[1])
c=min(y[0],y[1])
d=max(y[0],y[1])
if a<=c and c<=b: return True
if a<=d and d<=b: return True
if c<=a and a<=d: return True
if c<=b and b<=d: return True
return False
class HiriseAssembly:
"""This class is designed to encapsulate a Hirise Assembly."""
def __init__(self,options={}):
"""Options currently used are: bams, bam, datamodel, layout"""
self.bams=[]
if options.get("bams"): self.bams+=list(options["bams"])
if options.get("bam") : self.bams +=[options["bam"]]
self.model_params=False
self.binsize=10000
self.bam_objects=False
self.shotgun_bam_objects=False
if "bam" in options or "bams" in options:
self.load_ocontig_info()
if "datamodel" in options and options["datamodel"]:
self.set_modelparams(options["datamodel"])
self.layout_lines=[]
if options.get("layout"):
self.load_playout(options["layout"])
self.masked_regions={}
self.check_bams()
self.ocontig_fa=False
self.ocontig_fasta=False
if "shotgun" in options:
self.shotgun = options["shotgun"]
else:
self.shotgun = []
self.contigx=False
def get_seq(self,contig):
if not self.ocontig_fasta:
raise Exception
if not os.path.exists(self.ocontig_fasta+".fai"):
pysam.faidx(self.ocontig_fasta)
if not self.ocontig_fa:
self.ocontig_fa = pysam.Fastafile(self.ocontig_fasta)
m=re.match("(.*)_(\d+)",contig)
ocontig=m.group(1)
x=int(m.group(2))
y=x+self.contig_length(contig)
leng=self.ocontig_lengths[ocontig]
#return self.ocontig_lengths[m.group(1)]
#dummy,scaffold,x,y,leng = l.strip().split()
#x=int(x)
#y=int(y)
#leng=int(leng)
# y=min(y,leng)
seq = self.ocontig_fa.fetch(ocontig,x-1,y)
return(seq)
def contig_length(self,contig):
if contig in self.contig_lengths:
return self.contig_lengths[contig]
else:
m=re.match("(.*)_(\d+)",contig)
return self.ocontig_lengths[m.group(1)]
return 0
def contig_end_to_region(self,contig_end_string,w):
((this_contig,this_base,this_end),) = re.findall("^(.*)_(\d+)\.([53])$",contig_end_string)
print(this_contig,this_base,this_end)
this_base = int(this_base)
# this_end = int(this_end)
for scaffold,contig,base,end,x,l in self.layout_lines:
slen = self.scaffold_lengths[scaffold]
if this_contig == contig and this_base == base and this_end == end:
return(scaffold,max(0,x-w),min(slen,x+w),(x<slen-x))
print("so such end found",contig_end_string)
raise Exception
def index_ocontigs(self):
self.ocontig_bases = {}
self.contig_lengths = {}
self.contig_scaffold= {}
self.load_ocontig_info()
# self.scaffold_ocontigs={}
ocb=self.ocontig_bases
for scaffold,contig,base,end,x,l in self.layout_lines:
# if not scaffold in self.scaffold_ocontigs:
# self.scaffold_ocontigs[scaffold]={}
# self.scaffold_ocontigs[scaffold][self.contig_ocontig[contig]]=1
self.contig_scaffold[contig,base]=scaffold
if end in [5,"5"] and not base==1:
ocb[contig] = ocb.get(contig,[])+[ base ]
if end in [5,"5"]:
self.contig_lengths[ contig+"_"+str(base) ]=l
self.contig_lengths[ contig,base ]=l
for b in ocb.keys():
ocb[b].sort()
#print("ocb:",b,ocb[b])
self.total_ocontig_length = sum(self.ocontig_lengths.values())
self.rc_buffer=[]
x=0
for i in self.ocontigs_iter():
self.rc_buffer.append((x,i))
x+=self.ocontig_lengths[i]
def broken_coord(self,ocontig,x,tuples=False):
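        # Map a position x on an original contig onto the corresponding broken
        # contig piece and the offset within that piece.  Returns
        # ("<ocontig>_<base>", offset) strings by default, ((ocontig, base), offset)
        # tuples when tuples=True, and (False, False) when the position cannot be
        # assigned to a piece.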
if tuples:
if not ocontig in self.ocontig_bases:
if (not (ocontig,1) in self.contig_lengths ) or x < self.contig_lengths[(ocontig,1)]:
# print(ocontig,"1")
return((ocontig,1), x)
else:
# print(ocontig,"2")
return((ocontig, self.contig_lengths[(ocontig,1)]), x)
for b in self.ocontig_bases[ocontig][::-1]:
if b<x:
new_contig = (ocontig,b)
#print("zxzx",ocontig,x,b,new_contig,self.contig_lengths.get(new_contig,False))
if (not new_contig in self.contig_lengths) or (x-b)<self.contig_lengths[new_contig]:
# print(ocontig,"3")
return((ocontig,b), x-b)
else:
return (False,False)
new_contig = (ocontig,1)
if (not new_contig in self.contig_lengths) or x < self.contig_lengths[new_contig]:
# print(ocontig,"4",new_contig,new_contig in self.contig_lengths)
return(new_contig, x)
else:
return (False,False)
else:
if not ocontig in self.ocontig_bases:
if (not ocontig+"_1" in self.contig_lengths ) or x < self.contig_lengths[ocontig+"_1"]:
return(ocontig+"_1", x)
else:
return(ocontig+"_"+str( self.contig_lengths[ocontig+"_1"] ), x)
for b in self.ocontig_bases[ocontig][::-1]:
if b<x:
new_contig = ocontig+"_"+str(b)
#print("zxzx",ocontig,x,b,new_contig,self.contig_lengths.get(new_contig,False))
if (not new_contig in self.contig_lengths) or (x-b)<self.contig_lengths[new_contig]:
return(ocontig+"_"+str(b), x-b)
else:
return (False,False)
new_contig = ocontig+"_1"
if (not new_contig in self.contig_lengths) or x < self.contig_lengths[new_contig]:
return(new_contig, x)
else:
return (False,False)
def merge_masked_regions(self,debug=False):
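        # Collapse overlapping masked intervals on each original contig by sweeping
        # over interval start (+1) and end (-1) edges.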
mr = self.masked_regions
for ocontig in mr.keys():
new_regions=[]
edges = []
for x,y in mr[ocontig]:
edges.append((x,1))
edges.append((y,-1))
edges.sort(key=lambda x:(x[0],-x[1]))
rs=0
state=0
for x,b in edges:
rs+=b
if state==0 and rs>0:
start=x
state=1
elif state==1 and rs==0:
end=x
new_regions.append((start,end))
state=0
if debug and not (len(mr[ocontig])==len(new_regions)):
print("#mmr",ocontig,len(mr[ocontig]),len(new_regions),mr[ocontig],new_regions)
mr[ocontig]=new_regions
def add_mask_regions(self,segments=[],filename=False):
masked_regions = self.masked_regions
if filename:
for line in open(filename):
c=line.strip().split()
ocontig,x,y = c[0],int(c[1]),int(c[2])
if not ocontig in masked_regions: masked_regions[ocontig]=[]
masked_regions[ocontig].append((x,y))
for ocontig,x,y in segments:
if not ocontig in masked_regions: masked_regions[ocontig]=[]
masked_regions[ocontig].append((x,y))
for oc in masked_regions.keys():
masked_regions[oc].sort()
def setup_mapper(self,debug=False):
# debug=True
self.contigx={}
self.contigst={}
cex={}
for scaffold,contig,base,end,x,l in self.layout_lines:
if end in [5,"5"]:
self.contigx[contig,base] = x
cex[contig,base,end]=x
for contig,base in self.contigx.keys():
if cex[contig,base,"5"] < cex[contig,base,"3"]:
self.contigst[contig,base]=1
else:
self.contigst[contig,base]=-1
# if debug:
# for contig,base in self.contigx.keys():
# print("m:",contig,base,self.contigx[contig,base],self.contigst[contig,base])
def scaffolds_iter(self,debug=False):
if not self.layout_lines or len(self.layout_lines)==0:
self.make_trivial_layout_lines(debug=debug)
self.validate()
last_scaffold=False
for scaffold,contig,base,end,x,l in self.layout_lines:
if (not last_scaffold) or not scaffold==last_scaffold:
last_scaffold=scaffold
# print("#",scaffold)
yield(scaffold)
def scaffold_coord(self,c,x,debug=False):
if not self.contigx: self.setup_mapper(debug=debug)
y=self.contigx[c]+self.contigst[c]*x
if debug: print("sc:",c,x,y)
return y
def chicago_support_scan(self,scaffold,minsupport=0.0,mapq=10,debug=False,minx=0,maxx=1e6,gpf=False,logfile=False):
import chicago_edge_scores as ces
ces.set_exp_insert_size_dist_fit_params(self.model_params)
model=ces.model
links={}
self.get_scaffold_links(scaffold,skipI=False,mapq=mapq,links=links,debug=debug,tuples=True)
# model = ces.
pairs=[]
edges=[]
buff=100
nb=[]
joins=[]
t1=50.0
t2=minsupport
slen=self.ocontig_lengths[scaffold]
for c1,c2 in links.keys():
if debug: print(c1,c2,links[c1,c2])
for x,y in links[c1,c2]:
pairs.append( ( self.scaffold_coord(c1,x,debug=debug),self.scaffold_coord(c2,y,debug=debug)) )
for a,b in pairs:
a,b=min(a,b),max(a,b)
if debug:
print("z",a,b)
if (b-a)>minx and (b-a)<maxx:
                if gpf: gpf.write("{}\t{}\n".format(0.5*(a+b),0.5*(b-a)))
ll=model.lnF(b-a)
edges.append( tuple([a+buff , ll,c1,"a"]) )
edges.append( tuple([b-buff ,-ll,c2,"b"]) )
nb.append( tuple([a+buff , 1]) )
nb.append( tuple([b-buff ,-1]) )
if gpf: gpf.write("\n\n\n")
edges.sort()
nb.sort()
rs=0
n=0
tripped=False
last=0
state=0
stretch_start=0
low_point=0.0
ji=0
gap_scores={}
gap_coverages={}
max_t1=0
min_t1=slen
breakers=[]
for i in range(len(edges)):
rs+=edges[i][1]
n+=nb[i][1]
x=edges[i][0]
try:
score=model.cutScore(slen,x,n,rs,rangeCutoff=maxx,minSep=minx)
except Exception as e:
print(edges)
print(i,edges[i],nb[i],rs,n,x,scaffold,slen,len(edges))
raise e
if score>t1:
tripped=True
last_t1=x
min_t1 = min(min_t1,x)
max_t1 = max(max_t1,x)
if tripped and score<t2 and state==0:
stretch_start=x
state=1
low_point =score
minx = x
if state==1 and score>t2:
breakers.append((scaffold,stretch_start,x,minx,low_point,slen))
# if logfile: logfile.write("{} {} {} {} {} {}\n".format(scaffold,stretch_start,x,minx,low_point,slen))
state=0
if state==1:
if score<low_point:
low_point=score
minx=x
if debug: print("dd:",scaffold,edges[i][0],score,rs,state,x,stretch_start,score,n,edges[i][2])
if gpf: gpf.write("{}\t{}\t{}\t{}\n".format(edges[i][0],score,rs,n))
last=edges[i][0]
for scaffold,stretch_start,x,minx,low_point,slen in breakers:
if stretch_start < last_t1:
# if logfile: logfile.write("{} {} {} {} {} {} {} {}\n".format(scaffold,stretch_start,x,minx,low_point,slen,min_t1,max_t1))
if logfile: logfile.write("{} {} {} {} {} {}\n".format(scaffold,stretch_start,x,minx,low_point,slen))
def make_trivial_layout_lines(self,debug=False):
self.layout_lines=[]
#print(self.ocontig_lengths)
if len(self.ocontig_lengths)==0:
print("#load_contig_info")
self.load_ocontig_info()
i=1
for ocontig in self.ocontigs_iter():
scaffold=ocontig
# if debug: print(scaffold,ocontig,1,"5",0,self.ocontig_lengths[ocontig],sep="\t")
self.layout_lines.append((scaffold,ocontig,1,"5",0,self.ocontig_lengths[ocontig])) #
self.layout_lines.append((scaffold,ocontig,1,"3",self.ocontig_lengths[ocontig],self.ocontig_lengths[ocontig])) #
self.scaffold_lengths[scaffold] = self.ocontig_lengths[ocontig]
self.scaffold_ocontigs[scaffold] = self.scaffold_ocontigs.get(scaffold,[])+[ocontig]
i+=1
self.index_ocontigs()
self.setup_mapper()
def get_scaffold_links(self,Tscaffold,skipI=False,mapq=10,links={},debug=False,tuples=False):
if not self.layout_lines or len(self.layout_lines)==0:
self.make_trivial_layout_lines(debug=debug)
self.validate()
contigs={}
ocontigs={}
if not tuples:
for scaffold,contig,base,end,x,l in self.layout_lines:
if scaffold==Tscaffold and end in [5,"5"]:
contigs[ contig+"_"+str(base) ]=1
ocontigs[contig]=1
else:
for scaffold,contig,base,end,x,l in self.layout_lines:
if scaffold==Tscaffold and end in [5,"5"]:
contigs[ contig,base ]=1
ocontigs[contig]=1
if debug: print(ocontigs,contigs)
self.get_links(list(ocontigs.keys()),skipI,mapq,links,contigs,tuples=tuples)
def segments_iter(self):
for l in self.layout_lines:
scaffold1,contig1,base1,end1,x1,l21 =l
if end1 in [5 , "5"]:
#cc=self.contig_length(contig1+"_"+str(base1))
yield( contig1,base1,base1+l21,self.ocontig_lengths[contig1] )
def contigs_iter(self):
for l in self.layout_lines:
scaffold1,contig1,base1,end1,x1,l21 =l
if end1 in [5 , "5"]:
yield( contig1 +"_"+str(base1) )
def ocontigs_iter(self):
for ocontig in self.ocontig_lengths.keys():
yield(ocontig)
def load_ocontig_info(self):
import pysam
self.ocontig_lengths={}
if not self.bam_objects:
self.bam_objects={}
for b in self.bams:
self.bam_objects[b] = pysam.Samfile(b,"rb")
for bam in self.bam_objects.values():
h=bam.header
seqs=h['SQ']
for s in seqs:
if s['SN'] in self.ocontig_lengths:
if not self.ocontig_lengths[s['SN']]==s['LN'] :
raise Exception
else:
self.ocontig_lengths[s['SN']]=s['LN']
def random_window(self,wlen=1000):
r=int(random.random()*self.total_ocontig_length)
x=bisect.bisect_right(self.rc_buffer,(r,"a"))
while r+x > self.total_ocontig_length or (x<len(self.rc_buffer)-1 and r+wlen>self.rc_buffer[x+1][0]):
r=int(random.random()*self.total_ocontig_length)
x=bisect.bisect_right(self.rc_buffer,(r,"a"))
# print( r,self.rc_buffer[x],self.rc_buffer[x+1],r-self.rc_buffer[x][0])
x-=1
# print( r,self.rc_buffer[x],self.rc_buffer[x+1],r-self.rc_buffer[x][0])
return( self.rc_buffer[x][1], r-self.rc_buffer[x][0], r-self.rc_buffer[x][0]+wlen )
def top_other_contigs(self,reference,mapq=10,n=2,debug=False,bins=False,binx=0):
nother={}
if not self.bam_objects:
self.bam_objects={}
for b in self.bams:
self.bam_objects[b] = pysam.Samfile(b,"rb")
if debug: print(self.bam_objects)
if bins:
region="{}:{}-{}".format(reference,binx*self.binsize,(binx+1)*self.binsize)
for bam in self.bam_objects.values():
for aln in bam.fetch(region=region,until_eof=True):
if aln.is_duplicate : continue
if aln.mapq < mapq : continue
if BamTags.mate_mapq(aln) < mapq : continue
my_bin,rbin = aln2bins(aln,self.binsize)
if not my_bin == rbin :
nother[rbin] = nother.get(rbin,0)+1
else:
for bam in self.bam_objects.values():
for aln in bam.fetch(reference=reference,until_eof=True):
if aln.is_duplicate : continue
if aln.mapq < mapq : continue
if BamTags.mate_mapq(aln) < mapq : continue
if not aln.rnext == aln.tid :
nother[aln.rnext] = nother.get(aln.rnext,0)+1
kk=list(nother.keys())
kk.sort(key=lambda x: nother[x],reverse=True)
if debug: print(kk[:2])
return(tuple(kk[:2]))
def window_stats(self,reference,xa,xb,mapq=10,debug=False,bins=False):
        if not self.bam_objects:
            self.bam_objects={}
            for b in self.bams:
                self.bam_objects[b] = pysam.Samfile(b,"rb")
if not self.shotgun_bam_objects:
self.shotgun_bam_objects=[]
for b in self.shotgun:
self.shotgun_bam_objects.append( pysam.Samfile(b,"rb") )
binx = 0
if bins:
binx = int(xa/self.binsize)
top2 = self.top_other_contigs(reference,mapq=mapq,debug=debug,bins=bins,binx=binx)
region = "{}:{}-{}".format(reference,max(1,xa-1000),xb+1000)
n_aligned=0
n_hiq=0
templates={}
n_lowq =0
occ ={}
occ2={}
for bam in self.bam_objects.values():
if debug: print("#",region,xa,xb,bam)
for aln in bam.fetch(region=region):
if aln.pos > xb: continue
if aln.pos < xa: continue
if aln.is_duplicate : continue
n_aligned+=1
if aln.mapq < mapq : continue
if BamTags.mate_mapq(aln) < mapq : continue
templates[aln.query_name]=templates.get(aln.query_name,0)+1
if bins:
my_bin,rbin = aln2bins(aln,self.binsize)
if not my_bin==rbin:
occ[rbin] = occ.get(rbin,0)+1
if not rbin in top2:
occ2[rbin] = occ2.get(rbin,0)+1
else:
if (not aln.rnext == aln.tid):
occ[aln.rnext] = occ.get(aln.rnext,0)+1
if (not aln.rnext in top2) :
occ2[aln.rnext] = occ2.get(aln.rnext,0)+1
n_hiq+=1
if debug: print("#",n_hiq,n_aligned)
n_both = len([ i for i in templates.keys() if templates[i]>1 ])
n_other = len(occ.keys())
n_other2 = len([ i for i in occ.keys() if occ[i]>1 ])
n_other3 = len([ i for i in occ.keys() if occ[i]>2 ])
n_otherB = len(occ2.keys())
n_otherB2 = len([ i for i in occ2.keys() if occ[i]>1 ])
n_otherB3 = len([ i for i in occ2.keys() if occ[i]>2 ])
n_shotgun=0
for bam in self.shotgun_bam_objects:
for aln in bam.fetch(region=region):
if aln.pos > xb: continue
if aln.is_duplicate : continue
if aln.mapq < mapq : continue
n_shotgun+=1
print("\t".join(map(str,[region,n_aligned,n_hiq,n_both,n_other,n_other2,n_other3,n_otherB,n_otherB2,n_otherB3,n_shotgun])))
def chicago_promiscuity_mask(self,ocontig,mapq=10,w=1000,minlinks=2,maxothers=3,outfile=False,bins=False,debug=False):
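        # Mask windows of width w whose reads link to more than maxothers distinct
        # other contigs (or contig bins when bins=True), counting only partners
        # supported by at least minlinks read pairs; merged bad intervals are
        # written to outfile as "ocontig start end".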
if not self.bam_objects:
self.bam_objects=[]
for b in self.bams:
self.bam_objects.append( pysam.Samfile(b,"rb") )
# dtype=[('x',int),('t',int),('y',int),('b',int),('r',int)]
dtype=[('x',int),('t',int),('y',int)]
buffer = GrowingNPArray(dtype=dtype)
i=0
tid=0
for bam in self.bam_objects:
if debug: print("#",ocontig,bam)
tid = bam.gettid(ocontig)
for aln in bam.fetch(reference=ocontig):
if aln.rnext<0: continue
if aln.mapq < mapq : continue
if BamTags.mate_mapq(aln) < mapq : continue
if aln.is_duplicate : continue
# buffer.append((aln.pos,aln.rnext,aln.pnext,i,aln.tid))
buffer.append((aln.pos,aln.rnext,aln.pnext))
i+=1
buffer = buffer.finalize()
buffer = np.sort(buffer,order='x',kind="mergesort")
bad_edges=[]
def get_contig(x,b):
return x[1]
def get_contig_bin(x,b):
return(x[1]+(int(x[2]/b)/1.0e6))
if bins:
get_bin = get_contig_bin
else:
get_bin = get_contig
bc={}
i=0
j=0
N=len(buffer)
while i < N:
mybin = get_bin((0,tid,buffer[i][0]),self.binsize)
while j<N and buffer[j][0]<buffer[i][0]+w:
jbin = get_bin(buffer[j],self.binsize)
bc[ jbin ] = bc.get(jbin,0)+1
j+=1
n_in_window = j-i
nw=len([ ii for ii in bc.keys() if bc[ii]>=minlinks and not ii==mybin ]) #n_other_windows(buffer[i:j],minlinks,bins,self.binsize)
# if debug: print(tid,buffer[i][0],n_in_window,nw,mybin,[ (ii,bc[ii]) for ii in bc.keys() if bc[ii]>=minlinks and not ii==mybin ],[ get_bin(x,self.binsize) for x in buffer[i:j] ])
if debug: print(tid,buffer[i][0],n_in_window,nw,mybin,[ (ii,bc[ii]) for ii in bc.keys() if bc[ii]>=minlinks and not ii==mybin ])
if nw > maxothers:
bad_edges.append((buffer[i ][0], 1))
bad_edges.append((buffer[j-1][0],-1))
ibin = get_bin(buffer[i],self.binsize)
bc[ ibin ] = bc[ibin]-1
if bc[ibin]==0: bc.pop(ibin)
i+=1
bad_edges.sort()
if debug:
for be in bad_edges:
                print(be)
rs=0
state=0
for x,dy in bad_edges:
rs+=dy
if state==0 and rs>0:
startx=x
state=1
elif state==1 and rs==0:
outfile.write("{} {} {}\n".format(ocontig,startx,x))
if debug: print("#{} {} {}\n".format(ocontig,startx,x))
state=0
outfile.flush()
def chicago_read_density_mask(self,ocontigs,mapq=10,outfile=False,w=1000,cutoff=100,debug=False,shotgunT=False):
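        # Mask windows of width w containing more than `cutoff` high-quality Chicago
        # read starts (or more than `shotgunT` shotgun read starts, when shotgunT is
        # set); overlapping bad windows are merged before being written to outfile.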
if not self.bam_objects:
self.bam_objects=[]
for b in self.bams:
self.bam_objects.append( pysam.Samfile(b,"rb") )
if not self.shotgun_bam_objects:
self.shotgun_bam_objects=[]
for b in self.shotgun:
self.shotgun_bam_objects.append( pysam.Samfile(b,"rb") )
for ocontig in ocontigs:
locations=[]
shotgun_locations=[]
# gc.disable()
for bam in self.bam_objects:
if debug: print("#",ocontig,bam)
for aln in bam.fetch(reference=ocontig):
# if debug: print("#",aln.pos,aln.is_duplicate,aln.mapq,BamTags.mate_mapq(aln),aln.query_name,len(locations))
if aln.rnext<0: continue
if aln.mapq < mapq : continue
if BamTags.mate_mapq(aln) < mapq : continue
if aln.is_duplicate : continue
locations.append(aln.pos)
if debug: print("#",len(locations))
if shotgunT:
for bam in self.shotgun_bam_objects:
if debug: print("#",ocontig,bam)
for aln in bam.fetch(reference=ocontig):
if aln.mapq < mapq : continue
if aln.is_duplicate : continue
shotgun_locations.append(aln.pos)
if debug: print("#",len(locations),len(shotgun_locations))
# gc.enable()
locations.sort()
shotgun_locations.sort()
i=0
j=0
bad_edges=[]
while i<len(locations):
while j<len(locations) and locations[j]<locations[i]+w: j+=1
n_in_window = j-i
if debug:
print("chicago_density",ocontig,locations[i],locations[j-1],j-i,locations[j-1]-locations[i],locations[i:j])
if n_in_window > cutoff:
# outfile.write("{} {} {}\n".format(ocontig,locations[i],locations[j-1]))
bad_edges.append((locations[i],1) )
bad_edges.append((locations[j-1],-1) )
i+=1
if shotgunT:
i=0
j=0
while i<len(shotgun_locations):
while j<len(shotgun_locations) and shotgun_locations[j]<shotgun_locations[i]+w: j+=1
n_in_window = j-i
if debug:
print("shotgun_density",ocontig,shotgun_locations[i],shotgun_locations[j-1],j-i,shotgun_locations[j-1]-shotgun_locations[i])
if n_in_window > shotgunT:
# outfile.write("{} {} {}\n".format(ocontig,locations[i],locations[j-1]))
bad_edges.append((shotgun_locations[i],1) )
bad_edges.append((shotgun_locations[j-1],-1) )
i+=1
bad_edges.sort()
rs=0
state=0
for x,dy in bad_edges:
rs+=dy
if state==0 and rs>0:
startx=x
state=1
elif state==1 and rs==0:
outfile.write("{} {} {}\n".format(ocontig,startx,x))
state=0
outfile.flush()
def make_scaffold_mask(self,scaffold): #,mapper,mask):
s_mask = {}
segments={}
slen = self.scaffold_lengths[scaffold]
for ocontig in self.scaffold_ocontigs[scaffold]:
# print(ocontig)
for a,b in self.masked_regions.get(ocontig,[]):
c1,x = self.broken_coord(ocontig,a,tuples=True)
c2,y = self.broken_coord(ocontig,b,tuples=True)
s1=self.contig_scaffold.get(c1)
s2=self.contig_scaffold.get(c2)
if c1==c2 and s1==s2 and s1==scaffold:
xx=self.scaffold_coord(c1,x)
yy=self.scaffold_coord(c2,y)
#print("#msm:",scaffold,ocontig,a,b,x,y,xx,yy,s1,s2,scaffold,sep="\t")
segments[ max(0,min(xx,yy)) , min( slen, max(xx,yy) ) ]=1
elif (not c1==c2) and s1==s2 and s1==scaffold:
xA = x
yA = self.contig_lengths[c1]
xx=self.scaffold_coord(c1,xA)
yy=self.scaffold_coord(c1,yA)
segments[ max(0,min(xx,yy)) , min( slen, max(xx,yy) ) ]=1
xB = 1
yB = y
xx=self.scaffold_coord(c2,xB)
yy=self.scaffold_coord(c2,yB)
segments[ max(0,min(xx,yy)) , min( slen, max(xx,yy) ) ]=1
elif (not c1==c2) and s1==scaffold and not s2==scaffold:
xA = x
yA = self.contig_lengths[c1]
xx=self.scaffold_coord(c1,xA)
yy=self.scaffold_coord(c1,yA)
segments[ max(0,min(xx,yy)) , min( slen, max(xx,yy) ) ]=1
elif (not c1==c2) and s2==scaffold and not s1==scaffold:
xB = 1
yB = y
xx=self.scaffold_coord(c2,xB)
yy=self.scaffold_coord(c2,yB)
segments[ max(0,min(xx,yy)) , min( slen, max(xx,yy) ) ]=1
segments=list(segments.keys())
segments.sort()
#print("#msm:",scaffold,slen,segments,sep="\t")
return(segments)
def read_deserts(self,outfile,mapq=10,min_len=1000):
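        # Report intervals longer than min_len that are free of mapq-passing
        # alignments in every input BAM ("read deserts").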
if not self.bam_objects:
self.bam_objects={}
for b in self.bams:
self.bam_objects[b]= pysam.Samfile(b,"rb")
desert_edges=[]
for bam in self.bam_objects.values():
lastx=0
lastr=-1
for aln in bam.fetch(until_eof=True):
if aln.mapq < mapq : continue
ref = aln.tid
if not ref==lastr:
if lastr>-1:
lastname =bam.getrname(lastr)
lastl = self.ocontig_lengths[ lastname ]
if lastl - lastx > min_len:
desert_edges.append(( lastname,lastx , 1 ))
desert_edges.append(( lastname,lastl ,-1 ))
lastx=0
deltax= aln.pos - lastx
if deltax>min_len:
desert_edges.append(( bam.getrname(aln.tid),lastx , 1 ))
desert_edges.append(( bam.getrname(aln.tid),aln.pos,-1 ))
lastx=aln.pos
lastr=aln.tid
nbams = len( self.bams)
state=0
startx=0
desert_edges.sort()
y=0
for contig,x,dy in desert_edges:
y+=dy
if state==0 and y == nbams:
state=1
startx=x
elif state==1 and y<nbams:
endx = x
state=0
if endx-startx>min_len:
print(contig,startx,endx,"readDesert",file=outfile)
last_contig,last_x = contig,x
def chicago_pairs(self,mapq=10,scaffold_contig_counts={},contig_scaffold_counts={},callback=False,bamfile=False,my_scaffolds=False):
read_length = 100
if not self.bam_objects:
self.bam_objects={}
for b in self.bams:
self.bam_objects[b]= pysam.Samfile(b,"rb")
if bamfile:
bams_iter = iter( [ self.bam_objects[bamfile] ] )
else:
bams_iter = iter( self.bam_objects.values() )
for bam in bams_iter:
# print("#",bam)
last_contig=False
mri=0
mr=[]
last_scaffold=False
for aln in bam.fetch(until_eof=True):
if not aln.is_read1: continue
track=False
skip=False
if aln.rnext<0: continue
if aln.mapq < mapq : continue
if BamTags.mate_mapq(aln) < mapq : continue
if aln.is_duplicate : continue
contig = bam.getrname(aln.tid)
contig2 = bam.getrname(aln.rnext)
if not last_contig or not last_contig==contig:
if contig in self.masked_regions:
mr = self.masked_regions[contig]
mri=0
else:
mr=[]
mri=0
c1,x = self.broken_coord(contig ,aln.pos ,tuples=True)
scaffold=self.contig_scaffold.get(c1)
c2,y = self.broken_coord(contig2,aln.pnext,tuples=True)
nscaffold=self.contig_scaffold.get(c2)
if (not my_scaffolds==False) and (not ((scaffold in my_scaffolds) and (nscaffold in my_scaffolds))): continue
if last_contig and not last_contig==contig:
# print("#done with contig",last_contig,list(contig_scaffold_counts.get(last_contig,{}).keys()))
for sc in contig_scaffold_counts.get(last_contig,{}).keys(): # for each hirise scaffold that this contig contributes a segment to:
if sc in scaffold_contig_counts:
if last_contig in scaffold_contig_counts[sc]: # if we are still tracking the number of times we need to read this contig from a bam file for this scaffold
# print("##",sc,last_contig,scaffold_contig_counts[sc][last_contig])
scaffold_contig_counts[sc][last_contig] -= 1 # decrease the count by one
# print("##",sc,last_contig,scaffold_contig_counts[sc][last_contig])
if scaffold_contig_counts[sc][last_contig] ==0:
del scaffold_contig_counts[sc][last_contig]
if len(scaffold_contig_counts[sc])==0:
# print("scaffold {} ready now",sc)
if callback:
callback(sc)
else:
yield(sc,"done")
del scaffold_contig_counts[sc]
# raise ScaffoldReady(sc)
# print("#s",last_contig,sc,scaffold_contig_counts.get(sc))
# print("#c",last_contig,contig_scaffold_counts.get(last_contig))
last_contig = contig
last_scaffold = scaffold
while mri<len(mr) and aln.pos > mr[mri][1]: mri+=1
if mri<len(mr) and mr[mri][0]<aln.pos and aln.pos+read_length<mr[mri][1] : continue #this read maps to a masked region
if contig2 in self.masked_regions:
if [ (aa,bb) for aa,bb in self.masked_regions[contig2] if aa<aln.pnext and aln.pnext+read_length<bb ]: continue #the pair maps to a masked region
if not scaffold: continue
if not nscaffold: continue
xx=self.scaffold_coord(c1,x)
yy=self.scaffold_coord(c2,y)
# yield(s1,self.scaffold_coord(c1,x),s2,self.scaffold_coord(c2,y))
if self.scaffold_lengths[scaffold] < xx or self.scaffold_lengths[nscaffold] < yy:
print("coordinate out of range error",xx,yy,scaffold,nscaffold,self.scaffold_lengths[scaffold], self.scaffold_lengths[nscaffold],contig,contig2,c1,c2,x,y)
raise Exception
yield( scaffold, nscaffold, xx, yy, c1, c2, aln.query_name )
def chicago_pairs_for_scaffolds(self,mapq=10,callback=False,bamfile=False,scaffolds=[],contigs=False,minsep=0):
read_length = 100
if not self.bam_objects:
self.bam_objects={}
for b in self.bams:
self.bam_objects[b]= pysam.Samfile(b,"rb")
if bamfile:
bams_iter = iter( [ self.bam_objects[bamfile] ] )
else:
bams_iter = iter( self.bam_objects.values() )
ocontigs_to_hit = {}
my_ocontigs=[]
if contigs:
my_ocontigs=contigs
else:
for scaffold in scaffolds:
for oc in self.scaffold_ocontigs[scaffold]:
ocontigs_to_hit[oc]=1
my_ocontigs = list(ocontigs_to_hit.keys())
for bam in bams_iter:
# print("#",bam)
last_contig=False
mri=0
mr=[]
last_scaffold=False
for ocontig in my_ocontigs:
for aln in bam.fetch(reference=ocontig):
if not aln.is_read1: continue
track=False
skip=False
if aln.rnext<0: continue
if aln.mapq < mapq : continue
if BamTags.mate_mapq(aln) < mapq : continue
if aln.is_duplicate : continue
contig = bam.getrname(aln.tid)
contig2 = bam.getrname(aln.rnext)
if not last_contig or not last_contig==contig:
if contig in self.masked_regions:
mr = self.masked_regions[contig]
mri=0
else:
mr=[]
mri=0
if last_contig and not last_contig==contig:
pass
# print("#done with contig",last_contig,list(contig_scaffold_counts.get(last_contig,{}).keys()))
c1,x = self.broken_coord(contig ,aln.pos ,tuples=True)
scaffold=self.contig_scaffold.get(c1)
last_contig = contig
last_scaffold = scaffold
while mri<len(mr) and aln.pos > mr[mri][1]: mri+=1
if mri<len(mr) and mr[mri][0]<aln.pos and aln.pos+read_length<mr[mri][1] : continue #this read maps to a masked region
if contig2 in self.masked_regions:
if [ (aa,bb) for aa,bb in self.masked_regions[contig2] if aa<aln.pnext and aln.pnext+read_length<bb ]: continue #the pair maps to a masked region
c2,y = self.broken_coord(contig2,aln.pnext,tuples=True)
nscaffold=self.contig_scaffold.get(c2)
if not scaffold: continue
if not nscaffold: continue
if not scaffold in scaffolds: continue
if not nscaffold in scaffolds: continue
xx=self.scaffold_coord(c1,x)
yy=self.scaffold_coord(c2,y)
if scaffold==nscaffold and abs(xx-yy)<minsep: continue
# yield(s1,self.scaffold_coord(c1,x),s2,self.scaffold_coord(c2,y))
if self.scaffold_lengths[scaffold] < xx or self.scaffold_lengths[nscaffold] < yy:
print("coordinate out of range error",xx,yy,scaffold,nscaffold,self.scaffold_lengths[scaffold], self.scaffold_lengths[nscaffold],contig,contig2,c1,c2,x,y)
raise Exception
yield( scaffold, nscaffold, xx, yy, c1, c2, aln.query_name )
def get_links(self,ocontigs,skipI=False,mapq=10,links={},contigs=False,raw=False,tuples=False,debug=False):
# links={}
if not self.bam_objects:
self.bam_objects={}
for b in self.bams:
self.bam_objects[b]= pysam.Samfile(b,"rb")
for bam in self.bam_objects.values():
# bam = pysam.Samfile(b,"rb")
for ocontig in ocontigs:
# print("#",ocontig)
mr=[]
mri=0
if ocontig in self.masked_regions: mr=self.masked_regions[ocontig]
for aln in bam.fetch(reference=ocontig):
# if debug: print("#",aln.query_name,aln.pos,aln.pnext,aln.mapq,sep="\t")
if aln.rnext<0: continue
if aln.mapq < mapq : continue
if BamTags.mate_mapq(aln) < mapq : continue
if aln.is_duplicate : continue
while mri<len(mr) and aln.pos > mr[mri][1]: mri+=1
if mri<len(mr) and mr[mri][0]<aln.pos and aln.pos<mr[mri][1] :
if debug: print("#m1",ocontig,bam.getrname(aln.rnext),aln.pos,aln.pnext,mr[mri],aln.query_name,sep="\t")
continue #this read maps to a masked region
if raw:
c1,x1 = ocontig,aln.pos
else:
c1,x1 = self.broken_coord(ocontig,aln.pos,tuples=tuples)
if not c1: continue
if contigs and not c1 in contigs: continue
ocontig2 = bam.getrname(aln.rnext)
if raw:
c2,x2 = ocontig2,aln.pnext
else:
c2,x2 = self.broken_coord(ocontig2,aln.pnext,tuples=tuples)
if not c2: continue
if contigs and not c2 in contigs: continue
if skipI and c1 == c2: continue
if ocontig2 in self.masked_regions:
overlapping_masked_segs = [ (aa,bb) for aa,bb in self.masked_regions[ocontig2] if aa<aln.pnext and aln.pnext<bb ]
if overlapping_masked_segs:
if debug: print("#m2",ocontig,ocontig2, aln.pos,aln.pnext,overlapping_masked_segs ,aln.query_name,sep="\t")
continue #the pair maps to a masked region
if not (c1,c2) in links:
links[c1,c2]=[]
links[c1,c2].append((x1,x2))
if debug: print("#p",c1,c2,aln.query_name,aln.pos,aln.pnext,sep="\t")
# print(self.broken_coord(ocontig,aln.pos),self.broken_coord(bam.getrname(aln.rnext),aln.pnext))
# for c1,c2 in links:
# print(c1,c2,links[c1,c2])
return(links)
def validate(self):
contig_mins={}
contig_maxs={}
scaffold_mins={}
scaffold_maxs={}
contig_lengths={}
counts={}
# scaffold_lengths={]
lastx=0
last_scaffold=False
last_contig=(False,)
for scaffold,contig,base,end,x,l in self.layout_lines:
if last_scaffold and scaffold == last_scaffold and not x>lastx:
print(last_scaffold,scaffold,x,lastx)
raise Exception
if x<0: raise Exception
counts[contig,base]=counts.get((contig,base),0)+1
contig_mins[contig,base]=min( contig_mins.get((contig,base),1e99) , x )
contig_maxs[contig,base]=max( contig_maxs.get((contig,base),-1) , x )
if (contig,base) in contig_lengths:
if not contig_lengths[contig,base]==l: raise Exception
else:
contig_lengths[contig,base]=l
if (contig,base) == last_contig:
# print(scaffold,contig,base,end,x,l,last_contig,lastx,x-lastx,"XXXX",sep="\t")
if not l == x-lastx :
print(scaffold,contig,base,end,x,l,last_contig,lastx,sep="\t")
raise Exception
scaffold_mins[scaffold]=min( scaffold_mins.get(scaffold,1e99) , x )
scaffold_maxs[scaffold]=max( scaffold_maxs.get(scaffold,-1) , x )
last_contig=(contig,base)
lastx,last_scaffold=x,scaffold
for contig,base in counts.keys():
if not counts[contig,base]==2: raise Exception
if not contig_maxs[contig,base]-contig_mins[contig,base]==contig_lengths[contig,base]:
print("wtf?",contig,base,contig_mins[contig,base],contig_maxs[contig,base],contig_lengths[contig,base],contig_maxs[contig,base]-contig_mins[contig,base])
raise Exception
self.setup_mapper()
lastk=(1,0)
lastl=0
lastx=0
for k in sorted(self.contigx.keys()):
if k[0]==lastk[0]:
#print("XX",k,contig_lengths.get(k),self.contigx[k],lastl,k[1]-lastk[1],lastl==k[1]-lastk[1],sep="\t")
if not lastl==k[1]-lastk[1]:
print(k,contig_lengths.get(k),self.contigx[k],lastl,k[1]-lastk[1],lastl==k[1]-lastk[1],sep="\t")
raise Exception
lastk=k
lastl = contig_lengths.get(k)
lastx=self.contigx[k]
# self.contigx={}
# self.contigst={}
print("#validation success")
# contig_maxs[contig,base]=max( contig_maxs.get((contig,base),-1) , x )
def add_breakpoints_ranges(self,breaks,debug=False,scores={},unlink_threshold=100.0, contig_break_threshold=100.0,score_types={},clipped_likelihood_for_gaps_only=True):
"""This function filters breaks expressed as spans according to several criteria before passing selected break points along to add_breakpoints()"""
if not self.layout_lines or len(self.layout_lines)==0:
self.make_trivial_layout_lines(debug=debug)
self.validate()
break_points = []
category={}
br={}
scaffold_breaks={}
for s,a,b,c in breaks:
if not s in scaffold_breaks: scaffold_breaks[s]=[]
for s,a,b,c in breaks:
scaffold_breaks[s].append((a,b,c))
for s in scaffold_breaks.keys():
scaffold_breaks[s].sort()
i=0
offset={}
while i< len(self.layout_lines)-1:
#print(self.layout_lines[i])
scaffold1,contig1,base1,end1,x1,l21 = self.layout_lines[ i ]
scaffold2,contig2,base2,end2,x2,l22 = self.layout_lines[ i+1 ]
i+=1
if not scaffold1 in scaffold_breaks: continue
if not scaffold1 == scaffold2 : continue
for a,b,c in scaffold_breaks[scaffold1]:
if pairs_overlap((a,b),(x1,x2)) and (not (contig1,base1)==(contig2,base2)): # you don't have to break a contig to make this break
if x1<=c and c<=x2:
this_offset_from_min_score = 0
else:
this_offset_from_min_score = min(abs(x1-c),abs(x2-c))
if (not (scaffold1,a,b,c) in category) or \
(category[scaffold1,a,b,c]=="break") or \
(category[scaffold1,a,b,c]=="gap" and offset[scaffold1,a,b,c]>this_offset_from_min_score):
category[scaffold1,a,b,c]="gap"
br[scaffold1,a,b,c]=int((x1+x2)/2)
offset[scaffold1,a,b,c] = this_offset_from_min_score
elif x1<a and b<x2:
if not (scaffold1,a,b,c) in category:
if scores.get((scaffold1,a,b,c),-1e9) < contig_break_threshold:
category[scaffold1,a,b,c]="break"
br[scaffold1,a,b,c]=c
categories={}
for s,a,b,c in br.keys():
if (category[s,a,b,c]=="break" and scores.get((s,a,b,c),100)< contig_break_threshold) or \
(category[s,a,b,c]=="gap" and scores.get((s,a,b,c),100)< unlink_threshold) :
if clipped_likelihood_for_gaps_only and (score_types.get((s,a,b,c),"fine")=="clipped") and category[s,a,b,c]=="break":
if debug: print("Skip break because we're only using clipped likelihood for un-scaffolding, not contig breaking",s,a,b,c,scores.get((s,a,b,c),-100),sep="\t")
if debug: print("X",s,a,b,c,br[s,a,b,c],category[s,a,b,c],score_types.get((s,a,b,c),"fine"),scores.get((s,a,b,c),-100))
continue
break_points.append((s,br[s,a,b,c]))
if True or debug: print("Q",s,a,b,c,br[s,a,b,c],category[s,a,b,c],score_types.get((s,a,b,c),"fine"),scores.get((s,a,b,c),-100))
categories[s,br[s,a,b,c]]=category[s,a,b,c]
self.add_breakpoints(break_points,debug=debug)
def add_breakpoints(self,breaks,debug=False,categories={}):
"""breaks is a list of (scaffold,x) pairs where scaffold is the id of a scaffold in the assembly,
and x is a basepair offset from the scaffold 5' end of the scaffold where a break should be added. If x
falls in a gap between contigs, the linkage is broken. If x falls in a contig, the contig is broken."""
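# Illustrative call (coordinates are basepair offsets on the scaffold):
# asf.add_breakpoints([("1", 1692), ("1", 10000)])
# cuts scaffold "1" at 1692 and 10000, removing a join or splitting a contig
# depending on where each coordinate lands.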
import networkx as nx
g = nx.DiGraph()
scaffold_breaks={}
for s,x in breaks:
if not s in scaffold_breaks: scaffold_breaks[s]=[]
for s,x in breaks:
scaffold_breaks[s].append(x)
for s in scaffold_breaks.keys():
scaffold_breaks[s].sort()
if debug: print(scaffold_breaks)
new_layout_lines=[]
scaffold_broken=False
break_indices={}
i=0
break_counter=0
# debug=False
edge_breaks={}
elength={}
scaffold={}
xcoord={}
#
# Convert the layout lines into a directed graph
#
contig_length={}
while i< len(self.layout_lines)-1:
#print(self.layout_lines[i])
scaffold1,contig1,base1,end1,x1,l21 = self.layout_lines[ i ]
scaffold2,contig2,base2,end2,x2,l22 = self.layout_lines[ i+1 ]
scaffold[contig1,base1]=scaffold1
scaffold[contig2,base2]=scaffold2
i+=1
if scaffold1==scaffold2:
if scaffold1 in scaffold_breaks and (not (contig1,base1)==(contig2,base2)) and len([z for z in scaffold_breaks[scaffold1] if x1<z and z<x2])>0:
pass # this is an OO join to remove
else:
g.add_edge( (contig1,base1,end1),(contig2,base2,end2) )
elength[ (contig1,base1,end1),(contig2,base2,end2) ]=x2-x1
if (contig1,base1)==(contig2,base2): contig_length[contig1,base2]=x2-x1
if not ( scaffold1==scaffold2 and scaffold1 in scaffold_breaks):
continue
else:
break_positions = [z for z in scaffold_breaks[scaffold1] if x1<z and z<x2]
if len(break_positions)==0:
continue
edge_breaks[(contig1,base1,end1),(contig2,base2,end2)]=[]
for x in break_positions: # there's a break in here!
edge_breaks[(contig1,base1,end1),(contig2,base2,end2)].append( x-x1 )
for e1,e2 in edge_breaks.keys():
# print(e1,e2)
next_nodes= list(g.neighbors(e2)) # materialise once: neighbors()/predecessors() may return iterators
prev_nodes= list(g.predecessors(e1))
# print(prev_nodes)
if len(list(next_nodes))>1:
raise Exception
if len(list(prev_nodes))>1:
raise Exception
next_node=False
prev_node=False
if len(list(next_nodes)) == 0:
continue
if len(list(prev_nodes)) == 0:
continue
if next_nodes:
next_node = list(next_nodes)[0]
if prev_nodes:
prev_node = list(prev_nodes)[0]
g.remove_edge(e1,e2)
#print(e1,e2,edge_breaks[e1,e2],scaffold[e1[0],e1[1]],next_node,prev_node)
if not (e1[0],e1[1])==(e2[0],e2[1]):
#print("this is just an oo link to udo")
#this is just an edge to remove
continue
ocontig = e1[0]
if e1[2] in ["5",5]: #forward
if next_node: g.remove_edge(e2,next_node)
g.remove_node(e2)
# a b
# prev e1 x1 x2 e2 next
# ----------> ---------------------------> ----------------->
#just implement the first and last break. (this is not necessarily good if the contig is really, really long. XXX TODO )
edge_breaks[e1,e2].sort()
x1=edge_breaks[e1,e2][0]
x2=edge_breaks[e1,e2][-1]
for i in range(len(edge_breaks[e1,e2])-1):
xa = edge_breaks[e1,e2][i ]
xb = edge_breaks[e1,e2][i+1]
new_node_a = ( e1[0] , e1[1]+xa, '5' )
new_node_b = ( e1[0] , e1[1]+xa, '3' )
g.add_edge(new_node_a,new_node_b)
elength[ new_node_a,new_node_b ] = xb-xa
scaffold[e1[0] , e1[1]+xa] = scaffold[e1[0],e1[1]]
new_node_1 = (e1[0],e1[1] ,'3')
new_node_2 = (e1[0],e1[1]+x2,'5')
new_node_3 = (e1[0],e1[1]+x2,'3')
scaffold[e1[0],e1[1]+x2] = scaffold[e1[0],e1[1]]
if debug: print("b",e1,e2,prev_node,next_node,x1,x2,edge_breaks[e1,e2],new_node_1,new_node_2, sep="\t")
g.add_edge(e1,new_node_1)
g.add_edge(new_node_2,new_node_3)
if next_node: g.add_edge(new_node_3,next_node)
LL = elength[e1,e2]
if next_node: LL2 = elength[e2,next_node]
elength[ e1,new_node_1 ] = x1
elength[ new_node_2,new_node_3 ] = LL - x2
if next_node: elength[ new_node_3,next_node ] = LL2
if debug: print("b",e1,e2,prev_node,next_node,x1,x2,edge_breaks[e1,e2],new_node_1,new_node_2, elength.get(( e1,new_node_1 )),elength.get(( new_node_2,new_node_3 )), elength.get((new_node_3,next_node)), sep="\t")
# print("zzz",contig_length[e1[0],e1[1]],LL,elength[e1,e2],edge_breaks[e1,e2],x1,x2,elength[e1,e2],elength[ e1,new_node_1 ],elength[ new_node_2,new_node_3 ])
else: #reverse
if prev_node: g.remove_edge(prev_node,e1)
g.remove_node(e1)
#just implement the first and last break. (this is not necessarily good if the contig is really, really long. XXX TODO )
edge_breaks[e1,e2].sort()
x1=elength[e1,e2]-edge_breaks[e1,e2][-1]
x2=elength[e1,e2]-edge_breaks[e1,e2][0]
# b a
# prev e1 x2 x1 e2 next
# ----------> <---------------------------- ----------------->
# A B C
for i in range(len(edge_breaks[e1,e2])-1):
xb = elength[e1,e2]-edge_breaks[e1,e2][i ]
xa = elength[e1,e2]-edge_breaks[e1,e2][i+1]
new_node_a = ( e1[0] , e2[1]+xa, '5' )
new_node_b = ( e1[0] , e2[1]+xa, '3' )
g.add_edge(new_node_a,new_node_b)
elength[ new_node_a,new_node_b ] = abs(xb-xa)
scaffold[e1[0] , e2[1]+xa] = scaffold[e1[0],e2[1]]
new_node_C = (e1[0],e2[1] ,'3') #
new_node_B = (e1[0],e2[1]+x2,'5') #
new_node_A = (e1[0],e2[1]+x2,'3') #
scaffold[e1[0],e2[1]+x2] = scaffold[e1[0],e2[1]]
g.add_edge(new_node_C,e2) #
g.add_edge(new_node_A,new_node_B) #
if prev_node: g.add_edge(prev_node,new_node_A) #
LL = elength[e1,e2]
if prev_node: LL2 = elength[prev_node,e1]
elength[ new_node_C, e2 ] = x1
elength[ new_node_A,new_node_B ] = LL - x2
if prev_node: elength[ prev_node, new_node_A ] = LL2
# print("zzW",edge_breaks[e1,e2],x1,x2,elength[e1,e2],elength[ new_node_1, e2 ],elength[ new_node_3,new_node_2 ])
# for e1,e2 in elength.keys():
# if elength[e1,e2]<0:
# print("wtf?",e1,e2,elength[e1,e2])
#
# Compute the new layout coordinates and scaffold IDs
#
scaffold_incr={}
node_x={}
contig_length={}
new_scaffold={}
for n in sorted(g.nodes()):
#print(n,scaffold[n[0],n[1]],g.degree(n),g.in_degree(n),g.out_degree(n))
if g.in_degree(n)==0:
scaffold_id=scaffold[n[0],n[1]]
new_scaffold_id = scaffold_id
if scaffold_id in scaffold_incr:
new_scaffold_id = scaffold_id + "."+str(scaffold_incr.get(scaffold_id,0))
new_scaffold[n]=new_scaffold_id
scaffold_incr[scaffold_id] = scaffold_incr.get(scaffold_id,0)+1
ll=list(nx.dfs_preorder_nodes(g,n))
xx=0
lastl=False
for l in ll:
if lastl:
xx+=elength[ lastl,l ]
#print("z",l,lastl,elength[lastl,l],node_x[lastl],xx,new_scaffold[n],sep="\t")
if (lastl[0],lastl[1])==(l[0],l[1]):
contig_length[lastl[0],lastl[1]]=elength[ lastl,l ]
#print(new_scaffold_id,l,xx)
node_x[l]=xx
lastl = l
#
# Build the new layout_lines list
#
self.scaffold_lengths={}
new_layout = []
for n in g.nodes():
if g.in_degree(n)==0:
new_scaffold_id = new_scaffold[n]
ll=list(nx.dfs_preorder_nodes(g,n))
for l in ll:
# print("w",new_scaffold_id,l[0],l[1],l[2],node_x[l])
# print("z",new_scaffold_id,l[0],l[1],l[2],node_x[l],contig_length[l[0],l[1]])
new_layout.append( (new_scaffold_id,l[0],l[1],l[2],node_x[l],contig_length[l[0],l[1]] ) )
# new_layout.append( (new_scaffold_id,l[0],l[1],l[2],node_x[l],elength[l[0],l[1]] ) )
self.scaffold_lengths[new_scaffold_id] = max( node_x[l] , self.scaffold_lengths.get(new_scaffold_id,0))
self.layout_lines=new_layout
# self.layout_lines.sort( key=lambda x: (x[0],x[4]) )
self.index_ocontigs()
def load_assembly(self,infile):
self.scaffold_lengths={}
self.bams=[]
self.layout_lines=[]
self.model_params={}
self.ocontig_lengths={}
self.contig_lengths={}
self.scaffold_ocontigs={}
self.bam_objects=False
# self.model_params={}
f=open(infile,"r")
path = ""
#if infile.startswith("/"):
path = os.path.dirname(os.path.normpath(infile))
for l in f:
#print(l)
if l[0]=="M":
c=l.strip().split()
self.model_params[c[1]] = eval(" ".join(c[2:]))[0]
if l[0]=="B":
#self.bams.append(l[2:].strip())
possible_path = l[2:].strip()
if possible_path.startswith("/"):
self.bams.append(l[2:].strip())
else:
self.bams.append(os.path.join(path,l[2:].strip()))
if l[0]=="S":
#self.shotgun.append(l[2:].strip())
possible_path = l[2:].strip()
if possible_path.startswith("/"):
self.shotgun.append(l[2:].strip())
else:
self.shotgun.append(os.path.join(path,l[2:].strip()))
if l[0]=="P": # Layout
self.layout_lines.append( parse_pline2(l,self.scaffold_lengths) )
scaffold,contig,base,end,x,lll = self.layout_lines[-1]
self.contig_lengths[contig+"_"+str(base)]=lll
self.contig_lengths[contig,base]=lll
self.scaffold_ocontigs[scaffold] = self.scaffold_ocontigs.get(scaffold,[])+[contig]
if l[0]=="D": # Masked regions (from D for shotgun depth)
dummy,ocontig,a,b = l.strip().split()
if not ocontig in self.masked_regions: self.masked_regions[ocontig]=[]
self.masked_regions[ocontig].append((int(a),int(b)))
if l[0]=="L": # Length of input contigs, same as bam header lengths
dummy,ocontig,l = l.strip().split()
#if not ocontig in self.masked_regions: self.masked_regions[ocontig]=[]
self.ocontig_lengths[ocontig] = int(l)
f.close()
self.index_ocontigs()
# print("setup mapper")
self.setup_mapper(debug=True)
def save_assembly(self,outfile):
f=open(outfile,"w")
for b in self.shotgun:
f.write("S {}\n".format(b))
for b in self.bams:
f.write("B {}\n".format(b))
#f.write("M {}\n".format(self.model_params))
if self.model_params:
for k in sorted(self.model_params.keys()):
f.write("M {} {}\n".format(k,(self.model_params[k],)))
for l in self.layout_lines:
f.write("P "+" ".join(map(str,l))+"\n")
for oc in sorted(self.masked_regions.keys()):
for a,b in self.masked_regions[oc]:
f.write("D {} {} {}\n".format(oc,a,b))
for oc in sorted(self.ocontig_lengths.keys()):
f.write("L {} {}\n".format(oc,self.ocontig_lengths[oc]))
# self.masked_regions={}
f.close()
def scaffold_mapping(self):
for i in range(len(self.layout_lines)):
scaffold1,contig1,base1,end1,x1,l11 = self.layout_lines[i ]
if end1 == "3":
yield("{}_{}".format(contig1,base1),scaffold1)
def as_edges(self):
for i in range(len(self.layout_lines)-1):
scaffold1,contig1,base1,end1,x1,l21 = self.layout_lines[i ]
scaffold2,contig2,base2,end2,x2,l22 = self.layout_lines[i+1]
if scaffold1==scaffold2 :
if contig1==contig2 and base1==base2:
yield(["#edge:","{}_{}.{}".format(contig1,base1,end1) ,"{}_{}.{}".format(contig2,base2,end2) ,{'length':l21,'contig':True}])
else:
yield(["#edge:","{}_{}.{}".format(contig1,base1,end1) ,"{}_{}.{}".format(contig2,base2,end2) ,{'length':x2-x1,'contig':False}])
# yield(["#edge:",contig1+"."+end1,contig2+"."+end2,{'length':x2-x1,'contig':False}])
def contig_coords(self):
seen = {}
for l in self.layout_lines:
scaffold1,contig1,base1,end1,x1,l21 = l
if not (contig1,base1) in seen:
yield( {"contig":contig1,"base":base1,"scaffold":scaffold1,"span":(x1,x1+l21),"flipped":end1 in [3,"3"] })
seen[contig1,base1]=True
def dump_layout(self,f):
# f=open(outfile,"w")
for s in self.scaffold_lengths.keys():
f.write("{} slen\n".format(self.scaffold_lengths[s]))
for l in self.layout_lines:
scaffold1,contig1,base1,end1,x1,l21 = l
f.write("p: {} {}_{}.{} {} - -1 {} {} {}\n".format(scaffold1,contig1,base1,end1,x1,self.scaffold_lengths[scaffold1],l21,False))
# f.close()
#p: 1 Scaffold105707_1.3 3019 - -1 85132546 3048 False
def load_playout(self,layoutfile):
#print(layoutfile)
self.scaffold_lengths={}
# self.contig_lengths={}
if not os.path.exists(layoutfile):
raise Exception
self.layout_lines = [ parse_pline(c,self.scaffold_lengths) for c in open(layoutfile,"rt") if c[:2]=="p:" ]
#for l in self.layout_lines:
#print(l)
self.index_ocontigs()
def check_bams(self):
for bamfile in self.bams:
if not os.path.exists(bamfile):
raise Exception
def set_modelparams(self,datamodel):
if os.path.exists(datamodel):
self.model_params = eval(open(datamodel).read())
else:
self.model_params=datamodel
if __name__=="__main__":
import sys
import argparse
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-d','--debug',default=False,action="store_true",help="Turn on debugging output")
parser.add_argument('-E','--edges',default=False,action="store_true",help="Print the layout as edges.")
# parser.add_argument('-E','--edges',default=False,action="store_true",help="Print the layout as edges.")
parser.add_argument('-D','--dump_layout',default=False,action="store_true",help="Dump the layout as 'p:' lines.")
#parser.add_argument('-p','--progress',default=False,action="store_true",help="Print progress info")
parser.add_argument('-M','--datamodel',default=False, help="Name of a file containing the model parameter dictionary")
parser.add_argument('-m','--mask' ,default=[], action="append",help="Name of a file containing regions of the input assembly to mask out")
parser.add_argument('-b','--bamfile',default=[], action="append",help="A chicago bam file")
parser.add_argument('-S','--shotgunbamfile',default=[], action="append",help="A shotgun bam file")
#parser.add_argument('-p','--progress',default=False,action="store_true",help="Print progress info")
parser.add_argument('-L','--layout',default=False,help="File containing a scaffolding layout in 'p:' lines.")
parser.add_argument('-o','--outfile',default=False,help="Filename for serialization of the assembly file.")
parser.add_argument('-i','--infile',default=False,help="Filename for serialised assembly input file.")
args = parser.parse_args()
# print(args)
if args.infile:
asf = HiriseAssembly()
asf.load_assembly(args.infile)
# asf.validate()
else:
asf = HiriseAssembly( {"bams":args.bamfile, "datamodel": args.datamodel, "layout": args.layout , "debug":args.debug, "shotgun": args.shotgunbamfile} )
if args.debug: print("# done with initial load")
asf.validate()
# asf.index_ocontigs()
if args.datamodel:
asf.set_modelparams(args.datamodel)
for segments_file in args.mask:
asf.add_mask_regions(filename=segments_file)
asf.merge_masked_regions()
if args.outfile:
asf.save_assembly( args.outfile )
if args.edges:
for e in asf.as_edges():
print("\t".join(map(str,e)))
# asf.add_breakpoints( [( '1', 1692 ),('1',10000)] )
# asf.add_breakpoints( [( '1', 1692 ),('1',2000),('1',10000)] )
# asf.add_breakpoints( [( '1', 1692 ),('1',2000),('1',2100),('1',10000)] )
# asf.validate()
# asf.load_ocontig_info()
# print("#loaded ocontig info")
# asf.load_ocontig_info()
# print("#loaded ocontig info")
# print(asf.get_links(["Scaffold116325"],links={}))
# print(asf.get_links(["Scaffold116325"],links={}))
# asf.get_scaffold_links("1000",skipI=True)
#1691
if args.dump_layout:
asf.dump_layout(sys.stdout)
```
#### File: docker-hirise/scripts/linker4.py
```python
from __future__ import print_function
from builtins import str
from builtins import map
from builtins import range
import sys
import networkx as nx
import greedy_chicagoan2 as gs
def update_end_distance(end_distance,n,g):
x=0
q=[n]
seen={}
last=False
while len(q)>0:
m=q.pop(0)
if last:
x+=g[last][m]['length']
last=m
if (not m in end_distance) or end_distance[m]>x: end_distance[m]=x
seen[m]=True
for l in g.neighbors(m):
if not l in seen:
q.append(l)
# print end_distance
#def llr(e1,e2):
# return 1.0
L=200000.0
def test_interc(c1,c2,join_options,graph,ends,linked):
os1=oscaffold[c1]
os1_mine = (args.fromS <= os1) and (os1 < args.endS)
if not os1_mine: return([-100])
# print "test interc",c1,c2,oscaffold[c1],oscaffold[c2],ends[oscaffold[c1]],ends[oscaffold[c2]],linked.get(c1),linked.get(c2),len(linked.keys())
if False:
for free_end in ends[scaffold[c1]]:
for c2_end in [c2+".5",c2+".3"]:
if c2_end in linked:
# print 1,free_end,c2_end
try:
gs.test_interc_option( c2_end, linked[c2_end], free_end, join_options, graph )
except Exception as e:
print("c2_end=",c2_end,"--",linked[c2_end],free_end)
print(e)
raise Exception('too connected 1')
for free_end in ends[scaffold[c2]]:
for c1_end in [c1+".5",c1+".3"]:
if c1_end in linked:
# print 2,free_end,c1_end
try:
gs.test_interc_option( c1_end, linked[c1_end], free_end, join_options, graph )
except Exception as e:
print("c1_end=",c1_end,"--",linked[c1_end],free_end)
print(e)
raise Exception('too connected 2')
if len(join_options)>0:
join_options.sort(reverse=True)
# print join_options
return(join_options[0])
return([-100.0])
if __name__=="__main__":
import sys
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-d','--debug',default=False,action='store_true')
parser.add_argument('-p','--progress',default=False,action='store_true')
parser.add_argument('-L','--links')
parser.add_argument('-a','--chunk1',type=int)
parser.add_argument('-b','--chunk2',type=int)
parser.add_argument('-1','--first')
parser.add_argument('-2','--second')
parser.add_argument('-s','--scaffolds')
parser.add_argument('-S','--alreadyDone')
#parser.add_argument('-b','--besthits')
parser.add_argument('-l','--lengths')
parser.add_argument('-E','--edgefile')
parser.add_argument('--test_intercs',default=False)
parser.add_argument('-F','--filter')
parser.add_argument('-K','--endS',default=100000,type=int)
parser.add_argument('-k','--fromS' ,default=0,type=int)
parser.add_argument('-t','--threshold' ,default=0.10,type=float)
parser.add_argument('-Z','--chunkfile')
parser.add_argument('--set_insert_size_dist_fit_params',default="3.85301461797326,1.42596694138494,1.38674994280385e-05,10940.8191219759,49855.7525034142,0.3,420110993")
args = parser.parse_args()
if args.debug:
args.progress=True
if args.progress: print("#", str(args))
print("#"+str(args))
fit_params={}
try:
fit_params = eval(args.set_insert_size_dist_fit_params )
except Exception as e:
f=open( args.set_insert_size_dist_fit_params )
contents = f.read()
try:
fit_params=eval(contents)
except:
print("couldn't deal with option", args.set_insert_size_dist_fit_params)
f.close()
gs.set_exp_insert_size_dist_fit_params(fit_params)
components=[]
component={}
chunk={}
if args.chunkfile:
f = open(args.chunkfile)
while True:
l = f.readline()
if not l: break
c = l.strip().split()
if not c[0]=="c:": continue
if int(c[2])==args.chunk1 or int(c[2])==args.chunk2:
ccn = int(c[1])
contigs = eval(" ".join(c[4:]))
components.append(contigs)
for cc in contigs:
component[cc]=ccn
chunk[cc]=int(c[2])
ll={}
if args.lengths:
f = open(args.lengths)
while True:
l = f.readline()
if not l: break
if l[0]=="#": continue
c=l.strip().split()
ll[c[0]]=int(c[1])
f.close()
oscaffold={}
oslen={}
slen={}
g=nx.Graph()
linked={}
if args.scaffolds:
f=open(args.scaffolds)
while True:
l=f.readline()
if not l: break
c=l.strip().split()
if c[0]=="cc:":
#print l.strip()
# cc: 10 313 ['Scaffold98750_1', 'Scaffold2044
scn=int(c[1])
scs=eval(" ".join(c[3:]))
for s in scs:
oscaffold[s]=scn
g.add_node(s+".3")
g.add_node(s+".5")
g.add_edge(s+".3",s+".5",{"contig":True, "length":ll[s]})
if c[0]=="#edge:":
ht=eval(" ".join(c[3:]))
g.add_edge(c[1],c[2],**ht)
sb,e=c[1][:-2],c[1][-1:]
oslen[oscaffold.get(sb,sb)] = oslen.get(oscaffold.get(sb,sb),0)+ht['length']
if not ht['contig']:
linked[c[1]]=c[2]
linked[c[2]]=c[1]
#print "#add edge",c[1],c[2],eval(" ".join(c[3:])),linked[c[1]],linked[c[2]]
f.close()
for n in g.nodes():
s = n[:-2]
g.add_edge(s+".3",s+".5",{"contig":True, "length":ll[s]})
if not s in oscaffold:
oscaffold[s]=0
# for n in ["Scaffold27527_1","Scaffold40367_1","Scaffold17670_1"]:
# print n,oscaffold[n] #, nx.node_connected_component(g,n)
# print len(linked.keys())
sys.stdout.flush()
sc=1
scaffold={}
for c in nx.connected_components(g):
for cc in c:
scaffold[cc]=sc
scaffold[cc[:-2]]=sc
tl=0
for e1,e2 in nx.dfs_edges(g,c[0]):
tl+=g[e1][e2]['length']
slen[sc]=tl
sc+=1
end_distance={}
ends={}
for n in g.nodes():
if g.degree(n)==1:# and n in oscaffold and oscaffold[n]<args.endS:
update_end_distance(end_distance,n,g)
ends[scaffold[n]] = ends.get(scaffold[n],[])+[n]
#x print n,scaffold[n],ends[scaffold[n]]
print("#done setting edge distances")
sys.stdout.flush()
scaffold_pairs_tested={}
#Scaffold50016_1 Scaffold40593_1 ['Scaffold77744_1.5', 'Scaffold246520_1.5'] ['Scaffold111955_1.3', 'Scaffold216064_1.3'] 1141 455 1 15 1
if args.alreadyDone:
f=open(args.alreadyDone)
while True:
l=f.readline()
if not l: break
if l[0]=="#": continue
if "link score"==l[:10]: continue
c=l.strip().split()
print("# skip",int(c[-5]),int(c[-4]))
s1=scaffold[c[0]]
s2=scaffold[c[1]]
scaffold_pairs_tested[s1,s2]=True
scaffold_pairs_tested[s2,s1]=True
f.close()
max_interc_len = 20000
inter_chunk_pairs={}
links={}
links_interc={}
if args.links:
if args.links=="-":
f = sys.stdin
else:
f = open(args.links)
nlinks=0
while True:
l = f.readline()
if not l: break
if l[0]=="#": continue
# parser.add_argument('-K','--endS',default=100000,type=int)
# parser.add_argument('-k','--fromS' ,default=0,type=int)
c=l.strip().split()
c1,c2=c[0],c[1]
if ( chunk.get(c1)==args.chunk1 and chunk.get(c2)==args.chunk2 ) or ( chunk.get(c2)==args.chunk1 and chunk.get(c1)==args.chunk2 ) :
inter_chunk_pairs[c1,c2]=1
os1=oscaffold.get(c1,-1)
os2=oscaffold.get(c2,-1)
os1_mine = (args.fromS <= os1) and (os1 < args.endS)
os2_mine = (args.fromS <= os2) and (os2 < args.endS)
# if (os1_mine or os2_mine)
if (os1_mine or os2_mine) \
and os1>=0 and os2>=0 \
and (c1 in scaffold and c2 in scaffold) \
and (not ( scaffold[c1],scaffold[c2] ) in scaffold_pairs_tested):
links[c1,c2]=eval(" ".join(c[5:]))
nlinks+=1
if nlinks%10000==1: print("#nlinks=",nlinks,c1,c2,os1,os2,scaffold[c1],scaffold[c2],args.fromS,args.endS)
else:
pass
#print c1,c2,ll[c1],ll[c2],eval(" ".join(c[5:]))
#if oslen[oscaffold.get(c1,c1)] < max_interc_len or oslen[oscaffold.get(c1,c1)] < max_interc_len:
# links_interc[c1,c2] = eval(" ".join(c[5:]))
if args.links=="-":
pass
else:
f.close()
gaps_list=(1000,)
link_scores={}
gs.ll=ll
gs.links=links
pairs_to_test=list(inter_chunk_pairs.keys())
if args.first:
pairs_to_test = [(args.first,args.second)]
# for c1,c2 in links.keys():
# for c1,c2 in [("Scaffold68143_1","Scaffold42944_1"),("Scaffold232867_1","Scaffold519668_1"),("Scaffold82730_1","Scaffold59156_1")]:
for c1,c2 in pairs_to_test:
nlinks_used={}
if c1 in scaffold and c2 in scaffold and scaffold[c1]==scaffold[c2]: continue
if (scaffold[c1],scaffold[c2]) in scaffold_pairs_tested: continue
#if c1+".5" in nx.node_connected_component(g,c2+".5" ): print "wtf? 1 ",c1,c2
#if oscaffold[c1]==oscaffold[c2]: print "wtf? 3 ",c1,c2,oscaffold[c1],oscaffold[c2]
# if not scaffold[c1]%args.slices == args.slice : continue
scaffold_pairs_tested[scaffold[c1],scaffold[c2]]=True
scaffold_pairs_tested[scaffold[c2],scaffold[c1]]=True
# scaffold_pairs_tested[c2,c1]=True
#print oslen[oscaffold.get(c1,c1)]
header="\t".join(map(str,[ c1,c2,ends[scaffold[c1]],ends[scaffold[c2]],scaffold[c1],scaffold[c2],oscaffold.get(c1),oscaffold.get(c2),slen[scaffold[c1]],slen[scaffold[c2]]])) #,scaffold[c1]%args.slices,scaffold[c2]%args.slices,args.slice
# sys.stdout.flush()
linkscore=[]
hit=False
for e1 in ends[scaffold[c1]]:
for e2 in ends[scaffold[c2]]:
# link_scores[e1,e2]=[ gs.link_test(g,e1,e2,gap) for gap in (1, 1000,5000,10000, 20000, 35000, 50000, 80000, 100000,150000,200000,500000 ) ]
link_scores[e1,e2] =[ gs.link_test(g,e1,e2,gap) for gap in gaps_list ]
if args.debug: nlinks_used[e1,e2]=[ gs.nlinks_used(g,e1,e2,gap) for gap in gaps_list ]
if max( link_scores[e1,e2])>args.threshold: hit=True
print(ends[scaffold[c1]], slen[scaffold[c1]],slen[scaffold[c2]], max( link_scores[e1,e2]), max(nlinks_used.get((e1,e2),[-1])))
best_interc=""
if args.test_intercs:
join_options=[]
best_interc=[-1]
try:
best_interc=test_interc(c1,c2,join_options,g,ends,linked)
except Exception as e:
print(e)
print(header)
if best_interc[0]>0:
hit=True
else:
best_interc=""
# def joined(c,graph):
if hit:
print(header,"i:",best_interc)
for e1 in ends[scaffold[c1]]:
for e2 in ends[scaffold[c2]]:
print("link score",e1,e2,"\t".join(map(str,link_scores[e1,e2])))
if args.debug:
for ii in range(len(gaps_list)):
print("#nlink_debug:",e1,e2,ii,nlinks_used[e1,e2][ii],link_scores[e1,e2][ii])
sys.stdout.flush()
# sc[x,y] = link_test(og,x,y)
#link score Scaffold68143_1.5 Scaffold42944_1.3 -12.0363618331 -8.50975484023 3.09050796551 13.1466146475 23.6521102192
#link score Scaffold232867_1.5 Scaffold519668_1.3 14.2053772843 17.5334920011 28.5300425318 38.1409186507 48.2455365211
#link score Scaffold82730_1.3 Scaffold59156_1.3 19.032925025 22.7060494622 34.76613982 45.137277127 55.5327333446
#link score Scaffold139910_1.5 Scaffold88540_1.5 18.4718988438 22.0553877336 33.8485414561 44.045490051 54.4456783858
#link score Scaffold264145_1.3 Scaffold163676_1.5 58.5394818429 61.4418869438 70.6603831852 77.9258584168 83.0945142366
#link score Scaffold48407_1.3 Scaffold136888_1.5 43.5898317791 46.6738612148 56.8244456982 65.4217034062 73.3441397183
#link score Scaffold113693_1.5 Scaffold61032_1.5 23.6894794909 27.0904366266 38.3018179122 47.9797590877 57.7542115065
#link score Scaffold125405_1.5 Scaffold158227_1.3 24.2942327515 27.8092174693 39.3813872363 49.3913743569 59.5690031451
```
#### File: docker-hirise/scripts/local_scramble_dp.py
```python
from __future__ import print_function
from builtins import range
from builtins import object
import math
import itertools
import networkx as nx
class LSbitlinks(object):
def __init__(self,w):
n = 2**w
self.n=n
#print n
bitlinks={}
for d in range(1,w+1):
for f in range(n):
bitlinks[f,d]=[]
for g in range(n):
#print d,f,g,bin(f),bin(g),bin(f>>d),bin((2**(w-d)-1)),bin(g&(2**(w-d)-1)),f>>d == g&(2**(w-d)-1)
if (f>>d) == g&(2**(w-d)-1):
bitlinks[f,d].append(g)
#print d,f,bitlinks[f,d]
self.bitlinks = bitlinks
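# Worked example: bitlinks[f, d] holds every g whose low (w-d) bits match the high
# (w-d) bits of f, i.e. two length-w windows that overlap after a shift of d.
# For w=3, d=1, f=0b110: f>>1 == 0b11, so g must satisfy g & 0b11 == 0b11, giving g in {3, 7}.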
class LSbacklinks(object):
def __init__(self,w):
g=nx.DiGraph()
g0=nx.Graph()
n=math.factorial(w)
self.n = n
perms = []
self.perms=perms
for p in itertools.permutations(list(range(w))):
perms.append(p)
for x in range(0,w) :
for y in range(x+1,w+1):
d = y-x
for i in range(n):
for j in range(n):
match=True
#print d,x,y,i,j,w-d,"####"
for k in range(w-d):
#print d,x,y,i,j,k,k+d
if not perms[i][k]+d == perms[j][k+d]:
match=False
if match:
g.add_edge((x,i),(y,j),weight=d*d )
g0.add_edge((x,i),(y,j),weight=d*d )
#print (x,i),(y,j),d,"z"
# for e in g.edges():
#print "e:",e
n_backlinks=0
backlinks={}
for i in range(n):
backlinks[i]=[]
nn = nx.dfs_preorder_nodes(g,(0,i))
#print "#",str((0,i))#,list(nn)
sg = g0.subgraph(nn)
#print "#",list(sg.nodes())
t = nx.minimum_spanning_tree(sg)
for a,b in t.edges():
if b<a:
c=b
b=a
a=c
if a==(0,i):
#print "x:",b[0]-a[0],a,b,perms[b[1]],perms[a[1]]
n_backlinks+=1
backlinks[i].append( (b[0]-a[0],b[1]) )
self.backlinks = backlinks
self.n_backlinks=n_backlinks
if __name__=='__main__':
import sys
w=int(sys.argv[1])
bl = LSbacklinks(w)
#print "#",bl.n_backlinks,bl.n,float(bl.n_backlinks)/bl.n
x="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
def sperm(p,s):
r=""
for pp in p:
if pp<len(s):
r+=s[pp]
return r
print(x)
if False:
for i in range(w,len(x)-w+1):
for p in bl.perms:
s1p = sperm(p,x[i:i+w])
for o,p2 in bl.backlinks[p]:
s2 = x[i-o:i]
print("."*(i-o) + sperm(p2,s2) + s1p +"."*(len(x)-(i-o)-w-o),"%")
else:
for i in range(w,len(x)-w+1):
for pi in range(bl.n):
s1p = sperm(bl.perms[pi],x[i:i+w])
for o,p2 in bl.backlinks[pi]:
s2 = x[i-o:i]
print("."*(i-o) + sperm(bl.perms[p2],s2) + s1p +"."*(len(x)-(i-o)-w-o),"%")
fl = LSbitlinks(w)
```
#### File: docker-hirise/scripts/p2fa2.py
```python
from __future__ import division
from __future__ import print_function
from builtins import str
from builtins import range
from builtins import object
from past.utils import old_div
import sys
import string
import random
import os
import re
import pysam
tt = str.maketrans("ACTGactg","TGACtgac")
class fastaWriter(object):
def __init__(self,filename,linelen=60):
# print(filename, file=sys.stderr)
self.f=open(filename,"w")
# print(self.f, file=sys.stderr)
self.linelen=linelen
self.buff=""
self.x=0
self.name=""
def write(self,s):
self.buff+=s
self.x+=len(s)
# print(len(self.buff),self.linelen, file=sys.stderr)
# while len(self.buff)>self.linelen:
wrtn=0
for i in range( 0, self.linelen*(int(old_div(len(self.buff),self.linelen))) , self.linelen ):
wrtn+=self.linelen
self.f.write( self.buff[i:i+self.linelen]+"\n" )
# print "#",self.buff[i:i+self.linelen]
# sys.stdout.flush()
if (len(self.buff)%self.linelen)==0:
self.buff=""
else:
self.buff= self.buff[ -(len(self.buff)%self.linelen) :]
# print(len(self.buff),wrtn, file=sys.stderr)
self.f.flush()
def flush(self):
# while len(self.buff)>self.linelen:
# print(len(self.buff),self.linelen,"flush", file=sys.stderr)
wrtn=0
for i in range( 0, self.linelen*(int(old_div(len(self.buff),self.linelen))) , self.linelen ):
wrtn+=self.linelen
self.f.write( self.buff[i:i+self.linelen]+"\n" )
if len(self.buff)%self.linelen>0:
wrtn+=(len(self.buff)%self.linelen)
# print("flush",self.buff[ -(len(self.buff)%self.linelen) :], file=sys.stderr)
self.f.write( self.buff[ -(len(self.buff)%self.linelen) :] +"\n")
self.buff=""
# print(len(self.buff),wrtn, file=sys.stderr)
self.f.flush()
def next(self,name):
# if self.x>0:
# print("#.",self.name,self.x, file=sys.stderr)
self.x=0
self.flush()
# print(name, file=sys.stderr)
sys.stdout.flush()
self.f.write(">{}\n".format(name))
self.f.flush()
self.name=name
def close(self):
self.f.close()
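# Illustrative use of fastaWriter (file and sequence names are made up):
# fw = fastaWriter("out.fa"); fw.next("scaffold_1"); fw.write("ACGT" * 100); fw.flush(); fw.close()
# write() buffers sequence and emits complete linelen-character lines; flush() writes
# whatever remains in the buffer as a final short line.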
def rc(s):
try:
s = s.translate(tt)
except:
print(type(tt))
print(tt)
print(type(s))
print(s)
return s[::-1]
if __name__=="__main__":
import sys
# print " ".join(sys.argv)
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-f','--fasta',required=True)
parser.add_argument('-o','--outfile',required=True)
parser.add_argument('-p','--prefix',required=False)
parser.add_argument('-l','--logfile',required=False,default=False)
parser.add_argument('-g','--gapclose',required=False)
parser.add_argument('-d','--debug',default=False,action="store_true")
parser.add_argument('--seed',required=False,type=int,default=1, help="Seed for random number generation, use -1 for no seed")
args = parser.parse_args()
if args.seed != -1 :
random.seed(args.seed)
if not os.path.exists("%s.fai" % args.fasta):
pysam.faidx(args.fasta)
fastafile = pysam.Fastafile( args.fasta )
outfasta=fastaWriter(args.outfile)
# print("opened outfile", file=sys.stderr)
logfile=False
if args.logfile: logfile=open(args.logfile,"wt")
name_prefix=""
if args.prefix:
name_prefix=args.prefix
else:
import idGen as idGen
name_prefix="Sc" + idGen.id()
print("name_prefix",name_prefix,file=sys.stderr)
# for i in range(5):
# name_prefix += random.choice( "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz" )
#31 Scaffold142609_1.5 GGGGTACCAGGTGTCTGGGTGCTGGCAGAGCCGGCACTGATGTTTTCTGGG Scaffold41854_1.5 GGTACCAGGTGTCTGGGTGCTGGCAGAGCCGGCACTGATGTTTTCTGGGGG GGGGTACCAGGTGTCTGGGTGCTGGCAGAGCCGGCACTGATGTTTTCTGGGGG
gapclose_data={}
if args.gapclose:
fh=open(args.gapclose)
while True:
l=fh.readline()
if not l: break
if l[0]=="#": continue
scaffold,end1,kmer1,end2,kmer2,closure=l.strip().split()
gapclose_data[end1,end2]=(kmer1,kmer2,closure)
fh.close()
last_patch=""
kmer=51
trailing_kmer=""
nibbled_seq=""
last_c=-1
last_x=0
last_s="__"
cl=0
while True:
l=sys.stdin.readline()
if not l: break
if not l[:2]=="p:": continue
# print(l.strip(), file=sys.stderr)
c=l.strip().split()
#p: 1 Scaffold63081_1.5 25308872.0 8 31235809 37446086.0 ['79538.0', '8', '+', '31235809', '31278771', '1', '42972', '42972']
scaffold,end,x = c[1],c[2],float(c[3])
clen=int(c[7])
if not scaffold==last_s:
outfasta.next(name_prefix+"_"+str(scaffold))
last_x=0
contig,p=end[:-2],end[-1:]
((ocontig,base),) = re.findall(r"(.*)_(\d+)$",contig)
base=int(base)
if args.debug: print(scaffold,end,x,contig,p,base,last_c, file=sys.stderr)
if not contig == last_c:
nibble_off=0
nibbled_seq = ""
# if args.debug: print(last_x)
if last_x>0:
if (last_end,end) in gapclose_data:
kmer1,kmer2,closure= gapclose_data[last_end,end]
if args.debug: print("gc->",kmer1,kmer2,closure)
if not kmer1.lower() == closure[:len(kmer1)].lower():
print("kmers mismatch:",kmer1,closure,len(kmer1),len(closure), file=sys.stderr)
raise Exception
outfasta.write( closure[len(kmer1):] )
nibble_off = len(kmer2)
last_patch = closure
nibbled_seq = closure[-len(kmer2):]
elif (end,last_end) in gapclose_data:
if args.debug: print("gc<-")
kmer2,kmer1,closure= gapclose_data[end,last_end]
kmer1 = rc(kmer1)
kmer2 = rc(kmer2)
closure = rc(closure)
last_patch = closure
if not kmer1 == closure[:len(kmer1)]:
print("kmers mismatch:",kmer1,closure,len(kmer1),len(closure), file=sys.stderr)
raise Exception
outfasta.write( closure[len(kmer1):] )
nibble_off = len(kmer2)
nibbled_seq = closure[-len(kmer2):]
pass
else:
outfasta.write( "N" * int(x-last_x) )
nibbled_seq = ""
last_patch = ""
# print(seqs[contig][:500], file=sys.stderr)
x_before=outfasta.x
st="+"
if p=="5":
cseq = fastafile.fetch(reference=ocontig,start=base-1,end=base+clen).decode()
outfasta.write(cseq[nibble_off:])
trailing_kmer = cseq[-kmer:]
if not cseq[:nibble_off].lower()==nibbled_seq.lower():
print("inconsistent gap closure:",contig,nibble_off,cseq[:nibble_off].lower(),"neq",nibbled_seq.lower(),len(cseq),trailing_kmer, file=sys.stderr)
raise Exception
#exit(1)
else:
st="-"
cseq = fastafile.fetch(reference=ocontig,start=base-1,end=base+clen).decode()
if args.debug: print("x",ocontig,len(cseq))
outfasta.write(rc(cseq)[nibble_off:])
trailing_kmer = rc(cseq)[-kmer:]
if not rc(cseq)[:nibble_off].lower()==nibbled_seq.lower():
outfasta.flush()
print("inconsistent gap closure:",contig,nibble_off,rc(cseq)[:nibble_off],"neq",nibbled_seq,"rc",base,clen,ocontig,cseq, file=sys.stderr)
raise Exception
#exit(1)
x_after=outfasta.x
if logfile:
logfile.write("{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(name_prefix+"_"+str(scaffold),ocontig,base-1,base+clen-1,st,x_before,x_after))
else:
pass
# print("#lc:",contig,p,int(x-last_x),len(seqs[contig]), file=sys.stderr)
# print "N" * int(x-last_x)
last_c=contig
last_x=x
last_s=scaffold
last_end=end
outfasta.flush()
outfasta.close()
```
#### File: docker-hirise/scripts/score_links4.py
```python
from __future__ import print_function
from builtins import str
from builtins import map
import sys
import networkx as nx
import chicago_edge_scores as ces
import numpy as np
from scipy.stats import poisson
import math
#G=3.0e9
N=100000000.0
pn=0.3
G=3000000000.0
import math
if __name__=="__main__":
import sys
import argparse
parser = argparse.ArgumentParser()
# parser.add_argument('-t','--threshold',default=0.0 , type=float)
# parser.add_argument('-b','--besthits')
parser.add_argument('-d','--debug',default=False , action='store_true')
# parser.add_argument('-P','--plot',default=False , action='store_true')
parser.add_argument('-M','--set_insert_size_dist_fit_params',default=False )
# parser.add_argument('-G','--maxTrueGap',type=int,default=False)
parser.add_argument('-S','--scoreDelta',type=float,default=2.0)
parser.add_argument('-p','--pvalue',type=float,default=0.000001)
parser.add_argument('-E','--endwindow',type=float,default=False,help="Ignore links where either read is buried more than endwindow bases from the end of its mapped contig.")
parser.add_argument('-N','--maxN',type=int,default=False)
parser.add_argument('-L','--minlen',type=int,default=1000)
args = parser.parse_args()
ces.debug=args.debug
fmodel=open( args.set_insert_size_dist_fit_params )
contents = fmodel.read()
try:
fit_params=eval(contents)
except:
print("couldn't deal with option", args.set_insert_size_dist_fit_params)
fmodel.close()
ces.set_exp_insert_size_dist_fit_params(fit_params)
# besthit={}
# if args.besthits:
# besthit={}
# if args.besthits:
# f = open(args.besthits)
# while True:
# l = f.readline()
# if not l: break
# if not l[:5]=="best:": continue
# c=l.strip().split()
# besthit[c[1]]=c[2:]
# print c[1],besthit[c[1]]
# f.close()
# if args.progress: print("#Done reading besthits")
def ppf_cached(y,cache={}):
x=round(y,4)
if x==0:
return(poisson.ppf(0.99999999,y))
if x in cache: return cache[x]
ppf=poisson.ppf(0.99999999,x)
if np.isnan(ppf) or ppf==np.inf:
print("wtf:",y,x)
cache[x]=max(ppf,1)
return ppf
def ppf_mtc(y,N,cache={}):
pp=(1.0-(args.pvalue/N))
if pp==1.0:
pp=1.0-np.finfo(float).resolution
x=round(y,4)
if x==0:
ppf=poisson.ppf(pp,y)
if np.isnan(ppf) or np.isinf(ppf):
print("wtf:",y,x,ppf,(1.0-(args.pvalue/N)),N)
return ppf
if (x) in cache: return cache[x]
ppf=poisson.ppf(pp,x)
cache[x]=max(ppf,1)
return ppf
n_done=0
G2=(ces.model.G*ces.model.G)
while (not args.maxN) or n_done<args.maxN:
l=sys.stdin.readline()
if not l: break
if l[0]=="#": continue
c = l.strip().split("\t")
s1,s2,l1,l2,n = c[0],c[1],int(c[2]),int(c[3]),int(c[4])
if l1<args.minlen : continue
if l2<args.minlen : continue
if args.endwindow and ((l1>args.endwindow*2) or (l2>args.endwindow*2)) :
links = eval( " ".join(c[5:]) )
n=0
for x,y in links:
if not (x<args.endwindow or (l1-x)<args.endwindow): continue
if not (y<args.endwindow or (l2-y)<args.endwindow): continue
n+=1
l1 = min(l1,2*args.endwindow)
l2 = min(l2,2*args.endwindow)
n_done+=1
n_bar0= ces.model.N*ces.model.pn*l1*l2*2/G2
# n=len(links)
# ppf=ppf_cached(n_bar0)
N=int((G2/(l1*l2)))
ppf=ppf_mtc(n_bar0,N)
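# n_bar0 is the expected number of noise links between two unrelated contigs of lengths
# l1 and l2 under the model (N read pairs, noise fraction pn, genome size G), and N here
# approximates the number of comparable contig pairs, used to Bonferroni-correct the
# p-value inside ppf_mtc; a pair is reported only when the observed count n exceeds the
# resulting Poisson cutoff by at least scoreDelta.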
if (n-ppf)>=args.scoreDelta:
print("\t".join(map(str,[s1,s2,n,ppf,round(n_bar0,4),l1,l2,N])))
``` |
{
"source": "AakashSYadav/SpriD",
"score": 3
} |
#### File: SpriD/Additional/ani.py
```python
import math
from PIL import Image, ImageDraw
imgx = 800
imgy = 600
image = Image.new("RGB", (imgx, imgy))
draw = ImageDraw.Draw(image)
# Second Order ODE (y'' = f(x, y, y')) Solver using Euler method
# n : number of steps (higher the better)
# xa: initial value of independent variable
# xb: final value of independent variable
# ya: initial value of dependent variable
# y1a: initial value of first derivative of dependent variable
# Returns value of y, y1 at xb.
def Euler2(f, xa, xb, ya, y1a, n):
h = (xb - xa) / float(n)
x = xa
y = ya
y1 = y1a
for i in range(n):
y1 += h * f(x, y, y1)
y += h * y1
x += h
return [y, y1]
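# Because y1 (velocity) is advanced first and the updated y1 is then used to advance y
# (position), this is the semi-implicit (symplectic) Euler update, which keeps the
# oscillation amplitude of the spring-mass system bounded far better than naive explicit Euler.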
# Damped spring-mass system driven by sinusoidal force
# y'' = (F0 * math.cos(omega * t - phi) - b * y' - k * y) / m
# y'' : acceleration
# y' : velocity
# y : position
m = 2.0 # mass (kg)
F0 = 4.76 # force amplitude constant (N)
omega = 0.36 # angular frequency (rad/s)
phi = 0.0 # phase constant (rad)
b = 0.0 # friction constant (Ns/m)
k = 20.0 # spring constant (N/m)
def f(x, y, y1):
return (F0 * math.cos(omega * x - phi) - b * y1 - k * y) / m
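# With these constants the undamped natural frequency is omega_n = sqrt(k/m) = sqrt(20/2)
# ~ 3.16 rad/s, so the 0.36 rad/s drive is well below resonance and the plot shows a slow
# forced oscillation with the faster free vibration superimposed (b = 0, so no decay).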
yaSim = 0.0 # initial position (m)
y1aSim = 0.0 # initial velocity (m/s)
n = 1000 # number of steps for Euler method
xaSim = 0.0 # initial time of simulation (s)
xbSim = 100.0 # final time of simulation (s)
xdSim = xbSim - xaSim # deltaT of simulation
nSim = 1000 # number of time steps of simulation
# find min and max values of position (needed for the graph)
ya = yaSim
y1a = y1aSim
yMin = ya
yMax = ya
for i in range(nSim):
xa = xaSim + xdSim * i / nSim
xb = xaSim + xdSim * (i + 1) / nSim
y_y1 = Euler2(f, xa, xb, ya, y1a, n)
ya = y_y1[0]
y1a = y_y1[1]
if ya < yMin:
yMin = ya
if ya > yMax:
yMax = ya
# draw the graph
ya = yaSim
y1a = y1aSim
for i in range(nSim):
xa = xaSim + xdSim * i / nSim
xb = xaSim + xdSim * (i + 1) / nSim
kxa = (imgx - 1) * (xa - xaSim) / xdSim
kya = (imgy - 1) * (ya - yMin) / (yMax - yMin)
y_y1 = Euler2(f, xa, xb, ya, y1a, n)
ya = y_y1[0]
y1a = y_y1[1]
kxb = (imgx - 1) * (xb - xaSim) / xdSim
kyb = (imgy - 1) * (ya - yMin) / (yMax - yMin)
draw.line((kxa, kya, kxb, kyb), (0, 255, 0)) # (r, g, b)
image.save("Spring_mass system simulation.png", "PNG")
```
#### File: SpriD/Additional/heli.py
```python
from tkinter import *
# for using combobox
from tkinter.ttk import *
# for creating messagebox alert
from tkinter import messagebox
# for adding menu
from tkinter import Menu
# for Image
from PIL import ImageTk, Image
# TODO: http://www.java2s.com/Code/Python/GUI-Tk/LayoutsideTOPLEFT.htm
window = Tk()
window.title("Welcome to SpriD")
window.geometry('700x400')
def interplt(x,y,a):
if a in x:
i=x.index(a)
lbl1.configure(text= y[i])
# return y[i]
else:
for j in range(len(x)):
if(a<x[j]):
x1=x[j-1]
x2=x[j]
y1=y[j-1]
y2=y[j]
m=(y2-y1)/(x2-x1)
c=m*(a-x1)+y1
lbl1.configure(text= c)
# return c
x = [1,2,3,4,5]
y = [2,4,6,8,10]
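# interplt() does piecewise-linear interpolation: for a between x1=x[j-1] and x2=x[j],
# the result is y1 + (y2 - y1)/(x2 - x1) * (a - x1); with the lists above, a=2.5 gives 5.0.
# Note that lbl1 (the result label it writes to) is not created in this script, so the
# function only works once such a label exists.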
# button1 = Button(topFrame, text="Calculate", command=lambda: interplt(x,y,float(txt1.get())))
headFrame = Frame(window)
toolbar1 = Frame(window)
toolbar2 = Frame(window)
toolbar3 = Frame(window)
calcFrame = Frame(window)
toolbar4 = Frame(window)
toolbar5 = Frame(window)
toolbar6 = Frame(window)
footFrame = Frame(window)
lbl = Label(headFrame, text="Input the given params",font=("Arial B",14))
lbl.pack(padx=2, pady=2)
headFrame.pack(side=TOP, fill=X, padx=4 )
lbl = Label(toolbar1, text="open", width=6)
lbl.pack(side=LEFT, padx=2, pady=2)
txt = Entry(toolbar1,width=10)
txt.pack(side=LEFT, padx=2, pady=2)
lbl = Label(toolbar1, text="m/s^2",font=("Arial B", 10), width=6)
lbl.pack(side=LEFT, padx=2, pady=2)
lbl = Label(toolbar1, text="open", width=6)
lbl.pack(side=LEFT, padx=2, pady=2)
txt = Entry(toolbar1,width=10)
txt.pack(side=LEFT, padx=2, pady=2)
lbl = Label(toolbar1, text="m/s^2",font=("Arial B", 10), width=6)
lbl.pack(side=LEFT, padx=2, pady=2)
toolbar1.pack(fill=X, padx=4 )
lbl = Label(toolbar2, text="open", width=6)
lbl.pack(side=LEFT, padx=2, pady=2)
txt = Entry(toolbar2,width=10)
txt.pack(side=LEFT, padx=2, pady=2)
lbl = Label(toolbar2, text="MPa",font=("Arial B", 10), width=6)
lbl.pack(side=LEFT, padx=2, pady=2)
toolbar2.pack(fill=X, padx=4)
lbl = Label(toolbar3, text="open", width=6)
lbl.pack(side=LEFT, padx=2, pady=2)
txt = Entry(toolbar3,width=10)
txt.pack(side=LEFT, padx=2, pady=2)
lbl = Label(toolbar3, text="MPa",font=("Arial B", 10), width=6)
lbl.pack(side=LEFT, padx=2, pady=2)
toolbar3.pack(fill=X, padx=4)
b = Button(calcFrame, text="Calculate")
b.pack()
calcFrame.pack(fill=X)
lbl = Label(toolbar4, text="length", width=6)
lbl.pack(side=LEFT, padx=2, pady=2)
lbl = Label(toolbar4, text="20.28mm", width=6)
lbl.pack(side=LEFT, padx=2, pady=2)
toolbar4.pack(side=BOTTOM)
window.mainloop()
```
#### File: SpriD/Additional/sim2.py
```python
import visual as vs # classic VPython module; the code below uses the vs. prefix
import Tkinter as tk
import thread
# will be global
sphere = None
def vthread():
global sphere
vs.scene.title = "Sphere in space (3D drag with right mouse button)"
vs.scene.autoscale = False
sphere = vs.sphere(pos=(0, 0, 0), color=vs.color.green)
def move_sphere_incr_x(event=None):
"""moves along the x axis incrementing x"""
x, y, z = sphere.pos
sphere.pos = (x+1, y, z)
def move_sphere_decr_x(event=None):
"""moves along the x axis decrementing x"""
x, y, z = sphere.pos
sphere.pos = (x-1, y, z)
root = tk.Tk()
w = 300
h = 200
x = 450
y = 100
# use width x height + x_offset + y_offset (no spaces!)
root.geometry("%dx%d+%d+%d" % (w, h, x, y))
root.title("Control Sphere from here")
b_incr_x = tk.Button(root, text="move on x axis increment x")
# bind passes an event to function
b_incr_x.bind("<Button-1>", move_sphere_incr_x)
b_incr_x.grid(row=0, column=0, padx=20, pady=10)
b_decr_x = tk.Button(root, text="move on x axis decrement x")
# bind passes an event to function
b_decr_x.bind("<Button-1>", move_sphere_decr_x)
b_decr_x.grid(row=1, column=0, padx=10)
# use thread to do run VPython and Tkinter simultaneously
# thread.start_new_thread(function, args)
# args is an empty tuple here
thread.start_new_thread(vthread, ()) # vthread assigns the global sphere once the scene is built
root.mainloop()
```
#### File: SpriD/Additional/tabs2.py
```python
from tkinter import *
from tkinter import ttk
# for using combobox
from tkinter.ttk import *
# for creating messagebox alert
from tkinter import messagebox
# for adding menu
from tkinter import Menu
# for Image
from PIL import ImageTk, Image
# add hyperlinks
import webbrowser
# timestamp
import time
import datetime
root = Tk()
var = IntVar()
#__________________________________________________________________________________________________
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^from brain ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\
pi = 3.14159265;
# TODO: pg 510 diameter dependency
# material = [m, A(kpsi), E(mpsi), G(mpsi), density(ld/inch^3), relative cost]
A228 = [0.145, 201, 29.5, 12, 0.284, 2.6]
A229 = [0.147, 147, 30, 11.5, 0.284, 1.3]
A227 = [0.190, 140, 28.8, 11.7, 0.284, 1.0]
A232 = [0.168, 169, 29.5, 11.2, 0.284, 3.1]
A401 = [0.108, 202, 29.5, 11.2, 0.284, 4]
def calcMain():
# def calcMain(fmax, ymax, freelength, solidlength, material, endCondition):
# if inout in si
if var.get()==2:
fmax=float(txt_fmax.get())/4.44822
ymax=float(txt_ymax.get())/25.4
freelength=float(txt_freelength.get())/25.4
solidlength=float(txt_solidlength.get())/25.4
if var.get()==1:
fmax=float(txt_fmax.get())
ymax=float(txt_ymax.get())
freelength=float(txt_freelength.get())
solidlength=float(txt_solidlength.get())
d=0.001
counter = 0
while d>0:
# factor of safety # TODO: take input
ns = 1.2;
# fixed pg510
zeta = 0.15
#for fom
gama = 1;
#for music wire
## TODO: reduce size
material=combo_mat.get();
if material=='A228':
if d <0.033:
E=29.5*1000000
G=12*1000000
elif d >= 0.033 and d<=0.063:
E=29*1000000
G=11.85*1000000
elif d > 0.063 and d<=0.125:
E=28.5*1000000
G=11.75*1000000
elif d > 0.125:
E=28*1000000
G=11.6*1000000
A=A228[1]*1000
m=A228[0]
# E=A228[2]*1000000
# G=A228[3]*1000000
rc=A228[5]
if material=='A229':
if d <0.033:
E=28.8*1000000
G=11.7*1000000
elif d >= 0.033 and d<=0.063:
E=28.7*1000000
G=11.6*1000000
elif d > 0.063 and d<=0.125:
E=28.6*1000000
G=11.5*1000000
elif d > 0.125:
E=28.5*1000000
G=11.4*1000000
A=A229[1]*1000
m=A229[0]
# E=A229[2]*1000000
# G=A229[3]*1000000
rc=A229[5]
if material=='A227':
A=A227[1]*1000
m=A227[0]
E=A227[2]*1000000
G=A227[3]*1000000
rc=A227[5]
if material=='A232':
A=A232[1]*1000
m=A232[0]
E=A232[2]*1000000
G=A232[3]*1000000
rc=A232[5]
if material=='A401':
A=A401[1]*1000
m=A401[0]
E=A401[2]*1000000
G=A401[3]*1000000
rc=A401[5]
# A = 201000 ;
# relative cost for mausc wire
# rc = 2.6;
# kpsi inch
# m = 0.145;
#E = 28.5;
# G = 11750000;
sut = A/(d**m);
ssy = 0.45*sut;
alpha = ssy/ns;
beta = (8*(1+zeta)*fmax)/(pi*(d**2));
# TODO:
C = (2*alpha-beta)/(4*beta) + (((2*alpha-beta)/(4*beta))**2 - (3*alpha)/(4*beta))**(0.5);
D = d*C;
# kb = (4*C+ 2)/(4*C - 3);
# taus = (kb*8*(1+zeta)*fmax*D)/(3.147*(d^3));
# OD = D+d;
Na = (G*(d**4)*ymax)/(8*(D**3)*fmax);
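# These follow the standard (Shigley-style) helical compression spring relations:
# Sut = A/d^m from the wire-diameter strength fit, Ssy taken here as 0.45*Sut for the
# torsional yield, alpha = Ssy/ns and beta = 8*(1+zeta)*Fmax/(pi*d^2) feed the quadratic
# giving the spring index C (so the stress at solid height meets the factor of safety),
# D = C*d, and Na comes from the deflection relation ymax = 8*Fmax*D^3*Na/(G*d^4).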
# checking end condition
combo_end['values']= ('Plain','Plain-Ground','Square-Closed','Square-Ground')
endCondition=combo_end.get()
if endCondition=='Plain':
Nt=Na
ls = d*(Nt+1)
if endCondition=='Plain-Ground':
Nt=Na+1
ls = d*Nt
if endCondition=='Square-Closed':
Nt=Na+2
ls = d*(Nt+1)
if endCondition=='Square-Ground':
Nt=Na+2
ls = d*Nt
lo = ls + (1+zeta)*ymax;
fom = -rc*gama*(pi**2)*(d**2)*Nt*D*0.25;
if isinstance(C, complex) or isinstance(Na, complex) or isinstance(ls, complex) or isinstance(lo, complex):
print('complex values')
elif (C >= 4 and C <= 12 and Na >= 3 and Na <= 15 and ls < solidlength and lo < freelength):
# break
# return D, d, Na, ls, lo, fom
if counter==0:
if var.get()==2:
xD=D*25.4
xd=d*25.4
xls=ls*25.4
xlo=lo*25.4
if var.get()==1:
xD=D
xd=d
xls=ls
xlo=lo
# fmax=float(txt_fmax.get())
# ymax=float(txt_ymax.get())
# freelength=float(txt_freelength.get())
# solidlength=float(txt_solidlength.get())
# xD=D
# xd=d
xNa=Na
# xls=ls
# xlo=lo
xfom=fom
counter=counter+1
f=open("res.txt", "a+")
f.write("Wire diameter %f\r\n" % d)
f.write("Spring diameter %f\r\n" % D)
f.write("Na %f\r\n" % Na)
f.write("ls %f\r\n" % ls)
f.write("lo %f\r\n" % lo)
f.write("Figure of merit %f\r\n" % fom)
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
f.write("____________"+st+"_____________\n")
f.close()
elif d>1:
print("iteration stopped")
# messagebox.showinfo("Sorry! couldn't design the spring\nTIP:try different input")
break
d = d+0.001;
# print(xD, xd, xNa, xls, xlo, xfom)
if var.get()==2:
res = "Spring diameter " + str(xD) + "mm\nWire diameter "+str(xd)+"mm\nNa "+str(xNa)+"\nls "+str(xls)+"mm\nlo "+str(xlo)+"mm\nFigure of merit "+str(xfom)
if var.get()==1:
res = "Spring diameter " + str(xD) + "inch\nWire diameter "+str(xd)+"inch\nNa "+str(xNa)+"\nls "+str(xls)+"inch\nlo "+str(xlo)+"inch\nFigure of merit "+str(xfom)
# res = "Spring diameter " + str(xD) + "\nWire diameter "+str(xd)+"\nNa "+str(xNa)+"\nls "+str(xls)+"\nlo "+str(xlo)+"\nFigure of merit "+str(xfom)
lbl_res.configure(text= res)
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^from brain ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# for openeing file
def callback(event):
webbrowser.open_new(r"file:///home/aakash/DME/res.txt")
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ radio @ SI units toggle
def rad_si():
lbl_ut_fmx.configure(text="N")
lbl_ut_ymx.configure(text="mm")
lbl_ut_freelen.configure(text="mm")
lbl_ut_solidlen.configure(text="mm")
toolbar1.pack_forget()
def rad_us():
lbl_ut_fmx.configure(text="lbf")
lbl_ut_ymx.configure(text="inch")
lbl_ut_freelen.configure(text="inch")
lbl_ut_solidlen.configure(text="inch")
toolbar1.pack(fill=X, padx=70 )
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ radio @ SI units toggle
note = ttk.Notebook(root)
tab1 = ttk.Frame(note)
headFrame = Frame(tab1)
toolbar1 = Frame(tab1)
toolbar2 = Frame(tab1)
toolbar3 = Frame(tab1)
toolbar4 = Frame(tab1)
calcFrame = Frame(tab1)
toolbar5 = Frame(tab1)
toolbar6 = Frame(tab1)
toolbar7 = Frame(tab1)
footFrame = Frame(tab1)
lbl = Label(headFrame, text="Input the given params",font=("Arial B",14))
lbl.pack(padx=2, pady=2)
headFrame.pack(side=TOP, fill=X, padx=4 , pady=20)
lbl = Label(toolbar1, text="Maximum force", width=15)
lbl.pack(side=LEFT, padx=2, pady=2)
txt_fmax = Entry(toolbar1,width=10)
txt_fmax.pack(side=LEFT, padx=2, pady=2)
txt_fmax.focus()
lbl_ut_fmx = Label(toolbar1, text="",font=("Arial B", 10), width=6)
lbl_ut_fmx.pack(side=LEFT, padx=2, pady=2)
lbl = Label(toolbar1, text="ymax", width=15)
lbl.pack(side=LEFT, padx=2, pady=2)
txt_ymax = Entry(toolbar1,width=10)
txt_ymax.pack(side=LEFT, padx=2, pady=2)
lbl_ut_ymx = Label(toolbar1, text="",font=("Arial B", 10), width=6)
lbl_ut_ymx.pack(side=LEFT, padx=2, pady=2)
toolbar1.pack(fill=X, padx=70 )
lbl = Label(toolbar2, text="Free Length", width=15)
lbl.pack(side=LEFT, padx=2, pady=2)
txt_freelength = Entry(toolbar2,width=10)
txt_freelength.pack(side=LEFT, padx=2, pady=2)
lbl_ut_freelen = Label(toolbar2, text="",font=("Arial B", 10), width=6)
lbl_ut_freelen.pack(side=LEFT, padx=2, pady=2)
lbl = Label(toolbar2, text="End condition", width=15)
lbl.pack(side=LEFT, padx=2, pady=2)
# txt = Entry(toolbar2,width=10)
# txt.pack(side=LEFT, padx=2, pady=2)
combo_end = Combobox(toolbar2,width=10)
combo_end['values']= ('Plain','Plain-Ground','Square-Closed','Square-Ground')
combo_end.current(1) #set the selected item
combo_end.pack(side=LEFT, padx=2, pady=2)
# lbl = Label(toolbar2, text="m/s^2",font=("Arial B", 10), width=6)
# lbl.pack(side=LEFT, padx=2, pady=2)
toolbar2.pack(fill=X, padx=70)
lbl = Label(toolbar3, text="Solid Length", width=15)
lbl.pack(side=LEFT, padx=2, pady=2)
txt_solidlength = Entry(toolbar3,width=10)
txt_solidlength.pack(side=LEFT, padx=2, pady=2)
lbl_ut_solidlen = Label(toolbar3, text="",font=("Arial B", 10), width=6)
lbl_ut_solidlen.pack(side=LEFT, padx=2, pady=2)
lbl = Label(toolbar3, text="Material", width=15)
lbl.pack(side=LEFT, padx=2, pady=2)
combo_mat = Combobox(toolbar3,width=5)
combo_mat['values']= ('A227','A228','A229','A232','A401')
combo_mat.current(1) #set the selected item
combo_mat.pack(side=LEFT, padx=2, pady=2)
toolbar3.pack(fill=X, padx=70)
lbl = Label(toolbar4, text="Output Units", width=15)
lbl.pack(side=LEFT, padx=2, pady=2)
rad1 = Radiobutton(toolbar4,text='US', value=1, variable=var, command=rad_us)
rad1.pack(side=LEFT, padx=2, pady=2)
rad2 = Radiobutton(toolbar4,text='SI', value=2, variable=var, command=rad_si)
rad2.pack(side=LEFT, padx=2, pady=2)
toolbar4.pack(fill=X, padx=70)
b = Button(calcFrame, text="Calculate",command=calcMain)
b.pack()
calcFrame.pack(fill=X, pady=20)
lbl = Label(toolbar5, text="Results", width=6,font=("Calibri",10))
lbl.pack(padx=2, pady=2)
link = Label(toolbar5, text="(more)", cursor="hand2")
link.pack()
link.bind("<Button-1>",callback)
lbl_res = Label(toolbar5, text="", width=40)
lbl_res.pack(padx=2, pady=2)
toolbar5.pack(side=BOTTOM)
note.add(tab1, text = "Tab One",compound=TOP)
tab2 = ttk.Frame(note)
lbl = Label(tab2, text="gf")
lbl.pack(side = BOTTOM)
note.add(tab2, text = "Tab Two")
note.pack()
root.mainloop()
``` |
{
"source": "aakashthapa060/Atom-Social-Media",
"score": 2
} |
#### File: Atom-Social-Media/posts/views.py
```python
from django.shortcuts import render
from .models import Post
# Create your views here.
def main_view(request):
posts = Post.objects.all()
context = {
"posts": posts
}
return render(request, "main.html", context)
``` |
{
"source": "aakashthapa060/Octo-Notes",
"score": 3
} |
#### File: Octo-Notes/users/views.py
```python
from django import forms
from django.shortcuts import redirect, render
from django.contrib.auth import authenticate,login,logout
from .forms import LoginForm, RegisterForm
# Create your views here.
def login_view(request):
if request.user.is_authenticated:
return redirect("notes:index")
else:
form = LoginForm(request.POST or None)
if form.is_valid():
username = form.cleaned_data["username"]
password = form.cleaned_data["password"]
user = authenticate(request, username = username, password = password)
if user is not None:
login(request, user)
return redirect("notes:index")
else:
print("sorry")
form = LoginForm()
context = {
"form": form
}
return render(request, "users/login.html",context)
def register_view(request):
if request.user.is_authenticated:
return redirect("notes:index")
else:
form = RegisterForm(request.POST or None)
if form.is_valid():
form.save()
return redirect("users:login")
else:
form = RegisterForm()
context = {
"form": form
}
return render(request, "users/register.html", context)
def logout_view(request):
logout(request)
return redirect("users:login")
``` |
{
"source": "Aakash-Tripathi/FaceMaskDetection",
"score": 3
} |
#### File: FaceMaskDetection/src/loader.py
```python
import os
import pandas as pd
import matplotlib.image as img
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import train_test_split
from torchvision import transforms
class FMDDataset(Dataset):
def __init__(self, data, path, transform=None):
self.data = data.values
self.path = path
self.transform = transform
def __len__(self):
return len(self.data)
def __getitem__(self, index):
img_name, label = self.data[index]
img_path = os.path.join(self.path, img_name)
image = img.imread(img_path)
if self.transform is not None:
image = self.transform(image)
return image, label
def load_data(batch_size, test_size):
CWDpath = os.getcwd()
path = (CWDpath + r'/data/')
labels = pd.read_csv(path+r'train.csv')
train_path = path+r'FMD/'
transformer = transforms.Compose([transforms.ToPILImage(),
transforms.ToTensor(),
transforms.Resize((256, 256)),
transforms.RandomHorizontalFlip(p=0.5),
transforms.GaussianBlur(
1, sigma=(0.1, 2.0)),
transforms.Normalize(0, 1)])
train, valid = train_test_split(labels, test_size=test_size)
train_dataset = FMDDataset(train, train_path, transformer)
test_dataset = FMDDataset(valid, train_path, transformer)
train_dataloader = DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True)
test_dataloader = DataLoader(dataset=test_dataset,
batch_size=batch_size,
shuffle=True)
return train_dataloader, test_dataloader
```
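A minimal usage sketch for `load_data()` above, assuming the repository layout it expects (`./data/train.csv` plus a `./data/FMD/` image folder) and torchvision >= 0.8 (`Resize`/`GaussianBlur` are applied to tensors here); the batch shape shown is only indicative.
```python
# Hypothetical usage of load_data(); assumes ./data/train.csv and ./data/FMD/
# exist exactly as the loader expects.
from loader import load_data

train_dl, test_dl = load_data(batch_size=32, test_size=0.2)
images, labels = next(iter(train_dl))
print(images.shape)  # e.g. torch.Size([32, 3, 256, 256])
print(labels[:5])    # labels collated from data/train.csv
```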
#### File: FaceMaskDetection/src/pred.py
```python
import torch
from torchvision import models
from torchvision.transforms import transforms
from PIL import Image
import glob
import os
import warnings
warnings.filterwarnings("ignore")
def prediction(img_path):
"""
! FUNCTION FAILS IF THE IMAGE IS GREYSCALE
"""
checkpoint = torch.load(os.getcwd()+r'/models/resnet.pt',
map_location=torch.device('cpu'))
model = models.resnet18(pretrained=True)
num_ftrs = model.fc.in_features
model.fc = torch.nn.Linear(num_ftrs, 128)
model.load_state_dict(checkpoint)
model.eval()
classes = ['no mask', 'mask']
transformer = transforms.Compose([transforms.Resize((256, 256)),
transforms.ToTensor()])
image = Image.open(img_path)
image_tensor = transformer(image).float()
image_tensor = image_tensor.unsqueeze_(0)
if torch.cuda.is_available():
image_tensor.cuda()
input = torch.autograd.Variable(image_tensor)
output = model(input)
index = output.data.numpy().argmax()
pred = classes[index]
return pred
def main():
"""
    TODO: serialize each new model with its performance on the test dataset
TODO: Use OpenCV to find faces (masked and unmasked) & make bounding boxes
TODO: Feed in the OpenCV Data to prediction()
    TODO: Display the prediction on the bounding boxes
"""
from tqdm import tqdm
pred_path = os.getcwd()+'/data/test/'
images_path = glob.glob(pred_path+'/*.jpg')
pred_dict = {}
for i in tqdm(images_path, desc='Loading Files'):
pred_dict[i[i.rfind('/')+1:]] = prediction(i)
for key, value in pred_dict.items():
print(key, ':', value)
if __name__ == "__main__":
main()
``` |
{
"source": "aakashv8900/django-blog",
"score": 3
} |
#### File: blog/tests/tests_forms.py
```python
from django.test import SimpleTestCase
from blog.forms import PostForm, CommentForm
class TestForms(SimpleTestCase):
def test_post_form_valid_data(self):
form = PostForm(data={
'title': 'Rain Water Harvesting',
'text': 'Rain water harvesting is good for nature.'
})
self.assertTrue(form.is_valid())
def test_comment_form_valid_data(self):
form = CommentForm(data={
'author': 'aakash',
'text': 'Rain water harvesting is good for nature.'
})
self.assertTrue(form.is_valid())
def test_post_form_no_data(self):
form = PostForm(data={})
self.assertFalse(form.is_valid())
self.assertEquals(len(form.errors), 2)
def test_comment_form_no_data(self):
form = CommentForm(data={})
self.assertFalse(form.is_valid())
self.assertEquals(len(form.errors), 2)
```
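For context, a hedged sketch of the kind of `blog/forms.py` these tests exercise; the real module is not shown here, but `ModelForm`s exposing exactly the `title`/`text` and `author`/`text` fields would satisfy the assertions above, including the two-error counts for empty data.
```python
# Hypothetical blog/forms.py matching the tests above; Post and Comment models
# with required title/text and author/text fields are assumed.
from django import forms
from blog.models import Post, Comment


class PostForm(forms.ModelForm):
    class Meta:
        model = Post
        fields = ("title", "text")


class CommentForm(forms.ModelForm):
    class Meta:
        model = Comment
        fields = ("author", "text")
```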
#### File: blog/tests/tests_models.py
```python
from django.test import TestCase
from django.contrib.auth.models import User
from django.test import Client
class TestModels(TestCase):
def setUp(self):
user = User.objects.create(username='testuser')
user.set_password('<PASSWORD>')
user.save()
def test_login_with_wrong_password(self):
"""Login with wrong password"""
c = Client()
logged_in = c.login(username='testuser', password='<PASSWORD>')
self.assertFalse(logged_in)
def test_login_with_correct_password(self):
"""Login with correct password"""
c = Client()
logged_in = c.login(username='testuser', password='<PASSWORD>')
self.assertTrue(logged_in)
``` |
{
"source": "aakashvarma/Med-I",
"score": 2
} |
#### File: Med-I/other_resources/ap.py
```python
from hemorrhage import Pred_hemo
from flask import Flask
import json
app=Flask(__name__)
@app.route("/")
def hello():
obj=Pred_hemo()
return json.dumps({"image_data":obj.getData(obj.dirPath),"prediction":obj.prediction()})
if __name__=='__main__':
#hel=hello()
#print(hel)
app.run(debug=True)
```
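A hypothetical client-side smoke test for the endpoint above; it assumes the Flask app is already running on the default development port.
```python
# Hypothetical smoke test for the "/" route above; the host/port are the Flask
# development defaults and are an assumption.
import requests

resp = requests.get("http://127.0.0.1:5000/")
payload = resp.json()
print(sorted(payload.keys()))  # expected: ['image_data', 'prediction']
```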
#### File: Med-I/Python_files/app.py
```python
from alzheimers import Predict_alhzeimer
from hemorrhage import Pred_hemo
from tumor import Pred_tumor
import train
from flask import Flask
import json
import requests
app = Flask(__name__)
@app.route("/")
def hello():
response = requests.get('http://127.0.0.1:8000/image/api')
imgData = response.json()
imgScanType = imgData["rawdata"]["scan"]
try:
age = imgData["rawdata"]["age"]
if (imgData["rawdata"]["gender"] == 'm'):
gender = 1.0
else:
gender = 0.0
educ = imgData["rawdata"]["educ"]
ses = imgData["rawdata"]["ses"]
mmse = imgData["rawdata"]["mmse"]
etiv = imgData["rawdata"]["etiv"]
nebv = imgData["rawdata"]["nebv"]
asf = imgData["rawdata"]["asf"]
# [0.0, 76.0, 16.0, 3.0, 26.0, 1391.0, 0.705, 1.262]
getdata = train.getScalledData([[gender, age, educ, ses, mmse, etiv, nebv, asf]])
ea_prediction = train.predict(getdata)[0]
except:
ea_prediction = 'none'
if imgScanType == 'mri':
try:
obj = Predict_alhzeimer()
return json.dumps({
"image_data" : obj.getData(obj.url),
"prediction" : obj.prediction(),
"ea_prediction" : ea_prediction
})
except:
obj=Pred_tumor()
return json.dumps({
"image_data":obj.getData(obj.url),
"prediction":obj.prediction(),
"ea_prediction" : ea_prediction
})
elif imgScanType == 'ct':
obj = Pred_hemo()
return json.dumps({
"image_data":obj.getData(obj.url),
"prediction":obj.prediction(),
"ea_prediction" : ea_prediction
})
else:
print "error"
if __name__ == '__main__':
app.run(debug=True)
``` |
{
"source": "aakash-v-virani/conversational-ai-chatbot",
"score": 2
} |
#### File: deepspeech_openvino/deepspeech_openvino/deepspeech_asr_base.py
```python
import sys
import os
import numpy as np
import logging as log
from time import time
from openvino.inference_engine import IENetwork, IECore
import codecs
from deepspeech_openvino.speech_features import audio_spectrogram, mfcc
from deepspeech_openvino.ctc_beamsearch_decoder import ctc_beam_search_decoder
from abc import abstractmethod
n_input = 26
n_context = 9
n_steps = 16
numcep = n_input
numcontext = n_context
beamwidth = 10
def preprocess_sound(audio_rev, fs):
"""
converts audio to features
"""
audio = np.frombuffer(audio_rev, dtype=np.dtype("<h"))
# normalize to -1 to 1, int 16 to float32
audio = audio / np.float32(32768)
audio = audio.reshape(-1, 1)
spectrogram = audio_spectrogram(
audio, (16000 * 32 / 1000), (16000 * 20 / 1000), True
)
features = mfcc(spectrogram.reshape(1, spectrogram.shape[0], -1), fs, 26)
empty_context = np.zeros((numcontext, numcep), dtype=features.dtype)
features = np.concatenate((empty_context, features, empty_context))
num_strides = len(features) - (n_context * 2)
# Create a view into the array with overlapping strides of size
# numcontext (past) + 1 (present) + numcontext (future)
window_size = 2 * n_context + 1
features = np.lib.stride_tricks.as_strided(
features,
(num_strides, window_size, n_input),
(features.strides[0], features.strides[0], features.strides[1]),
writeable=False,
)
return features
class DeepSpeechASR(object):
def __init__(self, model, device, sample_rate, alphabet_cfg="", SPEECH_LIB_PATH=""):
        # TODO: assert that model has a .xml extension, sample_rate is a number,
        # alphabet_cfg is a file name (and protect it additionally), SPEECH_LIB_PATH
        # is a valid path, and device is one of the supported OpenVINO devices
self.output_text = ""
self.cpu_extension = SPEECH_LIB_PATH
self.sample_rate = ""
self.model_bin = model.replace(".xml", ".bin")
self.model_xml = model
self.device = device
self.alphabet_cfg = alphabet_cfg
self.sample_rate = 16000
self.ie = IECore()
self.exec_net = None
# TODO: Enable cpu extensions
# if self.cpu_extension and 'CPU' in self.device:
# if os.path.exists(self.cpu_extension) and 'CPU' in self.device:
# self.ie.add_extension(self.cpu_extension, "CPU")
# Read IR
log.debug(
"DeepSpeechASR: Loading network files:\n\t{}\n\t{}".format(
self.model_xml, self.model_bin
)
)
self.net = self.ie.read_network(
model=self.model_xml, weights=self.model_bin)
return self._load_model_to_device()
@abstractmethod
def _load_model_to_device(self):
pass
@abstractmethod
def _transcribe(self, features):
pass
def close(self):
log.info("DeepSpeechASR: close()")
if self.exec_net:
del self.exec_net
if self.ie:
del self.ie
def push_data(self, buffer):
features = preprocess_sound(buffer, self.sample_rate)
self.output_text = self._transcribe(features)
def get_result(self, final, finish_processing):
# TODO: do something with final
result = self.output_text
if finish_processing:
self.exec_net = ""
self._load_model_to_device()
return result
class Alphabet(object):
def __init__(self, alphabet_cfg):
self._label_to_str = []
self._str_to_label = {}
self._size = 0
with codecs.open(alphabet_cfg, "r", "utf-8") as fin:
for line in fin:
if line[0:2] == "\\#":
line = "#\n"
elif line[0] == "#":
continue
self._label_to_str += line[:-1] # remove the line ending
self._str_to_label[line[:-1]] = self._size
self._size += 1
def string_from_label(self, label):
return self._label_to_str[label]
def label_from_string(self, string):
return self._str_to_label[string]
def size(self):
return self._size
def main():
# TODO: Write sample app here
# asr = DeepSpeechASR()
pass
if __name__ == "__main__":
main()
```
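A small, hypothetical sketch of feeding raw PCM into `preprocess_sound()` above; the WAV file name is an assumption, and the audio is expected to be 16 kHz, mono, 16-bit to match the hard-coded sample rate.
```python
# Hypothetical feature-extraction check for preprocess_sound(); assumes a
# 16 kHz, mono, 16-bit PCM WAV file exists at the path below.
import wave

from deepspeech_openvino.deepspeech_asr_base import preprocess_sound

with wave.open("sample_16k_mono.wav", "rb") as wav:
    pcm = wav.readframes(wav.getnframes())
    rate = wav.getframerate()  # expected to be 16000

features = preprocess_sound(pcm, rate)
# One row per stride: (num_strides, 2 * n_context + 1, n_input) == (*, 19, 26)
print(features.shape)
```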
#### File: src/test/test_asr.py
```python
import unittest
import unittest.mock as mock
class TestSpeechLibrary(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
@mock.patch("zmq_integration_lib.zmq")
def test_speech_manager(self, mock_zmq):
pass
```
#### File: nlp/nlp_server/rasa_wrapper.py
```python
import json
import os
import requests
import jwt
SERVER_URL = "http://localhost:5005"
def get_sanic_server_ssl(app):
# read sanic variables
ssl = {
"cert": os.path.join("/run/secrets", app.config.SERVERCERT),
"key": os.path.join("/run/secrets", app.config.SERVERKEY),
}
return ssl
def get_client_cert(app):
return (
os.path.join("/run/secrets", app.config.TLS_CERT),
os.path.join("/run/secrets", app.config.TLS_KEY),
)
def update_secrets(app):
# read env vars
# SANIC_JWTSECRET
# SANIC_JWTALGORITHM
jwt_secret = None
jwt_algorithm = None
try:
jwt_secret = _read_docker_secret(app.config.JWTSECRET)
jwt_algorithm = _read_docker_secret(app.config.JWTALGORITHM)
except Exception as msg:
print("authenticate failed: ", msg)
app.config["jwt_secret"] = jwt_secret
app.config["jwt_algorithm"] = jwt_algorithm
return app
def _read_docker_secret(secret_name):
# docker secret path
# build path to secret, read and return value of
# secret file
secret = None
secret_file = os.path.join("/run/secrets", secret_name)
with open(secret_file) as f:
secret = f.read()
return secret
def authenticate(app, req):
jwt_token = req.token
try:
jwt_secret = app.config.jwt_secret
jwt_algorithm = app.config.jwt_algorithm
except Exception as msg:
print("authenticate failed: ", msg)
return False
return _verify_token(jwt_token, jwt_secret, jwt_algorithm)
def _verify_token(jwt_token, JWT_SECRET, JWT_ALGORITHM):
ret = False
try:
_ = json.loads(json.dumps(jwt.decode(
jwt_token, JWT_SECRET, JWT_ALGORITHM)))
ret = True
except Exception as msg:
print("Token failed ", msg)
return ret
def forward_to_rasa(app, req):
# connect to rasa and send
print(req.json)
# TODO: remove setting proxies to somewhere else
os.environ["http_proxy"] = ""
os.environ["https_proxy"] = ""
# curl localhost:5005/webhooks/rest/webhook -d '{"sender": "user1", "message":"show atms for Pune Bank"}'
# validate req
data = req.json
# validate data
payload = {"sender": data["sender"], "message": data["message"]}
headers = {"content-type": "application/json"}
# r = requests.post(SERVER_URL + '/webhooks/rest/webhook', headers=headers, data = json.dumps(payload), verify=False,cert = get_client_cert(app) )
r = requests.post(
SERVER_URL + "/webhooks/rest/webhook", headers=headers, data=json.dumps(payload)
)
return r.json()
```
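A hedged round-trip sketch for `_verify_token()` above, minting a token with PyJWT under an assumed secret and algorithm; the import path mirrors this file's location and is also an assumption.
```python
# Hypothetical round-trip check of _verify_token(); PyJWT is assumed installed.
import jwt

from nlp_server.rasa_wrapper import _verify_token  # import path assumed

secret, algorithm = "test-secret", "HS256"
token = jwt.encode({"sender": "user1"}, secret, algorithm=algorithm)

print(_verify_token(token, secret, algorithm))          # should print True
print(_verify_token(token, "wrong-secret", algorithm))  # should print False
```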
#### File: obp_api/test/test_bank.py
```python
import unittest
from obp_api.banks import Bank
import unittest.mock as mock
class TestBank(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_get_banks(self):
pass
``` |
{
"source": "aakbar5/handy-python",
"score": 4
} |
#### File: handy-python/general/list_cmp.py
```python
def compare_two_lists(reference, to_cmp):
"""
Compare two python lists and return True
if reference list is having all elements of
to_cmp list.
"""
return all(elem in reference for elem in to_cmp)
list1 = [1, 2, 3, 4]
list2 = [1]
ret = compare_two_lists(list1, list2)
print("List1:", list1)
print("List2:", list2)
print("list1 is having all element of list2") if ret == True else print("Lists are different")
print("")
list1 = [1]
list2 = [1, 2, 3, 4]
ret = compare_two_lists(list1, list2)
print("List1:", list1)
print("List2:", list2)
print("list1 is having all element of list2") if ret == True else print("Lists are different")
print("")
list1 = [1, 2, 3, 4]
list2 = [1, 2, 3, 4]
ret = compare_two_lists(list1, list2)
print("List1:", list1)
print("List2:", list2)
print("list1 is having all element of list2") if ret == True else print("Lists are different")
print("")
list1 = [1, 2, 3, 4]
list2 = [4, 2, 3, 1]
ret = compare_two_lists(list1, list2)
print("List1:", list1)
print("List2:", list2)
print("list1 is having all element of list2") if ret == True else print("Lists are different")
print("")
list1 = [1, 2, 3, 4]
list2 = [10, 11, 12, 14, 15, 16]
ret = compare_two_lists(list1, list2)
print("List1:", list1)
print("List2:", list2)
print("list1 is having all element of list2") if ret == True else print("Lists are different")
print("")
list1 = [1, 2, 3, 4]
list2 = [6, 7, 8, 9]
ret = compare_two_lists(list1, list2)
print("List1:", list1)
print("List2:", list2)
print("list1 is having all element of list2") if ret == True else print("Lists are different")
print("")
```
#### File: handy-python/general/tuple_list_to_list.py
```python
def convert_list_of_tuple_to_list(py_list):
"""
Convert list of tuple(s) to plain list.
"""
if not isinstance(py_list, list):
return
return list(x for tup in py_list for x in tup)
# Simple tuple
t = ("val1", "val2", "val3")
print(type(t), t)
l = [(1,), (2, 3), (4, 5, 6), (4, 5, 6, 7)]
ret = convert_list_of_tuple_to_list(l)
print(type(l), l)
print(type(ret), ret)
l = [(1,), (2,)]
ret = convert_list_of_tuple_to_list(l)
print(type(l), l)
print(type(ret), ret)
``` |
{
"source": "aak-dev/mango-explorer",
"score": 2
} |
#### File: oracles/serum/serum.py
```python
import rx
import rx.operators as ops
import typing
from datetime import datetime
from decimal import Decimal
from pyserum.market.orderbook import OrderBook
from pyserum.market import Market as SerumMarket
from ...accountinfo import AccountInfo
from ...context import Context
from ...market import Market
from ...observables import observable_pipeline_error_reporter
from ...oracle import Oracle, OracleProvider, OracleSource, Price
from ...spotmarket import SpotMarket
# # 🥭 Serum
#
# This file contains code specific to oracles on the [Serum DEX](https://projectserum.com/).
#
# # 🥭 SerumOracle class
#
# Implements the `Oracle` abstract base class specialised to the Serum DEX.
#
class SerumOracle(Oracle):
def __init__(self, spot_market: SpotMarket):
name = f"Serum Oracle for {spot_market.symbol}"
super().__init__(name, spot_market)
self.spot_market: SpotMarket = spot_market
self.source: OracleSource = OracleSource("Serum", name, spot_market)
        self._serum_market: typing.Optional[SerumMarket] = None
def fetch_price(self, context: Context) -> Price:
if self._serum_market is None:
self._serum_market = SerumMarket.load(context.client, self.spot_market.address, context.dex_program_id)
bids_address = self._serum_market.state.bids()
asks_address = self._serum_market.state.asks()
bid_ask_account_infos = AccountInfo.load_multiple(context, [bids_address, asks_address])
if len(bid_ask_account_infos) != 2:
raise Exception(
f"Failed to get bid/ask data from Serum for market address {self.spot_market.address} (bids: {bids_address}, asks: {asks_address}).")
bids = OrderBook.from_bytes(self._serum_market.state, bid_ask_account_infos[0].data)
asks = OrderBook.from_bytes(self._serum_market.state, bid_ask_account_infos[1].data)
top_bid = list(bids.orders())[-1]
top_ask = list(asks.orders())[0]
top_bid_price = self.spot_market.quote.round(Decimal(top_bid.info.price))
top_ask_price = self.spot_market.quote.round(Decimal(top_ask.info.price))
mid_price = (top_bid_price + top_ask_price) / 2
return Price(self.source, datetime.now(), self.spot_market, top_bid_price, mid_price, top_ask_price)
def to_streaming_observable(self, context: Context) -> rx.core.typing.Observable:
return rx.interval(1).pipe(
ops.subscribe_on(context.pool_scheduler),
ops.start_with(-1),
ops.map(lambda _: self.fetch_price(context)),
ops.catch(observable_pipeline_error_reporter),
ops.retry(),
)
# # 🥭 SerumOracleProvider class
#
# Implements the `OracleProvider` abstract base class specialised to the Serum Network.
#
class SerumOracleProvider(OracleProvider):
def __init__(self) -> None:
super().__init__("Serum Oracle Factory")
def oracle_for_market(self, context: Context, market: Market) -> typing.Optional[Oracle]:
if isinstance(market, SpotMarket):
return SerumOracle(market)
else:
optional_spot_market = context.market_lookup.find_by_symbol(market.symbol)
if optional_spot_market is None:
return None
if isinstance(optional_spot_market, SpotMarket):
return SerumOracle(optional_spot_market)
return None
def all_available_symbols(self, context: Context) -> typing.Sequence[str]:
all_markets = context.market_lookup.all_markets()
symbols: typing.List[str] = []
for spot_market in all_markets:
symbols += [spot_market.symbol]
return symbols
``` |
{
"source": "aakejiang/milvus",
"score": 2
} |
#### File: python_client/base/index_wrapper.py
```python
import sys
from pymilvus import Index
sys.path.append("..")
from check.func_check import ResponseChecker
from utils.api_request import api_request
TIMEOUT = 20
INDEX_NAME = "_default_idx"
class ApiIndexWrapper:
index = None
def init_index(self, collection, field_name, index_params, index_name=None, check_task=None, check_items=None, **kwargs):
timeout = kwargs.get("timeout", TIMEOUT * 2)
index_name = INDEX_NAME if index_name is None else index_name
index_name = kwargs.get("index_name", index_name)
kwargs.update({"timeout": timeout, "index_name": index_name})
""" In order to distinguish the same name of index """
func_name = sys._getframe().f_code.co_name
res, is_succ = api_request([Index, collection, field_name, index_params], **kwargs)
self.index = res if is_succ is True else None
check_result = ResponseChecker(res, func_name, check_task, check_items, is_succ,
collection=collection, field_name=field_name,
index_params=index_params, **kwargs).run()
return res, check_result
    def drop(self, index_name=None, check_task=None, check_items=None, **kwargs):
timeout = kwargs.get("timeout", TIMEOUT)
index_name = INDEX_NAME if index_name is None else index_name
index_name = kwargs.get("index_name", index_name)
kwargs.update({"timeout": timeout, "index_name": index_name})
func_name = sys._getframe().f_code.co_name
res, is_succ = api_request([self.index.drop], **kwargs)
check_result = ResponseChecker(res, func_name, check_task, check_items, is_succ, **kwargs).run()
return res, check_result
@property
def params(self):
return self.index.params
@property
def collection_name(self):
return self.index.collection_name
@property
def field_name(self):
return self.index.field_name
```
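A hypothetical, standalone sketch of driving `ApiIndexWrapper` outside the test framework; the address, collection name, and field names are assumptions, and a running Milvus instance is required.
```python
# Hypothetical direct use of ApiIndexWrapper; assumes a Milvus server is
# reachable at the address below.
from pymilvus import connections, Collection, CollectionSchema, FieldSchema, DataType

from base.index_wrapper import ApiIndexWrapper

connections.connect(host="127.0.0.1", port="19530")
schema = CollectionSchema([
    FieldSchema("int64", DataType.INT64, is_primary=True),
    FieldSchema("float_vector", DataType.FLOAT_VECTOR, dim=128),
])
collection = Collection("index_wrapper_demo", schema)

index_wrap = ApiIndexWrapper()
index_params = {"index_type": "IVF_SQ8", "metric_type": "L2", "params": {"nlist": 64}}
index_wrap.init_index(collection, "float_vector", index_params)
print(index_wrap.params, index_wrap.collection_name, index_wrap.field_name)
index_wrap.drop()
```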
#### File: python_client/chaos/test_chaos_bulk_load.py
```python
import threading
import pytest
import os
import time
import json
from time import sleep
from pathlib import Path
from minio import Minio
from pymilvus import connections
from chaos.checker import (InsertFlushChecker, SearchChecker, QueryChecker, BulkLoadChecker, Op)
from common.cus_resource_opts import CustomResourceOperations as CusResource
from common.milvus_sys import MilvusSys
from utils.util_log import test_log as log
from utils.util_k8s import wait_pods_ready, get_pod_list, get_pod_ip_name_pairs, get_milvus_instance_name
from utils.util_common import findkeys, update_key_value
from chaos import chaos_commons as cc
from common.common_type import CaseLabel
from common import common_func as cf
from chaos import constants
# from bulk_load.bulk_load_data import gen_file_name
from bulk_load.minio_comm import copy_files_to_minio
from delayed_assert import expect, assert_expectations
def assert_statistic(checkers, expectations={}):
for k in checkers.keys():
# expect succ if no expectations
succ_rate = checkers[k].succ_rate()
total = checkers[k].total()
average_time = checkers[k].average_time
if expectations.get(k, '') == constants.FAIL:
log.info(
f"Expect Fail: {str(k)} succ rate {succ_rate}, total: {total}, average time: {average_time:.4f}")
expect(succ_rate < 0.49 or total < 2,
f"Expect Fail: {str(k)} succ rate {succ_rate}, total: {total}, average time: {average_time:.4f}")
else:
log.info(
f"Expect Succ: {str(k)} succ rate {succ_rate}, total: {total}, average time: {average_time:.4f}")
expect(succ_rate > 0.90 and total > 2,
f"Expect Succ: {str(k)} succ rate {succ_rate}, total: {total}, average time: {average_time:.4f}")
def get_querynode_info(release_name):
querynode_id_pod_pair = {}
querynode_ip_pod_pair = get_pod_ip_name_pairs(
"chaos-testing", f"app.kubernetes.io/instance={release_name}, component=querynode")
ms = MilvusSys()
for node in ms.query_nodes:
ip = node["infos"]['hardware_infos']["ip"].split(":")[0]
querynode_id_pod_pair[node["identifier"]] = querynode_ip_pod_pair[ip]
return querynode_id_pod_pair
class TestChaosBase:
expect_create = constants.SUCC
expect_insert = constants.SUCC
expect_flush = constants.SUCC
expect_index = constants.SUCC
expect_search = constants.SUCC
expect_query = constants.SUCC
host = '127.0.0.1'
port = 19530
_chaos_config = None
health_checkers = {}
class TestChaos(TestChaosBase):
@pytest.fixture(scope="function", autouse=True)
def connection(self, host, port):
connections.add_connection(default={"host": host, "port": port})
connections.connect(alias='default')
if connections.has_connection("default") is False:
raise Exception("no connections")
instance_name = get_milvus_instance_name(constants.CHAOS_NAMESPACE, host)
self.host = host
self.port = port
self.instance_name = instance_name
@pytest.fixture(scope="function", autouse=True)
def init_health_checkers(self):
log.info("init health checkers")
checkers = {
# Op.insert: InsertFlushChecker(collection_name=c_name),
# Op.search: SearchChecker(collection_name=c_name, replica_number=2),
Op.bulk_load: BulkLoadChecker()
# Op.query: QueryChecker(collection_name=c_name, replica_number=2)
}
self.health_checkers = checkers
@pytest.fixture(scope="function", autouse=True)
def prepare_bulk_load(self, nb=1000, row_based=True):
if Op.bulk_load not in self.health_checkers:
log.info("bulk_load checker is not in health checkers, skip prepare bulk load")
return
log.info("bulk_load checker is in health checkers, prepare data firstly")
release_name = self.instance_name
minio_ip_pod_pair = get_pod_ip_name_pairs("chaos-testing", f"release={release_name}, app=minio")
ms = MilvusSys()
minio_ip = list(minio_ip_pod_pair.keys())[0]
minio_port = "9000"
minio_endpoint = f"{minio_ip}:{minio_port}"
bucket_name = ms.index_nodes[0]["infos"]["system_configurations"]["minio_bucket_name"]
schema = cf.gen_default_collection_schema()
data = cf.gen_default_list_data_for_bulk_load(nb=nb)
fields_name = [field.name for field in schema.fields]
if not row_based:
data_dict = dict(zip(fields_name, data))
if row_based:
entities = []
for i in range(nb):
entity_value = [field_values[i] for field_values in data]
entity = dict(zip(fields_name, entity_value))
entities.append(entity)
data_dict = {"rows": entities}
file_name = "bulk_load_data_source.json"
files = [file_name]
#TODO: npy file type is not supported so far
log.info("generate bulk load file")
with open(file_name, "w") as f:
f.write(json.dumps(data_dict))
log.info("upload file to minio")
client = Minio(minio_endpoint, access_key="minioadmin", secret_key="minioadmin", secure=False)
client.fput_object(bucket_name, file_name, file_name)
self.health_checkers[Op.bulk_load].update(schema=schema, files=files, row_based=row_based)
log.info("prepare data for bulk load done")
def teardown(self):
chaos_res = CusResource(kind=self._chaos_config['kind'],
group=constants.CHAOS_GROUP,
version=constants.CHAOS_VERSION,
namespace=constants.CHAOS_NAMESPACE)
meta_name = self._chaos_config.get('metadata', None).get('name', None)
chaos_res.delete(meta_name, raise_ex=False)
sleep(2)
log.info(f'Alive threads: {threading.enumerate()}')
@pytest.mark.tags(CaseLabel.L3)
@pytest.mark.parametrize("target_component", ["minio"]) # "minio", "proxy", "rootcoord", "datacoord", "datanode", "etcd"
@pytest.mark.parametrize("chaos_type", ["pod_kill"]) # "pod_kill", "pod_failure"
def test_bulk_load(self, chaos_type, target_component):
# start the monitor threads to check the milvus ops
log.info("*********************Chaos Test Start**********************")
log.info(connections.get_connection_addr('default'))
release_name = self.instance_name
cc.start_monitor_threads(self.health_checkers)
chaos_config = cc.gen_experiment_config(f"{str(Path(__file__).absolute().parent)}/chaos_objects/{chaos_type}/chaos_{target_component}_{chaos_type}.yaml")
chaos_config['metadata']['name'] = f"test-bulk-load-{int(time.time())}"
kind = chaos_config['kind']
meta_name = chaos_config.get('metadata', None).get('name', None)
update_key_value(chaos_config, "release", release_name)
update_key_value(chaos_config, "app.kubernetes.io/instance", release_name)
self._chaos_config = chaos_config # cache the chaos config for tear down
log.info(f"chaos_config: {chaos_config}")
        # wait for a while (WAIT_PER_OP * 10)
sleep(constants.WAIT_PER_OP * 10)
# assert statistic:all ops 100% succ
log.info("******1st assert before chaos: ")
assert_statistic(self.health_checkers)
# apply chaos object
chaos_res = CusResource(kind=chaos_config['kind'],
group=constants.CHAOS_GROUP,
version=constants.CHAOS_VERSION,
namespace=constants.CHAOS_NAMESPACE)
chaos_res.create(chaos_config)
log.info("chaos injected")
sleep(constants.WAIT_PER_OP * 10)
# reset counting
cc.reset_counting(self.health_checkers)
# wait 120s
sleep(constants.CHAOS_DURATION)
log.info(f'Alive threads: {threading.enumerate()}')
# assert statistic
log.info("******2nd assert after chaos injected: ")
assert_statistic(self.health_checkers,
expectations={
Op.bulk_load: constants.FAIL,
})
# delete chaos
chaos_res.delete(meta_name)
log.info("chaos deleted")
sleep(2)
# wait all pods ready
log.info(f"wait for pods in namespace {constants.CHAOS_NAMESPACE} with label app.kubernetes.io/instance={release_name}")
wait_pods_ready(constants.CHAOS_NAMESPACE ,f"app.kubernetes.io/instance={release_name}")
log.info(f"wait for pods in namespace {constants.CHAOS_NAMESPACE} with label release={release_name}")
wait_pods_ready(constants.CHAOS_NAMESPACE, f"release={release_name}")
log.info("all pods are ready")
# reconnect if needed
sleep(constants.WAIT_PER_OP * 2)
cc.reconnect(connections, alias='default')
# recheck failed tasks in third assert
self.health_checkers[Op.bulk_load].recheck_failed_task = True
# reset counting again
cc.reset_counting(self.health_checkers)
        # wait for a while (WAIT_PER_OP * 10, varies by feature)
sleep(constants.WAIT_PER_OP * 10)
# assert statistic: all ops success again
log.info("******3rd assert after chaos deleted: ")
assert_statistic(self.health_checkers)
# assert all expectations
assert_expectations()
log.info("*********************Chaos Test Completed**********************")
```
#### File: python_client/chaos/test_chaos.py
```python
import threading
import pytest
import os
import time
import json
from time import sleep
from pymilvus import connections
from chaos.checker import (CreateChecker, InsertFlushChecker,
SearchChecker, QueryChecker, IndexChecker, Op)
from common.cus_resource_opts import CustomResourceOperations as CusResource
from utils.util_log import test_log as log
from utils.util_k8s import wait_pods_ready, get_pod_list
from utils.util_common import findkeys
from chaos import chaos_commons as cc
from common.common_type import CaseLabel
from chaos import constants
from delayed_assert import expect, assert_expectations
def assert_statistic(checkers, expectations={}):
for k in checkers.keys():
# expect succ if no expectations
succ_rate = checkers[k].succ_rate()
total = checkers[k].total()
average_time = checkers[k].average_time
if expectations.get(k, '') == constants.FAIL:
log.info(f"Expect Fail: {str(k)} succ rate {succ_rate}, total: {total}, average time: {average_time:.4f}")
expect(succ_rate < 0.49 or total < 2,
f"Expect Fail: {str(k)} succ rate {succ_rate}, total: {total}, average time: {average_time:.4f}")
else:
log.info(f"Expect Succ: {str(k)} succ rate {succ_rate}, total: {total}, average time: {average_time:.4f}")
expect(succ_rate > 0.90 or total > 2,
f"Expect Succ: {str(k)} succ rate {succ_rate}, total: {total}, average time: {average_time:.4f}")
def check_cluster_nodes(chaos_config):
    # If all pods will be affected, the expectation is that every op fails.
    # Even though the replica count is greater than 1, it cannot provide HA, so cluster_nodes is set to 1 in this situation.
if "all" in chaos_config["metadata"]["name"]:
return 1
selector = findkeys(chaos_config, "selector")
selector = list(selector)
log.info(f"chaos target selector: {selector}")
# assert len(selector) == 1
    selector = selector[0]  # the chaos yaml file must place the affected pod selector in the first position
namespace = selector["namespaces"][0]
labels_dict = selector["labelSelectors"]
labels_list = []
for k,v in labels_dict.items():
labels_list.append(k+"="+v)
labels_str = ",".join(labels_list)
pods = get_pod_list(namespace, labels_str)
return len(pods)
def record_results(checkers):
res = ""
for k in checkers.keys():
check_result = checkers[k].check_result()
res += f"{str(k):10} {check_result}\n"
return res
class TestChaosBase:
expect_create = constants.SUCC
expect_insert = constants.SUCC
expect_flush = constants.SUCC
expect_index = constants.SUCC
expect_search = constants.SUCC
expect_query = constants.SUCC
host = '127.0.0.1'
port = 19530
_chaos_config = None
health_checkers = {}
def parser_testcase_config(self, chaos_yaml, chaos_config):
cluster_nodes = check_cluster_nodes(chaos_config)
tests_yaml = constants.TESTS_CONFIG_LOCATION + 'testcases.yaml'
tests_config = cc.gen_experiment_config(tests_yaml)
test_collections = tests_config.get('Collections', None)
for t in test_collections:
test_chaos = t.get('testcase', {}).get('chaos', {})
if test_chaos in chaos_yaml:
expects = t.get('testcase', {}).get('expectation', {}).get('cluster_1_node', {})
# for the cluster_n_node
if cluster_nodes > 1:
expects = t.get('testcase', {}).get('expectation', {}).get('cluster_n_node', {})
log.info(f"yaml.expects: {expects}")
self.expect_create = expects.get(Op.create.value, constants.SUCC)
self.expect_insert = expects.get(Op.insert.value, constants.SUCC)
self.expect_flush = expects.get(Op.flush.value, constants.SUCC)
self.expect_index = expects.get(Op.index.value, constants.SUCC)
self.expect_search = expects.get(Op.search.value, constants.SUCC)
self.expect_query = expects.get(Op.query.value, constants.SUCC)
log.info(f"self.expects: create:{self.expect_create}, insert:{self.expect_insert}, "
f"flush:{self.expect_flush}, index:{self.expect_index}, "
f"search:{self.expect_search}, query:{self.expect_query}")
return True
return False
class TestChaos(TestChaosBase):
@pytest.fixture(scope="function", autouse=True)
def connection(self, host, port):
connections.add_connection(default={"host": host, "port": port})
connections.connect(alias='default')
if connections.has_connection("default") is False:
raise Exception("no connections")
self.host = host
self.port = port
@pytest.fixture(scope="function", autouse=True)
def init_health_checkers(self):
checkers = {
Op.create: CreateChecker(),
Op.insert: InsertFlushChecker(),
Op.flush: InsertFlushChecker(flush=True),
Op.index: IndexChecker(),
Op.search: SearchChecker(),
Op.query: QueryChecker()
}
self.health_checkers = checkers
def teardown(self):
chaos_res = CusResource(kind=self._chaos_config['kind'],
group=constants.CHAOS_GROUP,
version=constants.CHAOS_VERSION,
namespace=constants.CHAOS_NAMESPACE)
meta_name = self._chaos_config.get('metadata', None).get('name', None)
chaos_res.delete(meta_name, raise_ex=False)
sleep(2)
log.info(f'Alive threads: {threading.enumerate()}')
@pytest.mark.tags(CaseLabel.L3)
@pytest.mark.parametrize('chaos_yaml', cc.get_chaos_yamls())
def test_chaos(self, chaos_yaml):
# start the monitor threads to check the milvus ops
log.info("*********************Chaos Test Start**********************")
log.info(f"chaos_yaml: {chaos_yaml}")
log.info(connections.get_connection_addr('default'))
cc.start_monitor_threads(self.health_checkers)
# parse chaos object
chaos_config = cc.gen_experiment_config(chaos_yaml)
release_name = constants.RELEASE_NAME
log.info(f"release_name: {release_name}")
chaos_config['metadata']['name'] = release_name
kind = chaos_config['kind']
meta_name = chaos_config.get('metadata', None).get('name', None)
chaos_config_str = json.dumps(chaos_config)
chaos_config_str = chaos_config_str.replace("milvus-chaos", release_name)
chaos_config = json.loads(chaos_config_str)
self._chaos_config = chaos_config # cache the chaos config for tear down
log.info(f"chaos_config: {chaos_config}")
# parse the test expectations in testcases.yaml
if self.parser_testcase_config(chaos_yaml, chaos_config) is False:
log.error("Fail to get the testcase info in testcases.yaml")
assert False
# init report
dir_name = "./reports"
file_name = f"./reports/{meta_name}.log"
if not os.path.exists(dir_name):
os.makedirs(dir_name)
# wait 20s
sleep(constants.WAIT_PER_OP * 2)
# assert statistic:all ops 100% succ
log.info("******1st assert before chaos: ")
assert_statistic(self.health_checkers)
try:
with open(file_name, "a+") as f:
ts = time.strftime("%Y-%m-%d %H:%M:%S")
f.write(f"{meta_name}-{ts}\n")
f.write("1st assert before chaos:\n")
f.write(record_results(self.health_checkers))
except Exception as e:
log.info(f"Fail to write to file: {e}")
# apply chaos object
chaos_res = CusResource(kind=chaos_config['kind'],
group=constants.CHAOS_GROUP,
version=constants.CHAOS_VERSION,
namespace=constants.CHAOS_NAMESPACE)
chaos_res.create(chaos_config)
log.info("chaos injected")
# verify the chaos is injected
log.info(f"kubectl get {kind} {meta_name} -n {constants.CHAOS_NAMESPACE}")
os.system(f"kubectl get {kind} {meta_name} -n {constants.CHAOS_NAMESPACE}")
sleep(constants.WAIT_PER_OP * 2)
# reset counting
cc.reset_counting(self.health_checkers)
# wait 120s
sleep(constants.CHAOS_DURATION)
log.info(f'Alive threads: {threading.enumerate()}')
# assert statistic
log.info("******2nd assert after chaos injected: ")
assert_statistic(self.health_checkers,
expectations={Op.create: self.expect_create,
Op.insert: self.expect_insert,
Op.flush: self.expect_flush,
Op.index: self.expect_index,
Op.search: self.expect_search,
Op.query: self.expect_query
})
try:
with open(file_name, "a+") as f:
f.write("2nd assert after chaos injected:\n")
f.write(record_results(self.health_checkers))
except Exception as e:
log.error(f"Fail to write the report: {e}")
# delete chaos
chaos_res.delete(meta_name)
log.info("chaos deleted")
# verify the chaos is deleted
log.info(f"kubectl get {kind} {meta_name} -n {constants.CHAOS_NAMESPACE}")
os.system(f"kubectl get {kind} {meta_name} -n {constants.CHAOS_NAMESPACE}")
log.info(f'Alive threads: {threading.enumerate()}')
sleep(2)
# wait all pods ready
log.info(f"wait for pods in namespace {constants.CHAOS_NAMESPACE} with label app.kubernetes.io/instance={meta_name}")
wait_pods_ready(constants.CHAOS_NAMESPACE, f"app.kubernetes.io/instance={meta_name}")
log.info(f"wait for pods in namespace {constants.CHAOS_NAMESPACE} with label release={meta_name}")
wait_pods_ready(constants.CHAOS_NAMESPACE, f"release={meta_name}")
log.info("all pods are ready")
# reconnect if needed
sleep(constants.WAIT_PER_OP * 2)
cc.reconnect(connections, alias='default')
# reset counting again
cc.reset_counting(self.health_checkers)
# wait 50s (varies by feature)
sleep(constants.WAIT_PER_OP * 5)
# assert statistic: all ops success again
log.info("******3rd assert after chaos deleted: ")
assert_statistic(self.health_checkers)
try:
with open(file_name, "a+") as f:
f.write("3rd assert after chaos deleted:\n")
f.write(record_results(self.health_checkers))
except Exception as e:
log.info(f"Fail to write the report: {e}")
# assert all expectations
assert_expectations()
log.info("*********************Chaos Test Completed**********************")
```
#### File: python_client/common/milvus_sys.py
```python
import ujson
import json
from pymilvus.grpc_gen import milvus_pb2 as milvus_types
from pymilvus import connections
sys_info_req = ujson.dumps({"metric_type": "system_info"})
sys_statistics_req = ujson.dumps({"metric_type": "system_statistics"})
sys_logs_req = ujson.dumps({"metric_type": "system_logs"})
class MilvusSys:
def __init__(self, alias='default'):
self.alias = alias
self.handler = connections._fetch_handler(alias=self.alias)
if self.handler is None:
raise Exception(f"Connection {alias} is disconnected or nonexistent")
# TODO: for now it only supports non_orm style API for getMetricsRequest
req = milvus_types.GetMetricsRequest(request=sys_info_req)
self.sys_info = self.handler._stub.GetMetrics(req, wait_for_ready=True, timeout=None)
req = milvus_types.GetMetricsRequest(request=sys_statistics_req)
self.sys_statistics = self.handler._stub.GetMetrics(req, wait_for_ready=True, timeout=None)
req = milvus_types.GetMetricsRequest(request=sys_logs_req)
self.sys_logs = self.handler._stub.GetMetrics(req, wait_for_ready=True, timeout=None)
@property
def build_version(self):
"""get the first node's build version as milvus build version"""
return self.nodes[0].get('infos').get('system_info').get('system_version')
@property
def build_time(self):
"""get the first node's build time as milvus build time"""
return self.nodes[0].get('infos').get('system_info').get('build_time')
@property
def deploy_mode(self):
"""get the first node's deploy_mode as milvus deploy_mode"""
return self.nodes[0].get('infos').get('system_info').get('deploy_mode')
@property
def simd_type(self):
"""
get simd type that milvus is running against
return the first query node's simd type
"""
for node in self.query_nodes:
return node.get('infos').get('system_configurations').get('simd_type')
raise Exception("No query node found")
@property
def query_nodes(self):
"""get all query nodes in Milvus deployment"""
query_nodes = []
for node in self.nodes:
if 'querynode' == node.get('infos').get('type'):
query_nodes.append(node)
return query_nodes
@property
def data_nodes(self):
"""get all data nodes in Milvus deployment"""
data_nodes = []
for node in self.nodes:
if 'datanode' == node.get('infos').get('type'):
data_nodes.append(node)
return data_nodes
@property
def index_nodes(self):
"""get all index nodes in Milvus deployment"""
index_nodes = []
for node in self.nodes:
if 'indexnode' == node.get('infos').get('type'):
index_nodes.append(node)
return index_nodes
@property
def proxy_nodes(self):
"""get all proxy nodes in Milvus deployment"""
proxy_nodes = []
for node in self.nodes:
if 'proxy' == node.get('infos').get('type'):
proxy_nodes.append(node)
return proxy_nodes
@property
def nodes(self):
"""get all the nodes in Milvus deployment"""
all_nodes = json.loads(self.sys_info.response).get('nodes_info')
online_nodes = [node for node in all_nodes if node["infos"]["has_error"] is False]
return online_nodes
def get_nodes_by_type(self, node_type=None):
"""get milvus nodes by node type"""
target_nodes = []
if node_type is not None:
for node in self.nodes:
if str(node_type).lower() == str(node.get('infos').get('type')).lower():
target_nodes.append(node)
return target_nodes
if __name__ == '__main__':
connections.connect(host="10.96.250.111", port="19530")
ms = MilvusSys()
```
#### File: python_client/testcases/test_e2e.py
```python
import time
import pytest
from base.client_base import TestcaseBase
from common import common_func as cf
from common import common_type as ct
from common.common_type import CaseLabel
from utils.util_log import test_log as log
prefix = "e2e_"
class TestE2e(TestcaseBase):
""" Test case of end to end"""
@pytest.mark.tags(CaseLabel.L1)
def test_milvus_default(self):
# create
name = cf.gen_unique_str(prefix)
t0 = time.time()
collection_w = self.init_collection_wrap(name=name, active_trace=True)
tt = time.time() - t0
assert collection_w.name == name
entities = collection_w.num_entities
log.info(f"assert create collection: {tt}, init_entities: {entities}")
# insert
data = cf.gen_default_list_data()
t0 = time.time()
_, res = collection_w.insert(data)
tt = time.time() - t0
log.info(f"assert insert: {tt}")
assert res
# flush
t0 = time.time()
num_entities, check_result = collection_w.flush(timeout=180)
assert check_result
assert num_entities == len(data[0]) + entities
tt = time.time() - t0
entities = collection_w.num_entities
log.info(f"assert flush: {tt}, entities: {entities}")
# search
collection_w.load()
search_vectors = cf.gen_vectors(1, ct.default_dim)
search_params = {"metric_type": "L2", "params": {"nprobe": 16}}
t0 = time.time()
res_1, _ = collection_w.search(data=search_vectors,
anns_field=ct.default_float_vec_field_name,
param=search_params, limit=1)
tt = time.time() - t0
log.info(f"assert search: {tt}")
assert len(res_1) == 1
collection_w.release()
# index
d = cf.gen_default_list_data()
collection_w.insert(d)
log.info(f"assert index entities: {collection_w.num_entities}")
_index_params = {"index_type": "IVF_SQ8", "params": {"nlist": 64}, "metric_type": "L2"}
t0 = time.time()
index, _ = collection_w.create_index(field_name=ct.default_float_vec_field_name,
index_params=_index_params,
name=cf.gen_unique_str())
tt = time.time() - t0
log.info(f"assert index: {tt}")
assert len(collection_w.indexes) == 1
# search
t0 = time.time()
collection_w.load()
tt = time.time() - t0
log.info(f"assert load: {tt}")
search_vectors = cf.gen_vectors(1, ct.default_dim)
t0 = time.time()
res_1, _ = collection_w.search(data=search_vectors,
anns_field=ct.default_float_vec_field_name,
param=search_params, limit=1)
tt = time.time() - t0
log.info(f"assert search: {tt}")
# query
term_expr = f'{ct.default_int64_field_name} in [1001,1201,4999,2999]'
t0 = time.time()
res, _ = collection_w.query(term_expr)
tt = time.time() - t0
log.info(f"assert query result {len(res)}: {tt}")
```
#### File: python_client/testcases/test_insert.py
```python
from ssl import ALERT_DESCRIPTION_UNKNOWN_PSK_IDENTITY
import threading
import numpy as np
import pandas as pd
import random
import pytest
from pymilvus import Index, DataType
from pymilvus.exceptions import MilvusException
from base.client_base import TestcaseBase
from utils.util_log import test_log as log
from common import common_func as cf
from common import common_type as ct
from common.common_type import CaseLabel, CheckTasks
prefix = "insert"
exp_name = "name"
exp_schema = "schema"
exp_num = "num_entities"
exp_primary = "primary"
default_schema = cf.gen_default_collection_schema()
default_binary_schema = cf.gen_default_binary_collection_schema()
default_index_params = {"index_type": "IVF_SQ8", "metric_type": "L2", "params": {"nlist": 64}}
default_binary_index_params = {"index_type": "BIN_IVF_FLAT", "metric_type": "JACCARD", "params": {"nlist": 64}}
default_search_exp = "int64 >= 0"
class TestInsertParams(TestcaseBase):
""" Test case of Insert interface """
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_non_data_type(self, request):
if isinstance(request.param, list) or request.param is None:
pytest.skip("list and None type is valid data type")
yield request.param
@pytest.fixture(scope="module", params=ct.get_invalid_strs)
def get_invalid_field_name(self, request):
if isinstance(request.param, (list, dict)):
pytest.skip()
yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_insert_dataframe_data(self):
"""
target: test insert DataFrame data
method: 1.create collection
2.insert dataframe data
expected: assert num entities
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
df = cf.gen_default_dataframe_data(ct.default_nb)
mutation_res, _ = collection_w.insert(data=df)
assert mutation_res.insert_count == ct.default_nb
assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_insert_list_data(self):
"""
target: test insert list-like data
method: 1.create 2.insert list data
expected: assert num entities
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data(ct.default_nb)
mutation_res, _ = collection_w.insert(data=data)
assert mutation_res.insert_count == ct.default_nb
assert mutation_res.primary_keys == data[0]
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L2)
def test_insert_non_data_type(self, get_non_data_type):
"""
target: test insert with non-dataframe, non-list data
method: insert with data (non-dataframe and non-list type)
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
error = {ct.err_code: 0, ct.err_msg: "Data type is not support"}
collection_w.insert(data=get_non_data_type, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("data", [[], pd.DataFrame()])
def test_insert_empty_data(self, data):
"""
target: test insert empty data
method: insert empty
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
error = {ct.err_code: 0, ct.err_msg: "The data fields number is not match with schema"}
collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_dataframe_only_columns(self):
"""
target: test insert with dataframe just columns
method: dataframe just have columns
expected: num entities is zero
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
columns = [ct.default_int64_field_name, ct.default_float_vec_field_name]
df = pd.DataFrame(columns=columns)
error = {ct.err_code: 0, ct.err_msg: "Cannot infer schema from empty dataframe"}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_empty_field_name_dataframe(self):
"""
target: test insert empty field name df
method: dataframe with empty column
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
df = cf.gen_default_dataframe_data(10)
df.rename(columns={ct.default_int64_field_name: ' '}, inplace=True)
error = {ct.err_code: 0, ct.err_msg: "The types of schema and data do not match"}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_invalid_field_name_dataframe(self, get_invalid_field_name):
"""
target: test insert with invalid dataframe data
method: insert with invalid field name dataframe
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
df = cf.gen_default_dataframe_data(10)
df.rename(columns={ct.default_int64_field_name: get_invalid_field_name}, inplace=True)
error = {ct.err_code: 0, ct.err_msg: "The types of schema and data do not match"}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
def test_insert_dataframe_index(self):
"""
target: test insert dataframe with index
method: insert dataframe with index
expected: todo
"""
pass
@pytest.mark.tags(CaseLabel.L2)
def test_insert_none(self):
"""
target: test insert None
method: data is None
expected: return successfully with zero results
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
mutation_res, _ = collection_w.insert(data=None)
assert mutation_res.insert_count == 0
assert len(mutation_res.primary_keys) == 0
assert collection_w.is_empty
assert collection_w.num_entities == 0
@pytest.mark.tags(CaseLabel.L1)
def test_insert_numpy_data(self):
"""
target: test insert numpy.ndarray data
method: 1.create by schema 2.insert data
expected: assert num_entities
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_numpy_data(nb=10)
collection_w.insert(data=data)
@pytest.mark.tags(CaseLabel.L1)
def test_insert_binary_dataframe(self):
"""
target: test insert binary dataframe
method: 1. create by schema 2. insert dataframe
expected: assert num_entities
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, schema=default_binary_schema)
df, _ = cf.gen_default_binary_dataframe_data(ct.default_nb)
mutation_res, _ = collection_w.insert(data=df)
assert mutation_res.insert_count == ct.default_nb
assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_insert_binary_data(self):
"""
target: test insert list-like binary data
method: 1. create by schema 2. insert data
expected: assert num_entities
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, schema=default_binary_schema)
data, _ = cf.gen_default_binary_list_data(ct.default_nb)
mutation_res, _ = collection_w.insert(data=data)
assert mutation_res.insert_count == ct.default_nb
assert mutation_res.primary_keys == data[0]
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_insert_single(self):
"""
target: test insert single
method: insert one entity
expected: verify num
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data(nb=1)
mutation_res, _ = collection_w.insert(data=data)
assert mutation_res.insert_count == 1
assert mutation_res.primary_keys == data[0]
assert collection_w.num_entities == 1
@pytest.mark.tags(CaseLabel.L2)
def test_insert_dim_not_match(self):
"""
target: test insert with not match dim
method: insert data dim not equal to schema dim
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
dim = 129
df = cf.gen_default_dataframe_data(ct.default_nb, dim=dim)
error = {ct.err_code: 1,
ct.err_msg: f'Collection field dim is {ct.default_dim}, but entities field dim is {dim}'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_binary_dim_not_match(self):
"""
target: test insert binary with dim not match
method: insert binary data dim not equal to schema
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, schema=default_binary_schema)
dim = 120
df, _ = cf.gen_default_binary_dataframe_data(ct.default_nb, dim=dim)
error = {ct.err_code: 1,
ct.err_msg: f'Collection field dim is {ct.default_dim}, but entities field dim is {dim}'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_field_name_not_match(self):
"""
target: test insert field name not match
method: data field name not match schema
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
df = cf.gen_default_dataframe_data(10)
df.rename(columns={ct.default_float_field_name: "int"}, inplace=True)
error = {ct.err_code: 0, ct.err_msg: 'The types of schema and data do not match'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_field_value_not_match(self):
"""
target: test insert data value not match
method: insert data value type not match schema
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 10
df = cf.gen_default_dataframe_data(nb)
new_float_value = pd.Series(data=[float(i) for i in range(nb)], dtype="float64")
df.iloc[:, 1] = new_float_value
error = {ct.err_code: 0, ct.err_msg: 'The types of schema and data do not match'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_value_less(self):
"""
target: test insert value less than other
method: int field value less than vec-field value
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 10
int_values = [i for i in range(nb - 1)]
float_values = [np.float32(i) for i in range(nb)]
float_vec_values = cf.gen_vectors(nb, ct.default_dim)
data = [int_values, float_values, float_vec_values]
error = {ct.err_code: 0, ct.err_msg: 'Arrays must all be same length.'}
collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_vector_value_less(self):
"""
target: test insert vector value less than other
method: vec field value less than int field
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 10
int_values = [i for i in range(nb)]
float_values = [np.float32(i) for i in range(nb)]
float_vec_values = cf.gen_vectors(nb - 1, ct.default_dim)
data = [int_values, float_values, float_vec_values]
error = {ct.err_code: 0, ct.err_msg: 'Arrays must all be same length.'}
collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_fields_more(self):
"""
target: test insert with fields more
method: field more than schema fields
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
df = cf.gen_default_dataframe_data(ct.default_nb)
new_values = [i for i in range(ct.default_nb)]
df.insert(3, 'new', new_values)
error = {ct.err_code: 0, ct.err_msg: 'The data fields number is not match with schema.'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_fields_less(self):
"""
target: test insert with fields less
method: fields less than schema fields
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
df = cf.gen_default_dataframe_data(ct.default_nb)
df.drop(ct.default_float_vec_field_name, axis=1, inplace=True)
error = {ct.err_code: 0, ct.err_msg: 'The data fields number is not match with schema.'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_list_order_inconsistent_schema(self):
"""
target: test insert data fields order inconsistent with schema
method: insert list data, data fields order inconsistent with schema
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 10
int_values = [i for i in range(nb)]
float_values = [np.float32(i) for i in range(nb)]
float_vec_values = cf.gen_vectors(nb, ct.default_dim)
data = [float_values, int_values, float_vec_values]
error = {ct.err_code: 0, ct.err_msg: 'The types of schema and data do not match'}
collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_insert_dataframe_order_inconsistent_schema(self):
"""
target: test insert with dataframe fields inconsistent with schema
method: insert dataframe, and fields order inconsistent with schema
expected: assert num entities
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 10
int_values = pd.Series(data=[i for i in range(nb)])
float_values = pd.Series(data=[float(i) for i in range(nb)], dtype="float32")
float_vec_values = cf.gen_vectors(nb, ct.default_dim)
df = pd.DataFrame({
ct.default_float_field_name: float_values,
ct.default_float_vec_field_name: float_vec_values,
ct.default_int64_field_name: int_values
})
error = {ct.err_code: 0, ct.err_msg: 'The types of schema and data do not match'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_inconsistent_data(self):
"""
target: test insert with inconsistent data
        method: insert data in which the same column contains values of different types
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data(nb=100)
data[0][1] = 1.0
error = {ct.err_code: 0, ct.err_msg: "The data in the same column must be of the same type"}
collection_w.insert(data, check_task=CheckTasks.err_res, check_items=error)
class TestInsertOperation(TestcaseBase):
"""
******************************************************************
The following cases are used to test insert interface operations
******************************************************************
"""
@pytest.fixture(scope="function", params=[8, 4096])
def dim(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_insert_without_connection(self):
"""
target: test insert without connection
method: insert after remove connection
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
self.connection_wrap.remove_connection(ct.default_alias)
res_list, _ = self.connection_wrap.list_connections()
assert ct.default_alias not in res_list
data = cf.gen_default_list_data(10)
error = {ct.err_code: 0, ct.err_msg: 'should create connect first'}
collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip("https://github.com/milvus-io/milvus/issues/12680")
@pytest.mark.parametrize("vec_fields", [[cf.gen_float_vec_field(name="float_vector1")],
[cf.gen_binary_vec_field()],
[cf.gen_binary_vec_field(), cf.gen_binary_vec_field("binary_vec")]])
def test_insert_multi_float_vec_fields(self, vec_fields):
"""
target: test insert into multi float vec fields collection
method: create collection with different schema and insert
expected: verify num entities
"""
schema = cf.gen_schema_multi_vector_fields(vec_fields)
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix), schema=schema)
df = cf.gen_dataframe_multi_vec_fields(vec_fields=vec_fields)
collection_w.insert(df)
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_insert_drop_collection(self):
"""
target: test insert and drop
method: insert data and drop collection
expected: verify collection if exist
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
collection_list, _ = self.utility_wrap.list_collections()
assert collection_w.name in collection_list
df = cf.gen_default_dataframe_data(ct.default_nb)
collection_w.insert(data=df)
collection_w.drop()
collection_list, _ = self.utility_wrap.list_collections()
assert collection_w.name not in collection_list
@pytest.mark.tags(CaseLabel.L1)
def test_insert_create_index(self):
"""
target: test insert and create index
method: 1. insert 2. create index
expected: verify num entities and index
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_default_dataframe_data(ct.default_nb)
collection_w.insert(data=df)
assert collection_w.num_entities == ct.default_nb
collection_w.create_index(ct.default_float_vec_field_name, default_index_params)
assert collection_w.has_index()[0]
index, _ = collection_w.index()
assert index == Index(collection_w.collection, ct.default_float_vec_field_name, default_index_params)
assert collection_w.indexes[0] == index
@pytest.mark.tags(CaseLabel.L1)
def test_insert_after_create_index(self):
"""
target: test insert after create index
method: 1. create index 2. insert data
expected: verify index and num entities
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
collection_w.create_index(ct.default_float_vec_field_name, default_index_params)
assert collection_w.has_index()[0]
index, _ = collection_w.index()
assert index == Index(collection_w.collection, ct.default_float_vec_field_name, default_index_params)
assert collection_w.indexes[0] == index
df = cf.gen_default_dataframe_data(ct.default_nb)
collection_w.insert(data=df)
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_insert_binary_after_index(self):
"""
target: test insert binary after index
method: 1.create index 2.insert binary data
expected: 1.index ok 2.num entities correct
"""
schema = cf.gen_default_binary_collection_schema()
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix), schema=schema)
collection_w.create_index(ct.default_binary_vec_field_name, default_binary_index_params)
assert collection_w.has_index()[0]
index, _ = collection_w.index()
assert index == Index(collection_w.collection, ct.default_binary_vec_field_name, default_binary_index_params)
assert collection_w.indexes[0] == index
df, _ = cf.gen_default_binary_dataframe_data(ct.default_nb)
collection_w.insert(data=df)
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_insert_auto_id_create_index(self):
"""
target: test create index in auto_id=True collection
method: 1.create auto_id=True collection and insert
2.create index
expected: index correct
"""
schema = cf.gen_default_collection_schema(auto_id=True)
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix), schema=schema)
df = cf.gen_default_dataframe_data()
df.drop(ct.default_int64_field_name, axis=1, inplace=True)
mutation_res, _ = collection_w.insert(data=df)
assert cf._check_primary_keys(mutation_res.primary_keys, ct.default_nb)
assert collection_w.num_entities == ct.default_nb
# create index
collection_w.create_index(ct.default_float_vec_field_name, default_index_params)
assert collection_w.has_index()[0]
index, _ = collection_w.index()
assert index == Index(collection_w.collection, ct.default_float_vec_field_name, default_index_params)
assert collection_w.indexes[0] == index
@pytest.mark.tags(CaseLabel.L2)
def test_insert_auto_id_true(self):
"""
target: test insert ids fields values when auto_id=True
method: 1.create collection with auto_id=True 2.insert without ids
expected: verify primary_keys and num_entities
"""
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(auto_id=True)
collection_w = self.init_collection_wrap(name=c_name, schema=schema)
df = cf.gen_default_dataframe_data()
df.drop(ct.default_int64_field_name, axis=1, inplace=True)
mutation_res, _ = collection_w.insert(data=df)
assert cf._check_primary_keys(mutation_res.primary_keys, ct.default_nb)
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_insert_twice_auto_id_true(self):
"""
target: test insert ids fields twice when auto_id=True
method: 1.create collection with auto_id=True 2.insert twice
expected: verify primary_keys unique
"""
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(auto_id=True)
nb = 10
collection_w = self.init_collection_wrap(name=c_name, schema=schema)
df = cf.gen_default_dataframe_data(nb)
df.drop(ct.default_int64_field_name, axis=1, inplace=True)
mutation_res, _ = collection_w.insert(data=df)
primary_keys = mutation_res.primary_keys
assert cf._check_primary_keys(primary_keys, nb)
mutation_res_1, _ = collection_w.insert(data=df)
primary_keys.extend(mutation_res_1.primary_keys)
assert cf._check_primary_keys(primary_keys, nb * 2)
assert collection_w.num_entities == nb * 2
@pytest.mark.tags(CaseLabel.L2)
def test_insert_auto_id_true_list_data(self):
"""
target: test insert ids fields values when auto_id=True
        method: 1.create collection with auto_id=True 2.insert list data without ids field values
expected: assert num entities
"""
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(auto_id=True)
collection_w = self.init_collection_wrap(name=c_name, schema=schema)
data = cf.gen_default_list_data()
mutation_res, _ = collection_w.insert(data=data[1:])
assert mutation_res.insert_count == ct.default_nb
assert cf._check_primary_keys(mutation_res.primary_keys, ct.default_nb)
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_insert_auto_id_true_with_dataframe_values(self):
"""
target: test insert with auto_id=True
method: create collection with auto_id=True
expected: 1.verify num entities 2.verify ids
"""
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(auto_id=True)
collection_w = self.init_collection_wrap(name=c_name, schema=schema)
df = cf.gen_default_dataframe_data(nb=100)
error = {ct.err_code: 0, ct.err_msg: 'Auto_id is True, primary field should not have data'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
assert collection_w.is_empty
@pytest.mark.tags(CaseLabel.L2)
def test_insert_auto_id_true_with_list_values(self):
"""
target: test insert with auto_id=True
method: create collection with auto_id=True
expected: 1.verify num entities 2.verify ids
"""
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(auto_id=True)
collection_w = self.init_collection_wrap(name=c_name, schema=schema)
data = cf.gen_default_list_data(nb=100)
error = {ct.err_code: 0, ct.err_msg: 'The data fields number is not match with schema'}
collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)
assert collection_w.is_empty
@pytest.mark.tags(CaseLabel.L1)
def test_insert_auto_id_false_same_values(self):
"""
target: test insert same ids with auto_id false
method: 1.create collection with auto_id=False 2.insert same int64 field values
        expected: the insert succeeds and primary_keys match the given values
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 100
data = cf.gen_default_list_data(nb=nb)
data[0] = [1 for i in range(nb)]
mutation_res, _ = collection_w.insert(data)
assert mutation_res.insert_count == nb
assert mutation_res.primary_keys == data[0]
@pytest.mark.tags(CaseLabel.L1)
def test_insert_auto_id_false_negative_values(self):
"""
target: test insert negative ids with auto_id false
method: auto_id=False, primary field values is negative
expected: verify num entities
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 100
data = cf.gen_default_list_data(nb)
data[0] = [i for i in range(0, -nb, -1)]
mutation_res, _ = collection_w.insert(data)
assert mutation_res.primary_keys == data[0]
assert collection_w.num_entities == nb
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.xfail(reason="issue 15416")
def test_insert_multi_threading(self):
"""
target: test concurrent insert
method: multi threads insert
expected: verify num entities
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_default_dataframe_data(ct.default_nb)
thread_num = 4
threads = []
primary_keys = df[ct.default_int64_field_name].values.tolist()
def insert(thread_i):
log.debug(f'In thread-{thread_i}')
mutation_res, _ = collection_w.insert(df)
assert mutation_res.insert_count == ct.default_nb
assert mutation_res.primary_keys == primary_keys
for i in range(thread_num):
x = threading.Thread(target=insert, args=(i,))
threads.append(x)
x.start()
for t in threads:
t.join()
assert collection_w.num_entities == ct.default_nb * thread_num
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason="Currently primary keys are not unique")
def test_insert_multi_threading_auto_id(self):
"""
target: test concurrent insert auto_id=True collection
method: 1.create auto_id=True collection 2.concurrent insert
expected: verify primary keys unique
"""
pass
@pytest.mark.tags(CaseLabel.L1)
def test_insert_multi_times(self, dim):
"""
target: test insert multi times
method: insert data multi times
expected: verify num entities
"""
step = 120
nb = 12000
collection_w = self.init_collection_general(prefix, dim=dim)[0]
for _ in range(nb // step):
df = cf.gen_default_dataframe_data(step, dim)
mutation_res, _ = collection_w.insert(data=df)
assert mutation_res.insert_count == step
assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
assert collection_w.num_entities == nb
@pytest.mark.tags(CaseLabel.L1)
def test_insert_all_datatype_collection(self):
"""
target: test insert into collection that contains all datatype fields
method: 1.create all datatype collection 2.insert data
expected: verify num entities
"""
self._connect()
nb = 100
df = cf.gen_dataframe_all_data_type(nb=nb)
self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,
primary_field=ct.default_int64_field_name)
assert self.collection_wrap.num_entities == nb
class TestInsertAsync(TestcaseBase):
"""
******************************************************************
The following cases are used to test insert async
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L1)
def test_insert_sync(self):
"""
target: test async insert
method: insert with async=True
expected: verify num entities
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_default_dataframe_data()
future, _ = collection_w.insert(data=df, _async=True)
future.done()
mutation_res = future.result()
assert mutation_res.insert_count == ct.default_nb
assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_insert_async_false(self):
"""
target: test insert with false async
method: async = false
expected: verify num entities
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_default_dataframe_data()
mutation_res, _ = collection_w.insert(data=df, _async=False)
assert mutation_res.insert_count == ct.default_nb
assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_insert_async_callback(self):
"""
target: test insert with callback func
method: insert with callback func
expected: verify num entities
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_default_dataframe_data()
future, _ = collection_w.insert(data=df, _async=True, _callback=assert_mutation_result)
future.done()
mutation_res = future.result()
assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L2)
def test_insert_async_long(self):
"""
target: test insert with async
        method: insert 5w entities asynchronously
expected: verify num entities
"""
nb = 50000
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_default_dataframe_data(nb)
future, _ = collection_w.insert(data=df, _async=True)
future.done()
mutation_res = future.result()
assert mutation_res.insert_count == nb
assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
assert collection_w.num_entities == nb
@pytest.mark.tags(CaseLabel.L2)
def test_insert_async_callback_timeout(self):
"""
        target: test insert async with a short timeout
        method: insert 10w entities with timeout=0.2
expected: raise exception
"""
nb = 100000
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_default_dataframe_data(nb)
future, _ = collection_w.insert(data=df, _async=True, _callback=None, timeout=0.2)
with pytest.raises(MilvusException):
future.result()
@pytest.mark.tags(CaseLabel.L2)
def test_insert_async_invalid_data(self):
"""
target: test insert async with invalid data
method: insert async with invalid data
expected: raise exception
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
columns = [ct.default_int64_field_name, ct.default_float_vec_field_name]
df = pd.DataFrame(columns=columns)
error = {ct.err_code: 0, ct.err_msg: "Cannot infer schema from empty dataframe"}
collection_w.insert(data=df, _async=True, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_async_invalid_partition(self):
"""
target: test insert async with invalid partition
method: insert async with invalid partition
expected: raise exception
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_default_dataframe_data()
err_msg = "partitionID of partitionName:p can not be find"
future, _ = collection_w.insert(data=df, partition_name="p", _async=True)
future.done()
with pytest.raises(MilvusException, match=err_msg):
future.result()
@pytest.mark.tags(CaseLabel.L2)
def test_insert_async_no_vectors_raise_exception(self):
"""
        target: test async insert with no vector field
        method: build a schema with only an int64 primary field (no vector field) and insert it asynchronously
expected: raise exception
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_collection_schema([cf.gen_int64_field(is_primary=True)])
error = {ct.err_code: 0, ct.err_msg: "fleldSchema lack of vector field."}
future, _ = collection_w.insert(data=df, _async=True, check_task=CheckTasks.err_res, check_items=error)
def assert_mutation_result(mutation_res):
assert mutation_res.insert_count == ct.default_nb
class TestInsertOperation(TestcaseBase):
"""
******************************************************************
The following cases are used to test insert interface operations
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L1)
def test_insert_default_partition(self):
"""
target: test insert entities into default partition
        method: create a partition and insert into the collection
expected: the collection insert count equals to nb
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
partition_w1 = self.init_partition_wrap(collection_w)
data = cf.gen_default_list_data(nb=ct.default_nb)
mutation_res, _ = collection_w.insert(data=data, partition_name=partition_w1.name)
assert mutation_res.insert_count == ct.default_nb
def test_insert_partition_not_existed(self):
"""
target: test insert entities in collection created before
        method: create collection and insert entities into it, with a non-existent partition_name param
expected: error raised
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_default_dataframe_data(nb=ct.default_nb)
error = {ct.err_code: 1, ct.err_msg: "partitionID of partitionName:p can not be existed"}
mutation_res, _ = collection_w.insert(data=df, partition_name="p", check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_insert_partition_repeatedly(self):
"""
target: test insert entities in collection created before
method: create collection and insert entities in it repeatedly, with the partition_name param
        expected: the collection row count equals 2 * nb
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
partition_w1 = self.init_partition_wrap(collection_w)
partition_w2 = self.init_partition_wrap(collection_w)
df = cf.gen_default_dataframe_data(nb=ct.default_nb)
mutation_res, _ = collection_w.insert(data=df, partition_name=partition_w1.name)
new_res, _ = collection_w.insert(data=df, partition_name=partition_w2.name)
assert mutation_res.insert_count == ct.default_nb
assert new_res.insert_count == ct.default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_insert_partition_with_ids(self):
"""
target: test insert entities in collection created before, insert with ids
method: create collection and insert entities in it, with the partition_name param
        expected: the collection insert count equals nb
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
partition_name = cf.gen_unique_str(prefix)
partition_w1 = self.init_partition_wrap(collection_w, partition_name=partition_name)
df = cf.gen_default_dataframe_data(ct.default_nb)
mutation_res, _ = collection_w.insert(data=df, partition_name=partition_w1.name)
assert mutation_res.insert_count == ct.default_nb
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_field_type_not_match(self):
"""
target: test insert entities, with the entity field type updated
method: update entity field type
expected: error raised
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_collection_schema_all_datatype
error = {ct.err_code: 0, ct.err_msg: "Data type is not support"}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_lack_vector_field(self):
"""
target: test insert entities, with no vector field
method: remove entity values of vector field
expected: error raised
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_collection_schema([cf.gen_int64_field(is_primary=True)])
error = {ct.err_code: 0, ct.err_msg: "Primary key field can only be one"}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_no_vector_field_dtype(self):
"""
        target: test insert entities with an invalid vector field dtype
        method: the vector field dtype is invalid (DataType.NONE)
expected: error raised
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
vec_field, _ = self.field_schema_wrap.init_field_schema(name=ct.default_int64_field_name, dtype=DataType.NONE)
field_one = cf.gen_int64_field(is_primary=True)
field_two = cf.gen_int64_field()
        df = [field_one, field_two, vec_field]
error = {ct.err_code: 0, ct.err_msg: "Field dtype must be of DataType."}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_no_vector_field_name(self):
"""
        target: test insert entities with an invalid vector field name
        method: the vector field name is invalid
expected: error raised
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
vec_field = cf.gen_float_vec_field(name=ct.get_invalid_strs)
field_one = cf.gen_int64_field(is_primary=True)
field_two = cf.gen_int64_field()
        df = [field_one, field_two, vec_field]
error = {ct.err_code: 0, ct.err_msg: "Data type is not support."}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
class TestInsertBinary(TestcaseBase):
@pytest.mark.tags(CaseLabel.L0)
def test_insert_binary_partition(self):
"""
target: test insert entities and create partition
method: create collection and insert binary entities in it, with the partition_name param
expected: the collection row count equals to nb
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, schema=default_binary_schema)
df, _ = cf.gen_default_binary_dataframe_data(ct.default_nb)
partition_name = cf.gen_unique_str(prefix)
partition_w1 = self.init_partition_wrap(collection_w, partition_name=partition_name)
        mutation_res, _ = collection_w.insert(data=df, partition_name=partition_w1.name)
assert mutation_res.insert_count == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_insert_binary_multi_times(self):
"""
target: test insert entities multi times and final flush
        method: create collection and insert binary entities multiple times
expected: the collection row count equals to nb
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, schema=default_binary_schema)
df, _ = cf.gen_default_binary_dataframe_data(ct.default_nb)
nums = 2
for i in range(nums):
mutation_res, _ = collection_w.insert(data=df)
assert collection_w.num_entities == ct.default_nb*nums
@pytest.mark.tags(CaseLabel.L2)
def test_insert_binary_create_index(self):
"""
        target: test building index after inserting vectors
        method: insert vectors and build index
expected: no error raised
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, schema=default_binary_schema)
df, _ = cf.gen_default_binary_dataframe_data(ct.default_nb)
mutation_res, _ = collection_w.insert(data=df)
assert mutation_res.insert_count == ct.default_nb
default_index = {"index_type": "BIN_IVF_FLAT", "params": {"nlist": 128}, "metric_type": "JACCARD"}
collection_w.create_index("binary_vector", default_index)
class TestInsertInvalid(TestcaseBase):
"""
******************************************************************
The following cases are used to test insert invalid params
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L2)
def test_insert_ids_invalid(self):
"""
        target: test insert with an invalid primary field type, which is not int64
method: create collection and insert entities in it
expected: raise exception
"""
collection_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=collection_name)
int_field = cf.gen_float_field(is_primary=True)
vec_field = cf.gen_float_vec_field(name='vec')
        df = [int_field, vec_field]
error = {ct.err_code: 0, ct.err_msg: "Primary key type must be DataType.INT64."}
        mutation_res, _ = collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_partition_name(self):
"""
target: test insert with invalid scenario
method: insert with invalid partition name
expected: raise exception
"""
collection_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=collection_name)
df = cf.gen_default_list_data(ct.default_nb)
        error = {ct.err_code: 1, 'err_msg': "partition name is illegal"}
mutation_res, _ = collection_w.insert(data=df, partition_name="p", check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_insert_with_invalid_field_value(self):
"""
target: test insert with invalid field
method: insert with invalid field value
expected: raise exception
"""
collection_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=collection_name)
field_one = cf.gen_int64_field(is_primary=True)
field_two = cf.gen_int64_field()
vec_field = ct.get_invalid_vectors
        df = [field_one, field_two, vec_field]
error = {ct.err_code: 0, ct.err_msg: "The field of schema type must be FieldSchema."}
mutation_res, _ = collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
class TestInsertInvalidBinary(TestcaseBase):
"""
******************************************************************
The following cases are used to test insert invalid params of binary
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L1)
def test_insert_ids_binary_invalid(self):
"""
        target: test insert using customized ids which are not int64
method: create collection and insert entities in it
expected: raise exception
"""
collection_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=collection_name)
field_one = cf.gen_float_field(is_primary=True)
field_two = cf.gen_float_field()
vec_field, _ = self.field_schema_wrap.init_field_schema(name=ct.default_binary_vec_field_name, dtype=DataType.BINARY_VECTOR)
df = [field_one, field_two, vec_field]
error = {ct.err_code: 0, ct.err_msg: "Data type is not support."}
mutation_res, _ = collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_binary_partition_name(self):
"""
target: test insert with invalid scenario
method: insert with invalid partition name
expected: raise exception
"""
collection_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=collection_name)
partition_name = ct.get_invalid_strs
df, _ = cf.gen_default_binary_dataframe_data(ct.default_nb)
error = {ct.err_code: 1, 'err_msg': "The types of schema and data do not match."}
mutation_res, _ = collection_w.insert(data=df, partition_name=partition_name, check_task=CheckTasks.err_res, check_items=error)
class TestInsertString(TestcaseBase):
"""
******************************************************************
The following cases are used to test insert string
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_insert_string_field_is_primary(self):
"""
target: test insert string is primary
method: 1.create a collection and string field is primary
2.insert string field data
expected: Insert Successfully
"""
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_string_pk_default_collection_schema()
collection_w = self.init_collection_wrap(name=c_name, schema=schema)
data = cf.gen_default_list_data(ct.default_nb)
mutation_res, _ = collection_w.insert(data=data)
assert mutation_res.insert_count == ct.default_nb
assert mutation_res.primary_keys == data[2]
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.parametrize("string_fields", [[cf.gen_string_field(name="string_field1")],
[cf.gen_string_field(name="string_field2")],
[cf.gen_string_field(name="string_field3")]])
def test_insert_multi_string_fields(self, string_fields):
"""
target: test insert multi string fields
method: 1.create a collection
2.Insert multi string fields
expected: Insert Successfully
"""
schema = cf.gen_schema_multi_string_fields(string_fields)
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix), schema=schema)
df = cf.gen_dataframe_multi_string_fields(string_fields=string_fields)
collection_w.insert(df)
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_insert_string_field_invalid_data(self):
"""
        target: test insert string field data that does not match the schema type
        method: 1.create a collection
                2.Insert string field data whose type does not match
expected: Raise exceptions
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 10
df = cf.gen_default_dataframe_data(nb)
new_float_value = pd.Series(data=[float(i) for i in range(nb)], dtype="float64")
df.iloc[:, 2] = new_float_value
error = {ct.err_code: 0, ct.err_msg: 'The types of schema and data do not match'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_string_field_name_invalid(self):
"""
        target: test insert string field name is invalid
method: 1.create a collection
2.Insert string field name is invalid
expected: Raise exceptions
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
        df = [cf.gen_int64_field(), cf.gen_string_field(name=ct.get_invalid_strs), cf.gen_float_vec_field()]
error = {ct.err_code: 0, ct.err_msg: 'Data type is not support.'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_string_field_length_exceed(self):
"""
        target: test insert string field exceeding the maximum length
        method: 1.create a collection
                2.Insert a string field whose max_length exceeds the maximum value of 65535
expected: Raise exceptions
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nums = 70000
field_one = cf.gen_int64_field()
field_two = cf.gen_float_field()
field_three = cf.gen_string_field(max_length=nums)
vec_field = cf.gen_float_vec_field()
df = [field_one, field_two, field_three, vec_field]
error = {ct.err_code: 0, ct.err_msg: 'Data type is not support.'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_insert_string_field_dtype_invalid(self):
"""
        target: test insert string field with invalid dtype
method: 1.create a collection
2.Insert string field dtype is invalid
expected: Raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
string_field = self.field_schema_wrap.init_field_schema(name="string", dtype=DataType.STRING)[0]
int_field = cf.gen_int64_field(is_primary=True)
vec_field = cf.gen_float_vec_field()
df = [string_field, int_field, vec_field]
error = {ct.err_code: 0, ct.err_msg: 'Data type is not support.'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_insert_string_field_auto_id_is_true(self):
"""
target: test create collection with string field
method: 1.create a collection
2.Insert string field with auto id is true
expected: Raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
int_field = cf.gen_int64_field()
vec_field = cf.gen_float_vec_field()
string_field = cf.gen_string_field(is_primary=True, auto_id=True)
df = [int_field, string_field, vec_field]
error = {ct.err_code: 0, ct.err_msg: 'Data type is not support.'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
``` |
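The tests above go through the project's collection/connection wrappers. As a point of reference, the following is a minimal, self-contained sketch (not part of the original test file) of the plain pymilvus insert flow those checks exercise; it assumes a local Milvus instance at the default host/port, and the collection and field names are illustrative only.

```python
import random
from pymilvus import connections, Collection, CollectionSchema, FieldSchema, DataType

# assumed local Milvus deployment; adjust host/port as needed
connections.connect(alias="default", host="localhost", port="19530")

# schema mirroring the default test layout: int64 primary key, float scalar, float vector
fields = [
    FieldSchema(name="int64", dtype=DataType.INT64, is_primary=True),
    FieldSchema(name="float", dtype=DataType.FLOAT),
    FieldSchema(name="float_vector", dtype=DataType.FLOAT_VECTOR, dim=128),
]
collection = Collection("insert_demo", CollectionSchema(fields))

nb = 10
data = [
    [i for i in range(nb)],                                      # int64 primary keys
    [float(i) for i in range(nb)],                               # float scalars
    [[random.random() for _ in range(128)] for _ in range(nb)],  # 128-dim vectors
]
# column order and types must match the schema, otherwise Milvus raises the
# "types of schema and data do not match" style errors asserted in the tests above
mutation_res = collection.insert(data)
print(mutation_res.insert_count)  # 10
```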
{
"source": "aakgun/pytorch-VideoDataset",
"score": 3
} |
#### File: pytorch-VideoDataset/models/R2Plus1D.py
```python
import math
import torch.nn as nn
from torch.nn.modules.utils import _triple
class SpatioTemporalConv(nn.Module):
r"""Applies a factored 3D convolution over an input signal composed of several input
planes with distinct spatial and time axes, by performing a 2D convolution over the
spatial axes to an intermediate subspace, followed by a 1D convolution over the time
axis to produce the final output.
Args:
in_channels (int): Number of channels in the input tensor
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to the sides of the input during their respective convolutions. Default: 0
        bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``False``
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=False, first_conv=False):
super(SpatioTemporalConv, self).__init__()
kernel_size = _triple(kernel_size)
stride = _triple(stride)
padding = _triple(padding)
spatial_kernel_size = (1, kernel_size[1], kernel_size[2])
spatial_stride = (1, stride[1], stride[2])
spatial_padding = (0, padding[1], padding[2])
temporal_kernel_size = (kernel_size[0], 1, 1)
temporal_stride = (stride[0], 1, 1)
temporal_padding = (padding[0], 0, 0)
if first_conv:
intermed_channels = 45
else:
intermed_channels = int(math.floor(
(kernel_size[0] * kernel_size[1] * kernel_size[2] * in_channels * out_channels) / (
kernel_size[1] * kernel_size[2] * in_channels + kernel_size[0] * out_channels)))
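        # e.g. a 3x3x3 kernel mapping 64 -> 128 channels gives
        # floor((3*3*3*64*128) / (3*3*64 + 3*128)) = floor(221184 / 960) = 230
        # intermediate channels, keeping the parameter count close to the full 3D conv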
self.spatial_conv = nn.Conv3d(in_channels, intermed_channels, spatial_kernel_size,
stride=spatial_stride, padding=spatial_padding, bias=bias)
self.bn1 = nn.BatchNorm3d(intermed_channels)
self.temporal_conv = nn.Conv3d(intermed_channels, out_channels, temporal_kernel_size,
stride=temporal_stride, padding=temporal_padding, bias=bias)
self.bn2 = nn.BatchNorm3d(out_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.relu(self.bn1(self.spatial_conv(x)))
x = self.relu(self.bn2(self.temporal_conv(x)))
return x
class ResBlock(nn.Module):
r"""Single block for the ResNet network. Uses SpatioTemporalConv in
the standard ResNet block layout (conv->batchnorm->ReLU->conv->batchnorm->sum->ReLU)
Args:
in_channels (int): Number of channels in the input tensor
out_channels (int): Number of channels in the output produced by the block
kernel_size (int or tuple): Size of the convolving kernels
downsample (bool, optional): If ``True``, the output size is to be smaller than the input. Default: ``False``
"""
def __init__(self, in_channels, out_channels, kernel_size, downsample=False):
super(ResBlock, self).__init__()
self.downsample = downsample
padding = kernel_size // 2
if self.downsample:
self.conv1 = SpatioTemporalConv(in_channels, out_channels, kernel_size, padding=padding, stride=2)
self.downsampleconv = SpatioTemporalConv(in_channels, out_channels, 1, stride=2)
self.downsamplebn = nn.BatchNorm3d(out_channels)
else:
self.conv1 = SpatioTemporalConv(in_channels, out_channels, kernel_size, padding=padding)
self.bn1 = nn.BatchNorm3d(out_channels)
self.conv2 = SpatioTemporalConv(out_channels, out_channels, kernel_size, padding=padding)
self.bn2 = nn.BatchNorm3d(out_channels)
self.relu = nn.ReLU()
def forward(self, x):
res = self.relu(self.bn1(self.conv1(x)))
res = self.bn2(self.conv2(res))
if self.downsample:
x = self.downsamplebn(self.downsampleconv(x))
return self.relu(x + res)
class ResLayer(nn.Module):
r"""Forms a single layer of the ResNet network, with a number of repeating
blocks of same output size stacked on top of each other
Args:
in_channels (int): Number of channels in the input tensor
out_channels (int): Number of channels in the output produced by the layer
kernel_size (int or tuple): Size of the convolving kernels
layer_size (int): Number of blocks to be stacked to form the layer
downsample (bool, optional): If ``True``, the first block in layer will implement downsampling. Default: ``False``
"""
def __init__(self, in_channels, out_channels, kernel_size, layer_size, downsample=False):
super(ResLayer, self).__init__()
# implement the first block
self.block1 = ResBlock(in_channels, out_channels, kernel_size, downsample)
# prepare module list to hold all (layer_size - 1) blocks
self.blocks = nn.ModuleList([])
for i in range(layer_size - 1):
# all these blocks are identical, and have downsample = False by default
self.blocks += [ResBlock(out_channels, out_channels, kernel_size)]
def forward(self, x):
x = self.block1(x)
for block in self.blocks:
x = block(x)
return x
class FeatureLayer(nn.Module):
r"""Forms the overall ResNet feature extractor by initializng 5 layers, with the number of blocks in
each layer set by layer_sizes, and by performing a global average pool at the end producing a
512-dimensional vector for each element in the batch.
Args:
layer_sizes (tuple): An iterable containing the number of blocks in each layer
"""
def __init__(self, layer_sizes, input_channel=3):
super(FeatureLayer, self).__init__()
self.conv1 = SpatioTemporalConv(input_channel, 64, (1, 7, 7), stride=(1, 2, 2), padding=(0, 3, 3),
first_conv=True)
self.conv2 = ResLayer(64, 64, 3, layer_sizes[0])
self.conv3 = ResLayer(64, 128, 3, layer_sizes[1], downsample=True)
self.conv4 = ResLayer(128, 256, 3, layer_sizes[2], downsample=True)
self.conv5 = ResLayer(256, 512, 3, layer_sizes[3], downsample=True)
# global average pooling of the output
self.pool = nn.AdaptiveAvgPool3d(1)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.conv4(x)
x = self.conv5(x)
x = self.pool(x)
return x.view(-1, 512)
class R2Plus1D(nn.Module):
r"""Forms a complete ResNet classifier producing vectors of size num_classes, by initializng 5 layers,
with the number of blocks in each layer set by layer_sizes, and by performing a global average pool
at the end producing a 512-dimensional vector for each element in the batch,
and passing them through a Linear layer.
Args:
num_classes(int): Number of classes in the data
layer_sizes (tuple): An iterable containing the number of blocks in each layer
"""
def __init__(self, num_classes, layer_sizes, input_channel=3):
super(R2Plus1D, self).__init__()
self.feature = FeatureLayer(layer_sizes, input_channel)
self.fc = nn.Linear(512, num_classes)
self.__init_weight()
def forward(self, x):
x = self.feature(x)
logits = self.fc(x)
return logits
def __init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv3d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm3d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
```
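A hypothetical usage sketch (not from the original repository) for the classifier above, assuming the module is importable as `models.R2Plus1D` per the file header: it builds a variant with two residual blocks per stage and runs a random batch of 16-frame RGB clips through it.

```python
import torch
from models.R2Plus1D import R2Plus1D  # assumed import path, matching the file header above

# two ResBlocks per stage, 101 output classes (e.g. a UCF101-sized label space)
model = R2Plus1D(num_classes=101, layer_sizes=(2, 2, 2, 2))

clips = torch.randn(2, 3, 16, 112, 112)  # (batch, channels, frames, height, width)
logits = model(clips)
print(logits.shape)  # torch.Size([2, 101])
```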
#### File: aakgun/pytorch-VideoDataset/transforms.py
```python
import torch
import torchvision
import numpy as np
import PIL
import collections.abc
import random
import cv2
import os
__all__ = ['VideoFilePathToTensor', 'VideoFolderPathToTensor', 'VideoResize', 'VideoRandomCrop', 'VideoCenterCrop', 'VideoRandomHorizontalFlip',
'VideoRandomVerticalFlip', 'VideoGrayscale']
class VideoFilePathToTensor(object):
""" load video at given file path to torch.Tensor (C x L x H x W, C = 3)
It can be composed with torchvision.transforms.Compose().
Args:
max_len (int): Maximum output time depth (L <= max_len). Default is None.
If it is set to None, it will output all frames.
        fps (int): sample frames per second. It must be lower than or equal to the original video fps.
Default is None.
        padding_mode (str): Type of padding. Defaults to None. Only available when max_len is not None.
            - None: no padding; the video length is variable.
            - 'zero': pad the remaining empty frames with zeros.
            - 'last': pad the remaining empty frames with the last frame.
"""
def __init__(self, max_len=None, fps=None, padding_mode=None):
self.max_len = max_len
self.fps = fps
assert padding_mode in (None, 'zero', 'last')
self.padding_mode = padding_mode
self.channels = 3 # only available to read 3 channels video
def __call__(self, path):
"""
Args:
path (str): path of video file.
Returns:
torch.Tensor: Video Tensor (C x L x H x W)
"""
# open video file
cap = cv2.VideoCapture(path)
assert(cap.isOpened())
# calculate sample_factor to reset fps
sample_factor = 1
if self.fps:
old_fps = cap.get(cv2.CAP_PROP_FPS) # fps of video
sample_factor = int(old_fps / self.fps)
assert(sample_factor >= 1)
# init empty output frames (C x L x H x W)
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
num_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
time_len = None
if self.max_len:
# time length has upper bound
if self.padding_mode:
# padding all video to the same time length
time_len = self.max_len
else:
                # video has a variable time length
time_len = min(int(num_frames / sample_factor), self.max_len)
else:
# time length is unlimited
time_len = int(num_frames / sample_factor)
frames = torch.FloatTensor(self.channels, time_len, height, width)
for index in range(time_len):
frame_index = sample_factor * index
# read frame
cap.set(cv2.CAP_PROP_POS_FRAMES, frame_index)
ret, frame = cap.read()
if ret:
# successfully read frame
# BGR to RGB
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = torch.from_numpy(frame)
# (H x W x C) to (C x H x W)
frame = frame.permute(2, 0, 1)
frames[:, index, :, :] = frame.float()
else:
# reach the end of the video
if self.padding_mode == 'zero':
# fill the rest frames with 0.0
frames[:, index:, :, :] = 0
elif self.padding_mode == 'last':
# fill the rest frames with the last frame
assert(index > 0)
frames[:, index:, :, :] = frames[:, index-1, :, :].view(self.channels, 1, height, width)
break
frames /= 255
cap.release()
return frames
class VideoFolderPathToTensor(object):
""" load video at given folder path to torch.Tensor (C x L x H x W)
It can be composed with torchvision.transforms.Compose().
Args:
max_len (int): Maximum output time depth (L <= max_len). Default is None.
If it is set to None, it will output all frames.
        padding_mode (str): Type of padding. Defaults to None. Only available when max_len is not None.
            - None: no padding; the video length is variable.
            - 'zero': pad the remaining empty frames with zeros.
            - 'last': pad the remaining empty frames with the last frame.
"""
def __init__(self, max_len=None, padding_mode=None):
self.max_len = max_len
assert padding_mode in (None, 'zero', 'last')
self.padding_mode = padding_mode
def __call__(self, path):
"""
Args:
path (str): path of video folder.
Returns:
torch.Tensor: Video Tensor (C x L x H x W)
"""
        # get video properties
frames_path = sorted([os.path.join(path,f) for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))])
frame = cv2.imread(frames_path[0])
height, width, channels = frame.shape
num_frames = len(frames_path)
# init empty output frames (C x L x H x W)
time_len = None
if self.max_len:
# time length has upper bound
if self.padding_mode:
# padding all video to the same time length
time_len = self.max_len
else:
                # video has a variable time length
time_len = min(num_frames, self.max_len)
else:
# time length is unlimited
time_len = num_frames
frames = torch.FloatTensor(channels, time_len, height, width)
# load the video to tensor
for index in range(time_len):
if index < num_frames:
# frame exists
# read frame
frame = cv2.imread(frames_path[index])
# BGR to RGB
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = torch.from_numpy(frame)
# (H x W x C) to (C x H x W)
frame = frame.permute(2, 0, 1)
frames[:, index, :, :] = frame.float()
else:
# reach the end of the video
if self.padding_mode == 'zero':
# fill the rest frames with 0.0
frames[:, index:, :, :] = 0
elif self.padding_mode == 'last':
# fill the rest frames with the last frame
assert(index > 0)
frames[:, index:, :, :] = frames[:, index-1, :, :].view(channels, 1, height, width)
break
frames /= 255
return frames
class VideoResize(object):
""" resize video tensor (C x L x H x W) to (C x L x h x w)
Args:
size (sequence): Desired output size. size is a sequence like (H, W),
output size will matched to this.
interpolation (int, optional): Desired interpolation. Default is 'PIL.Image.BILINEAR'
"""
def __init__(self, size, interpolation=PIL.Image.BILINEAR):
        assert isinstance(size, collections.abc.Iterable) and len(size) == 2
self.size = size
self.interpolation = interpolation
def __call__(self, video):
"""
Args:
video (torch.Tensor): Video to be scaled (C x L x H x W)
Returns:
torch.Tensor: Rescaled video (C x L x h x w)
"""
h, w = self.size
C, L, H, W = video.size()
rescaled_video = torch.FloatTensor(C, L, h, w)
        # use torchvision implementation to resize video frames
transform = torchvision.transforms.Compose([
torchvision.transforms.ToPILImage(),
torchvision.transforms.Resize(self.size, self.interpolation),
torchvision.transforms.ToTensor(),
])
for l in range(L):
frame = video[:, l, :, :]
frame = transform(frame)
rescaled_video[:, l, :, :] = frame
return rescaled_video
def __repr__(self):
return self.__class__.__name__ + '(size={0})'.format(self.size)
class VideoRandomCrop(object):
""" Crop the given Video Tensor (C x L x H x W) at a random location.
Args:
size (sequence): Desired output size like (h, w).
"""
def __init__(self, size):
assert len(size) == 2
self.size = size
def __call__(self, video):
"""
Args:
video (torch.Tensor): Video (C x L x H x W) to be cropped.
Returns:
torch.Tensor: Cropped video (C x L x h x w).
"""
H, W = video.size()[2:]
h, w = self.size
assert H >= h and W >= w
top = np.random.randint(0, H - h)
left = np.random.randint(0, W - w)
video = video[:, :, top : top + h, left : left + w]
return video
class VideoCenterCrop(object):
""" Crops the given video tensor (C x L x H x W) at the center.
Args:
size (sequence): Desired output size of the crop like (h, w).
"""
def __init__(self, size):
self.size = size
def __call__(self, video):
"""
Args:
video (torch.Tensor): Video (C x L x H x W) to be cropped.
Returns:
torch.Tensor: Cropped Video (C x L x h x w).
"""
H, W = video.size()[2:]
h, w = self.size
assert H >= h and W >= w
top = int((H - h) / 2)
left = int((W - w) / 2)
video = video[:, :, top : top + h, left : left + w]
return video
class VideoRandomHorizontalFlip(object):
""" Horizontal flip the given video tensor (C x L x H x W) randomly with a given probability.
Args:
p (float): probability of the video being flipped. Default value is 0.5.
"""
def __init__(self, p=0.5):
self.p = p
def __call__(self, video):
"""
Args:
video (torch.Tensor): Video to flipped.
Returns:
torch.Tensor: Randomly flipped video.
"""
if random.random() < self.p:
            # horizontally flip the video
video = video.flip([3])
return video
class VideoRandomVerticalFlip(object):
""" Vertical flip the given video tensor (C x L x H x W) randomly with a given probability.
Args:
p (float): probability of the video being flipped. Default value is 0.5.
"""
def __init__(self, p=0.5):
self.p = p
def __call__(self, video):
"""
Args:
video (torch.Tensor): Video to flipped.
Returns:
torch.Tensor: Randomly flipped video.
"""
if random.random() < self.p:
            # vertically flip the video
video = video.flip([2])
return video
class VideoGrayscale(object):
""" Convert video (C x L x H x W) to grayscale (C' x L x H x W, C' = 1 or 3)
Args:
num_output_channels (int): (1 or 3) number of channels desired for output video
"""
def __init__(self, num_output_channels=1):
assert num_output_channels in (1, 3)
self.num_output_channels = num_output_channels
def __call__(self, video):
"""
Args:
video (torch.Tensor): Video (3 x L x H x W) to be converted to grayscale.
Returns:
torch.Tensor: Grayscaled video (1 x L x H x W or 3 x L x H x W)
"""
C, L, H, W = video.size()
grayscaled_video = torch.FloatTensor(self.num_output_channels, L, H, W)
        # use torchvision implementation to convert video frames to grayscale
transform = torchvision.transforms.Compose([
torchvision.transforms.ToPILImage(),
torchvision.transforms.Grayscale(self.num_output_channels),
torchvision.transforms.ToTensor(),
])
for l in range(L):
frame = video[:, l, :, :]
frame = transform(frame)
grayscaled_video[:, l, :, :] = frame
return grayscaled_video
``` |
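A hypothetical composition sketch (not part of the original file), assuming `transforms.py` is importable from the working directory: it chains the transforms above to load a video file, resize it, and take a random crop. The video path is a placeholder, and the `fps=10` setting assumes the source video runs at 10 fps or more.

```python
import torchvision
from transforms import VideoFilePathToTensor, VideoResize, VideoRandomCrop  # assumed local import

transform = torchvision.transforms.Compose([
    VideoFilePathToTensor(max_len=50, fps=10, padding_mode='last'),  # -> (3, 50, H, W)
    VideoResize((128, 171)),                                         # -> (3, 50, 128, 171)
    VideoRandomCrop((112, 112)),                                     # -> (3, 50, 112, 112)
])

video = transform('path/to/video.mp4')  # placeholder path
print(video.shape)
```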
{
"source": "aakhundov/deep-german",
"score": 3
} |
#### File: aakhundov/deep-german/cnn_deep_german.py
```python
import sys
import tensorflow as tf
from datetime import datetime
from read_data import read_data_sets
from cnn_word_model import CNNWordModel
MAX_WORD_LEN = 31
ALPHABET_SIZE = 31
NUM_GENDERS = 3
EPOCHS = 50
BATCH_SIZE = 128 # -batch 128, 256, or 512
NUM_LAYERS = 3 # fixed in this script (no -layers option is parsed below)
DROPOUT_RATE = 0.5 # -dropout 0.0 or 0.5
LEARNING_RATE = 1e-3 # -learning 1e-2, 1e-3, or 1e-4
WINDOW_SIZE = 7 # -window 3, 5, 7, or 9
CONV_FILTERS = [32, 64]
NUM_HIDDEN = [512, 256, 128]
def echo(*args):
print("[{0}] ".format(datetime.now()), end="")
print(*args)
def log(file, message=""):
file.write(message + "\n")
if message != "":
echo(message)
else:
print()
while len(sys.argv) > 1:
option = sys.argv[1]; del sys.argv[1]
if option == "-batch":
BATCH_SIZE = int(sys.argv[1]); del sys.argv[1]
elif option == "-window":
WINDOW_SIZE = int(sys.argv[1]); del sys.argv[1]
elif option == "-dropout":
DROPOUT_RATE = float(sys.argv[1]); del sys.argv[1]
elif option == "-learning":
LEARNING_RATE = float(sys.argv[1]); del sys.argv[1]
else:
print(sys.argv[0], ": invalid option", option)
sys.exit(1)
model_name = "{0}_{1}_{2}_{3}_{4}_{5}".format(
"CNN", NUM_LAYERS, WINDOW_SIZE,
LEARNING_RATE, DROPOUT_RATE, BATCH_SIZE
)
log_path = "./results/logs/" + model_name + ".txt"
model_path = "./results/models/" + model_name + ".ckpt"
log_file = open(log_path, "w")
print("hidden layers:", NUM_LAYERS)
print("hidden units:", NUM_HIDDEN[:NUM_LAYERS])
print("conv. filters:", CONV_FILTERS)
print("window size:", WINDOW_SIZE)
print("learning rate:", LEARNING_RATE)
print("dropout rate:", DROPOUT_RATE)
print("batch size:", BATCH_SIZE)
print()
# BUILDING GRAPH
echo("Creating placeholders...")
xs = tf.placeholder(tf.float32, [None, MAX_WORD_LEN, ALPHABET_SIZE])
ys = tf.placeholder(tf.float32, [None, NUM_GENDERS])
dropout = tf.placeholder(tf.float32)
echo("Creating model...")
model = CNNWordModel(
xs, ys, dropout,
CONV_FILTERS, WINDOW_SIZE, NUM_LAYERS, NUM_HIDDEN,
tf.train.AdamOptimizer(LEARNING_RATE)
)
# PREPARING DATA
echo("Preparing data...")
# preparing words dataset
dataset = read_data_sets()
print()
echo("Training set:", dataset.train.words.shape[0])
echo("Validation set:", dataset.validation.words.shape[0])
echo("Testing set:", dataset.test.words.shape[0])
print()
# EXECUTING THE GRAPH
best_epoch = 0
best_val_error = 1.0
saver = tf.train.Saver()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
echo("Initializing variables...")
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
echo("Training...")
print()
steps_per_epoch = dataset.train.words.shape[0] // BATCH_SIZE
for epoch in range(1, EPOCHS+1):
for step in range(steps_per_epoch):
batch_xs, batch_ys, seq_len = dataset.train.next_batch(BATCH_SIZE)
sess.run(
model.training,
feed_dict={
xs: batch_xs,
ys: batch_ys,
dropout: DROPOUT_RATE
}
)
val_loss, val_error = 0, 0
val_batches = dataset.validation.words.shape[0] // 1024
for i in range(val_batches):
b_val_loss, b_val_error = sess.run(
[model.loss, model.error],
feed_dict={
xs: dataset.validation.words[1024 * i:1024 * (i + 1)],
ys: dataset.validation.genders[1024 * i:1024 * (i + 1)],
dropout: 0.0
}
)
val_loss += b_val_loss / val_batches
val_error += b_val_error / val_batches
if val_error < best_val_error:
best_epoch = epoch
best_val_error = val_error
saver.save(sess, model_path)
log(log_file, "Epoch {:2d}: error {:3.2f}% loss {:.4f}".format(
epoch, 100 * val_error, val_loss
))
saver.restore(sess, model_path)
test_loss, test_error = 0, 0
test_batches = dataset.test.words.shape[0] // 1024
for i in range(test_batches):
b_test_loss, b_test_error = sess.run(
[model.loss, model.error],
feed_dict={
xs: dataset.test.words[1024 * i:1024 * (i + 1)],
ys: dataset.test.genders[1024 * i:1024 * (i + 1)],
dropout: 0.0
}
)
test_loss += b_test_loss / test_batches
test_error += b_test_error / test_batches
log(log_file)
log(log_file, "Best epoch: {0}".format(best_epoch))
log(log_file)
log(log_file, "Test Set: error {:3.2f}% loss {:.4f}".format(
100 * test_error, test_loss
))
log_file.close()
```
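A hypothetical invocation sketch (not part of the original script), showing how the option loop above maps command-line flags onto the run's model name, log path, and checkpoint path.

```python
# The option loop consumes flag/value pairs from sys.argv, e.g.:
#
#   python cnn_deep_german.py -batch 256 -window 5 -dropout 0.5 -learning 1e-3
#
# With NUM_LAYERS fixed at 3, that run would be identified as:
model_name = "{0}_{1}_{2}_{3}_{4}_{5}".format("CNN", 3, 5, 1e-3, 0.5, 256)
print(model_name)                               # CNN_3_5_0.001_0.5_256
print("./results/logs/" + model_name + ".txt")  # ./results/logs/CNN_3_5_0.001_0.5_256.txt
```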
#### File: aakhundov/deep-german/evaluate_auto.py
```python
import sys
import numpy as np
import tensorflow as tf
import tensorflow.contrib.rnn as rnn
from read_data import nouns_to_one_hot
from rnn_word_model import RNNWordModel
MAX_WORD_LEN = 31
ALPHABET_SIZE = 31
NUM_GENDERS = 3
MIN_GEN_WORD_LEN = 3
MAX_GEN_WORD_LEN = 20
GEN_WORDS_PER_GENDER = 10000
german_chars = "abcdefghijklmnopqrstuvwxyzßäöü"
total_chars = len(german_chars)
noun_endings = {
"masculine": ["ant", "ast", "er", "ich", "eich",
"ig", "eig", "ling", "or", "us", "ismus"],
"feminine": ["anz", "e", "ei", "enz", "heit", "ie", "ik", "keit",
"schaft", "sion", "sis", "tion", "tät", "ung", "ur"],
"neutral": ["chen", "lein", "en", "il", "ing",
"ma", "ment", "nis", "tum", "um", ]
}
def generate_nouns(noun_ending, count=1, min_len=1, max_len=30):
end_len = len(noun_ending)
min_len = max(min_len, end_len+1)
max_len = max(max_len, min_len+1)
result = []
for _ in range(count):
length = np.random.randint(low=min_len-end_len, high=max_len-end_len)
word = "".join([german_chars[np.random.randint(total_chars)] for _ in range(length)])
word += noun_ending
result.append(word)
return result
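# e.g. generate_nouns("ung", count=2, min_len=5, max_len=8) might return pseudo-nouns
# such as ["xqung", "bkfaung"]: random lowercase/umlaut letters followed by the fixed ending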
# parsing hyperparameters from model name
model_name = "BasicLSTMCell_2_0.01_0.5_128"
if len(sys.argv) > 1:
model_name = sys.argv[1]
model_tokens = model_name.split("_")
model_path = "./results/models/" + model_name + ".ckpt"
CELL_TYPE = getattr(rnn, model_tokens[0])
NUM_LAYERS = int(model_tokens[1])
LEARNING_RATE = float(model_tokens[2])
DROPOUT_RATE = float(model_tokens[3])
NUM_HIDDEN = [128, 128, 128]
# creating placeholders and model
xs = tf.placeholder(tf.float32, [None, MAX_WORD_LEN, ALPHABET_SIZE])
ys = tf.placeholder(tf.float32, [None, NUM_GENDERS])
seq = tf.placeholder(tf.int32, [None])
dropout = tf.placeholder(tf.float32)
model = RNNWordModel(
xs, ys, seq, dropout,
CELL_TYPE, NUM_LAYERS, NUM_HIDDEN,
tf.train.AdamOptimizer(LEARNING_RATE)
)
# running a session
saver = tf.train.Saver()
np.random.seed(12345)
with tf.Session() as sess:
saver.restore(sess, model_path)
for gender in noun_endings.keys():
print(gender, "endings")
print("-------------------------------------")
for ending in noun_endings[gender]:
nouns = generate_nouns(
ending, GEN_WORDS_PER_GENDER,
MIN_GEN_WORD_LEN, MAX_GEN_WORD_LEN
)
one_hot, seq_len = nouns_to_one_hot(nouns)
prediction = sess.run(
model.prediction,
feed_dict={
xs: one_hot,
seq: seq_len,
dropout: 0.0
}
)
counts = [0, 0, 0]
for g in np.argmax(prediction, axis=1):
counts[g] += 1
fractions = [c / sum(counts) * 100 for c in counts]
print("-{:<10}{:<10.2f}{:<10.2f}{:<10.2f}".format(
ending, *fractions
))
print()
```
#### File: aakhundov/deep-german/mlp_deep_german.py
```python
import sys
import numpy as np
import tensorflow as tf
from datetime import datetime
from read_data import read_data_sets
from mlp_word_model import MLPWordModel
MAX_WORD_LEN = 31
ALPHABET_SIZE = 31
NUM_GENDERS = 3
EPOCHS = 30
# the hyperparameters below are
# configurable with CL arguments
BATCH_SIZE = 128 # -batch 128, 256, or 512
NUM_LAYERS = 3 # -layers 1, 2, or 3
ACTIVATION = tf.nn.relu # -activation "sigmoid", "tanh", or "relu"
DROPOUT_RATE = 0.0 # -dropout 0.0 or 0.5
LEARNING_RATE = 1e-3 # -learning 1e-2, 1e-3, or 1e-4
NUM_HIDDEN = [512, 256, 128]
def echo(*args):
print("[{0}] ".format(datetime.now()), end="")
print(*args)
def log(file, message=""):
file.write(message + "\n")
if message != "":
echo(message)
else:
print()
while len(sys.argv) > 1:
option = sys.argv[1]; del sys.argv[1]
if option == "-batch":
BATCH_SIZE = int(sys.argv[1]); del sys.argv[1]
elif option == "-layers":
NUM_LAYERS = int(sys.argv[1]); del sys.argv[1]
elif option == "-activation":
ACTIVATION = getattr(tf.nn, sys.argv[1]); del sys.argv[1]
elif option == "-dropout":
DROPOUT_RATE = float(sys.argv[1]); del sys.argv[1]
elif option == "-learning":
LEARNING_RATE = float(sys.argv[1]); del sys.argv[1]
else:
print(sys.argv[0], ": invalid option", option)
sys.exit(1)
model_name = "{0}_{1}_{2}_{3}_{4}_{5}".format(
"MLP", NUM_LAYERS, ACTIVATION.__name__,
LEARNING_RATE, DROPOUT_RATE, BATCH_SIZE
)
log_path = "./results/logs/" + model_name + ".txt"
model_path = "./results/models/" + model_name + ".ckpt"
log_file = open(log_path, "w")
print("hidden layers:", NUM_LAYERS)
print("activation:", ACTIVATION.__name__)
print("hidden units:", NUM_HIDDEN[:NUM_LAYERS])
print("learning rate:", LEARNING_RATE)
print("dropout rate:", DROPOUT_RATE)
print("batch size:", BATCH_SIZE)
print()
# BUILDING GRAPH
echo("Creating placeholders...")
xs = tf.placeholder(tf.float32, [None, MAX_WORD_LEN * ALPHABET_SIZE])
ys = tf.placeholder(tf.float32, [None, NUM_GENDERS])
dropout = tf.placeholder(tf.float32)
echo("Creating model...")
model = MLPWordModel(
xs, ys, dropout, ACTIVATION, NUM_LAYERS, NUM_HIDDEN,
tf.train.AdamOptimizer(LEARNING_RATE)
)
# PREPARING DATA
echo("Preparing data...")
# preparing words dataset
dataset = read_data_sets()
print()
echo("Training set:", dataset.train.words.shape[0])
echo("Validation set:", dataset.validation.words.shape[0])
echo("Testing set:", dataset.test.words.shape[0])
print()
# EXECUTING THE GRAPH
best_epoch = 0
best_val_error = 1.0
saver = tf.train.Saver()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
echo("Initializing variables...")
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
echo("Training...")
print()
steps_per_epoch = dataset.train.words.shape[0] // BATCH_SIZE
for epoch in range(1, EPOCHS+1):
for step in range(steps_per_epoch):
batch_xs, batch_ys, seq_len = dataset.train.next_batch(BATCH_SIZE)
batch_xs = np.reshape(batch_xs, [-1, MAX_WORD_LEN * ALPHABET_SIZE])
sess.run(
model.training,
feed_dict={
xs: batch_xs,
ys: batch_ys,
dropout: DROPOUT_RATE
}
)
val_loss, val_error = sess.run(
[model.loss, model.error],
feed_dict={
xs: np.reshape(dataset.validation.words, [-1, MAX_WORD_LEN * ALPHABET_SIZE]),
ys: dataset.validation.genders,
dropout: 0.0
}
)
if val_error < best_val_error:
best_epoch = epoch
best_val_error = val_error
saver.save(sess, model_path)
log(log_file, "Epoch {:2d}: error {:3.2f}% loss {:.4f}".format(
epoch, 100 * val_error, val_loss
))
saver.restore(sess, model_path)
test_loss, test_error = sess.run(
[model.loss, model.error],
feed_dict={
xs: np.reshape(dataset.test.words, [-1, MAX_WORD_LEN * ALPHABET_SIZE]),
ys: dataset.test.genders,
dropout: 0.0
}
)
log(log_file)
log(log_file, "Best epoch: {0}".format(best_epoch))
log(log_file)
log(log_file, "Test Set: error {:3.2f}% loss {:.4f}".format(
100 * test_error, test_loss
))
log_file.close()
``` |
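Unlike the RNN scripts, this MLP flattens each one-hot word tensor before feeding it to the network. A minimal shape check of that reshape on dummy data, using the constants above; the batch size is arbitrary.

```python
# Shape check for the flattening applied before every feed_dict above.
import numpy as np

MAX_WORD_LEN = 31
ALPHABET_SIZE = 31
batch = np.zeros((128, MAX_WORD_LEN, ALPHABET_SIZE), dtype=np.float32)  # dummy one-hot words
flat = np.reshape(batch, [-1, MAX_WORD_LEN * ALPHABET_SIZE])
print(flat.shape)  # (128, 961): one flat input vector per word
```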
{
"source": "aakhundov/mnist-challenge",
"score": 3
} |
#### File: mnist-challenge/code/gp.py
```python
import sys
import numpy as np
import scipy.spatial.distance as sp
import data
# computing covariance matrix for GP by means of SE kernel:
# depending on what Xs and Ys are, this can be K, K*, or K**.
# scipy.spatial.distance.pdist and .cdist are used to compute
# the matrix of squared Euclidean distances efficiently.
# "lsquared" is an argument, sigmaF is assumed to be 1.
def compute_se_kernel_matrix(Xs, Ys, lsquared):
if Xs is Ys:
dist = sp.squareform(sp.pdist(Xs, "sqeuclidean"))
else:
dist = sp.cdist(Xs, Ys, "sqeuclidean")
return np.exp(-dist / (2.0 * lsquared))
# actual Gaussian Process: the target values for unobserved data
# X* ("X_s") are inferred based on the observed dataset "X" and the
# corresponding labels "T". "lsquared" is used while computing the
# SE-kernel function. sigmaF and sigmaN are taken as 1 and 0, as
# only mean, and not variance, is of practical importance here.
def gaussian_process(X, T, X_s, lsquared):
# computing the K matrix for the observed data
K = compute_se_kernel_matrix(X, X, lsquared)
print "K matrix computed"
# computing the K* transposed matrix
K_s_T = compute_se_kernel_matrix(X_s, X, lsquared)
print "K*^T matrix computed"
# inverting the K matrix
K_inv = np.linalg.inv(K)
print "K matrix inverted"
# multiplying the K*^T and K^-1 matrices
K_s_T_times_K_inv = np.dot(K_s_T, K_inv)
print "K*^T times K^-1 matrix computed"
inputs, classes = len(X_s), T.max()+1
predictions = np.zeros((inputs, classes))
# for each class k:
for k in range(classes):
# transforming target labels into k-class vs. rest
# representation: 1.0 for the k-class, -1.0 for rest
k_class_vs_rest = np.where(T == k, 1.0, -1.0)
# inferring the corresponding k-class (1.0) vs. the rest (-1.0) values
# for the unobserved data by multiplying pre-computed [K*^T times K^-1]
# matrix by the above "k_class_vs_rest" vector. what is obtained is the
# set of the mean values of the k-class vs. rest regression in the
# unobserved data points X* ("X_s")
result = np.dot(K_s_T_times_K_inv, k_class_vs_rest)
# storing the predicted k-class vs.
# rest means in the data points X*
predictions[:, k] = result
print "{0} binary classifications done".format(classes)
# inferring actual target labels in accordance with
# the highest predicted k-class vs. rest mean
labels = np.argmax(predictions, axis=1)
print "Class labels detected"
print
return labels
# comparing predicted vs. true labels
# and returning the corresponding error score
def get_error_score(T_predicted, T_true):
count = np.count_nonzero(np.array(T_predicted != T_true))
return count * 100.0 / len(T_predicted)
if __name__ == "__main__":
# command-line arguments (and their default values)
ntrain = 60000 # number of training samples used
ntest = 10000 # number of testing samples used
deskew = True # deskew input images or not (by default: yes)
normalize = True # normalize input vectors or not (by default: yes)
lsquared = 33.0 # l^2 used in SE kernel computation
# processing command-line arguments
while len(sys.argv) > 1:
option = sys.argv[1]; del sys.argv[1]
if option == "-ntrain":
ntrain = int(sys.argv[1]); del sys.argv[1]
elif option == "-ntest":
ntest = int(sys.argv[1]); del sys.argv[1]
elif option == "-deskew":
deskew = int(sys.argv[1]); del sys.argv[1]
elif option == "-normalize":
normalize = int(sys.argv[1]); del sys.argv[1]
elif option == "-lsquared":
lsquared = float(sys.argv[1]); del sys.argv[1]
else:
print sys.argv[0], ": invalid option", option
sys.exit(1)
print "Gaussian Processes"
print
print "Reading data..."
# reading the data and applying configured pre-processing steps
X_train, T_train = data.get_training_data(ntrain, normalize=normalize, deskew=deskew)
X_test, T_test = data.get_testing_data(ntest, normalize=normalize, deskew=deskew)
print "{0} training data read".format(len(X_train))
print "{0} testing data read".format(len(X_test))
print
# running a Gaussian process on training and testing sets, with "lsquared"
T_predicted = gaussian_process(X_train, T_train, X_test, lsquared=lsquared)
# evaluating the model performance on the testing set
print "Testing Set Error: {0:.3f}".format(
get_error_score(T_predicted, T_test)
)
print
```
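Since only the predictive mean matters here, the whole GP reduces to mean = K*^T K^{-1} t, computed once per one-vs-rest target vector. A minimal numpy sketch of that pipeline on toy 2-D data (sigmaF = 1, sigmaN = 0 as in the script); the points and l² are made up.

```python
# Toy GP mean prediction with the SE kernel, mirroring gp.py's steps.
import numpy as np
import scipy.spatial.distance as sp


def se_kernel(Xs, Ys, lsquared):
    if Xs is Ys:
        dist = sp.squareform(sp.pdist(Xs, "sqeuclidean"))
    else:
        dist = sp.cdist(Xs, Ys, "sqeuclidean")
    return np.exp(-dist / (2.0 * lsquared))


X = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])   # observed inputs
T = np.array([0, 1, 1])                              # observed class labels
X_s = np.array([[0.9, 0.1]])                         # unobserved input
lsquared = 1.0

K = se_kernel(X, X, lsquared)
K_s_T = se_kernel(X_s, X, lsquared)
K_s_T_times_K_inv = np.dot(K_s_T, np.linalg.inv(K))

k_class_vs_rest = np.where(T == 1, 1.0, -1.0)        # class 1 vs. rest targets
mean = np.dot(K_s_T_times_K_inv, k_class_vs_rest)
print(mean)  # positive -> the test point would be assigned to class 1
```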
#### File: mnist-challenge/code/logreg.py
```python
import sys
import numpy as np
import data
# softmax function computed for an array of
# input vectors (inputs are rows of matrix "X")
def softmax(X):
exped = np.nan_to_num(np.exp(X))
summed = np.sum(exped, axis=1)
return exped / summed[:, np.newaxis]
# classifying the data "X"/"T" using the weights "W"
# and returning the corresponding error score
def get_error_score(X, W, T):
linear = np.dot(X, W)
classified = np.argmax(linear, axis=1)
incorrect = np.sum(classified != T)
return incorrect * 100.0 / len(X)
# cross-entropy loss function, with L2-regularization
def get_loss(X, W, T, regularization):
sm = softmax(np.dot(X, W))
logs = np.nan_to_num(np.log(sm[np.arange(len(X)), T]))
regs = 0.5 * regularization * np.sum(W * W)
return -1.0 / len(X) * np.sum(logs) + regs
# gradient of the loss function with respect
# to the weights "W", with L2-regularization
def get_gradient(X, W, T, regularization):
delta = softmax(np.dot(X, W))
delta[np.arange(len(X)), T] -= 1
regs = regularization * W
return np.dot(X.T, delta) / len(X) + regs
# fitting logistic regression model to "X"/"T" data
# by means of mini-batch stochastic gradient descent.
# the rest of the argument names seem self-explanatory
def train_logreg(X, T, epochs, batch_size, learning_rate, regularization, dataset_split, verbose=False):
# splitting the data into training and validation sets
# according to the value of "dataset_split" argument
training_set_size = int(dataset_split * len(X))
X_training, T_training = X[:training_set_size].copy(), T[:training_set_size].copy()
X_validation, T_validation = X[training_set_size:].copy(), T[training_set_size:].copy()
inputs, outputs = X.shape[1], T.max()+1
W = np.zeros((inputs, outputs))
weights = []
training_losses, training_errors = [], []
validation_losses, validation_errors = [], []
# storing the initial values of
# weights, errors, and losses
weights.append(W.copy())
training_losses.append(get_loss(X_training, W, T_training, regularization))
training_errors.append(get_error_score(X_training, W, T_training))
validation_losses.append(get_loss(X_validation, W, T_validation, regularization))
validation_errors.append(get_error_score(X_validation, W, T_validation))
if verbose:
print
print "Training with {0}".format((batch_size, learning_rate, regularization))
print "Stage\t\tTr. Loss/Error\tVal. Loss/Error"
print "------------------------------------------------------"
print "Initial\t\t{0:.3f}\t{1:.2f}\t{2:.3f}\t{3:.2f}".format(
training_losses[-1], training_errors[-1],
validation_losses[-1], validation_errors[-1]
)
# for each training epoch:
for epoch in range(0, epochs):
# randomly shuffling the training set
p = np.random.permutation(len(X_training))
X_training, T_training = X_training[p], T_training[p]
# for each mini-batch (of "batch_size") computing
# the gradient and updating the weights "W" (subtracting
# gradient) after scaling the gradient by the "learning_rate"
for b in range(0, X_training.shape[0] / batch_size):
X_batch = X_training[b * batch_size:(b+1) * batch_size]
T_batch = T_training[b * batch_size:(b+1) * batch_size]
W -= learning_rate * get_gradient(X_batch, W, T_batch, regularization)
# storing the weights, errors, and
# losses after each training epoch
weights.append(W.copy())
training_losses.append(get_loss(X_training, W, T_training, regularization))
training_errors.append(get_error_score(X_training, W, T_training))
validation_losses.append(get_loss(X_validation, W, T_validation, regularization))
validation_errors.append(get_error_score(X_validation, W, T_validation))
if verbose:
print "Epoch #{0}\t{1:.3f}\t{2:.2f}\t{3:.3f}\t{4:.2f}".format(
epoch + 1,
training_losses[-1], training_errors[-1],
validation_losses[-1], validation_errors[-1]
)
# selecting the weights resulting from the epoch
# with the lowest error score on the validation set
best_epoch = np.argmin(validation_errors)
best_weights = weights[best_epoch]
if verbose:
print "------------------------------------------------------"
print "Best Epoch: {0}".format(best_epoch)
print "Training Loss: {0:.3f}, Training Error: {1:.2f}".format(
training_losses[best_epoch], training_errors[best_epoch])
print "Validation Loss: {0:.3f}, Validation Error: {1:.2f}".format(
validation_losses[best_epoch], validation_errors[best_epoch])
print
return best_weights, best_epoch, training_losses, training_errors, validation_losses, validation_errors
if __name__ == "__main__":
ntrain = 60000 # number of training samples used
ntest = 10000 # number of testing samples used
deskew = True # deskew input images or not (by default: yes)
normalize = True # normalize input vectors or not (by default: yes)
evaluate = False # evaluate on testing data or not (by default: no)
verbose = False # output details of each training epoch
epochs = 100 # number of training epochs
dataset_split = 0.8 # training / validation set split
batch_sizes = [50, 100, 200] # different mini-batch sizes tried
learning_rates = [0.02, 0.05, 0.1, 0.5, 1.0] # different learning rates tried
regularizations = [0.0, 0.0001, 0.0005] # different regularization parameters tried
# processing command-line arguments
while len(sys.argv) > 1:
option = sys.argv[1]; del sys.argv[1]
if option == "-ntrain":
ntrain = int(sys.argv[1]); del sys.argv[1]
elif option == "-ntest":
ntest = int(sys.argv[1]); del sys.argv[1]
elif option == "-deskew":
deskew = int(sys.argv[1]); del sys.argv[1]
elif option == "-normalize":
normalize = int(sys.argv[1]); del sys.argv[1]
elif option == "-evaluate":
evaluate = int(sys.argv[1]); del sys.argv[1]
elif option == "-verbose":
verbose = int(sys.argv[1]); del sys.argv[1]
elif option == "-epochs":
epochs = int(sys.argv[1]); del sys.argv[1]
elif option == "-dataset_split":
dataset_split = float(sys.argv[1]); del sys.argv[1]
elif option == "-batch_sizes":
batch_sizes = [int(b) for b in sys.argv[1].split(",")]; del sys.argv[1]
elif option == "-learning_rates":
learning_rates = [float(l) for l in sys.argv[1].split(",")]; del sys.argv[1]
elif option == "-regularizations":
regularizations = [float(r) for r in sys.argv[1].split(",")]; del sys.argv[1]
else:
print sys.argv[0], ": invalid option", option
sys.exit(1)
np.seterr(over="ignore", divide="ignore")
print "Logistic Regression"
print
print "Reading data..."
# reading the data, applying configured pre-processing, and adding 1.0 to each vector as a bias input
X_train, T_train = data.get_training_data(ntrain, normalize=normalize, deskew=deskew, add_ones=True)
X_test, T_test = data.get_testing_data(ntest, normalize=normalize, deskew=deskew, add_ones=True)
print "{0} training data read".format(len(X_train))
print "{0} testing data read".format(len(X_test))
print
weights, errors, params = [], [], []
print "{0:25}\tV. Loss\t\tV. Error".format("(Batch, Learn, Reg)")
print "-----------------------------------------------------------"
for batch_size in batch_sizes:
for learning_rate in learning_rates:
for regularization in regularizations:
# fixing the seed of randomization for the sake of
# reproducibility of the randomized training process
np.random.seed(1)
# training a model for each combination of hyperparameters
W_best, E_best, T_loss, T_err, V_loss, V_err = train_logreg(
X_train, T_train,
epochs, batch_size,
learning_rate, regularization,
dataset_split, verbose=verbose
)
# storing the weights and the corresponding validation
# error resulting from the above training, together with
# the values of the hyperparameters tried
weights.append(W_best)
errors.append(V_err[E_best])
params.append((
batch_size,
learning_rate,
regularization
))
print "{0:25}\t{1:.3f}\t\t{2:.3f}".format(
params[-1], V_loss[E_best], V_err[E_best]
)
# selecting the set of hyperparameters,
# which caused the lowest validation error,
# with the respective resulting weights
P_selected = params[np.argmin(errors)]
W_selected = weights[np.argmin(errors)]
print "-----------------------------------------------------------"
print "Best Params: {0}, validation error: {1:.3f}".format(P_selected, np.min(errors))
print
if evaluate:
# evaluating the model performance on the testing set
print "Testing Set Error: {0:.3f}".format(
get_error_score(X_test, W_selected, T_test)
)
print
``` |
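The softmax above relies on np.nan_to_num and np.seterr to survive overflow in np.exp. A common alternative (not what the script does) is to subtract the row-wise maximum first, which yields the same probabilities without producing inf/nan intermediates; a small sketch for comparison.

```python
# Numerically stable softmax via max subtraction; softmax is shift-invariant,
# so the probabilities match the direct formula wherever both are finite.
import numpy as np


def softmax_stable(X):
    shifted = X - np.max(X, axis=1, keepdims=True)
    exped = np.exp(shifted)
    return exped / np.sum(exped, axis=1, keepdims=True)


X = np.array([[1000.0, 1001.0, 1002.0],   # np.exp would overflow on these
              [-1.0, 0.0, 1.0]])
print(softmax_stable(X))  # each row sums to 1, no inf/nan along the way
```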
{
"source": "aakhundov/sequence-labeling",
"score": 3
} |
#### File: sequence-labeling/convert/convert_kaggle.py
```python
import os
import csv
import argparse
import common
def convert():
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--source-folder", type=str, default="../data/sources/kaggle")
parser.add_argument("-t", "--target-folder", type=str, default="../data/ready/nerc/kaggle")
parser.add_argument("-i", "--iobes", type=bool, default=True)
args = parser.parse_args()
print("Source folder: {}".format(args.source_folder))
print("Target folder: {}".format(args.target_folder))
print("Convert to IOBES: {}".format(args.iobes))
print()
sentence_pairs = []
file_path = os.path.join(args.source_folder, "ner_dataset.csv")
with open(file_path, encoding="iso-8859-1") as f:
file_lines = [l[:-1] for l in f.readlines()]
print("processing data from {}".format(file_path))
running_pairs = []
for tokens in csv.reader(file_lines[1:]):
if tokens[0].startswith("Sentence:") and len(running_pairs) > 0:
sentence_pairs.append(
common.convert_to_iobes_tags(running_pairs)
if args.iobes else running_pairs
)
running_pairs = []
running_pairs.append(tokens[1::2])
if len(running_pairs) > 0:
sentence_pairs.append(running_pairs)
if not os.path.exists(args.target_folder):
os.makedirs(args.target_folder)
label_count_pairs = common.get_label_count_pairs(sentence_pairs)
common.report_statistics(sentence_pairs, label_count_pairs)
for target, dataset in zip(
["train", "val", "test"],
common.shuffle_and_split(
sentence_pairs, split_points=(0.8, 0.9)
)
):
sentences_written, tokens_written = 0, 0
out_path = os.path.join(args.target_folder, target + ".txt")
with open(out_path, "w+", encoding="utf-8") as out:
for sentence in dataset:
out.write("{}\t{}\n".format(
" ".join([p[0] for p in sentence]),
" ".join([p[1] for p in sentence]),
))
tokens_written += len(sentence)
sentences_written = len(dataset)
print("{:,} sentences ({:,} tokens) written to {}".format(
sentences_written, tokens_written, out_path
))
label_path = os.path.join(args.target_folder, "labels.txt")
with open(label_path, "w+", encoding="utf-8") as out:
for lb in label_count_pairs:
out.write("{}\n".format(lb[0]))
print("{} labels written to {}".format(
len(label_count_pairs), label_path
))
if __name__ == "__main__":
convert()
```
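common.convert_to_iobes_tags and the other common helpers live in the repository's own common module and are not shown here. The sketch below is only an assumption of what a typical IOB2-to-IOBES re-tagging does (single-token entities become S-, entity-final tokens become E-); it is illustrative, not the repository's implementation, and it skips the usual check that a continuation tag carries the same entity type.

```python
# Hypothetical IOB2 -> IOBES conversion; pairs is a list of (token, tag) tuples.
def to_iobes(pairs):
    result = []
    for i, (token, tag) in enumerate(pairs):
        next_tag = pairs[i + 1][1] if i + 1 < len(pairs) else "O"
        if tag.startswith("B-") and not next_tag.startswith("I-"):
            tag = "S-" + tag[2:]      # single-token entity
        elif tag.startswith("I-") and not next_tag.startswith("I-"):
            tag = "E-" + tag[2:]      # last token of a multi-token entity
        result.append((token, tag))
    return result


print(to_iobes([("New", "B-geo"), ("York", "I-geo"), ("in", "O"), ("USA", "B-geo")]))
# [('New', 'B-geo'), ('York', 'E-geo'), ('in', 'O'), ('USA', 'S-geo')]
```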
#### File: aakhundov/sequence-labeling/evaluate.py
```python
import os
import argparse
import numpy as np
import tensorflow as tf
from model.input import input_fn
from model.model import model_fn
from util.embeddings import load_embeddings
from util.metrics import compute_metrics, get_class_f1_summary
from util.metrics import get_performance_summary, visualize_predictions
from util.misc import fetch_in_batches, read_params_from_log
def evaluate():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
"-r", metavar="results-folder", type=str, required=True,
help="the path to the folder with the training results")
parser.add_argument(
"-f", metavar="data-file", type=str, default="val.txt",
help="the file (within the original data folder) "
"with the data to evaluate the model on")
parser.add_argument(
"-b", metavar="batch-size", type=int, default=2000,
help="the batch size used for the evaluation")
parser.add_argument(
"-v", metavar="num-to-show", type=int, default=0,
help="the number of predicted sentence samples to visualize")
args = parser.parse_args()
assert os.path.exists(args.results_folder)
print("Results folder: {}".format(args.results_folder))
print("Data file: {}".format(args.data_file))
print("Batch size: {}".format(args.batch_size))
print("Samples to show: {}".format(args.num_to_show))
print()
params = read_params_from_log(os.path.join(args.results_folder, "log.txt"))
data_folder = params["data folder"]
embeddings_name, embeddings_id = params["embeddings"].split(", ")
byte_lstm_units = int(params["byte lstm units"]) if "byte lstm units" in params else 64
word_lstm_units = int(params["word lstm units"]) if "word lstm units" in params else 128
byte_projection_dim = int(params["byte projection dim"]) if "byte projection dim" in params else 50
byte_lstm_layers = int(params["byte lstm layers"]) if "byte lstm layers" in params else 1
word_lstm_layers = int(params["word lstm layers"]) if "word lstm layers" in params else 1
use_byte_embeddings = int(params["use byte embeddings"]) if "use byte embeddings" in params else 1
use_word_embeddings = int(params["use word embeddings"]) if "use word embeddings" in params else 1
use_crf_layer = int(params["use crf layer"]) if "use crf layer" in params else 1
label_file = os.path.join(data_folder, "labels.txt")
data_file = os.path.join(data_folder, args.data_file)
data_count = sum(1 for _ in open(data_file, encoding="utf-8"))
print("Loading embeddings data...")
emb_words, emb_vectors, uncased_embeddings = load_embeddings(embeddings_name, embeddings_id)
label_names = [line[:-1] for line in open(label_file, encoding="utf-8").readlines()]
print("Setting up input pipeline...")
with tf.device("/cpu:0"):
next_input_values = input_fn(
tf.data.TextLineDataset(data_file),
batch_size=args.batch_size, lower_case_words=uncased_embeddings,
shuffle=False, cache=False, repeat=False
).make_one_shot_iterator().get_next()
print("Building the model...")
emb_words_placeholder = tf.placeholder(tf.string, [len(emb_words)])
emb_vectors_placeholder = tf.placeholder(tf.float32, emb_vectors.shape)
_, loss, _, predictions, labels, sentence_length, sentences, _, _ = model_fn(
input_values=next_input_values, label_vocab=label_names,
embedding_words=emb_words_placeholder, embedding_vectors=emb_vectors_placeholder,
byte_lstm_units=byte_lstm_units, word_lstm_units=word_lstm_units,
byte_lstm_layers=byte_lstm_layers, word_lstm_layers=word_lstm_layers,
byte_projection_dim=byte_projection_dim,
use_byte_embeddings=bool(use_byte_embeddings),
use_word_embeddings=bool(use_word_embeddings),
use_crf_layer=bool(use_crf_layer)
)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
print("Initializing variables...")
sess.run(tf.tables_initializer(), feed_dict={emb_words_placeholder: emb_words})
sess.run(tf.global_variables_initializer(), feed_dict={emb_vectors_placeholder: emb_vectors})
del emb_words, emb_vectors
tf.train.Saver([
v for v in tf.global_variables()
if "known_word_embeddings" not in v.name
]).restore(sess, os.path.join(
args.results_folder, "model", "nlp-model"
))
print("Evaluating...")
print()
e_loss, e_predictions, e_labels, e_sentence_len, e_sentences = fetch_in_batches(
sess, [loss, predictions, labels, sentence_length, sentences], total=data_count,
progress_callback=lambda fetched: print("{} / {} done".format(fetched, data_count))
)
e_metrics = compute_metrics(e_labels, e_predictions, e_sentence_len, label_names)
e_message, e_key_metric = get_performance_summary(e_metrics, len(label_names))
e_class_summary = get_class_f1_summary(e_metrics, label_names)
np.set_printoptions(threshold=np.nan, linewidth=1000)
print()
print("Loss: {:.3f}".format(e_loss))
print("Key metric: {:.2f}".format(e_key_metric))
print()
print("Performance summary:\n")
print(e_message)
print()
print("Confusion matrix:\n")
print(e_metrics["confusion"])
print()
if e_class_summary != "":
print("Per-class summary:\n")
print(e_class_summary)
if args.num_to_show > 0:
print("Predicted sentence samples:\n")
print(visualize_predictions(
e_sentences, e_labels, e_predictions,
e_sentence_len, label_names, args.num_to_show
))
if __name__ == "__main__":
evaluate()
```
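read_params_from_log comes from the repository's util.misc module, which is not shown. Judging by how train.py writes its log (a block of "key: value" lines followed by a blank line), a parser along these lines would do the job; this is an assumed sketch, not the actual helper, and the path in the usage line is a placeholder.

```python
# Hypothetical stand-in for util.misc.read_params_from_log (assumed behaviour).
def read_params_from_log(log_path):
    params = {}
    with open(log_path, encoding="utf-8") as f:
        for line in f:
            line = line.rstrip("\n")
            if not line:
                break                     # header block ends at the first blank line
            if ": " in line:
                key, value = line.split(": ", 1)
                params[key] = value
    return params


# placeholder path -- a real run would point at results/<run>/log.txt
params = read_params_from_log("results/some_run/log.txt")
```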
#### File: sequence-labeling/model/input.py
```python
import tensorflow as tf
def input_fn(input_lines, batch_size=None, lower_case_words=False,
shuffle=False, cache=True, repeat=True, num_threads=4):
"""Convert 1D string tf.data.Dataset input_lines into an input pipeline."""
def split_string(s, delimiter, skip_empty=True):
"""Split a single string tensor s into multiple string tokens by delimiter."""
return tf.string_split([s], delimiter, skip_empty=skip_empty).values
def decode_word(word, max_len):
"""Convert string tensor word into a list of encoding bytes zero-padded up to max_len."""
w_bytes = tf.concat(([1], tf.decode_raw(word, tf.uint8), [2]), axis=0)
padded = tf.pad(w_bytes, [[0, max_len - tf.shape(w_bytes)[0]]])
return padded
def get_word_lengths(words, padding=2):
"""Compute a length of each word in a 1D string tensor."""
return tf.map_fn(
lambda x: tf.size(tf.string_split([x], "")) + padding,
words, dtype=tf.int32
)
def get_word_bytes(words, max_len):
"""Retrieve UTF-8 bytes of each word in a 1D string tensor."""
return tf.map_fn(
lambda x: decode_word(x, max_len),
words, dtype=tf.uint8
)
def words_to_lower(words):
"""Convert all strings in a 1D tensor to lower-case (same shape tensor is returned)."""
def to_lower(byte_string):
return str(byte_string, encoding="utf-8").lower()
return tf.reshape(tf.map_fn(
lambda x: tf.py_func(
to_lower, [x], tf.string, stateful=False
), words
), [-1])
# splitting input lines into sentence and label parts (by "\t")
# extra "\t" is added to create labels placeholder if line contains no labels
data = input_lines.map(lambda l: split_string(l + "\t", "\t", False), num_threads)
# splitting sentence and label parts into respective tokens (by " ")
data = data.map(lambda sp: (sp[0], split_string(sp[0], " "), split_string(sp[1], " ")), num_threads)
# adding sentence lengths; result: (full sentences, sentence tokens, sentence length, label tokens)
data = data.map(lambda sl, st, lt: (sl, st, tf.shape(st)[0], lt), num_threads)
if shuffle:
if cache:
# if caching is required, it is
# done before shuffling to maintain
# different batches in every epoch
data = data.cache()
# shuffling the entire dataset
data = data.shuffle(1000000000)
# generating padded batch_size-batches of everything so far
# or a single batch of the entire dataset if batch_size=None
data = data.padded_batch(
batch_size if batch_size is not None else 1000000000,
([], [-1], [], [-1]), ("", "", 0, "")
)
# adding a tuple of unique words in a batch and their respective indices
data = data.map(lambda *d: (d, tf.unique(tf.reshape(d[1], [-1]))), num_threads)
# reshaping unique words' index (resulting from tf.unique) to 2D sentence tokens' shape
data = data.map(lambda d, u: (d, (u[0], tf.reshape(u[1], tf.shape(d[1])))), num_threads)
# adding length of each unique word in a batch
data = data.map(lambda d, u: (d, u, get_word_lengths(u[0])), num_threads)
# (temporarily) adding the maximum length among unique words
data = data.map(lambda d, u, uwl: (d, u, uwl, tf.reduce_max(uwl)), num_threads)
# replacing the maximum length by the 2D tf.uint8 tensor of encoding bytes of unique words
data = data.map(lambda d, u, uwl, mwl: (d, (u, uwl, get_word_bytes(u[0], mwl))), num_threads)
if lower_case_words:
# if required, all unique words are converted to lower case (using Python function)
data = data.map(lambda d, w: (d, ((words_to_lower(w[0][0]), w[0][1]), w[1], w[2])))
if not shuffle and cache:
# if shuffling is not required, caching the
# final dataset at once (before repeating)
data = data.cache()
if repeat:
# if repeating is required,
# doing so infinitely
data = data.repeat()
return data.prefetch(1)
```
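A minimal consumption sketch for input_fn, assuming TensorFlow 1.x (as the rest of the repository uses) and the tab-separated "tokens<TAB>labels" line format the pipeline splits on; the two sentences are invented.

```python
# Feeding input_fn from an in-memory dataset and pulling one padded batch (TF 1.x).
import tensorflow as tf
from model.input import input_fn

lines = tf.data.Dataset.from_tensor_slices([
    "John lives in Berlin\tS-PER O O S-LOC",
    "nothing to see here\tO O O O",
])
data = input_fn(lines, batch_size=2, shuffle=False, cache=False, repeat=False)
next_batch = data.make_one_shot_iterator().get_next()

with tf.Session() as sess:
    (sentences, tokens, lengths, labels), unique_word_info = sess.run(next_batch)
    print(lengths)        # [4 4]  -> tokens per sentence
    print(tokens.shape)   # (2, 4) -> padded batch of sentence tokens
```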
#### File: aakhundov/sequence-labeling/train.py
```python
import os
import time
import shutil
import argparse
import numpy as np
import tensorflow as tf
from model.input import input_fn
from model.model import model_fn
from util.embeddings import load_embeddings
from util.metrics import compute_metrics, get_class_f1_summary
from util.metrics import get_performance_summary, visualize_predictions
from util.misc import fetch_in_batches
def echo(log, *messages):
print(*messages)
joined = " ".join([str(m) for m in messages])
log.write(joined + "\n")
log.flush()
def create_training_artifacts(data_folder):
if not os.path.exists("results"):
os.mkdir("results")
results_folder = "results/" + data_folder.replace("/", "_") + time.strftime("%Y%m%d_%H%M%S")
results_folder = results_folder.replace("data_ready_", "").replace("data_", "")
model_folder = os.path.join(results_folder, "model/")
source_folder = os.path.join(results_folder, "source/")
os.mkdir(results_folder)
os.mkdir(model_folder)
os.mkdir(source_folder)
for folder in ["./", "model/", "util/"]:
destination = source_folder
if folder != "./":
destination += folder
os.makedirs(destination)
for source_file in [f for f in os.listdir(folder) if f.endswith(".py")]:
shutil.copy(folder + source_file, destination + source_file)
log_file = open(os.path.join(results_folder, "log.txt"), "w+", encoding="utf-8")
model_path = os.path.join(model_folder, "nlp-model")
return model_path, log_file
def train():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
"-d", metavar="data-folder", type=str, required=True,
help="the path to the folder with prepared "
"train/val/test data and the labels")
parser.add_argument(
"-em", metavar="embeddings-name", type=str, default="glove",
help="the word embeddings to use ('glove', 'polyglot', or 'senna')")
parser.add_argument(
"-emid", metavar="embeddings-id", type=str, default="6B.100d",
help="the version of the word embeddings (e.g. 'en' for 'polyglot')")
parser.add_argument(
"-ep", metavar="epochs", type=int, default=100,
help="the number of epochs to train")
parser.add_argument(
"-b", metavar="batch-size", type=int, default=8,
help="the batch size (number of sentences per batch) for training")
parser.add_argument(
"-eb", metavar="eval-batch-size", type=int, default=2000,
help="the batch size (number of sentences per batch) for validation")
parser.add_argument(
"-lr", metavar="initial-learning-rate", type=float, default=0.001,
help="the initial value of the learning rate (during the first epoch)")
parser.add_argument(
"-lrd", metavar="lr-decay-rate", type=float, default=0.05,
help="the learning rate decay factor: LR_t = "
"LR_init / (1 + LR_decay_factor * (t - 1))")
parser.add_argument(
"-blu", metavar="byte-lstm-units", type=int, default=64,
help="the hidden state size (cell size) for the Byte Bi-LSTM")
parser.add_argument(
"-wlu", metavar="word-lstm-units", type=int, default=128,
help="the hidden state size (cell size) for the Word Bi-LSTM")
parser.add_argument(
"-bpd", metavar="byte-projection-dim", type=int, default=50,
help="the dimensionality of the byte projections")
parser.add_argument(
"-bll", metavar="byte-lstm-layers", type=int, default=1,
help="the number of layers in the Byte Bi-LSTM")
parser.add_argument(
"-wll", metavar="word-lstm-layers", type=int, default=1,
help="the number of layers in the Word Bi-LSTM")
parser.add_argument(
"-be", metavar="use-byte-embeddings", type=int, default=1,
help="use byte embeddings (1) or not (0)")
parser.add_argument(
"-we", metavar="use-word-embeddings", type=int, default=1,
help="use word embeddings (1) or not (0)")
parser.add_argument(
"-crf", metavar="use-crf-layer", type=int, default=1,
help="use CRF layer (1) or not (0)")
args = parser.parse_args()
assert os.path.exists(args.data_folder)
if not args.data_folder.endswith("/"):
args.data_folder += "/"
print("Loading embeddings data...")
emb_words, emb_vectors, uncased_embeddings = load_embeddings(args.embeddings_name, args.embeddings_id)
label_names = [line[:-1] for line in open(args.data_folder + "labels.txt", encoding="utf-8").readlines()]
print("Setting up input pipeline...")
with tf.device("/cpu:0"):
train_data = input_fn(
tf.data.TextLineDataset(args.data_folder + "train.txt"),
batch_size=args.batch_size, lower_case_words=uncased_embeddings,
shuffle=True, cache=True, repeat=True
)
train_eval_data = input_fn(
tf.data.TextLineDataset(args.data_folder + "train.txt"),
batch_size=args.eval_batch_size, lower_case_words=uncased_embeddings,
shuffle=False, cache=True, repeat=True
)
val_data = input_fn(
tf.data.TextLineDataset(args.data_folder + "val.txt"),
batch_size=args.eval_batch_size, lower_case_words=uncased_embeddings,
shuffle=False, cache=True, repeat=True
)
train_data_count = sum(1 for _ in open(args.data_folder + "train.txt", encoding="utf-8"))
val_data_count = sum(1 for _ in open(args.data_folder + "val.txt", encoding="utf-8"))
data_handle = tf.placeholder(tf.string, shape=())
next_input_values = tf.data.Iterator.from_string_handle(
data_handle, train_data.output_types, train_data.output_shapes
).get_next()
print("Building the model...")
emb_words_placeholder = tf.placeholder(tf.string, [len(emb_words)])
emb_vectors_placeholder = tf.placeholder(tf.float32, emb_vectors.shape)
train_op, loss, accuracy, predictions, labels, \
sentence_length, sentences, dropout_rate, completed_epochs = model_fn(
input_values=next_input_values, label_vocab=label_names,
embedding_words=emb_words_placeholder, embedding_vectors=emb_vectors_placeholder,
byte_lstm_units=args.byte_lstm_units, word_lstm_units=args.word_lstm_units,
byte_lstm_layers=args.byte_lstm_layers, word_lstm_layers=args.word_lstm_layers,
byte_projection_dim=args.byte_projection_dim, training=True,
initial_learning_rate=args.initial_learning_rate, lr_decay_rate=args.lr_decay_rate,
use_byte_embeddings=bool(args.use_byte_embeddings),
use_word_embeddings=bool(args.use_word_embeddings),
use_crf_layer=bool(args.use_crf_layer)
)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
print("Initializing variables...")
sess.run(tf.tables_initializer(), feed_dict={emb_words_placeholder: emb_words})
sess.run(tf.global_variables_initializer(), feed_dict={emb_vectors_placeholder: emb_vectors})
del emb_words, emb_vectors
train_handle = sess.run(train_data.make_one_shot_iterator().string_handle())
train_eval_handle = sess.run(train_eval_data.make_one_shot_iterator().string_handle())
val_handle = sess.run(val_data.make_one_shot_iterator().string_handle())
best_metric, best_epoch = -1, 0
saver = tf.train.Saver([
v for v in tf.global_variables()
if "known_word_embeddings" not in v.name
])
print("Creating training artifacts...")
model_path, log = create_training_artifacts(args.data_folder)
print("Training...")
print()
echo(log, "data folder: {}".format(args.data_folder))
echo(log, "embeddings: {}, {}".format(args.embeddings_name, args.embeddings_id))
echo(log, "epochs: {}".format(args.epochs))
echo(log, "batch size: {}".format(args.batch_size))
echo(log, "initial learning rate: {}".format(args.initial_learning_rate))
echo(log, "l.r. decay rate: {}".format(args.lr_decay_rate))
echo(log, "byte lstm units: {}".format(args.byte_lstm_units))
echo(log, "word lstm units: {}".format(args.word_lstm_units))
echo(log, "byte projection dim: {}".format(args.byte_projection_dim))
echo(log, "byte lstm layers: {}".format(args.byte_lstm_layers))
echo(log, "word lstm layers: {}".format(args.word_lstm_layers))
echo(log, "use byte embeddings: {}".format(args.use_byte_embeddings))
echo(log, "use word embeddings: {}".format(args.use_word_embeddings))
echo(log, "use crf layer: {}".format(args.use_crf_layer))
echo(log)
for epoch in range(args.epochs):
for step in range(-(-train_data_count // args.batch_size)):
try:
sess.run(train_op, feed_dict={
data_handle: train_handle,
completed_epochs: epoch,
dropout_rate: 0.5
})
except Exception as ex:
print(ex)
for set_name, set_handle, set_size in [
["train", train_eval_handle, train_data_count],
["val", val_handle, val_data_count]
]:
eval_loss, eval_labels, eval_predictions, eval_sentence_len = fetch_in_batches(
sess, [loss, labels, predictions, sentence_length], set_size,
feed_dict={data_handle: set_handle, dropout_rate: 0.0}
)
eval_metrics = compute_metrics(eval_labels, eval_predictions, eval_sentence_len, label_names)
eval_message, eval_key_metric = get_performance_summary(eval_metrics, len(label_names))
echo(log, "{:<22} {}".format(
"{0}.{1:<8} L {2:.3f}".format(
epoch + 1, set_name, eval_loss
), eval_message
))
echo(log)
if eval_key_metric > best_metric:
best_epoch = epoch + 1
best_metric = eval_key_metric
saver.save(sess, model_path)
saver.restore(sess, model_path)
best_labels, best_predictions, best_sentence_len, best_sentences = fetch_in_batches(
sess, [labels, predictions, sentence_length, sentences], val_data_count,
feed_dict={data_handle: val_handle, dropout_rate: 0.0}
)
best_metrics = compute_metrics(best_labels, best_predictions, best_sentence_len, label_names)
best_message, best_key_metric = get_performance_summary(best_metrics, len(label_names))
best_class_summary = get_class_f1_summary(best_metrics, label_names)
np.set_printoptions(threshold=np.nan, linewidth=1000)
echo(log)
echo(log, "Best epoch: {}".format(best_epoch))
echo(log, "Best metric: {:.2f}".format(best_key_metric))
echo(log)
echo(log, "Confusion matrix:\n")
echo(log, best_metrics["confusion"])
echo(log)
if best_class_summary != "":
echo(log, "Per-class summary:\n")
echo(log, best_class_summary)
echo(log, "Predicted sentence samples:\n")
echo(log, visualize_predictions(
best_sentences, best_labels, best_predictions,
best_sentence_len, label_names, 100
))
if __name__ == "__main__":
train()
``` |
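The -lrd help text gives the schedule LR_t = LR_init / (1 + LR_decay_factor * (t - 1)). Evaluated with the defaults above (0.001 and 0.05) it decays as follows; plain arithmetic, no TensorFlow needed.

```python
# The learning-rate schedule from the "-lrd" help string, with default values.
LR_INIT, LR_DECAY = 0.001, 0.05

def learning_rate(t):                     # t is the 1-based epoch number
    return LR_INIT / (1 + LR_DECAY * (t - 1))

for t in (1, 2, 10, 100):
    print(t, round(learning_rate(t), 6))
# 1 0.001
# 2 0.000952
# 10 0.00069
# 100 0.000168
```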
{
"source": "aakhundov/tf-attend-infer-repeat",
"score": 2
} |
#### File: tf-attend-infer-repeat/air/vae.py
```python
import tensorflow as tf
import tensorflow.contrib.layers as layers
def vae(inputs, input_dim, rec_hidden_units, latent_dim,
gen_hidden_units, likelihood_std=0.0, activation=tf.nn.softplus):
input_size = tf.shape(inputs)[0]
next_layer = inputs
for i in range(len(rec_hidden_units)):
with tf.variable_scope("recognition_" + str(i+1)) as scope:
next_layer = layers.fully_connected(
next_layer, rec_hidden_units[i], activation_fn=activation, scope=scope
)
with tf.variable_scope("rec_mean") as scope:
recognition_mean = layers.fully_connected(next_layer, latent_dim, activation_fn=None, scope=scope)
with tf.variable_scope("rec_log_variance") as scope:
recognition_log_variance = layers.fully_connected(next_layer, latent_dim, activation_fn=None, scope=scope)
with tf.variable_scope("rec_sample"):
standard_normal_sample = tf.random_normal([input_size, latent_dim])
recognition_sample = recognition_mean + standard_normal_sample * tf.sqrt(tf.exp(recognition_log_variance))
next_layer = recognition_sample
for i in range(len(gen_hidden_units)):
with tf.variable_scope("generative_" + str(i+1)) as scope:
next_layer = layers.fully_connected(
next_layer, gen_hidden_units[i], activation_fn=activation, scope=scope
)
with tf.variable_scope("gen_mean") as scope:
generative_mean = layers.fully_connected(next_layer, input_dim, activation_fn=None, scope=scope)
with tf.variable_scope("gen_sample"):
standard_normal_sample2 = tf.random_normal([input_size, input_dim])
generative_sample = generative_mean + standard_normal_sample2 * likelihood_std
reconstruction = tf.nn.sigmoid(
generative_sample
)
return reconstruction, recognition_mean, recognition_log_variance, recognition_mean
```
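The rec_sample step above is the reparameterization trick: z = mu + eps * sigma with eps drawn from a standard normal, which keeps sampling differentiable with respect to the encoder outputs. The same computation in plain numpy, with arbitrary shapes, just to make the algebra explicit.

```python
# Reparameterization trick as in the recognition sample above, in numpy.
import numpy as np

batch, latent_dim = 4, 3
mean = np.zeros((batch, latent_dim))
log_variance = np.full((batch, latent_dim), -2.0)    # variance = exp(-2)

eps = np.random.standard_normal((batch, latent_dim))
sample = mean + eps * np.sqrt(np.exp(log_variance))  # z = mu + eps * sigma
print(sample.shape)  # (4, 3)
```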
#### File: tf-attend-infer-repeat/demo/demo_window.py
```python
import tkinter as tk
import tkinter.ttk as ttk
from .pixel_canvas import PixelCanvas
class DemoWindow(ttk.Frame):
def __init__(self, master, model_wrapper,
canvas_size=50, window_size=28,
refresh_period=50, test_image=None, **kw):
ttk.Frame.__init__(self, master=master, **kw)
self.master = master
self.model_wrapper = model_wrapper
self.canvas_size = canvas_size
self.window_size = window_size
self.refresh_period = refresh_period
self._create_interface()
if test_image is not None:
self.cnv_orig.set_image(test_image)
self.columnconfigure(0, weight=410, minsize=215)
self.columnconfigure(1, weight=410, minsize=210)
self.columnconfigure(2, weight=140, minsize=65)
self.rowconfigure(0, weight=0, minsize=50)
self.rowconfigure(1, weight=1, minsize=220)
self.rowconfigure(2, weight=0, minsize=0)
self.master.after(50, lambda: master.focus_force())
self.master.after(100, self._reconstruct_image)
def _create_interface(self):
self.frm_controls = ttk.Frame(self, padding=(10, 15, 10, 10))
self.frm_controls.grid(row=0, column=0, columnspan=3, sticky=(tk.N, tk.S, tk.W, tk.E))
self.lbl_draw_mode = ttk.Label(self.frm_controls, text="Drawing Mode:")
self.lbl_line_width = ttk.Label(self.frm_controls, text="Line Width:")
self.lbl_refresh_rate = ttk.Label(self.frm_controls, text="Refresh (ms):")
self.var_draw_mode = tk.IntVar(value=1)
self.rad_draw = ttk.Radiobutton(self.frm_controls, text="Draw", variable=self.var_draw_mode, value=1)
self.rad_erase = ttk.Radiobutton(self.frm_controls, text="Erase", variable=self.var_draw_mode, value=0)
self.btn_clear = ttk.Button(
self.frm_controls, text="Clear Image",
command=lambda: self.cnv_orig.clear_image()
)
self.var_width = tk.StringVar(self.frm_controls)
self.spn_width = tk.Spinbox(
self.frm_controls, values=(1, 2, 3, 4, 5), width=10,
state="readonly", textvariable=self.var_width
)
self.var_rate = tk.StringVar(self.frm_controls)
self.spn_rate = tk.Spinbox(
self.frm_controls, values=(10, 20, 50, 100, 200, 500, 1000), width=10,
state="readonly", textvariable=self.var_rate
)
self.var_bbox = tk.IntVar(value=1)
self.cbx_bbox = ttk.Checkbutton(self.frm_controls, text="Bounding Boxes", variable=self.var_bbox)
self.lbl_draw_mode.grid(row=0, column=0, columnspan=2, sticky=(tk.N, tk.W))
self.lbl_line_width.grid(row=0, column=3, sticky=(tk.N, tk.W))
self.lbl_refresh_rate.grid(row=0, column=4, sticky=(tk.N, tk.W))
self.rad_draw.grid(row=1, column=0, sticky=(tk.N, tk.S, tk.W, tk.E))
self.rad_erase.grid(row=1, column=1, sticky=(tk.N, tk.S, tk.W, tk.E), padx=(0, 20))
self.btn_clear.grid(row=1, column=2, sticky=(tk.N, tk.S, tk.W, tk.E), padx=(0, 20))
self.spn_width.grid(row=1, column=3, sticky=(tk.N, tk.S, tk.W, tk.E), padx=(0, 20))
self.spn_rate.grid(row=1, column=4, sticky=(tk.N, tk.S, tk.W, tk.E), padx=(0, 20))
self.cbx_bbox.grid(row=1, column=5, sticky=(tk.N, tk.S, tk.W, tk.E))
self.var_draw_mode.trace("w", lambda *_: self._set_draw_mode(self.var_draw_mode.get() == 1))
self.var_width.trace("w", lambda *_: self.cnv_orig.set_line_width(int(self.var_width.get())))
self.var_rate.trace("w", lambda *_: self._set_refresh_period(int(self.var_rate.get())))
self.var_bbox.trace("w", lambda *_: self._set_bbox_visibility(self.var_bbox.get() == 1))
self.frm_canvas_orig = ttk.Frame(self, padding=(10, 10, 5, 10))
self.frm_canvas_orig.grid(row=1, column=0, sticky=(tk.N, tk.S, tk.W, tk.E))
self.frm_canvas_orig.columnconfigure(0, weight=1, minsize=200)
self.frm_canvas_orig.rowconfigure(0, weight=0, minsize=20)
self.frm_canvas_orig.rowconfigure(1, weight=1, minsize=200)
self.lbl_orig = ttk.Label(self.frm_canvas_orig, text="Original Image (draw here):")
self.cnv_orig = PixelCanvas(
self.frm_canvas_orig, self.canvas_size, self.canvas_size, drawable=True,
highlightthickness=0, borderwidth=0, width=400, height=400
)
self.lbl_orig.grid(row=0, column=0, sticky=(tk.N, tk.S, tk.W, tk.E))
self.cnv_orig.grid(row=1, column=0, sticky=(tk.N, tk.S, tk.W, tk.E))
self.frm_canvas_rec = ttk.Frame(self, padding=(5, 10, 5, 10))
self.frm_canvas_rec.grid(row=1, column=1, sticky=(tk.N, tk.S, tk.W, tk.E))
self.frm_canvas_rec.columnconfigure(0, weight=1, minsize=200)
self.frm_canvas_rec.rowconfigure(0, weight=0, minsize=20)
self.frm_canvas_rec.rowconfigure(1, weight=1, minsize=200)
self.lbl_rec = ttk.Label(self.frm_canvas_rec, text="Reconstructed Image:")
self.cnv_rec = PixelCanvas(
self.frm_canvas_rec, self.canvas_size, self.canvas_size, drawable=False,
highlightthickness=0, borderwidth=0, width=400, height=400
)
self.lbl_rec.grid(row=0, column=0, sticky=(tk.N, tk.S, tk.W, tk.E))
self.cnv_rec.grid(row=1, column=0, sticky=(tk.N, tk.S, tk.W, tk.E))
self.frm_windows = ttk.Frame(self, padding=(0, 0, 0, 0))
self.frm_windows.grid(row=1, column=2, sticky=(tk.N, tk.S, tk.W, tk.E))
self.frm_windows.columnconfigure(0, weight=1)
self.frm_canvas_win, self.lbl_win, self.cnv_win = [], [], []
for i in range(3):
self.frm_windows.rowconfigure(i, weight=1)
frm_canvas_win = ttk.Frame(
self.frm_windows,
padding=(5, 10 if i == 0 else 0, 10, 10 if i == 2 else 0)
)
frm_canvas_win.grid(row=i, column=0, sticky=(tk.N, tk.S, tk.W, tk.E))
frm_canvas_win.columnconfigure(0, weight=1, minsize=50)
frm_canvas_win.rowconfigure(0, weight=0, minsize=20)
frm_canvas_win.rowconfigure(1, weight=1, minsize=50)
lbl_win = ttk.Label(
frm_canvas_win, text="VAE Rec. #{0}:".format(i+1)
)
cnv_win = PixelCanvas(
frm_canvas_win, self.window_size, self.window_size, drawable=False,
highlightthickness=0, borderwidth=0, width=120, height=120
)
lbl_win.grid(row=0, column=0, sticky=(tk.S, tk.W))
cnv_win.grid(row=1, column=0, sticky=(tk.N, tk.S, tk.W, tk.E))
self.frm_canvas_win.append(frm_canvas_win)
self.lbl_win.append(lbl_win)
self.cnv_win.append(cnv_win)
self.lbl_status = ttk.Label(self, borderwidth=1, relief="sunken", padding=(5, 2))
self.lbl_status.grid(row=2, column=0, columnspan=3, sticky=(tk.N, tk.S, tk.W, tk.E))
self.cnv_orig.bind("<Button-2>", lambda *_: self.cnv_orig.clear_image())
self.cnv_orig.bind("<Button-3>", lambda *_: self.cnv_orig.clear_image())
self.var_draw_mode.set(1)
self.var_width.set("3")
self.var_rate.set("50")
self.var_bbox.set(1)
def _reconstruct_image(self):
dig, pos, rec, win, lat, loss = self.model_wrapper.infer(
[self.cnv_orig.get_image()]
)
self.cnv_rec.set_image(rec[0])
self.cnv_rec.set_bbox_positions(pos[0])
self.cnv_orig.set_bbox_positions(pos[0])
for i in range(len(self.cnv_win)):
if i < len(win[0]):
self.cnv_win[i].set_image(win[0][i])
self.cnv_win[i].set_bbox_positions(
[[0.0, -2.0, -2.0]] * i + [[0.99, 0.0, 0.0]]
)
else:
self.cnv_win[i].clear_image()
self.cnv_win[i].set_bbox_positions([])
self.lbl_status.configure(
text="Reconstruction loss (negative log-likelihood): {0:.3f}".format(
abs(loss[0])
)
)
self.master.after(self.refresh_period, self._reconstruct_image)
def _set_refresh_period(self, value):
self.refresh_period = value
def _set_bbox_visibility(self, visible):
self.cnv_orig.set_bbox_visibility(visible)
self.cnv_rec.set_bbox_visibility(visible)
def _set_draw_mode(self, draw):
self.cnv_orig.set_erasing_mode(not draw)
self.cnv_orig.config(cursor=("cross" if draw else "icon"))
```
#### File: tf-attend-infer-repeat/demo/model_wrapper.py
```python
import numpy as np
class ModelWrapper:
def __init__(self, model, session, data_placeholder, canvas_size=50, window_size=28):
self.model = model
self.session = session
self.data_placeholder = data_placeholder
self.canvas_size = canvas_size
self.window_size = window_size
def infer(self, images):
all_digits, all_positions = [], []
all_windows, all_latents = [], []
all_reconstructions, all_loss = [], []
rec_digits, rec_scales, rec_shifts, reconstructions, \
rec_windows, rec_latents, rec_loss = self.session.run(
[
self.model.rec_num_digits, self.model.rec_scales,
self.model.rec_shifts, self.model.reconstruction,
self.model.rec_windows, self.model.rec_latents,
self.model.reconstruction_loss
],
feed_dict={
self.data_placeholder: [np.ravel(img) for img in images]
}
)
for i in range(len(rec_digits)):
digits = int(rec_digits[i])
reconstruction = np.reshape(
reconstructions[i], (self.canvas_size, self.canvas_size)
)
positions = []
windows, latents = [], []
for j in range(digits):
positions.append(np.array([rec_scales[i][j][0]] + list(rec_shifts[i][j])))
windows.append(np.reshape(rec_windows[i][j], (self.window_size, self.window_size)))
latents.append(rec_latents[i][j])
all_digits.append(digits)
all_positions.append(np.array(positions))
all_reconstructions.append(reconstruction)
all_windows.append(np.array(windows))
all_latents.append(np.array(latents))
all_loss.append(rec_loss[i])
return all_digits, all_positions, all_reconstructions, all_windows, all_latents, all_loss
``` |
{
"source": "aakinlalu/textlabelling",
"score": 3
} |
#### File: src/textlabelling/csvmodel.py
```python
import csv
import spacy
import pandas as pd
class CSVModel:
def __init__(self, output_dir:str):
self.output_dir=output_dir
def load_model(self):
print("Loading from", self.output_dir)
nlp = spacy.load(self.output_dir)
return nlp
def write_result_csv(self, DATA:list, filename:str)->str:
nlp = self.load_model()
with open(filename, 'w') as f:
writer = csv.writer(f)
writer.writerow(['text','intents','classes'])
for text in DATA:
intents = []
labels = []
doc = nlp(text)
for ent in doc.ents:
intents.append(ent.text)
labels.append(ent.label_)
writer.writerow([text,intents,labels])
print(f'{filename} has been created')
#print([(text, ent.text, ent.label_)for ent in doc.ents])
#print("Tokens", [(t.text, t.ent_type_, t.ent_iob) for t in doc])
def write_df_csv(self, df:pd.DataFrame, filename:str)->str:
nlp = self.load_model()
with open(filename, 'w') as f:
writer = csv.writer(f)
writer.writerow(['created_date','nps_vervatism','nps_score','intents','classes'])
for index, row in df.iterrows():
intents = []
labels = []
if type(row[1])==str:
if len(row[1])>1:
doc = nlp(row[1])
for ent in doc.ents:
intents.append(ent.text)
labels.append(ent.label_)
writer.writerow([row[0],row[1],row[2],intents,labels])
def print_result_console(self, df:pd.DataFrame)->str:
nlp = self.load_model()
for index, row in df.iterrows():
intents = []
labels = []
if type(row[2])==str:
if len(row[2])>1:
doc = nlp(row[2])
for ent in doc.ents:
intents.append(ent.text)
labels.append(ent.label_)
print(f'NPS_Score: {row[2]}')
print(f'NPS_verbatism:{row[1]}')
print('intent:', intents)
print('entities:', labels)
print('')
```
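A hedged usage sketch for CSVModel: "ner_model" stands in for a directory holding a trained spaCy pipeline, and the texts are invented.

```python
# Hypothetical usage; "ner_model" and the texts are placeholders.
model = CSVModel(output_dir="ner_model")
texts = [
    "The mobile app keeps logging me out",
    "Great support on the phone today",
]
model.write_result_csv(texts, "predictions.csv")
# -> predictions.csv with columns: text, intents, classes
```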
#### File: src/textlabelling/trainner.py
```python
import dataclasses
from pathlib import Path
import random
import spacy
from spacy.lang.en import English
from spacy.util import minibatch, compounding
from spacy.gold import GoldParse
from spacy.scorer import Scorer
def save_model(nlp, model_dir=None):
    """
    Save a trained spaCy pipeline to disk.
    :param nlp: the spaCy Language object to save
    :param model_dir: target directory (created if missing); defaults to 'Ner_model'
    """
try:
if model_dir is not None:
model_dir = Path(model_dir)
if not model_dir.exists():
model_dir.mkdir()
nlp.to_disk(model_dir)
else:
model_dir = 'Ner_model'
nlp.to_disk(model_dir)
print("Saved model to", model_dir)
except ValueError as e:
return e
def evaluate(nlp: spacy.lang.en.English, TEST_DATA: list):
    '''
    Evaluate a trained model on test data
    :param nlp: the trained spaCy Language object
    :param TEST_DATA: list of (text, entity_annotations) tuples
    :return: the scores dict produced by spacy.scorer.Scorer
    '''
try:
if type(nlp) == spacy.lang.en.English and type(TEST_DATA) == list:
scorer = Scorer()
for input_, annot in TEST_DATA:
doc_gold_text = nlp.make_doc(input_)
gold = GoldParse(doc_gold_text, entities=annot)
pred_value = nlp(input_)
scorer.score(pred_value, gold)
return scorer.scores
else:
            raise ValueError('The arguments must be a spaCy model and a list')
except ValueError as e:
return e
class TrainNer:
def __init__(self,TRAIN_DATA:list,
model: spacy.lang.en.English = None,
min_batch_size: float = 4.0,
max_batch_size: float = 32.0,
n_iter: int = 100):
self.TRAIN_DATA=TRAIN_DATA
self.model=model
self.min_batch_size=min_batch_size
self.max_batch_size=max_batch_size
self.n_iter=n_iter
    def train_model(self, drop=0.5) -> spacy.lang.en.English:
        """
        Load the model, set up the pipeline and train the entity recognizer.
        :param drop: dropout rate passed to nlp.update during training
        :return: the trained nlp pipeline and the losses from the last iteration
        """
if self.model is not None:
nlp = spacy.load(self.model) # load existing spaCy model
print("Loaded model '%s'" % self.model)
else:
nlp = spacy.blank("en") # create blank Language class
print("Created blank 'en' model")
if "ner" not in nlp.pipe_names:
ner = nlp.create_pipe("ner")
nlp.add_pipe(ner, last=True)
# otherwise, get it so we can add labels
else:
ner = nlp.get_pipe("ner")
# add labels
for _, annotations in self.TRAIN_DATA:
for ent in annotations.get("entities"):
ner.add_label(ent[2])
# get names of other pipes to disable them during training
other_pipes = [pipe for pipe in nlp.pipe_names if pipe != "ner"]
with nlp.disable_pipes(*other_pipes): # only train NER
# reset and initialize the weights randomly – but only if we're
# training a new model
if self.model is None:
optimizer = nlp.begin_training()
else:
optimizer = nlp.resume_training()
for itn in range(self.n_iter):
random.shuffle(self.TRAIN_DATA)
losses = {}
# batch up the examples using spaCy's minibatch
batch_size = compounding(self.min_batch_size, self.max_batch_size, 1.001)
batches = minibatch(self.TRAIN_DATA, size=batch_size)
for batch in batches:
texts, annotations = zip(*batch)
nlp.update(
texts, # batch of texts
annotations, # batch of annotations
sgd=optimizer,
drop=drop, # dropout - make it harder to memorise data
losses=losses,
)
print("Losses", losses)
return nlp, losses
    def test_model(self, TEST_DATA:list):
        """
        Run the freshly trained model on test data and print its predictions.
        :param TEST_DATA: list of (text, annotations) tuples
        """
        model, _ = self.train_model()  # train_model returns (nlp, losses)
# test the trained model
for text, _ in TEST_DATA:
doc = model(text)
print("Classes", [(ent.text, ent.label_) for ent in doc.ents])
print("")
``` |
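The data lists are expected in spaCy v2's offset-based format: (text, {"entities": [(start, end, label), ...]}) for training, and (text, [(start, end, label), ...]) for the GoldParse-based evaluation. A small end-to-end sketch under that assumption; the sentences, labels, and n_iter value are purely illustrative.

```python
# Illustrative spaCy v2-style data; offsets align with token boundaries.
TRAIN_DATA = [
    ("I love the new banking app", {"entities": [(15, 26, "PRODUCT")]}),
    ("The call centre was very helpful", {"entities": [(4, 15, "CHANNEL")]}),
]
TEST_DATA = [
    ("The banking app crashed again", [(4, 15, "PRODUCT")]),
]

trainer = TrainNer(TRAIN_DATA, model=None, n_iter=10)
nlp, losses = trainer.train_model(drop=0.5)
print(evaluate(nlp, TEST_DATA))      # scores dict from spacy.scorer.Scorer
save_model(nlp, "Ner_model")
```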
{
"source": "aakloul/connectors",
"score": 2
} |
#### File: export-report-pdf/src/export-report-pdf.py
```python
import yaml
import os
import time
import datetime
from pycti.utils.constants import StixCyberObservableTypes
from weasyprint import HTML
from pycti import OpenCTIConnectorHelper, get_config_variable
from jinja2 import Environment, FileSystemLoader
class ExportReportPdf:
def __init__(self):
# Instantiate the connector helper from config
config_file_path = os.path.dirname(os.path.abspath(__file__)) + "/config.yml"
config = (
yaml.load(open(config_file_path), Loader=yaml.FullLoader)
if os.path.isfile(config_file_path)
else {}
)
self.helper = OpenCTIConnectorHelper(config)
# ExportReportPdf specific config settings
self.primary_color = get_config_variable(
"EXPORT_REPORT_PDF_PRIMARY_COLOR",
["export_report_pdf", "primary_color"],
config,
)
self.secondary_color = get_config_variable(
"EXPORT_REPORT_PDF_SECONDARY_COLOR",
["export_report_pdf", "secondary_color"],
config,
)
self.current_dir = os.path.abspath(os.path.dirname(__file__))
self.set_colors()
self.company_address_line_1 = get_config_variable(
"EXPORT_REPORT_PDF_COMPANY_ADDRESS_LINE_1",
["export_report_pdf", "company_address_line_1"],
config,
)
self.company_address_line_2 = get_config_variable(
"EXPORT_REPORT_PDF_COMPANY_ADDRESS_LINE_2",
["export_report_pdf", "company_address_line_2"],
config,
)
self.company_address_line_3 = get_config_variable(
"EXPORT_REPORT_PDF_COMPANY_ADDRESS_LINE_3",
["export_report_pdf", "company_address_line_3"],
config,
)
self.company_phone_number = get_config_variable(
"EXPORT_REPORT_PDF_COMPANY_PHONE_NUMBER",
["export_report_pdf", "company_phone_number"],
config,
)
self.company_email = get_config_variable(
"EXPORT_REPORT_PDF_COMPANY_EMAIL",
["export_report_pdf", "company_email"],
config,
)
self.company_website = get_config_variable(
"EXPORT_REPORT_PDF_COMPANY_WEBSITE",
["export_report_pdf", "company_website"],
config,
)
self.indicators_only = get_config_variable(
"EXPORT_REPORT_PDF_INDICATORS_ONLY",
["export_report_pdf", "indicators_only"],
config,
)
self.defang_urls = get_config_variable(
"EXPORT_REPORT_PDF_DEFANG_URLS",
["export_report_pdf", "defang_urls"],
config,
)
def _process_message(self, data):
file_name = data["file_name"]
# TODO this can be implemented to filter every entity and observable
# max_marking = data["max_marking"]
entity_type = data["entity_type"]
if entity_type != "Report":
raise ValueError(
f'This Connector can only process entities of type "Report" and not of type "{entity_type}".'
)
# Get the Report
report_dict = self.helper.api.report.read(id=data["entity_id"])
# Extract values for inclusion in output pdf
report_marking = report_dict.get("objectMarking", None)
if report_marking:
report_marking = report_marking[-1]["definition"]
report_name = report_dict["name"]
report_description = report_dict.get("description", "No description available.")
report_confidence = report_dict["confidence"]
report_id = report_dict["id"]
report_external_refs = [
external_ref_dict["url"]
for external_ref_dict in report_dict["externalReferences"]
]
report_objs = report_dict["objects"]
report_date = datetime.datetime.now().strftime("%b %d %Y")
context = {
"report_name": report_name,
"report_description": report_description,
"report_marking": report_marking,
"report_confidence": report_confidence,
"report_external_refs": report_external_refs,
"report_date": report_date,
"company_address_line_1": self.company_address_line_1,
"company_address_line_2": self.company_address_line_2,
"company_address_line_3": self.company_address_line_3,
"company_phone_number": self.company_phone_number,
"company_email": self.company_email,
"company_website": self.company_website,
"entities": {},
"observables": {},
}
# Process each STIX Object
for report_obj in report_objs:
obj_entity_type = report_obj["entity_type"]
obj_id = report_obj["standard_id"]
# Handle StixCyberObservables entities
if obj_entity_type == "StixFile" or StixCyberObservableTypes.has_value(
obj_entity_type
):
observable_dict = self.helper.api.stix_cyber_observable.read(id=obj_id)
# If only include indicators and
# the observable doesn't have an indicator, skip it
if self.indicators_only and not observable_dict["indicators"]:
self.helper.log_info(
f"Skipping {obj_entity_type} observable with value {observable_dict['observable_value']} as it was not an Indicator."
)
continue
if obj_entity_type not in context["observables"]:
context["observables"][obj_entity_type] = []
# Defang urls
if self.defang_urls and obj_entity_type == "Url":
observable_dict["observable_value"] = observable_dict[
"observable_value"
].replace("http", "hxxp", 1)
context["observables"][obj_entity_type].append(observable_dict)
# Handle all other entities
else:
reader_func = self.get_reader(obj_entity_type)
if reader_func is None:
self.helper.log_error(
f'Could not find a function to read entity with type "{obj_entity_type}"'
)
continue
entity_dict = reader_func(id=obj_id)
if obj_entity_type not in context["entities"]:
context["entities"][obj_entity_type] = []
context["entities"][obj_entity_type].append(entity_dict)
# Render html with input variables
env = Environment(loader=FileSystemLoader(self.current_dir))
template = env.get_template("resources/report.html")
html_string = template.render(context)
# Generate pdf from html string
pdf_contents = HTML(string=html_string, base_url="resources").write_pdf()
# Upload the output pdf
self.helper.log_info(f"Uploading: {file_name}")
self.helper.api.stix_domain_object.add_file(
id=report_id,
file_name=file_name,
data=pdf_contents,
mime_type="application/pdf",
)
return "Export done"
def set_colors(self):
with open(
os.path.join(self.current_dir, "resources/report.css.template"), "r"
) as f:
new_css = f.read()
new_css = new_css.replace("<primary_color>", self.primary_color)
new_css = new_css.replace("<secondary_color>", self.secondary_color)
with open(os.path.join(self.current_dir, "resources/report.css"), "w") as f:
f.write(new_css)
def get_reader(self, entity_type):
"""
        Returns the function to use for calling the OpenCTI API to
        read data for a particular entity type.
        entity_type: a str representing the entity type, e.g. Indicator
        returns: a function, or None if the entity type is not supported
"""
reader = {
"Stix-Domain-Object": self.helper.api.stix_domain_object.read,
"Attack-Pattern": self.helper.api.attack_pattern.read,
"Campaign": self.helper.api.campaign.read,
"Note": self.helper.api.note.read,
"Observed-Data": self.helper.api.observed_data.read,
"Organization": self.helper.api.identity.read,
"Opinion": self.helper.api.opinion.read,
"Report": self.helper.api.report.read,
"Sector": self.helper.api.identity.read,
"System": self.helper.api.identity.read,
"Course-Of-Action": self.helper.api.course_of_action.read,
"Identity": self.helper.api.identity.read,
"Indicator": self.helper.api.indicator.read,
"Individual": self.helper.api.identity.read,
"Infrastructure": self.helper.api.infrastructure.read,
"Intrusion-Set": self.helper.api.intrusion_set.read,
"Malware": self.helper.api.malware.read,
"Threat-Actor": self.helper.api.threat_actor.read,
"Tool": self.helper.api.tool.read,
"Vulnerability": self.helper.api.vulnerability.read,
"Incident": self.helper.api.incident.read,
"City": self.helper.api.location.read,
"Country": self.helper.api.location.read,
"Region": self.helper.api.location.read,
"Position": self.helper.api.location.read,
"Location": self.helper.api.location.read,
}
return reader.get(entity_type, None)
# Start the main loop
def start(self):
self.helper.listen(self._process_message)
if __name__ == "__main__":
try:
connector_export_report_pdf = ExportReportPdf()
connector_export_report_pdf.start()
except Exception as e:
print(e)
time.sleep(10)
exit(0)
``` |
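The `_process_message` flow above boils down to: build a Jinja2 context from the report, render `resources/report.html`, and hand the HTML to WeasyPrint. A minimal, self-contained sketch of that render-to-PDF step, assuming jinja2 and weasyprint are installed and a `resources/report.html` template exists (the context values here are placeholders, not the connector's real data):

```python
from jinja2 import Environment, FileSystemLoader
from weasyprint import HTML

# Hypothetical template directory; the real connector resolves paths relative to its own directory.
env = Environment(loader=FileSystemLoader("."))
template = env.get_template("resources/report.html")
html_string = template.render({"report_name": "Demo report", "entities": {}, "observables": {}})

# base_url lets relative asset references (CSS, images) in the template resolve.
pdf_bytes = HTML(string=html_string, base_url="resources").write_pdf()
with open("report.pdf", "wb") as f:
    f.write(pdf_bytes)
```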
{
"source": "aakmsk/EasyMazeEnv",
"score": 3
} |
#### File: aakmsk/EasyMazeEnv/EasyMaze.py
```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import gym
from gym import spaces
# EasyMaze class, which inherits from gym.Env
class EasyMaze(gym.Env):
    # Declare that this environment only provides "rgb_array" as a render mode;
    # this may be referenced by Gym Wrappers and similar tooling
    metadata = {'render.modes': ['rgb_array']}
    m = 0.2 # width of the outer frame around the maze
    c = 1 # width of each cell
    agent_color = "blue" # color of the agent
    maze_color = "green" # color of the maze
    # Drawing information for the maze frame rectangles
maze_info_rec = {"xy":[(0, 0), (0, m+4*c), (m+4*c, 0), (0, 0),
(m, m+c), (m+c, m+3*c), (m+3*c, m+c)],
"width":[m, 2*m+4*c, m, 2*m+4*c,
2*c, c, c],
"height":[2*m+4*c, m, 2*m+4*c, m,
c, c, c]}
    # Display information for the dashed lines inside the maze
maze_info_line = {"s_xy":[(m, m+c), (m, m+2*c), (m, m+3*c),
(m+c, m), (m+2*c, m), (m+3*c, m)],
"e_xy":[(m+4*c, m+c), (m+4*c, m+2*c), (m+4*c, m+3*c),
(m+c, m+4*c), (m+2*c, m+4*c), (m+3*c, m+4*c)]}
    # Display positions for the state label text
maze_state_pos = {"xy":[(m+0.5*c, m+3.5*c), (m+0.5*c, m+2.5*c), (m+1.5*c, m+2.5*c),
(m+2.5*c, m+2.5*c), (m+2.5*c, m+3.5*c), (m+3.5*c, m+3.5*c),
(m+3.5*c, m+2.5*c), (m+2.5*c, m+1.5*c), (m+2.5*c, m+0.5*c),
(m+3.5*c, m+0.5*c), (m+1.5*c, m+0.5*c), (m+0.5*c, m+0.5*c),],
"text":["s0", "s1", "s2", "s3", "s4", "s5", "s6",
"s7", "s8", "s9", "s10", "s11"]}
    # Next state for each (state, action) pair (the dynamics)
    # In general, MDP dynamics are given by a probability P(s'|s,a), but deterministic dynamics are used here
    # From left to right, each entry gives the next state when the action input is "left", "top", "right", "down"
    # Example) In state "s0":
    #   receiving "left" cannot move the agent, so the next state stays "s0"
    #   receiving "top" cannot move the agent, so the next state stays "s0"
    #   receiving "right" cannot move the agent, so the next state stays "s0"
    #   receiving "down" moves the agent down, so the next state is "s1"
    # All other states follow the same pattern
dynamics = {"s0":["s0", "s0", "s0", "s1"],
"s1":["s1", "s0", "s2", "s1"],
"s2":["s1", "s2", "s3", "s2"],
"s3":["s2", "s4", "s6", "s7"],
"s4":["s4", "s4", "s5", "s3"],
"s5":["s4", "s5", "s5", "s6"],
"s6":["s3", "s5", "s6", "s6"],
"s7":["s7", "s3", "s7", "s8"],
"s8":["s10", "s7", "s9", "s8"],
"s9":["s8", "s9", "s9", "s9"],
"s10":["s11", "s10", "s8", "s10"],
"s11":["s11", "s11", "s10", "s11"]}
def __init__(self):
super(EasyMaze, self).__init__()
self.fig = None
self.ax = None
self.state = None
        # The action space consists of 4 discrete values, 0 through 3
        # 0 corresponds to "left", 1 to "top", 2 to "right", 3 to "down"
        self.action_space = gym.spaces.Discrete(4)
        # The observation is the cell the agent occupies (12 states)
        self.observation_space = gym.spaces.Discrete(12)
        # Immediate rewards are kept between 0 and 1
        self.reward_range = (0, 1)
def reset(self):
        # The maze start position is "s0"
        self.state = "s0"
        # Return the index of the initial state as the observation
        return int(self.state[1:])
def step(self, action):
        # Transition to the next state given the current state and action
        self.state = self.dynamics[self.state][action]
        # If we have reached the goal state "s11", record termination in done and give reward 1
        # Otherwise set done=False and reward=0
if self.state == "s11":
done = True
reward = 1
else:
done = False
reward = 0
        # info is not used in this example
info = {}
return int(self.state[1:]), reward, done, info
    # Perform the rendering-related processing
    def render(self, mode='rgb_array'):
        # Build the maze with matplotlib
        self.make_maze()
        # Place the agent at its current position
        self.plot_agent(self.state)
        # Convert the figure created with matplotlib into an RGB array
        rgb_array = self.fig2array()[:, :, :3]
        # Return the RGB array
return rgb_array
    # Function that draws the maze
def make_maze(self):
self.fig = plt.figure(figsize=(7, 7), dpi=200)
self.ax = plt.axes()
self.ax.axis("off")
        # Draw the outer frame of the maze
for i in range(len(self.maze_info_rec["xy"])):
r = patches.Rectangle(xy=self.maze_info_rec["xy"][i],
width=self.maze_info_rec["width"][i],
height=self.maze_info_rec["height"][i],
color=self.maze_color,
fill=True)
self.ax.add_patch(r)
        # Draw the dashed grid lines
for i in range(len(self.maze_info_line["s_xy"])):
self.ax.plot([self.maze_info_line["s_xy"][i][0], self.maze_info_line["e_xy"][i][0]],
[self.maze_info_line["s_xy"][i][1], self.maze_info_line["e_xy"][i][1]],
linewidth=1,
linestyle="--",
color=self.maze_color)
        # Draw the state labels (the start and goal states are drawn later)
for i in range(1, len(self.maze_state_pos["xy"])-1):
self.ax.text(self.maze_state_pos["xy"][i][0],
self.maze_state_pos["xy"][i][1],
self.maze_state_pos["text"][i],
size=14,
ha="center")
        # Draw the start state label
self.ax.text(self.maze_state_pos["xy"][0][0],
self.maze_state_pos["xy"][0][1],
"s0\n start",
size=14,
ha="center")
        # Draw the goal state label
self.ax.text(self.maze_state_pos["xy"][11][0],
self.maze_state_pos["xy"][11][1],
"s11\n goal",
size=14,
ha="center")
    # Draw the agent
def plot_agent(self, state_name):
state_index = self.maze_state_pos["text"].index(state_name)
agent_pos = self.maze_state_pos["xy"][state_index]
line, = self.ax.plot([agent_pos[0]],
[agent_pos[1]],
marker="o",
color=self.agent_color,
markersize=50)
    # Convert the matplotlib image data to a numpy array
def fig2array(self):
self.fig.canvas.draw()
w, h = self.fig.canvas.get_width_height()
buf = np.fromstring(self.fig.canvas.tostring_argb(), dtype=np.uint8)
buf.shape = (w, h, 4)
buf = np.roll(buf, 3, axis=2)
return buf
``` |
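A short usage sketch of the environment above, driving it with a random policy and grabbing one rendered frame. This assumes the class is saved as `EasyMaze.py` and importable from the working directory; the step cap is just a safeguard for the random walk:

```python
from EasyMaze import EasyMaze  # assumes the file above is on the import path

env = EasyMaze()
obs = env.reset()
total_reward = 0
for t in range(1000):  # cap the episode length for the random policy
    action = env.action_space.sample()  # 0=left, 1=top, 2=right, 3=down
    obs, reward, done, info = env.step(action)
    total_reward += reward
    if done:
        break

frame = env.render()  # H x W x 3 RGB array of the final position
print(obs, total_reward, frame.shape)
```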
{
"source": "aaknitt/gdl90",
"score": 3
} |
#### File: gdl90/gdl90/decoder.py
```python
import sys
import datetime
from collections import deque
import messages
from gdl90.fcs import crcCheck
from messagesuat import messageUatToObject
class Decoder(object):
"""GDL-90 data link interface decoder class"""
def __init__(self):
self.format = 'normal'
self.uatOutput = False
self.inputBuffer = bytearray()
self.messages = deque()
self.parserSynchronized = False
self.stats = {
'msgCount' : 0,
'resync' : 0,
'msgs' : { 0 : [0, 0] },
}
self.reportFrequency = 10
# altitude reporting in plotflight mode
self.altitude = 0
self.altitudeAge = 9999
self.altitudeMaxAge = 5
# setup internal time tracking
self.gpsTimeReceived = False
self.dayStart = None
self.currtime = datetime.datetime.utcnow()
self.heartbeatInterval = datetime.timedelta(seconds=1)
def addBytes(self, data):
"""add raw input bytes for decode processing"""
self.inputBuffer.extend(data)
self._parseMessages()
def _log(self, msg):
sys.stderr.write('decoder.Decoder:' + msg + '\n')
def _parseMessages(self):
"""parse input buffer for all complete messages"""
if not self.parserSynchronized:
if not self._resynchronizeParser():
# false if we empty the input buffer
return
while True:
# Check that buffer has enough bytes to use
if len(self.inputBuffer) < 2:
#self._log("buffer reached low watermark")
return
# We expect 0x7e at the head of the buffer
if self.inputBuffer[0] != 0x7e:
# failed assertion; we are not synchronized anymore
#self._log("synchronization lost")
if not self._resynchronizeParser():
# false if we empty the input buffer
return
# Look to see if we have an ending 0x7e marker yet
try:
i = self.inputBuffer.index(chr(0x7e), 1)
except ValueError:
# no end marker found yet
#self._log("no end marker found; leaving parser for now")
return
# Extract byte message without markers and delete bytes from buffer
msg = self.inputBuffer[1:i]
del(self.inputBuffer[0:i+1])
# Decode the received message
self._decodeMessage(msg)
return
def _resynchronizeParser(self):
"""throw away bytes in buffer until empty or resynchronized
Return: true=resynchronized, false=buffer empty & not synced"""
self.parserSynchronized = False
self.stats['resync'] += 1
while True:
if len(self.inputBuffer) < 2:
#self._log("buffer reached low watermark during sync")
return False
# found end of a message and beginning of next
if self.inputBuffer[0] == 0x7e and self.inputBuffer[1] == 0x7e:
# remove end marker from previous message
del(self.inputBuffer[0:1])
self.parserSynchronized = True
#self._log("parser is synchronized (end:start)")
return True
if self.inputBuffer[0] == 0x7e:
self.parserSynchronized = True
#self._log("parser is synchronized (start)")
return True
# remove everything up to first 0x7e or end of buffer
try:
i = self.inputBuffer.index(chr(0x7e))
#self._log("removing leading bytes before marker")
except ValueError:
# did not find 0x7e, so blank the whole buffer
i = len(self.inputBuffer)
#self._log("removing all bytes in buffer since no markers")
#self._log('inputBuffer[0:%d]=' % (len(self.inputBuffer)) +str(self.inputBuffer)[:+32])
del(self.inputBuffer[0:i])
raise Exception("_resynchronizeParser: unexpected reached end")
def _decodeMessage(self, escapedMessage):
"""decode one GDL90 message without the start/end markers"""
rawMsg = self._unescape(escapedMessage)
if len(rawMsg) < 5:
return False
msg = rawMsg[:-2]
crc = rawMsg[-2:]
crcValid = crcCheck(msg, crc)
"""
self.stats['msgCount'] += 1
if (self.stats['msgCount'] % self.reportFrequency) == 0:
print "Statistics: total msgs = %d, resyncs = %d" % (self.stats['msgCount'], self.stats['resync'])
msgTypes = self.stats['msgs'].keys()
msgTypes.sort()
for mt in msgTypes:
(g, b) = self.stats['msgs'][mt]
print " Messge #%d: %d good, %d bad" % (mt, g, b)
"""
# Create a new entry for this message type if it doesn't exist
if not msg[0] in self.stats['msgs'].keys():
self.stats['msgs'][msg[0]] = [0,0]
if not crcValid:
self.stats['msgs'][msg[0]][1] += 1
#print "****BAD CRC****"
return False
self.stats['msgs'][msg[0]][0] += 1
"""
#if msg[0] in [0, 10, 11]:
if msg[0] in [101]:
print "msg%d: " % (msg[0])
for m in [msg]:
hexstr = ""
for n in range(len(msg)):
if (n % 4) == 0: hexstr += " "
hexstr += "%02x" % (msg[n])
print " " + hexstr
"""
m = messages.messageToObject(msg)
if not m:
return False
if m.MsgType == 'Heartbeat':
self.currtime += self.heartbeatInterval
if self.format == 'normal':
print 'MSG00: s1=%02x, s2=%02x, ts=%02x' % (m.StatusByte1, m.StatusByte2, m.TimeStamp)
elif self.format == 'plotflight':
self.altitudeAge += 1
elif m.MsgType == 'OwnershipReport':
if m.Latitude == 0.00 and m.Longitude == 0.00:
if m.NavIntegrityCat == 0 or m.NavIntegrityCat == 1: # unknown or <20nm, consider it invalid
pass
elif self.format == 'normal':
print 'MSG10: %0.7f %0.7f %d %d %d' % (m.Latitude, m.Longitude, m.HVelocity, m.Altitude, m.TrackHeading)
elif self.format == 'plotflight':
if self.altitudeAge < self.altitudeMaxAge:
altitude = self.altitude
else:
# revert to 25' resolution altitude from ownership report
altitude = m.Altitude
# Must have the GPS time from a message 101 before outputting anything
if not self.gpsTimeReceived:
return True
print '%02d:%02d:%02d %0.7f %0.7f %d %d %d' % (self.currtime.hour, self.currtime.minute, self.currtime.second, m.Latitude, m.Longitude, m.HVelocity, altitude, m.TrackHeading)
elif m.MsgType == 'OwnershipGeometricAltitude':
if self.format == 'normal':
print 'MSG11: %d %04xh' % (m.Altitude, m.VerticalMetrics)
elif self.format == 'plotflight':
self.altitude = m.Altitude
self.altitudeAge = 0
elif m.MsgType == 'TrafficReport':
if m.Latitude == 0.00 and m.Longitude == 0.00 and m.NavIntegrityCat == 0: # no valid position
pass
elif self.format == 'normal':
print 'MSG20: %0.7f %0.7f %d %d %d %d %s' % (m.Latitude, m.Longitude, m.HVelocity, m.VVelocity, m.Altitude, m.TrackHeading, m.CallSign)
elif m.MsgType == 'GpsTime':
if not self.gpsTimeReceived:
self.gpsTimeReceived = True
utcTime = datetime.time(m.Hour, m.Minute, 0)
self.currtime = datetime.datetime.combine(self.dayStart, utcTime)
else:
# correct time slips and move clock forward if necessary
if self.currtime.hour < m.Hour or self.currtime.minute < m.Minute:
utcTime = datetime.time(m.Hour, m.Minute, 0)
self.currtime = datetime.datetime.combine(self.currtime, utcTime)
if self.format == 'normal':
print 'MSG101: %02d:%02d UTC (waas = %s)' % (m.Hour, m.Minute, m.Waas)
elif m.MsgType == 'UplinkData' and self.uatOutput == True:
messageUatToObject(m)
return True
def _unescape(self, msg):
"""unescape 0x7e and 0x7d characters in coded message"""
msgNew = bytearray()
escapeValue = 0x7d
foundEscapeChar = False
while True:
try:
i = msg.index(chr(escapeValue))
foundEscapeChar = True
msgNew.extend(msg[0:i]); # everything up to the escape character
# this will throw an exception if nothing follows the escape
escapedValue = msg[i+1] ^ 0x20
msgNew.append(chr(escapedValue)); # escaped value
del(msg[0:i+2]); # remove prefix bytes, escape, and escaped value
except (ValueError, IndexError):
# no more escape characters
if foundEscapeChar:
msgNew.extend(msg)
return msgNew
else:
return msg
raise Exception("_unescape: unexpected reached end")
def _messageHex(self, msg, prefix="", suffix="", maxbytes=32, breakint=4):
"""prints the hex contents of a message"""
s = ""
numbytes=len(msg)
if numbytes > maxbytes: numbytes=maxbytes
for i in range(numbytes):
s += "%02x" % (msg[i])
if ((i+1) % breakint) == 0:
s += " "
return "%s%s%s" % (prefix, s.strip(), suffix)
``` |
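The parser above relies on GDL-90 byte stuffing: 0x7e flag bytes delimit frames, and 0x7d escapes a following byte that has been XORed with 0x20 (see `_unescape`). A standalone Python 3 sketch of just that framing layer, ignoring the CRC that real messages carry:

```python
FLAG, ESC = 0x7e, 0x7d

def escape_frame(payload: bytes) -> bytes:
    """Wrap a payload in flag bytes, escaping any 0x7e/0x7d bytes inside it."""
    out = bytearray([FLAG])
    for b in payload:
        if b in (FLAG, ESC):
            out += bytes([ESC, b ^ 0x20])
        else:
            out.append(b)
    out.append(FLAG)
    return bytes(out)

def unescape_frame(body: bytes) -> bytes:
    """Reverse the escaping for a single frame body (flag bytes already stripped)."""
    out, i = bytearray(), 0
    while i < len(body):
        if body[i] == ESC:
            out.append(body[i + 1] ^ 0x20)  # escaped byte follows the 0x7d marker
            i += 2
        else:
            out.append(body[i])
            i += 1
    return bytes(out)

payload = b"\x00\x7e\x10\x7d"
assert unescape_frame(escape_frame(payload)[1:-1]) == payload
```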
{
"source": "aaknitt/python-uds",
"score": 2
} |
#### File: TransportProtocols/Can/CanConnectionFactory.py
```python
import can
from can.interfaces import pcan, vector, kvaser
from uds.uds_configuration.Config import Config
from os import path
from platform import system
#from uds import CanConnection
from uds.uds_communications.TransportProtocols.Can.CanConnection import CanConnection
# used to conditionally import socketcan for linux to avoid error messages
if system() == "Linux":
from can.interfaces import socketcan
else:
from can.interfaces import ics_neovi
class CanConnectionFactory(object):
connections = {}
config = None
@staticmethod
def __call__(callback=None, filter=None, configPath=None, **kwargs):
CanConnectionFactory.loadConfiguration(configPath)
CanConnectionFactory.checkKwargs(**kwargs)
# check config file and load
connectionType = CanConnectionFactory.config['can']['interface']
if connectionType == 'virtual':
connectionName = CanConnectionFactory.config['virtual']['interfaceName']
if connectionName not in CanConnectionFactory.connections:
CanConnectionFactory.connections[connectionName] = CanConnection(callback, filter,
can.interface.Bus(connectionName,
bustype='virtual'))
else:
CanConnectionFactory.connections[connectionName].addCallback(callback)
CanConnectionFactory.connections[connectionName].addFilter(filter)
return CanConnectionFactory.connections[connectionName]
elif connectionType == 'peak':
channel = CanConnectionFactory.config['peak']['device']
if channel not in CanConnectionFactory.connections:
baudrate = CanConnectionFactory.config['can']['baudrate']
CanConnectionFactory.connections[channel] = CanConnection(callback, filter,
pcan.PcanBus(channel,
bitrate=baudrate))
else:
CanConnectionFactory.connections[channel].addCallback(callback)
CanConnectionFactory.connections[channel].addFilter(filter)
return CanConnectionFactory.connections[channel]
elif connectionType == 'vector':
channel = int(CanConnectionFactory.config['vector']['channel'])
app_name = CanConnectionFactory.config['vector']['appName']
connectionKey = str("{0}_{1}").format(app_name, channel)
if connectionKey not in CanConnectionFactory.connections:
baudrate = int(CanConnectionFactory.config['can']['baudrate'])
CanConnectionFactory.connections[connectionKey] = CanConnection(callback, filter,
vector.VectorBus(channel,
app_name=app_name,
data_bitrate=baudrate))
else:
CanConnectionFactory.connections[connectionKey].addCallback(callback)
CanConnectionFactory.connections[connectionKey].addFilter(filter)
return CanConnectionFactory.connections[connectionKey]
elif connectionType == 'socketcan':
if system() == "Linux":
channel = CanConnectionFactory.config['socketcan']['channel']
if channel not in CanConnectionFactory.connections:
CanConnectionFactory.connections[channel] = CanConnection(callback, filter,
socketcan.SocketcanBus(channel=channel))
else:
CanConnectionFactory.connections[channel].addCallback(callback)
CanConnectionFactory.connections[channel].addFilter(filter)
return CanConnectionFactory.connections[channel]
else:
raise Exception("SocketCAN on Pythoncan currently only supported in Linux")
elif connectionType == 'neovi':
channel = int(CanConnectionFactory.config['neovi']['channel'])
baudrate = int(CanConnectionFactory.config['can']['baudrate'])
CanConnectionFactory.connections[channel] = CanConnection(callback, filter,
ics_neovi.NeoViBus(channel,
bitrate=baudrate))
return CanConnectionFactory.connections[channel]
elif connectionType == 'kvaser':
channel = int(CanConnectionFactory.config['kvaser']['channel'])
baudrate = int(CanConnectionFactory.config['can']['baudrate'])
CanConnectionFactory.connections[channel] = CanConnection(callback, filter,
kvaser.canlib.KvaserBus(channel,
bitrate=baudrate))
return CanConnectionFactory.connections[channel]
@staticmethod
def loadConfiguration(configPath=None):
CanConnectionFactory.config = Config()
localConfig = path.dirname(__file__) + "/config.ini"
CanConnectionFactory.config.read(localConfig)
if configPath is not None:
if path.exists(configPath):
CanConnectionFactory.config.read(configPath)
else:
raise FileNotFoundError("Can not find config file")
@staticmethod
def checkKwargs(**kwargs):
if 'interface' in kwargs:
CanConnectionFactory.config['can']['interface'] = kwargs['interface']
if 'baudrate' in kwargs:
CanConnectionFactory.config['can']['baudrate'] = kwargs['baudrate']
if 'device' in kwargs:
CanConnectionFactory.config['peak']['device'] = kwargs['device']
if 'appName' in kwargs:
CanConnectionFactory.config['vector']['appName'] = kwargs['appName']
if 'channel' in kwargs:
CanConnectionFactory.config['vector']['channel'] = kwargs['channel']
CanConnectionFactory.config['kvaser']['channel'] = kwargs['channel']
CanConnectionFactory.config['neovi']['channel'] = kwargs['channel']
``` |
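A rough sketch of how the factory might be driven, showing the `checkKwargs` override path: keyword arguments replace values loaded from `config.ini`, so a caller can select the virtual backend without editing the file. The callback and filter values below are placeholders; their exact semantics are defined by `CanConnection` (not shown here), so treat this as an assumption rather than the library's documented usage:

```python
from uds.uds_communications.TransportProtocols.Can.CanConnectionFactory import CanConnectionFactory

def on_receive(msg):
    print("received:", msg)

factory = CanConnectionFactory()
# 'interface' overrides the [can] section of config.ini (see checkKwargs);
# the filter argument is forwarded to CanConnection.addFilter (format assumed here).
connection = factory(callback=on_receive, filter=0x7E8, interface="virtual")
```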
{
"source": "aakorolyova/DeSpin",
"score": 3
} |
#### File: aakorolyova/DeSpin/myListBox.py
```python
import sys
if sys.version_info[0] != 3:
print( 'This script requires Python 3' )
exit()
import tkinter as tki
from tkinter.scrolledtext import ScrolledText
from tkinter import Tk, Frame, filedialog, Label, Button, Radiobutton, IntVar, StringVar, messagebox, Entry, Text, Scrollbar, Listbox
##By default, the selection is exported via the X selection mechanism
##(or the clipboard, on Windows). If you have more than one listbox on the screen,
##this really messes things up for the poor user. If she selects something in one
##listbox, and then selects something in another, the original selection disappears.
##It is usually a good idea to disable this mechanism in such cases.
##In the following example, three listboxes are used in the same dialog:
##
##b1 = Listbox(exportselection=0)
##for item in families:
## b1.insert(END, item)
##
##b2 = Listbox(exportselection=0)
##for item in fonts:
## b2.insert(END, item)
##
##b3 = Listbox(exportselection=0)
##for item in styles:
## b3.insert(END, item)
def default_item_select_handler( e ):
# e.x_root, e.y_root, e.num
    print( 'list box widget {0} received event {1} at x= {2} y= {3}'.format( e.widget, e, e.x, e.y ) )
    s = e.widget.curselection()
    print( s )
    items = list( map( int, e.widget.curselection() ) )
    print( items )
print( '-----------' )
class myListBox( Frame ):
def __init__(self, root, items = [], id = '', item_select_handler = default_item_select_handler, smode=tki.EXTENDED ):
self.item_count = 0
self.root = root
self.item_select_handler = item_select_handler
Frame.__init__( self, self.root )
self.id = id
vscrollbar = Scrollbar( self, orient=tki.VERTICAL)
vscrollbar.pack( side=tki.RIGHT, fill=tki.Y )
hscrollbar = Scrollbar( self, orient=tki.HORIZONTAL)
hscrollbar.pack( side=tki.BOTTOM, fill = tki.X )
## mode can be: SINGLE, BROWSE, MULTIPLE, EXTENDED
## selectmode
##
## Determines how many items can be selected, and how mouse drags affect the selection −
##
## BROWSE − Normally, you can only select one line out of a listbox. If you click on an item and then drag to a different line, the selection will follow the mouse. This is the default.
        ## SINGLE − You can only select one line, and you can't drag the mouse; wherever you click button 1, that line is selected.
## MULTIPLE − You can select any number of lines at once. Clicking on any line toggles whether or not it is selected.
## EXTENDED − You can select any adjacent group of lines at once by clicking on the first line and dragging to the last line.
self.list = Listbox( self, selectmode = smode, exportselection = 0, xscrollcommand = hscrollbar.set, yscrollcommand = vscrollbar.set )
for i in items:
assert( type( i ) is str )
self.list.insert( items.index(i), i )
self.list.pack( fill=tki.BOTH, expand=1 )
self.list.bind( '<Double-Button-1>', self.item_select_handler )
self.list.bind( '<1>', self.item_select_handler )
self.list.bind( '<Return>', self.item_select_handler )
## DO NOT catch ListboxSelect event, because:
## a) it is not associated with (x_root, y_root) and (x,y) coordinates, so the popup appears always at (0,0) of the main root window
## b) it duplicates the click event catching self.list.bind( '<1>', self.item_select_handler ) and generates a second event
## self.list.bind( '<<ListboxSelect>>', self.item_select_handler )
hscrollbar.config( command=self.list.xview )
vscrollbar.config( command=self.list.yview )
self.pack( side=tki.LEFT, fill=tki.BOTH, expand=1 )
self.current = self.list.curselection()
def insert_item( self, pos, item ):
self.list.insert( pos, item )
self.item_count += 1
self.activate( pos )
self.index( pos )
def delete( self, start, end = None ):
assert( type( start ) is int )
if( end is None ):
self.list.delete( start )
self.item_count -= 1
else:
assert( (type( end ) is int) or (end == tki.END) )
if( type( end ) is str ):
self.list.delete( start, (self.item_count-1) )
self.item_count -= (self.item_count - start)
else:
self.list.delete( start, end )
self.item_count -= (end - start) + 1
def select_set( self, i ):
self.list.selection_clear( 0, tki.END )
self.list.select_set( i )
def activate( self, i ):
self.list.activate( i )
def index( self, i ):
self.list.index( i )
def generate_select_event( self, pos = None ):
assert( pos is not None )
if( pos is not None ):
self.activate( pos )
self.index( pos )
self.select_set( pos )
self.list.event_generate( "<<ListboxSelect>>" )
## def poll(self):
## now = self.list.curselection()
## if now != self.current:
## self.list_has_changed( now )
## self.current = now
## self.after( 250, self.poll )
def list_has_changed(self, selection):
print( 'widget {0} selection is {1}'.format( self.id, selection ))
## self.item_select_handler()
##items=[ 'Python', 'Perl', 'C', 'PHP', 'JSP', 'Ruby', 'Pascal', 'Fortran', 'Modula', 'a very very very very lonnnnnnnnnnng programming language name', 'Spark', 'Haskell', 'Lisp', 'C++', 'Eiffel']
##items2=['foo', 'bar', 'zoo' ]
##root=tki.Tk()
##d1=myListBox( tki.Toplevel(), items, 'list_1')
##d2=myListBox( tki.Toplevel(), items2, 'list_2')
##root.mainloop()
``` |
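The commented-out demo at the bottom of the file hints at the intended usage; a small runnable variant with a custom selection handler is sketched below (assuming the module above is saved as `myListBox.py`). Note that the handler receives the raw Tk event, so the underlying `Listbox` is reached through `event.widget`:

```python
import tkinter as tki
from myListBox import myListBox  # assumes the module above is importable

def on_select(event):
    # event.widget is the tkinter Listbox wrapped by myListBox
    print("selected indices:", event.widget.curselection())

root = tki.Tk()
box = myListBox(root, items=["Python", "C", "Rust"], id="demo", item_select_handler=on_select)
box.insert_item(3, "Go")
root.mainloop()
```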
{
"source": "aakp10/Malice-and-Bytes",
"score": 4
} |
#### File: aakp10/Malice-and-Bytes/hamming.py
```python
def str_to_bytes(str,base):
byte_array = bytearray()
byte_array.extend([ord(ch) for ch in str])
return byte_array
def calc_hamming(str1,str2):
bits1 = str_to_bytes(str1,1)
bits2 = str_to_bytes(str2,1)
print(bits1)
print(bits2)
ham = 0
for (x,y) in zip(bits1,bits2):
print(x,y)
while x > 0 or y > 0:
            # count a difference whenever the lowest bits disagree
            # (this also covers the case where one value has already reached 0)
            if (x % 2) != (y % 2):
                ham += 1
x = int(x/2)
y = int(y/2)
print(ham)
calc_hamming("this is a test","wokka wokka!!!")
``` |
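For comparison, the per-byte Hamming distance above can be written more compactly by XORing the byte pairs and counting set bits; the 37-bit result for the two test strings is the usual check. A minimal sketch, equivalent to `calc_hamming` minus the debug printing:

```python
def hamming_distance(s1: str, s2: str) -> int:
    """Number of differing bits between two equal-length ASCII strings."""
    return sum(bin(a ^ b).count("1") for a, b in zip(s1.encode(), s2.encode()))

assert hamming_distance("this is a test", "wokka wokka!!!") == 37
```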